/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/route.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);
static LIST_HEAD(cnic_dev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);
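
/*
 * Locking summary (editorial note inferred from the code below, not from
 * the original sources): cnic_dev_lock is a rwlock guarding cnic_dev_list;
 * lookups such as cnic_from_netdev() take it for read.  cnic_lock is a
 * mutex serializing updates to cnic_ulp_tbl[] and the per-device
 * ulp_ops[] pointers, which readers dereference under RCU.
 */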
static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (cp->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	cp->uio_dev = iminor(inode);

	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	cnic_shutdown_rings(dev);

	cp->uio_dev = -1;
	return 0;
}
static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = DRV_CTL_COMPLETION_CMD;
	info.data.comp.comp_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}
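
/*
 * Editorial note on the wrappers above: cnic never touches bnx2/bnx2x
 * registers directly.  Each helper fills a struct drv_ctl_info (for
 * example, cnic_reg_wr_ind() sets cmd = DRV_CTL_IO_WR_CMD plus the offset
 * and value) and hands it to ethdev->drv_ctl(), so the ethernet driver
 * that owns the device performs the access on cnic's behalf.
 */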
static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;

	if (cp->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	rcu_read_lock();
	ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
	if (ulp_ops)
		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
	rcu_read_unlock();
	return 0;
}
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];

		if (cnic_in_use(csk)) {
			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));
			if (is_valid_ether_addr(csk->ha))
				cnic_cm_set_pg(csk);
		}
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}
static void cnic_uio_stop(void)
{
	struct cnic_dev *dev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	}
	read_unlock(&cnic_dev_lock);
}
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	read_unlock(&cnic_dev_lock);
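
	/*
	 * Example usage (editorial sketch; the callback names are
	 * hypothetical, only cnic_register_driver() and struct
	 * cnic_ulp_ops come from this file).  A ULP such as bnx2i
	 * registers once at module init:
	 *
	 *	static struct cnic_ulp_ops my_ulp_ops = {
	 *		.cnic_init	= my_ulp_init,
	 *		.cnic_start	= my_ulp_start,
	 *		.cnic_stop	= my_ulp_stop,
	 *	};
	 *
	 *	rc = cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
	 *
	 * cnic_init() is then invoked above for every device already on
	 * cnic_dev_list, so a ULP loaded after the devices were probed
	 * does not miss them.
	 */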
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl[ulp_type];
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_uio_stop();

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}
static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return -1;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}
/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}
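
/*
 * Worked example (editorial): with max = 8 and next = 6, a search that
 * finds bits 6..7 set wraps around and retries from bit 0; after
 * allocating id 2 the cursor becomes next = (2 + 1) & 7 = 3.  Note that
 * the "& (max - 1)" wrap assumes max is a power of 2.
 */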
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = dma->pg_map_arr[i] & 0xffffffff;
		page_table++;
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}
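
/*
 * Worked example (editorial): for a page mapped at DMA address
 * 0x1_2345_6000, cnic_setup_page_tbl() stores the high word 0x00000001
 * first and the low word 0x23456000 second (big-endian entry order for
 * the bnx2 firmware), while cnic_setup_page_tbl_le() stores 0x23456000
 * first and 0x00000001 second for the bnx2x firmware's little-endian
 * page-table layout.
 */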
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}
static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (cp->cnic_uinfo) {
		while (cp->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		uio_unregister_device(cp->cnic_uinfo);
		kfree(cp->cnic_uinfo);
		cp->cnic_uinfo = NULL;
	}

	if (cp->l2_buf) {
		dma_free_coherent(&dev->pcidev->dev, cp->l2_buf_size,
				  cp->l2_buf, cp->l2_buf_map);
		cp->l2_buf = NULL;
	}

	if (cp->l2_ring) {
		dma_free_coherent(&dev->pcidev->dev, cp->l2_ring_size,
				  cp->l2_ring, cp->l2_ring_map);
		cp->l2_ring = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq_info);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->cid_tbl);
}
static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;

	cp->l2_ring_size = pages * BCM_PAGE_SIZE;
	cp->l2_ring = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_ring_size,
					 &cp->l2_ring_map,
					 GFP_KERNEL | __GFP_COMP);
	if (!cp->l2_ring)
		return -ENOMEM;

	cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
	cp->l2_buf = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_buf_size,
					&cp->l2_buf_map,
					GFP_KERNEL | __GFP_COMP);
	if (!cp->l2_buf)
		return -ENOMEM;

	return 0;
}
static int cnic_alloc_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct uio_info *uinfo;
	int ret;

	uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
	if (!uinfo)
		return -ENOMEM;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
				     PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
				     PAGE_MASK;
		uinfo->mem[1].size = sizeof(struct host_def_status_block);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
	uinfo->mem[2].size = cp->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
	uinfo->mem[3].size = cp->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	uinfo->priv = dev;

	ret = uio_register_device(&dev->pcidev->dev, uinfo);
	if (ret) {
		kfree(uinfo);
		return ret;
	}

	cp->cnic_uinfo = uinfo;
	return 0;
}
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_l2_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_alloc_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i, cid_space;

	if (BNX2X_ISCSI_START_CID < ethdev->starting_cid)
		return -EINVAL;

	cid_space = MAX_ISCSI_TBL_SZ +
		    (BNX2X_ISCSI_START_CID - ethdev->starting_cid);

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kzalloc(blks * sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (BNX2X_CHIP_IS_E1H(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      MAX_CNIC_L5_CONTEXT, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	pages = PAGE_ALIGN(MAX_CNIC_L5_CONTEXT * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 0);
	if (ret)
		goto error;
	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *)
			&cp->kcq[i][MAX_KCQE_CNT];

		j = i + 1;
		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) cp->kcq_info.pg_map_arr[j] >> 32;
		next->addr_lo = cp->kcq_info.pg_map_arr[j] & 0xffffffff;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	memset(cp->status_blk.bnx2x, 0, sizeof(*cp->status_blk.bnx2x));

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_l2_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_alloc_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
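
/*
 * Worked example (editorial, assuming max_kwq_idx is a power-of-2 ring
 * mask such as 127): with kwq_prod_idx = 130 and kwq_con_idx = 10, the
 * in-flight count is (130 - 10) & 127 = 120, so 127 - 120 = 7 work-queue
 * slots remain available to the caller.
 */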
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cid, cp->func)));
	kwqe.hdr.type = cpu_to_le16(type);
	kwqe.hdr.reserved = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int func = cp->func, pages;
	int hq_bds;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(func),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(func),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(func),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(func),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(func),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(func),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(func),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(func),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(func),
		  hq_bds);

	return 0;
}
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	int func = cp->func;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}
static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
	}
	cnic_free_id(&cp->cid_tbl, ctx->cid);
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}
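
/*
 * Worked example (editorial): with ctx_align = 0x1000 and a context
 * block mapped at 0x7f321800, mask = 0xfff, so align_off =
 * 0x1000 - 0x800 = 0x800 and the first context actually used starts at
 * the aligned address 0x7f322000.
 */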
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cid, cp->func);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;

	ictx->timers_context.flags |= ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;
}
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp->ctx_tbl[l5_cid].cid,
						  cp->func);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return 0;
}
static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
				  req->context_id, ISCSI_CONNECTION_TYPE,
				  &l5_data);
	return ret;
}
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	union l5cm_specific_data l5_data;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!(ctx->ctx_flags & CTX_FL_OFFLD_START))
		goto skip_cfc_delete;

	while (!time_after(jiffies, ctx->timestamp + (2 * HZ)))
		msleep(100);

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
				  req->context_id,
				  ETH_CONNECTION_TYPE |
				  (1 << SPE_HDR_COMMON_RAMROD_SHIFT),
				  &l5_data);
	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int func = CNIC_FUNC(cp);
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(func), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(func), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(func), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(func), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(func), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(func), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 2,
		 mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 3,
		 mac[0]);
}
static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), tstorm_flags);
}
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;

		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->func), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
				  kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		ctx->ctx_flags |= CTX_FL_OFFLD_START;

	return ret;
}
static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}
static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			ret = 0;
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0)
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);
		i += work;
	}
	return 0;
}
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			cnic_kwq_completion(dev, 1);

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				cnic_kwq_completion(dev, 1);
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
				   kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
}
static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}
2125 struct cnic_local *cp = dev->cnic_priv;
2128 int kcqe_cnt = 0, last_cnt = 0;
2130 i = ri = last = *sw_prod;
2133 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2134 kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2135 cp->completed_kcq[kcqe_cnt++] = kcqe;
2136 i = cp->next_idx(i);
2137 ri = i & MAX_KCQ_IDX;
2138 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2139 last_cnt = kcqe_cnt;
static int cnic_l2_completion(struct cnic_local *cp)
{
	u16 hw_cons, sw_cons;
	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
					(cp->l2_ring + (2 * BCM_PAGE_SIZE));
	u32 cmd;
	int comp = 0;

	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
		return 0;

	hw_cons = *cp->rx_cons_ptr;
	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
		hw_cons++;

	sw_cons = cp->rx_cons;
	while (sw_cons != hw_cons) {
		u8 cqe_fp_flags;

		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
			    cmd == RAMROD_CMD_ID_ETH_HALT)
				comp++;
		}
		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
	}
	return comp;
}
static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons = *cp->rx_cons_ptr;
	u16 tx_cons = *cp->tx_cons_ptr;
	int comp = 0;

	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			comp = cnic_l2_completion(cp);

		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;

		uio_event_notify(cp->cnic_uinfo);
	}
	if (comp)
		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}
static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct status_block *sblk = status_blk;
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = sblk->status_idx;
	u16 hw_prod, sw_prod;
	int kcqe_cnt;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return status_idx;

	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	hw_prod = sblk->status_completion_producer_index;
	sw_prod = cp->kcq_prod_idx;
	while (sw_prod != hw_prod) {
		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
		if (kcqe_cnt == 0)
			goto done;

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != sblk->status_idx) {
			status_idx = sblk->status_idx;
			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
			hw_prod = sblk->status_completion_producer_index;
		} else
			break;
	}

done:
	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);

	cp->kcq_prod_idx = sw_prod;

	cnic_chk_pkt_rings(cp);
	return status_idx;
}
static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	struct status_block_msix *status_blk = cp->status_blk.bnx2;
	u32 status_idx = status_blk->status_idx;
	u16 hw_prod, sw_prod;
	int kcqe_cnt;

	cp->kwq_con_idx = status_blk->status_cmd_consumer_index;

	hw_prod = status_blk->status_completion_producer_index;
	sw_prod = cp->kcq_prod_idx;
	while (sw_prod != hw_prod) {
		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
		if (kcqe_cnt == 0)
			goto done;

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != status_blk->status_idx) {
			status_idx = status_blk->status_idx;
			cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
			hw_prod = status_blk->status_completion_producer_index;
		} else
			break;
	}

done:
	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
	cp->kcq_prod_idx = sw_prod;

	cnic_chk_pkt_rings(cp);

	cp->last_status_idx = status_idx;
	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;
	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;

	prefetch(cp->status_blk.gen);
	prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		tasklet_schedule(&cp->cnic_irq_task);

	return IRQ_HANDLED;
}
static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
		((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
		 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
		 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
		 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}

static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 0,
			   IGU_INT_DISABLE, 0);
}
2327 static void cnic_service_bnx2x_bh(unsigned long data)
2329 struct cnic_dev *dev = (struct cnic_dev *) data;
2330 struct cnic_local *cp = dev->cnic_priv;
2331 u16 hw_prod, sw_prod;
2332 struct cstorm_status_block_c *sblk =
2333 &cp->status_blk.bnx2x->c_status_block;
2334 u32 status_idx = sblk->status_block_index;
2337 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2340 hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
2341 hw_prod = cp->hw_idx(hw_prod);
2342 sw_prod = cp->kcq_prod_idx;
2343 while (sw_prod != hw_prod) {
2344 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
2348 service_kcqes(dev, kcqe_cnt);
2350 /* Tell compiler that sblk fields can change. */
2352 if (status_idx == sblk->status_block_index)
2355 status_idx = sblk->status_block_index;
2356 hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
2357 hw_prod = cp->hw_idx(hw_prod);
2361 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod + MAX_KCQ_IDX);
2362 cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID,
2363 status_idx, IGU_INT_ENABLE, 1);
2365 cp->kcq_prod_idx = sw_prod;
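/* bnx2x service entry point (the cnic_handler in cnic_bnx2x_ops).
 * The actual KCQ processing is deferred to the tasklet above, which
 * re-enables the IGU interrupt once the queue is drained.
 */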
2368 static int cnic_service_bnx2x(void *data, void *status_blk)
2370 struct cnic_dev *dev = data;
2371 struct cnic_local *cp = dev->cnic_priv;
2372 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
2374 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2375 prefetch(cp->status_blk.bnx2x);
2376 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2378 tasklet_schedule(&cp->cnic_irq_task);
2379 cnic_chk_pkt_rings(cp);
2385 static void cnic_ulp_stop(struct cnic_dev *dev)
2387 struct cnic_local *cp = dev->cnic_priv;
2391 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
2393 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
2394 struct cnic_ulp_ops *ulp_ops;
2396 mutex_lock(&cnic_lock);
2397 ulp_ops = cp->ulp_ops[if_type];
2399 mutex_unlock(&cnic_lock);
2402 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2403 mutex_unlock(&cnic_lock);
2405 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
2406 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
2408 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2412 static void cnic_ulp_start(struct cnic_dev *dev)
2414 struct cnic_local *cp = dev->cnic_priv;
2417 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
2418 struct cnic_ulp_ops *ulp_ops;
2420 mutex_lock(&cnic_lock);
2421 ulp_ops = cp->ulp_ops[if_type];
2422 if (!ulp_ops || !ulp_ops->cnic_start) {
2423 mutex_unlock(&cnic_lock);
2426 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2427 mutex_unlock(&cnic_lock);
2429 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
2430 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
2432 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2436 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
2438 struct cnic_dev *dev = data;
2440 switch (info->cmd) {
2441 case CNIC_CTL_STOP_CMD:
2449 case CNIC_CTL_START_CMD:
2452 if (!cnic_start_hw(dev))
2453 cnic_ulp_start(dev);
2457 case CNIC_CTL_COMPLETION_CMD: {
2458 u32 cid = BNX2X_SW_CID(info->data.comp.cid);
2460 struct cnic_local *cp = dev->cnic_priv;
2462 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
2463 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2466 wake_up(&ctx->waitq);
2476 static void cnic_ulp_init(struct cnic_dev *dev)
2479 struct cnic_local *cp = dev->cnic_priv;
2481 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
2482 struct cnic_ulp_ops *ulp_ops;
2484 mutex_lock(&cnic_lock);
2485 ulp_ops = cnic_ulp_tbl[i];
2486 if (!ulp_ops || !ulp_ops->cnic_init) {
2487 mutex_unlock(&cnic_lock);
2491 mutex_unlock(&cnic_lock);
2493 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
2494 ulp_ops->cnic_init(dev);
2500 static void cnic_ulp_exit(struct cnic_dev *dev)
2503 struct cnic_local *cp = dev->cnic_priv;
2505 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
2506 struct cnic_ulp_ops *ulp_ops;
2508 mutex_lock(&cnic_lock);
2509 ulp_ops = cnic_ulp_tbl[i];
2510 if (!ulp_ops || !ulp_ops->cnic_exit) {
2511 mutex_unlock(&cnic_lock);
2515 mutex_unlock(&cnic_lock);
2517 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
2518 ulp_ops->cnic_exit(dev);
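/* Layer 4 (TCP offload) connection manager.  Each request below is
 * built as one or more kernel work queue entries (KWQEs) and handed
 * to the hardware through dev->submit_kwqes().  The rough lifecycle
 * of an offloaded connection, as implemented by the routines that
 * follow, is:
 *
 *	OFFLOAD_PG    - set up the L2 path (PG) context carrying the
 *	                MAC addresses, ethertype, and VLAN tag
 *	CONNECT1/2/3  - offload the TCP connection itself
 *	CLOSE/RESET   - graceful or abortive teardown
 *	UPLOAD_PG     - release the PG context
 *
 * Completions arrive asynchronously as KCQEs and are dispatched by
 * cnic_cm_process_kcqe() further down.
 */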
2524 static int cnic_cm_offload_pg(struct cnic_sock *csk)
2526 struct cnic_dev *dev = csk->dev;
2527 struct l4_kwq_offload_pg *l4kwqe;
2528 struct kwqe *wqes[1];
2530 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
2531 memset(l4kwqe, 0, sizeof(*l4kwqe));
2532 wqes[0] = (struct kwqe *) l4kwqe;
2534 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
2536 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
2537 l4kwqe->l2hdr_nbytes = ETH_HLEN;
2539 l4kwqe->da0 = csk->ha[0];
2540 l4kwqe->da1 = csk->ha[1];
2541 l4kwqe->da2 = csk->ha[2];
2542 l4kwqe->da3 = csk->ha[3];
2543 l4kwqe->da4 = csk->ha[4];
2544 l4kwqe->da5 = csk->ha[5];
2546 l4kwqe->sa0 = dev->mac_addr[0];
2547 l4kwqe->sa1 = dev->mac_addr[1];
2548 l4kwqe->sa2 = dev->mac_addr[2];
2549 l4kwqe->sa3 = dev->mac_addr[3];
2550 l4kwqe->sa4 = dev->mac_addr[4];
2551 l4kwqe->sa5 = dev->mac_addr[5];
2553 l4kwqe->etype = ETH_P_IP;
2554 l4kwqe->ipid_start = DEF_IPID_START;
2555 l4kwqe->host_opaque = csk->l5_cid;
2558 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
2559 l4kwqe->vlan_tag = csk->vlan_id;
2560 l4kwqe->l2hdr_nbytes += 4;
2563 return dev->submit_kwqes(dev, wqes, 1);
2566 static int cnic_cm_update_pg(struct cnic_sock *csk)
2568 struct cnic_dev *dev = csk->dev;
2569 struct l4_kwq_update_pg *l4kwqe;
2570 struct kwqe *wqes[1];
2572 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
2573 memset(l4kwqe, 0, sizeof(*l4kwqe));
2574 wqes[0] = (struct kwqe *) l4kwqe;
2576 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
2578 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
2579 l4kwqe->pg_cid = csk->pg_cid;
2581 l4kwqe->da0 = csk->ha[0];
2582 l4kwqe->da1 = csk->ha[1];
2583 l4kwqe->da2 = csk->ha[2];
2584 l4kwqe->da3 = csk->ha[3];
2585 l4kwqe->da4 = csk->ha[4];
2586 l4kwqe->da5 = csk->ha[5];
2588 l4kwqe->pg_host_opaque = csk->l5_cid;
2589 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
2591 return dev->submit_kwqes(dev, wqes, 1);
2594 static int cnic_cm_upload_pg(struct cnic_sock *csk)
2596 struct cnic_dev *dev = csk->dev;
2597 struct l4_kwq_upload *l4kwqe;
2598 struct kwqe *wqes[1];
2600 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
2601 memset(l4kwqe, 0, sizeof(*l4kwqe));
2602 wqes[0] = (struct kwqe *) l4kwqe;
2604 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
2606 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
2607 l4kwqe->cid = csk->pg_cid;
2609 return dev->submit_kwqes(dev, wqes, 1);
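/* Build the CONNECT request.  An IPv4 connect chains two KWQEs
 * (connect_req1 + connect_req3); IPv6 inserts connect_req2 in the
 * middle to carry the remaining 96 bits of the source and destination
 * addresses, for three KWQEs total.  The LINKED_WITH_NEXT flags tell
 * the chip that the entries form one logical request.
 */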
2612 static int cnic_cm_conn_req(struct cnic_sock *csk)
2614 struct cnic_dev *dev = csk->dev;
2615 struct l4_kwq_connect_req1 *l4kwqe1;
2616 struct l4_kwq_connect_req2 *l4kwqe2;
2617 struct l4_kwq_connect_req3 *l4kwqe3;
2618 struct kwqe *wqes[3];
2622 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
2623 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
2624 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
2625 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
2626 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
2627 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
2629 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
2631 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
2632 l4kwqe3->ka_timeout = csk->ka_timeout;
2633 l4kwqe3->ka_interval = csk->ka_interval;
2634 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
2635 l4kwqe3->tos = csk->tos;
2636 l4kwqe3->ttl = csk->ttl;
2637 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
2638 l4kwqe3->pmtu = csk->mtu;
2639 l4kwqe3->rcv_buf = csk->rcv_buf;
2640 l4kwqe3->snd_buf = csk->snd_buf;
2641 l4kwqe3->seed = csk->seed;
2643 wqes[0] = (struct kwqe *) l4kwqe1;
2644 if (test_bit(SK_F_IPV6, &csk->flags)) {
2645 wqes[1] = (struct kwqe *) l4kwqe2;
2646 wqes[2] = (struct kwqe *) l4kwqe3;
2649 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
2650 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
2652 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
2653 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
2654 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
2655 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
2656 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
2657 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
2658 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
2659 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
2660 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
2661 sizeof(struct tcphdr);
2663 wqes[1] = (struct kwqe *) l4kwqe3;
2664 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
2665 sizeof(struct tcphdr);
2668 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
2670 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
2671 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
2672 l4kwqe1->cid = csk->cid;
2673 l4kwqe1->pg_cid = csk->pg_cid;
2674 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
2675 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
2676 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
2677 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
2678 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
2679 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
2680 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
2681 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
2682 if (csk->tcp_flags & SK_TCP_NAGLE)
2683 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
2684 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
2685 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
2686 if (csk->tcp_flags & SK_TCP_SACK)
2687 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
2688 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
2689 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
2691 l4kwqe1->tcp_flags = tcp_flags;
2693 return dev->submit_kwqes(dev, wqes, num_wqes);
2696 static int cnic_cm_close_req(struct cnic_sock *csk)
2698 struct cnic_dev *dev = csk->dev;
2699 struct l4_kwq_close_req *l4kwqe;
2700 struct kwqe *wqes[1];
2702 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
2703 memset(l4kwqe, 0, sizeof(*l4kwqe));
2704 wqes[0] = (struct kwqe *) l4kwqe;
2706 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
2707 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
2708 l4kwqe->cid = csk->cid;
2710 return dev->submit_kwqes(dev, wqes, 1);
2713 static int cnic_cm_abort_req(struct cnic_sock *csk)
2715 struct cnic_dev *dev = csk->dev;
2716 struct l4_kwq_reset_req *l4kwqe;
2717 struct kwqe *wqes[1];
2719 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
2720 memset(l4kwqe, 0, sizeof(*l4kwqe));
2721 wqes[0] = (struct kwqe *) l4kwqe;
2723 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
2724 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
2725 l4kwqe->cid = csk->cid;
2727 return dev->submit_kwqes(dev, wqes, 1);
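/* The request builders above are driven through the connection API
 * exported to upper-layer drivers via the dev->cm_* function pointers
 * (wired up in cnic_cm_open() below).  A ULP such as bnx2i would use
 * them roughly as follows -- an illustrative sketch only, with error
 * handling omitted and cid/l5_cid assumed to be ULP-assigned ids:
 *
 *	struct cnic_sock *csk;
 *
 *	dev->cm_create(dev, CNIC_ULP_ISCSI, cid, l5_cid, &csk, ctx);
 *	dev->cm_connect(csk, &saddr);
 *	...	wait for the cm_connect_complete() upcall	...
 *	dev->cm_close(csk);
 *	dev->cm_destroy(csk);
 */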
2730 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
2731 u32 l5_cid, struct cnic_sock **csk, void *context)
2733 struct cnic_local *cp = dev->cnic_priv;
2734 struct cnic_sock *csk1;
2736 if (l5_cid >= MAX_CM_SK_TBL_SZ)
2739 csk1 = &cp->csk_tbl[l5_cid];
2740 if (atomic_read(&csk1->ref_count))
2743 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
2748 csk1->l5_cid = l5_cid;
2749 csk1->ulp_type = ulp_type;
2750 csk1->context = context;
2752 csk1->ka_timeout = DEF_KA_TIMEOUT;
2753 csk1->ka_interval = DEF_KA_INTERVAL;
2754 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
2755 csk1->tos = DEF_TOS;
2756 csk1->ttl = DEF_TTL;
2757 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
2758 csk1->rcv_buf = DEF_RCV_BUF;
2759 csk1->snd_buf = DEF_SND_BUF;
2760 csk1->seed = DEF_SEED;
2766 static void cnic_cm_cleanup(struct cnic_sock *csk)
2768 if (csk->src_port) {
2769 struct cnic_dev *dev = csk->dev;
2770 struct cnic_local *cp = dev->cnic_priv;
2772 cnic_free_id(&cp->csk_port_tbl, csk->src_port);
2777 static void cnic_close_conn(struct cnic_sock *csk)
2779 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
2780 cnic_cm_upload_pg(csk);
2781 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
2783 cnic_cm_cleanup(csk);
2786 static int cnic_cm_destroy(struct cnic_sock *csk)
2788 if (!cnic_in_use(csk))
2792 clear_bit(SK_F_INUSE, &csk->flags);
2793 smp_mb__after_clear_bit();
2794 while (atomic_read(&csk->ref_count) != 1)
2796 cnic_cm_cleanup(csk);
2803 static inline u16 cnic_get_vlan(struct net_device *dev,
2804 struct net_device **vlan_dev)
2806 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2807 *vlan_dev = vlan_dev_real_dev(dev);
2808 return vlan_dev_vlan_id(dev);
2814 static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
2815 struct dst_entry **dst)
2817 #if defined(CONFIG_INET)
2822 memset(&fl, 0, sizeof(fl));
2823 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
2825 err = ip_route_output_key(&init_net, &rt, &fl);
2830 return -ENETUNREACH;
2834 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
2835 struct dst_entry **dst)
2837 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
2840 memset(&fl, 0, sizeof(fl));
2841 ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
2842 if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
2843 fl.oif = dst_addr->sin6_scope_id;
2845 *dst = ip6_route_output(&init_net, NULL, &fl);
2850 return -ENETUNREACH;
2853 static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
2856 struct cnic_dev *dev = NULL;
2857 struct dst_entry *dst;
2858 struct net_device *netdev = NULL;
2859 int err = -ENETUNREACH;
2861 if (dst_addr->sin_family == AF_INET)
2862 err = cnic_get_v4_route(dst_addr, &dst);
2863 else if (dst_addr->sin_family == AF_INET6) {
2864 struct sockaddr_in6 *dst_addr6 =
2865 (struct sockaddr_in6 *) dst_addr;
2867 err = cnic_get_v6_route(dst_addr6, &dst);
2877 cnic_get_vlan(dst->dev, &netdev);
2879 dev = cnic_from_netdev(netdev);
2888 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
2890 struct cnic_dev *dev = csk->dev;
2891 struct cnic_local *cp = dev->cnic_priv;
2893 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
2896 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
2898 struct cnic_dev *dev = csk->dev;
2899 struct cnic_local *cp = dev->cnic_priv;
2901 struct dst_entry *dst = NULL;
2902 struct net_device *realdev;
2905 if (saddr->local.v6.sin6_family == AF_INET6 &&
2906 saddr->remote.v6.sin6_family == AF_INET6)
2908 else if (saddr->local.v4.sin_family == AF_INET &&
2909 saddr->remote.v4.sin_family == AF_INET)
2914 clear_bit(SK_F_IPV6, &csk->flags);
2917 set_bit(SK_F_IPV6, &csk->flags);
2918 cnic_get_v6_route(&saddr->remote.v6, &dst);
2920 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
2921 sizeof(struct in6_addr));
2922 csk->dst_port = saddr->remote.v6.sin6_port;
2923 local_port = saddr->local.v6.sin6_port;
2926 cnic_get_v4_route(&saddr->remote.v4, &dst);
2928 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
2929 csk->dst_port = saddr->remote.v4.sin_port;
2930 local_port = saddr->local.v4.sin_port;
2934 csk->mtu = dev->netdev->mtu;
2935 if (dst && dst->dev) {
2936 u16 vlan = cnic_get_vlan(dst->dev, &realdev);
2937 if (realdev == dev->netdev) {
2938 csk->vlan_id = vlan;
2939 csk->mtu = dst_mtu(dst);
2943 if (local_port >= CNIC_LOCAL_PORT_MIN &&
2944 local_port < CNIC_LOCAL_PORT_MAX) {
2945 if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
2951 local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
2952 if (local_port == -1) {
2957 csk->src_port = local_port;
2964 static void cnic_init_csk_state(struct cnic_sock *csk)
2967 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
2968 clear_bit(SK_F_CLOSING, &csk->flags);
2971 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
2975 if (!cnic_in_use(csk))
2978 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
2981 cnic_init_csk_state(csk);
2983 err = cnic_get_route(csk, saddr);
2987 err = cnic_resolve_addr(csk, saddr);
2992 clear_bit(SK_F_CONNECT_START, &csk->flags);
2996 static int cnic_cm_abort(struct cnic_sock *csk)
2998 struct cnic_local *cp = csk->dev->cnic_priv;
3001 if (!cnic_in_use(csk))
3004 if (cnic_abort_prep(csk))
3005 return cnic_cm_abort_req(csk);
3007 /* Getting here means that we haven't started connect, or
3008 * connect was not successful.
3011 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3012 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3013 opcode = csk->state;
3015 opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
3016 cp->close_conn(csk, opcode);
3021 static int cnic_cm_close(struct cnic_sock *csk)
3023 if (!cnic_in_use(csk))
3026 if (cnic_close_prep(csk)) {
3027 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3028 return cnic_cm_close_req(csk);
3035 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3038 struct cnic_ulp_ops *ulp_ops;
3039 int ulp_type = csk->ulp_type;
3042 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3044 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3045 ulp_ops->cm_connect_complete(csk);
3046 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3047 ulp_ops->cm_close_complete(csk);
3048 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3049 ulp_ops->cm_remote_abort(csk);
3050 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3051 ulp_ops->cm_abort_complete(csk);
3052 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3053 ulp_ops->cm_remote_close(csk);
3058 static int cnic_cm_set_pg(struct cnic_sock *csk)
3060 if (cnic_offld_prep(csk)) {
3061 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3062 cnic_cm_update_pg(csk);
3064 cnic_cm_offload_pg(csk);
3069 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3071 struct cnic_local *cp = dev->cnic_priv;
3072 u32 l5_cid = kcqe->pg_host_opaque;
3073 u8 opcode = kcqe->op_code;
3074 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3077 if (!cnic_in_use(csk))
3080 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3081 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3084 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3085 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3086 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3087 cnic_cm_upcall(cp, csk,
3088 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3092 csk->pg_cid = kcqe->pg_cid;
3093 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3094 cnic_cm_conn_req(csk);
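/* Dispatch one L4 KCQE.  PG offload/update completions are handled by
 * cnic_cm_process_offld_pg() above; everything else is mapped to a
 * cnic_sock by its L5 connection id and either completes the connect,
 * records the new state, or starts the close path via
 * cp->close_conn().
 */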
3100 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3102 struct cnic_local *cp = dev->cnic_priv;
3103 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3104 u8 opcode = l4kcqe->op_code;
3106 struct cnic_sock *csk;
3108 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3109 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3110 cnic_cm_process_offld_pg(dev, l4kcqe);
3114 l5_cid = l4kcqe->conn_id;
3116 l5_cid = l4kcqe->cid;
3117 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3120 csk = &cp->csk_tbl[l5_cid];
3123 if (!cnic_in_use(csk)) {
3129 case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
3130 if (l4kcqe->status != 0) {
3131 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3132 cnic_cm_upcall(cp, csk,
3133 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3136 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
3137 if (l4kcqe->status == 0)
3138 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
3140 smp_mb__before_clear_bit();
3141 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3142 cnic_cm_upcall(cp, csk, opcode);
3145 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3146 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
3147 cnic_cm_upcall(cp, csk, opcode);
3149 } else if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
3150 csk->state = opcode;
3152 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3153 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3154 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3155 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3156 cp->close_conn(csk, opcode);
3159 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
3160 cnic_cm_upcall(cp, csk, opcode);
3166 static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
3168 struct cnic_dev *dev = data;
3171 for (i = 0; i < num; i++)
3172 cnic_cm_process_kcqe(dev, kcqe[i]);
3175 static struct cnic_ulp_ops cm_ulp_ops = {
3176 .indicate_kcqes = cnic_cm_indicate_kcqe,
3179 static void cnic_cm_free_mem(struct cnic_dev *dev)
3181 struct cnic_local *cp = dev->cnic_priv;
3185 cnic_free_id_tbl(&cp->csk_port_tbl);
3188 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
3190 struct cnic_local *cp = dev->cnic_priv;
3192 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
3197 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
3198 CNIC_LOCAL_PORT_MIN)) {
3199 cnic_cm_free_mem(dev);
3205 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
3207 if ((opcode == csk->state) ||
3208 (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
3209 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
3210 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
3213 /* Workaround for 57710+ only: an unsolicited RESET_COMP is
3214 * treated like a RESET_RECEIVED notification, which triggers
3215 * the cleanup procedure.
3217 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
3218 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
3219 csk->state = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
3226 static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
3228 struct cnic_dev *dev = csk->dev;
3229 struct cnic_local *cp = dev->cnic_priv;
3231 clear_bit(SK_F_CONNECT_START, &csk->flags);
3232 cnic_close_conn(csk);
3233 cnic_cm_upcall(cp, csk, opcode);
3236 static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
3240 static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
3244 get_random_bytes(&seed, 4);
3245 cnic_ctx_wr(dev, 45, 0, seed);
3249 static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
3251 struct cnic_dev *dev = csk->dev;
3252 struct cnic_local *cp = dev->cnic_priv;
3253 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
3254 union l5cm_specific_data l5_data;
3256 int close_complete = 0;
3259 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3260 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3261 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3262 if (cnic_ready_to_close(csk, opcode))
3263 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
3265 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3266 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
3268 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3273 memset(&l5_data, 0, sizeof(l5_data));
3275 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
3277 } else if (close_complete) {
3278 ctx->timestamp = jiffies;
3279 cnic_close_conn(csk);
3280 cnic_cm_upcall(cp, csk, csk->state);
3284 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
3288 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
3290 struct cnic_local *cp = dev->cnic_priv;
3291 int func = CNIC_FUNC(cp);
3293 cnic_init_bnx2x_mac(dev);
3294 cnic_bnx2x_set_tcp_timestamp(dev, 1);
3296 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
3297 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(func), 0);
3299 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3300 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1);
3301 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3302 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func),
3305 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3306 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(func), DEF_TTL);
3307 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3308 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(func), DEF_TOS);
3309 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3310 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(func), 2);
3311 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3312 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), DEF_SWS_TIMER);
3314 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(func),
3319 static int cnic_cm_open(struct cnic_dev *dev)
3321 struct cnic_local *cp = dev->cnic_priv;
3324 err = cnic_cm_alloc_mem(dev);
3328 err = cp->start_cm(dev);
3333 dev->cm_create = cnic_cm_create;
3334 dev->cm_destroy = cnic_cm_destroy;
3335 dev->cm_connect = cnic_cm_connect;
3336 dev->cm_abort = cnic_cm_abort;
3337 dev->cm_close = cnic_cm_close;
3338 dev->cm_select_dev = cnic_cm_select_dev;
3340 cp->ulp_handle[CNIC_ULP_L4] = dev;
3341 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
3345 cnic_cm_free_mem(dev);
3349 static int cnic_cm_shutdown(struct cnic_dev *dev)
3351 struct cnic_local *cp = dev->cnic_priv;
3359 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
3360 struct cnic_sock *csk = &cp->csk_tbl[i];
3362 clear_bit(SK_F_INUSE, &csk->flags);
3363 cnic_cm_cleanup(csk);
3365 cnic_cm_free_mem(dev);
3370 static void cnic_init_context(struct cnic_dev *dev, u32 cid)
3375 cid_addr = GET_CID_ADDR(cid);
3377 for (i = 0; i < CTX_SIZE; i += 4)
3378 cnic_ctx_wr(dev, cid_addr, i, 0);
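/* The 5709 keeps its context memory in host DRAM.  Program the chip's
 * host page table with the DMA address of each context block, then
 * poll (up to 10 times) for the WRITE_REQ bit to clear to confirm
 * that the entry was accepted.
 */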
3381 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
3383 struct cnic_local *cp = dev->cnic_priv;
3385 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
3387 if (CHIP_NUM(cp) != CHIP_NUM_5709)
3390 for (i = 0; i < cp->ctx_blks; i++) {
3392 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
3395 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
3397 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
3398 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
3399 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
3400 (u64) cp->ctx_arr[i].mapping >> 32);
3401 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
3402 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3403 for (j = 0; j < 10; j++) {
3405 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
3406 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
3410 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
3418 static void cnic_free_irq(struct cnic_dev *dev)
3420 struct cnic_local *cp = dev->cnic_priv;
3421 struct cnic_eth_dev *ethdev = cp->ethdev;
3423 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3424 cp->disable_int_sync(dev);
3425 tasklet_disable(&cp->cnic_irq_task);
3426 free_irq(ethdev->irq_arr[0].vector, dev);
3430 static int cnic_init_bnx2_irq(struct cnic_dev *dev)
3432 struct cnic_local *cp = dev->cnic_priv;
3433 struct cnic_eth_dev *ethdev = cp->ethdev;
3435 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3437 int sblk_num = cp->status_blk_num;
3438 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
3439 BNX2_HC_SB_CONFIG_1;
3441 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
3443 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
3444 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
3445 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
3447 cp->last_status_idx = cp->status_blk.bnx2->status_idx;
3448 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
3449 (unsigned long) dev);
3450 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
3453 tasklet_disable(&cp->cnic_irq_task);
3456 while (cp->status_blk.bnx2->status_completion_producer_index &&
3458 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
3459 1 << (11 + sblk_num));
3464 if (cp->status_blk.bnx2->status_completion_producer_index) {
3470 struct status_block *sblk = cp->status_blk.gen;
3471 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
3474 while (sblk->status_completion_producer_index && i < 10) {
3475 CNIC_WR(dev, BNX2_HC_COMMAND,
3476 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3481 if (sblk->status_completion_producer_index)
3488 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
3492 static void cnic_enable_bnx2_int(struct cnic_dev *dev)
3494 struct cnic_local *cp = dev->cnic_priv;
3495 struct cnic_eth_dev *ethdev = cp->ethdev;
3497 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3500 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3501 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
3504 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
3506 struct cnic_local *cp = dev->cnic_priv;
3507 struct cnic_eth_dev *ethdev = cp->ethdev;
3509 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3512 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3513 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3514 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
3515 synchronize_irq(ethdev->irq_arr[0].vector);
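/* Set up the private L2 TX ring used for iSCSI traffic.  The ring is
 * a page of tx_bd descriptors, each pre-pointed at the single DMA
 * buffer cp->l2_buf_map, with the final descriptor chaining back to
 * the base of the ring.
 */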
3518 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
3520 struct cnic_local *cp = dev->cnic_priv;
3521 struct cnic_eth_dev *ethdev = cp->ethdev;
3522 u32 cid_addr, tx_cid, sb_id;
3523 u32 val, offset0, offset1, offset2, offset3;
3527 struct status_block *s_blk = cp->status_blk.gen;
3529 sb_id = cp->status_blk_num;
3531 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
3532 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3533 struct status_block_msix *sblk = cp->status_blk.bnx2;
3535 tx_cid = TX_TSS_CID + sb_id - 1;
3536 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
3538 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
3540 cp->tx_cons = *cp->tx_cons_ptr;
3542 cid_addr = GET_CID_ADDR(tx_cid);
3543 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
3544 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
3546 for (i = 0; i < PHY_CTX_SIZE; i += 4)
3547 cnic_ctx_wr(dev, cid_addr2, i, 0);
3549 offset0 = BNX2_L2CTX_TYPE_XI;
3550 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3551 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3552 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3554 cnic_init_context(dev, tx_cid);
3555 cnic_init_context(dev, tx_cid + 1);
3557 offset0 = BNX2_L2CTX_TYPE;
3558 offset1 = BNX2_L2CTX_CMD_TYPE;
3559 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3560 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3562 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3563 cnic_ctx_wr(dev, cid_addr, offset0, val);
3565 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3566 cnic_ctx_wr(dev, cid_addr, offset1, val);
3568 txbd = (struct tx_bd *) cp->l2_ring;
3570 buf_map = cp->l2_buf_map;
3571 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
3572 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
3573 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
3575 val = (u64) cp->l2_ring_map >> 32;
3576 cnic_ctx_wr(dev, cid_addr, offset2, val);
3577 txbd->tx_bd_haddr_hi = val;
3579 val = (u64) cp->l2_ring_map & 0xffffffff;
3580 cnic_ctx_wr(dev, cid_addr, offset3, val);
3581 txbd->tx_bd_haddr_lo = val;
3584 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
3586 struct cnic_local *cp = dev->cnic_priv;
3587 struct cnic_eth_dev *ethdev = cp->ethdev;
3588 u32 cid_addr, sb_id, val, coal_reg, coal_val;
3591 struct status_block *s_blk = cp->status_blk.gen;
3593 sb_id = cp->status_blk_num;
3594 cnic_init_context(dev, 2);
3595 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
3596 coal_reg = BNX2_HC_COMMAND;
3597 coal_val = CNIC_RD(dev, coal_reg);
3598 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3599 struct status_block_msix *sblk = cp->status_blk.bnx2;
3601 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
3602 coal_reg = BNX2_HC_COALESCE_NOW;
3603 coal_val = 1 << (11 + sb_id);
3606 while (*cp->rx_cons_ptr == 0 && i < 10) {
3607 CNIC_WR(dev, coal_reg, coal_val);
3612 cp->rx_cons = *cp->rx_cons_ptr;
3614 cid_addr = GET_CID_ADDR(2);
3615 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
3616 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
3617 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
3620 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
3622 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
3623 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
3625 rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
3626 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
3628 int n = (i % cp->l2_rx_ring_size) + 1;
3630 buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
3631 rxbd->rx_bd_len = cp->l2_single_buf_size;
3632 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3633 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
3634 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
3636 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
3637 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
3638 rxbd->rx_bd_haddr_hi = val;
3640 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
3641 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
3642 rxbd->rx_bd_haddr_lo = val;
3644 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
3645 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
3648 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
3650 struct kwqe *wqes[1], l2kwqe;
3652 memset(&l2kwqe, 0, sizeof(l2kwqe));
3653 wqes[0] = &l2kwqe;
3654 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
3655 (L2_KWQE_OPCODE_VALUE_FLUSH <<
3656 KWQE_OPCODE_SHIFT) | 2;
3657 dev->submit_kwqes(dev, wqes, 1);
3660 static void cnic_set_bnx2_mac(struct cnic_dev *dev)
3662 struct cnic_local *cp = dev->cnic_priv;
3665 val = cp->func << 2;
3667 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
3669 val = cnic_reg_rd_ind(dev, cp->shmem_base +
3670 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
3671 dev->mac_addr[0] = (u8) (val >> 8);
3672 dev->mac_addr[1] = (u8) val;
3674 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
3676 val = cnic_reg_rd_ind(dev, cp->shmem_base +
3677 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
3678 dev->mac_addr[2] = (u8) (val >> 24);
3679 dev->mac_addr[3] = (u8) (val >> 16);
3680 dev->mac_addr[4] = (u8) (val >> 8);
3681 dev->mac_addr[5] = (u8) val;
3683 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
3685 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
3686 if (CHIP_NUM(cp) != CHIP_NUM_5709)
3687 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
3689 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
3690 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
3691 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
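/* Bring up a bnx2 device: program the iSCSI MAC, size the kernel
 * bypass blocks in the MQ, set up 5709 context paging if needed,
 * initialize the KWQ/KCQ contexts and their page tables, ring the
 * doorbells that wake the CP and COM processors, and finally set up
 * the L2 rings and the IRQ.
 */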
3694 static int cnic_start_bnx2_hw(struct cnic_dev *dev)
3696 struct cnic_local *cp = dev->cnic_priv;
3697 struct cnic_eth_dev *ethdev = cp->ethdev;
3698 struct status_block *sblk = cp->status_blk.gen;
3702 cnic_set_bnx2_mac(dev);
3704 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
3705 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3706 if (BCM_PAGE_BITS > 12)
3707 val |= (12 - 8) << 4;
3709 val |= (BCM_PAGE_BITS - 8) << 4;
3711 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
3713 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
3714 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
3715 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
3717 err = cnic_setup_5709_context(dev, 1);
3721 cnic_init_context(dev, KWQ_CID);
3722 cnic_init_context(dev, KCQ_CID);
3724 cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
3725 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
3727 cp->max_kwq_idx = MAX_KWQ_IDX;
3728 cp->kwq_prod_idx = 0;
3729 cp->kwq_con_idx = 0;
3730 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
3732 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
3733 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
3735 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
3737 /* Initialize the kernel work queue context. */
3738 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
3739 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
3740 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);
3742 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
3743 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
3745 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
3746 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
3748 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
3749 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
3751 val = (u32) cp->kwq_info.pgtbl_map;
3752 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
3754 cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
3755 cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
3757 cp->kcq_prod_idx = 0;
3759 /* Initialize the kernel completion queue context. */
3760 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
3761 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
3762 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);
3764 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
3765 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
3767 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
3768 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
3770 val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
3771 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
3773 val = (u32) cp->kcq_info.pgtbl_map;
3774 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
3777 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3778 u32 sb_id = cp->status_blk_num;
3779 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
3781 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
3782 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
3783 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
3786 /* Enable Command Scheduler notification when we write to the
3787 * host producer index of the kernel contexts. */
3788 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
3790 /* Enable Command Scheduler notification when we write to either
3791 * the Send Queue or Receive Queue producer indexes of the kernel
3792 * bypass contexts. */
3793 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
3794 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
3796 /* Notify COM when the driver posts an application buffer. */
3797 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
3799 /* Set the CP and COM doorbells. These two processors poll the
3800 * doorbell for a non-zero value before running. This must be done
3801 * after setting up the kernel queue contexts. */
3802 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
3803 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
3805 cnic_init_bnx2_tx_ring(dev);
3806 cnic_init_bnx2_rx_ring(dev);
3808 err = cnic_init_bnx2_irq(dev);
3810 netdev_err(dev->netdev, "cnic_init_bnx2_irq failed\n");
3811 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
3812 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
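/* Write the DMA address of each context block into the chip's context
 * table.  If the chip requires aligned context blocks (cp->ctx_align
 * is a power of two), round the mapping up first:
 *
 *	map = (map + mask) & ~mask;
 *
 * e.g. with ctx_align = 0x1000, a mapping of 0x12345678 is rounded up
 * to 0x12346000.
 */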
3819 static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
3821 struct cnic_local *cp = dev->cnic_priv;
3822 struct cnic_eth_dev *ethdev = cp->ethdev;
3823 u32 start_offset = ethdev->ctx_tbl_offset;
3826 for (i = 0; i < cp->ctx_blks; i++) {
3827 struct cnic_ctx *ctx = &cp->ctx_arr[i];
3828 dma_addr_t map = ctx->mapping;
3830 if (cp->ctx_align) {
3831 unsigned long mask = cp->ctx_align - 1;
3833 map = (map + mask) & ~mask;
3836 cnic_ctx_tbl_wr(dev, start_offset + i, map);
3840 static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
3842 struct cnic_local *cp = dev->cnic_priv;
3843 struct cnic_eth_dev *ethdev = cp->ethdev;
3846 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
3847 (unsigned long) dev);
3848 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3849 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
3852 tasklet_disable(&cp->cnic_irq_task);
3857 static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
3859 struct cnic_local *cp = dev->cnic_priv;
3860 u8 sb_id = cp->status_blk_num;
3861 int port = CNIC_PORT(cp);
3863 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
3864 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
3865 HC_INDEX_C_ISCSI_EQ_CONS),
3867 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
3868 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
3869 HC_INDEX_C_ISCSI_EQ_CONS), 0);
3872 static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
3876 static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
3878 struct cnic_local *cp = dev->cnic_priv;
3879 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring;
3880 struct eth_context *context;
3881 struct regpair context_addr;
3883 int func = CNIC_FUNC(cp);
3884 int port = CNIC_PORT(cp);
3886 int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
3889 memset(txbd, 0, BCM_PAGE_SIZE);
3891 buf_map = cp->l2_buf_map;
3892 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
3893 struct eth_tx_start_bd *start_bd = &txbd->start_bd;
3894 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
3896 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
3897 start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
3898 reg_bd->addr_hi = start_bd->addr_hi;
3899 reg_bd->addr_lo = start_bd->addr_lo + 0x10;
3900 start_bd->nbytes = cpu_to_le16(0x10);
3901 start_bd->nbd = cpu_to_le16(3);
3902 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3903 start_bd->general_data = (UNICAST_ADDRESS <<
3904 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3905 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
3908 context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 1, &context_addr);
3910 val = (u64) cp->l2_ring_map >> 32;
3911 txbd->next_bd.addr_hi = cpu_to_le32(val);
3913 context->xstorm_st_context.tx_bd_page_base_hi = val;
3915 val = (u64) cp->l2_ring_map & 0xffffffff;
3916 txbd->next_bd.addr_lo = cpu_to_le32(val);
3918 context->xstorm_st_context.tx_bd_page_base_lo = val;
3920 context->cstorm_st_context.sb_index_number =
3921 HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS;
3922 context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID;
3924 context->xstorm_st_context.statistics_data = (cli |
3925 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3927 context->xstorm_ag_context.cdu_reserved =
3928 CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
3929 CDU_REGION_NUMBER_XCM_AG,
3930 ETH_CONNECTION_TYPE);
3932 /* reset xstorm per client statistics */
3933 val = BAR_XSTRORM_INTMEM +
3934 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
3935 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
3936 CNIC_WR(dev, val + i * 4, 0);
3939 &cp->bnx2x_def_status_blk->c_def_status_block.index_values[
3940 HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS];
3943 static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
3945 struct cnic_local *cp = dev->cnic_priv;
3946 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring +
3948 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
3949 (cp->l2_ring + (2 * BCM_PAGE_SIZE));
3950 struct eth_context *context;
3951 struct regpair context_addr;
3953 int port = CNIC_PORT(cp);
3954 int func = CNIC_FUNC(cp);
3955 int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
3957 struct tstorm_eth_client_config tstorm_client = {0};
3959 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
3961 int n = (i % cp->l2_rx_ring_size) + 1;
3963 buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
3964 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
3965 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
3967 context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 0, &context_addr);
3969 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
3970 rxbd->addr_hi = cpu_to_le32(val);
3972 context->ustorm_st_context.common.bd_page_base_hi = val;
3974 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
3975 rxbd->addr_lo = cpu_to_le32(val);
3977 context->ustorm_st_context.common.bd_page_base_lo = val;
3979 context->ustorm_st_context.common.sb_index_numbers =
3980 BNX2X_ISCSI_RX_SB_INDEX_NUM;
3981 context->ustorm_st_context.common.clientId = cli;
3982 context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID;
3983 context->ustorm_st_context.common.flags =
3984 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS;
3985 context->ustorm_st_context.common.statistics_counter_id = cli;
3986 context->ustorm_st_context.common.mc_alignment_log_size = 0;
3987 context->ustorm_st_context.common.bd_buff_size =
3988 cp->l2_single_buf_size;
3990 context->ustorm_ag_context.cdu_usage =
3991 CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
3992 CDU_REGION_NUMBER_UCM_AG,
3993 ETH_CONNECTION_TYPE);
3995 rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
3996 val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
3997 rxcqe->addr_hi = cpu_to_le32(val);
3999 CNIC_WR(dev, BAR_USTRORM_INTMEM +
4000 USTORM_CQE_PAGE_BASE_OFFSET(port, cli) + 4, val);
4002 CNIC_WR(dev, BAR_USTRORM_INTMEM +
4003 USTORM_CQE_PAGE_NEXT_OFFSET(port, cli) + 4, val);
4005 val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
4006 rxcqe->addr_lo = cpu_to_le32(val);
4008 CNIC_WR(dev, BAR_USTRORM_INTMEM +
4009 USTORM_CQE_PAGE_BASE_OFFSET(port, cli), val);
4011 CNIC_WR(dev, BAR_USTRORM_INTMEM +
4012 USTORM_CQE_PAGE_NEXT_OFFSET(port, cli), val);
4014 /* client tstorm info */
4015 tstorm_client.mtu = cp->l2_single_buf_size - 14;
4016 tstorm_client.config_flags =
4017 (TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE |
4018 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE); /* sic: spelling per the HSI header macro */
4019 tstorm_client.statistics_counter_id = cli;
4021 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4022 TSTORM_CLIENT_CONFIG_OFFSET(port, cli),
4023 ((u32 *)&tstorm_client)[0]);
4024 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4025 TSTORM_CLIENT_CONFIG_OFFSET(port, cli) + 4,
4026 ((u32 *)&tstorm_client)[1]);
4028 /* reset tstorm per client statistics */
4029 val = BAR_TSTRORM_INTMEM +
4030 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
4031 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
4032 CNIC_WR(dev, val + i * 4, 0);
4034 /* reset ustorm per client statistics */
4035 val = BAR_USTRORM_INTMEM +
4036 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
4037 for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
4038 CNIC_WR(dev, val + i * 4, 0);
4041 &cp->bnx2x_def_status_blk->u_def_status_block.index_values[
4042 HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS];
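/* Read the iSCSI MAC address and the licensed connection limit from
 * the bnx2x shared memory region.  On E1H parts in multi-function
 * mode, iSCSI is disabled unless the function's protocol is
 * configured for it.
 */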
4045 static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
4047 struct cnic_local *cp = dev->cnic_priv;
4048 u32 base, addr, val;
4049 int port = CNIC_PORT(cp);
4051 dev->max_iscsi_conn = 0;
4052 base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
4053 if (base < 0xa0000 || base >= 0xc0000)
4056 addr = BNX2X_SHMEM_ADDR(base,
4057 dev_info.port_hw_config[port].iscsi_mac_upper);
4059 val = CNIC_RD(dev, addr);
4061 dev->mac_addr[0] = (u8) (val >> 8);
4062 dev->mac_addr[1] = (u8) val;
4064 addr = BNX2X_SHMEM_ADDR(base,
4065 dev_info.port_hw_config[port].iscsi_mac_lower);
4067 val = CNIC_RD(dev, addr);
4069 dev->mac_addr[2] = (u8) (val >> 24);
4070 dev->mac_addr[3] = (u8) (val >> 16);
4071 dev->mac_addr[4] = (u8) (val >> 8);
4072 dev->mac_addr[5] = (u8) val;
4074 addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
4075 val = CNIC_RD(dev, addr);
4077 if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
4080 addr = BNX2X_SHMEM_ADDR(base,
4081 drv_lic_key[port].max_iscsi_init_conn);
4082 val16 = CNIC_RD16(dev, addr);
4086 dev->max_iscsi_conn = val16;
4088 if (BNX2X_CHIP_IS_E1H(cp->chip_id)) {
4089 int func = CNIC_FUNC(cp);
4091 addr = BNX2X_SHMEM_ADDR(base,
4092 mf_cfg.func_mf_config[func].e1hov_tag);
4093 val = CNIC_RD(dev, addr);
4094 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
4095 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
4096 addr = BNX2X_SHMEM_ADDR(base,
4097 mf_cfg.func_mf_config[func].config);
4098 val = CNIC_RD(dev, addr);
4099 val &= FUNC_MF_CFG_PROTOCOL_MASK;
4100 if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
4101 dev->max_iscsi_conn = 0;
4106 static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4108 struct cnic_local *cp = dev->cnic_priv;
4109 int func = CNIC_FUNC(cp), ret, i;
4110 int port = CNIC_PORT(cp);
4112 u8 sb_id = cp->status_blk_num;
4114 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
4115 BNX2X_ISCSI_START_CID);
4120 cp->kcq_io_addr = BAR_CSTRORM_INTMEM +
4121 CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0);
4122 cp->kcq_prod_idx = 0;
4124 cnic_get_bnx2x_iscsi_info(dev);
4127 CNIC_WR16(dev, cp->kcq_io_addr, MAX_KCQ_IDX);
4128 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4129 CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0);
4130 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4131 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0),
4132 cp->kcq_info.pg_map_arr[1] & 0xffffffff);
4133 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4134 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4,
4135 (u64) cp->kcq_info.pg_map_arr[1] >> 32);
4136 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4137 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0),
4138 cp->kcq_info.pg_map_arr[0] & 0xffffffff);
4139 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4140 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4,
4141 (u64) cp->kcq_info.pg_map_arr[0] >> 32);
4142 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4143 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1);
4144 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
4145 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(func, 0), cp->status_blk_num);
4146 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4147 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(func, 0),
4148 HC_INDEX_C_ISCSI_EQ_CONS);
4150 for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
4151 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4152 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i),
4153 cp->conn_buf_info.pgtbl[2 * i]);
4154 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4155 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i) + 4,
4156 cp->conn_buf_info.pgtbl[(2 * i) + 1]);
4159 CNIC_WR(dev, BAR_USTRORM_INTMEM +
4160 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func),
4161 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
4162 CNIC_WR(dev, BAR_USTRORM_INTMEM +
4163 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func) + 4,
4164 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
4166 cnic_setup_bnx2x_context(dev);
4168 eq_idx = CNIC_RD16(dev, BAR_CSTRORM_INTMEM +
4169 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
4170 offsetof(struct cstorm_status_block_c,
4171 index_values[HC_INDEX_C_ISCSI_EQ_CONS]));
4173 netdev_err(dev->netdev, "EQ cons index %x != 0\n", eq_idx);
4176 ret = cnic_init_bnx2x_irq(dev);
4180 cnic_init_bnx2x_tx_ring(dev);
4181 cnic_init_bnx2x_rx_ring(dev);
4186 static void cnic_init_rings(struct cnic_dev *dev)
4188 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
4189 cnic_init_bnx2_tx_ring(dev);
4190 cnic_init_bnx2_rx_ring(dev);
4191 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
4192 struct cnic_local *cp = dev->cnic_priv;
4193 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
4194 union l5cm_specific_data l5_data;
4195 struct ustorm_eth_rx_producers rx_prods = {0};
4198 rx_prods.bd_prod = 0;
4199 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
4202 off = BAR_USTRORM_INTMEM +
4203 USTORM_RX_PRODS_OFFSET(CNIC_PORT(cp), cli);
4205 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
4206 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
4208 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4210 cnic_init_bnx2x_tx_ring(dev);
4211 cnic_init_bnx2x_rx_ring(dev);
4213 l5_data.phy_address.lo = cli;
4214 l5_data.phy_address.hi = 0;
4215 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
4216 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
4218 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
4222 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
4223 netdev_err(dev->netdev,
4224 "iSCSI CLIENT_SETUP did not complete\n");
4225 cnic_kwq_completion(dev, 1);
4226 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
4230 static void cnic_shutdown_rings(struct cnic_dev *dev)
4232 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
4233 cnic_shutdown_bnx2_rx_ring(dev);
4234 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
4235 struct cnic_local *cp = dev->cnic_priv;
4236 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
4237 union l5cm_specific_data l5_data;
4240 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);
4242 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4244 l5_data.phy_address.lo = cli;
4245 l5_data.phy_address.hi = 0;
4246 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
4247 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
4249 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
4253 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
4254 netdev_err(dev->netdev,
4255 "iSCSI CLIENT_HALT did not complete\n");
4256 cnic_kwq_completion(dev, 1);
4258 memset(&l5_data, 0, sizeof(l5_data));
4259 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
4260 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE |
4261 (1 << SPE_HDR_COMMON_RAMROD_SHIFT), &l5_data);
4266 static int cnic_register_netdev(struct cnic_dev *dev)
4268 struct cnic_local *cp = dev->cnic_priv;
4269 struct cnic_eth_dev *ethdev = cp->ethdev;
4275 if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
4278 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
4280 netdev_err(dev->netdev, "register_cnic failed\n");
4285 static void cnic_unregister_netdev(struct cnic_dev *dev)
4287 struct cnic_local *cp = dev->cnic_priv;
4288 struct cnic_eth_dev *ethdev = cp->ethdev;
4293 ethdev->drv_unregister_cnic(dev->netdev);
4296 static int cnic_start_hw(struct cnic_dev *dev)
4298 struct cnic_local *cp = dev->cnic_priv;
4299 struct cnic_eth_dev *ethdev = cp->ethdev;
4302 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
4305 dev->regview = ethdev->io_base;
4306 cp->chip_id = ethdev->chip_id;
4307 pci_dev_get(dev->pcidev);
4308 cp->func = PCI_FUNC(dev->pcidev->devfn);
4309 cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
4310 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
4312 err = cp->alloc_resc(dev);
4314 netdev_err(dev->netdev, "allocate resource failure\n");
4318 err = cp->start_hw(dev);
4322 err = cnic_cm_open(dev);
4326 set_bit(CNIC_F_CNIC_UP, &dev->flags);
4328 cp->enable_int(dev);
4334 pci_dev_put(dev->pcidev);
4338 static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
4340 cnic_disable_bnx2_int_sync(dev);
4342 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4343 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4345 cnic_init_context(dev, KWQ_CID);
4346 cnic_init_context(dev, KCQ_CID);
4348 cnic_setup_5709_context(dev, 0);
4351 cnic_free_resc(dev);
4355 static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
4357 struct cnic_local *cp = dev->cnic_priv;
4358 u8 sb_id = cp->status_blk_num;
4359 int port = CNIC_PORT(cp);
4362 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
4363 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
4364 offsetof(struct cstorm_status_block_c,
4365 index_values[HC_INDEX_C_ISCSI_EQ_CONS]),
4367 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4368 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->func, 0), 0);
4369 CNIC_WR16(dev, cp->kcq_io_addr, 0);
4370 cnic_free_resc(dev);
4373 static void cnic_stop_hw(struct cnic_dev *dev)
4375 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
4376 struct cnic_local *cp = dev->cnic_priv;
4379 /* Need to wait for the ring shutdown event to complete
4380 * before clearing the CNIC_UP flag.
4382 while (cp->uio_dev != -1 && i < 15) {
4386 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
4387 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
4389 cnic_cm_shutdown(dev);
4391 pci_dev_put(dev->pcidev);
4395 static void cnic_free_dev(struct cnic_dev *dev)
4399 while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
4403 if (atomic_read(&dev->ref_count) != 0)
4404 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
4406 netdev_info(dev->netdev, "Removed CNIC device\n");
4407 dev_put(dev->netdev);
4411 static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
4412 struct pci_dev *pdev)
4414 struct cnic_dev *cdev;
4415 struct cnic_local *cp;
4418 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
4420 cdev = kzalloc(alloc_size , GFP_KERNEL);
4422 netdev_err(dev, "allocate dev struct failure\n");
4427 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
4428 cdev->register_device = cnic_register_device;
4429 cdev->unregister_device = cnic_unregister_device;
4430 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
4432 cp = cdev->cnic_priv;
4435 cp->l2_single_buf_size = 0x400;
4436 cp->l2_rx_ring_size = 3;
4438 spin_lock_init(&cp->cnic_ulp_lock);
4440 netdev_info(dev, "Added CNIC device\n");
4445 static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
4447 struct pci_dev *pdev;
4448 struct cnic_dev *cdev;
4449 struct cnic_local *cp;
4450 struct cnic_eth_dev *ethdev = NULL;
4451 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
4453 probe = symbol_get(bnx2_cnic_probe);
4455 ethdev = (*probe)(dev);
4456 symbol_put(bnx2_cnic_probe);
4461 pdev = ethdev->pdev;
4467 if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
4468 pdev->device == PCI_DEVICE_ID_NX2_5709S) {
4471 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
4479 cdev = cnic_alloc_dev(dev, pdev);
4483 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
4484 cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
4486 cp = cdev->cnic_priv;
4487 cp->ethdev = ethdev;
4488 cdev->pcidev = pdev;
4490 cp->cnic_ops = &cnic_bnx2_ops;
4491 cp->start_hw = cnic_start_bnx2_hw;
4492 cp->stop_hw = cnic_stop_bnx2_hw;
4493 cp->setup_pgtbl = cnic_setup_page_tbl;
4494 cp->alloc_resc = cnic_alloc_bnx2_resc;
4495 cp->free_resc = cnic_free_resc;
4496 cp->start_cm = cnic_cm_init_bnx2_hw;
4497 cp->stop_cm = cnic_cm_stop_bnx2_hw;
4498 cp->enable_int = cnic_enable_bnx2_int;
4499 cp->disable_int_sync = cnic_disable_bnx2_int_sync;
4500 cp->close_conn = cnic_close_bnx2_conn;
4501 cp->next_idx = cnic_bnx2_next_idx;
4502 cp->hw_idx = cnic_bnx2_hw_idx;
4510 static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
4512 struct pci_dev *pdev;
4513 struct cnic_dev *cdev;
4514 struct cnic_local *cp;
4515 struct cnic_eth_dev *ethdev = NULL;
4516 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
4518 probe = symbol_get(bnx2x_cnic_probe);
4520 ethdev = (*probe)(dev);
4521 symbol_put(bnx2x_cnic_probe);
4526 pdev = ethdev->pdev;
4531 cdev = cnic_alloc_dev(dev, pdev);
4537 set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
4538 cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
4540 cp = cdev->cnic_priv;
4541 cp->ethdev = ethdev;
4542 cdev->pcidev = pdev;
4544 cp->cnic_ops = &cnic_bnx2x_ops;
4545 cp->start_hw = cnic_start_bnx2x_hw;
4546 cp->stop_hw = cnic_stop_bnx2x_hw;
4547 cp->setup_pgtbl = cnic_setup_page_tbl_le;
4548 cp->alloc_resc = cnic_alloc_bnx2x_resc;
4549 cp->free_resc = cnic_free_resc;
4550 cp->start_cm = cnic_cm_init_bnx2x_hw;
4551 cp->stop_cm = cnic_cm_stop_bnx2x_hw;
4552 cp->enable_int = cnic_enable_bnx2x_int;
4553 cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
4554 cp->ack_int = cnic_ack_bnx2x_msix;
4555 cp->close_conn = cnic_close_bnx2x_conn;
4556 cp->next_idx = cnic_bnx2x_next_idx;
4557 cp->hw_idx = cnic_bnx2x_hw_idx;
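/* Identify CNIC-capable netdevs by their ethtool driver name rather
 * than by PCI id; a matching device gets a cnic_dev allocated and
 * added to cnic_dev_list.
 */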
4561 static struct cnic_dev *is_cnic_dev(struct net_device *dev)
4563 struct ethtool_drvinfo drvinfo;
4564 struct cnic_dev *cdev = NULL;
4566 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
4567 memset(&drvinfo, 0, sizeof(drvinfo));
4568 dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
4570 if (!strcmp(drvinfo.driver, "bnx2"))
4571 cdev = init_bnx2_cnic(dev);
4572 if (!strcmp(drvinfo.driver, "bnx2x"))
4573 cdev = init_bnx2x_cnic(dev);
4575 write_lock(&cnic_dev_lock);
4576 list_add(&cdev->list, &cnic_dev_list);
4577 write_unlock(&cnic_dev_lock);
4584 * netdev event handler
4586 static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
4589 struct net_device *netdev = ptr;
4590 struct cnic_dev *dev;
4594 dev = cnic_from_netdev(netdev);
4596 if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
4597 /* Check for the hot-plug device */
4598 dev = is_cnic_dev(netdev);
4605 struct cnic_local *cp = dev->cnic_priv;
4609 else if (event == NETDEV_UNREGISTER)
4612 if (event == NETDEV_UP) {
4613 if (cnic_register_netdev(dev) != 0) {
4617 if (!cnic_start_hw(dev))
4618 cnic_ulp_start(dev);
4622 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
4623 struct cnic_ulp_ops *ulp_ops;
4626 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
4627 if (!ulp_ops || !ulp_ops->indicate_netevent)
4630 ctx = cp->ulp_handle[if_type];
4632 ulp_ops->indicate_netevent(ctx, event);
4636 if (event == NETDEV_GOING_DOWN) {
4639 cnic_unregister_netdev(dev);
4640 } else if (event == NETDEV_UNREGISTER) {
4641 write_lock(&cnic_dev_lock);
4642 list_del_init(&dev->list);
4643 write_unlock(&cnic_dev_lock);
4655 static struct notifier_block cnic_netdev_notifier = {
4656 .notifier_call = cnic_netdev_event
4659 static void cnic_release(void)
4661 struct cnic_dev *dev;
4663 while (!list_empty(&cnic_dev_list)) {
4664 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
4665 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
4671 cnic_unregister_netdev(dev);
4672 list_del_init(&dev->list);
4677 static int __init cnic_init(void)
4681 pr_info("%s", version);
4683 rc = register_netdevice_notifier(&cnic_netdev_notifier);
4692 static void __exit cnic_exit(void)
4694 unregister_netdevice_notifier(&cnic_netdev_notifier);
4698 module_init(cnic_init);
4699 module_exit(cnic_exit);