/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2011-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI		16
/**** Exe Queue interfaces ****/

/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @bp:		driver handle
 * @o:		pointer to the object
 * @exe_len:	length of the execution chunk
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @remove:	remove function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
					struct bnx2x_exe_queue_obj *o,
					int exe_len,
					union bnx2x_qable_obj *owner,
					exe_q_validate validate,
					exe_q_remove remove,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	memset(o, 0, sizeof(*o));

	INIT_LIST_HEAD(&o->exe_queue);
	INIT_LIST_HEAD(&o->pending_comp);

	spin_lock_init(&o->lock);

	o->exe_chunk_len = exe_len;
	o->owner	 = owner;

	/* Owner specific callbacks */
	o->validate	 = validate;
	o->remove	 = remove;
	o->optimize	 = optimize;
	o->execute	 = exec;
	o->get		 = get;

	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
	   exe_len);
}
static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}

static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;
	int cnt = 0;

	spin_lock_bh(&o->lock);

	list_for_each_entry(elem, &o->exe_queue, link)
		cnt++;

	spin_unlock_bh(&o->lock);

	return cnt;
}
/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp:		driver handle
 * @o:		queue
 * @cmd:	new command to add
 * @restore:	true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
				      struct bnx2x_exe_queue_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      bool restore)
{
	int rc;

	spin_lock_bh(&o->lock);

	if (!restore) {
		/* Try to cancel this element queue */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock_bh(&o->lock);

	return 0;

free_and_exit:
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock_bh(&o->lock);

	return rc;
}
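/* Illustrative usage sketch (an assumed caller pattern, not driver code;
 * 'req' is a hypothetical struct bnx2x_vlan_mac_data with the command to
 * queue). Note that on a non-zero return the element has already been freed:
 *
 *	struct bnx2x_exeq_elem *elem = bnx2x_exe_queue_alloc_elem(bp);
 *	if (!elem)
 *		return -ENOMEM;
 *	elem->cmd_len = 1;
 *	memcpy(&elem->cmd_data.vlan_mac, &req, sizeof(req));
 *	rc = bnx2x_exe_queue_add(bp, &o->exe_queue, elem, false);
 */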
static inline void __bnx2x_exe_queue_reset_pending(
				 struct bnx2x *bp,
				 struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;

	while (!list_empty(&o->pending_comp)) {
		elem = list_first_entry(&o->pending_comp,
					struct bnx2x_exeq_elem, link);

		list_del(&elem->link);
		bnx2x_exe_queue_free_elem(bp, elem);
	}
}
/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:			driver handle
 * @o:			queue
 * @ramrod_flags:	flags
 *
 * (Should be called while holding the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	/* Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the FW
	 * which also implies there won't be any completion to clear the
	 * pending list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			return 1;
		}
	}

	/* Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/* Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_move_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len)
		return 0;

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/* In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/* If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	return rc;
}
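/* Illustrative note on the contract above (a sketch, not driver code): a
 * caller holding exe_queue->lock typically interprets the result as
 *
 *	rc = bnx2x_exe_queue_step(bp, o, &flags);
 *	if (rc < 0)		// chunk failed; commands were requeued
 *		...
 *	else if (rc == 0)	// executed synchronously; nothing pending
 *		...
 *	else			// rc > 0: ramrod in flight; the completion
 *		...		// path will empty pending_comp later
 */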
static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && list_empty(&o->pending_comp);
}

static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
	struct bnx2x *bp)
{
	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}
/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
	return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}
/**
 * bnx2x_state_wait - wait until the given bit(state) is cleared
 *
 * @bp:		device handle
 * @state:	state which is to be cleared
 * @pstate:	state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 2000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}
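/* Illustrative sketch of the pending-bit protocol these helpers implement
 * (an assumed caller pattern, mirroring bnx2x_execute_vlan_mac() below):
 *
 *	raw->set_pending(raw);			// mark a command in flight
 *	rc = bnx2x_sp_post(bp, ...);		// fire the ramrod to FW
 *	...					// the completion handler ends
 *	...					// with raw->clear_pending(raw)
 *	rc = raw->wait_comp(bp, raw);		// sleep until the bit clears
 */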
/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get(vp, 1);
}

static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->get(mp, 1))
		return false;

	if (!vp->get(vp, 1)) {
		mp->put(mp, 1);
		return false;
	}

	return true;
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put(vp, 1);
}

static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->put(mp, 1))
		return false;

	if (!vp->put(vp, 1)) {
		mp->get(mp, 1);
		return false;
	}

	return true;
}
/**
 * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details: Non-blocking implementation; should be called under execution
 *           queue lock.
 */
static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
					    struct bnx2x_vlan_mac_obj *o)
{
	if (o->head_reader) {
		DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
		return -EBUSY;
	}

	DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
	return 0;
}

/**
 * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
					    struct bnx2x_vlan_mac_obj *o)
{
	int rc;
	unsigned long ramrod_flags = o->saved_ramrod_flags;

	DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
	   ramrod_flags);
	o->head_exe_request = false;
	o->saved_ramrod_flags = 0;
	rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
	if (rc != 0) {
		BNX2X_ERR("execution of pending commands failed with rc %d\n",
			  rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

/**
 * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
 *
 * @bp:			device handle
 * @o:			vlan_mac object
 * @ramrod_flags:	ramrod flags of missed execution
 *
 * @details Should be called under execution queue lock.
 */
static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
				    struct bnx2x_vlan_mac_obj *o,
				    unsigned long ramrod_flags)
{
	o->head_exe_request = true;
	o->saved_ramrod_flags = ramrod_flags;
	DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
	   ramrod_flags);
}

/**
 * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
					    struct bnx2x_vlan_mac_obj *o)
{
	/* It's possible a new pending execution was added since this writer
	 * executed. If so, execute again. [Ad infinitum]
	 */
	while (o->head_exe_request) {
		DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
		__bnx2x_vlan_mac_h_exec_pending(bp, o);
	}
}

/**
 * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Notice if a pending execution exists, it would perform it -
 *          possibly releasing and reclaiming the execution queue lock.
 */
void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o)
{
	spin_lock_bh(&o->exe_queue.lock);
	__bnx2x_vlan_mac_h_write_unlock(bp, o);
	spin_unlock_bh(&o->exe_queue.lock);
}
/**
 * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
					struct bnx2x_vlan_mac_obj *o)
{
	/* If we got here, we're holding lock --> no WRITER exists */
	o->head_reader++;
	DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
	   o->head_reader);

	return 0;
}

/**
 * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 */
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int rc;

	spin_lock_bh(&o->exe_queue.lock);
	rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
	spin_unlock_bh(&o->exe_queue.lock);

	return rc;
}

/**
 * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader. Possibly releasing and reclaiming the execution queue lock.
 */
static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
					   struct bnx2x_vlan_mac_obj *o)
{
	if (!o->head_reader) {
		BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	} else {
		o->head_reader--;
		DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
		   o->head_reader);
	}

	/* It's possible a new pending execution was added, and that this reader
	 * was last - if so we need to execute the command.
	 */
	if (!o->head_reader && o->head_exe_request) {
		DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");

		/* Writer release will do the trick */
		__bnx2x_vlan_mac_h_write_unlock(bp, o);
	}
}

/**
 * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run.
 */
void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o)
{
	spin_lock_bh(&o->exe_queue.lock);
	__bnx2x_vlan_mac_h_read_unlock(bp, o);
	spin_unlock_bh(&o->exe_queue.lock);
}
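/* Illustrative sketch of the reader-lock pattern (this is the same pattern
 * bnx2x_get_n_elements() below uses when walking the registry):
 *
 *	if (bnx2x_vlan_mac_h_read_lock(bp, o) == 0) {
 *		list_for_each_entry(pos, &o->head, link)
 *			...;	// read-only access to pos->u
 *		bnx2x_vlan_mac_h_read_unlock(bp, o);
 *	}
 */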
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
				int n, u8 *base, u8 stride, u8 size)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	u8 *next = base;
	int counter = 0;
	int read_lock;

	DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
	read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
	if (read_lock != 0)
		BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");

	/* traverse list */
	list_for_each_entry(pos, &o->head, link) {
		if (counter < n) {
			memcpy(next, &pos->u, size);
			counter++;
			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
			   counter, next);
			next += stride + size;
		}
	}

	if (read_lock == 0) {
		DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
		bnx2x_vlan_mac_h_read_unlock(bp, o);
	}

	return counter * ETH_ALEN;
}
/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o,
			       union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);

	if (!is_valid_ether_addr(data->mac.mac))
		return -EINVAL;

	/* Check if a requested MAC already exists */
	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_add(struct bnx2x *bp,
				struct bnx2x_vlan_mac_obj *o,
				union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
				    struct bnx2x_vlan_mac_obj *o,
				    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
	   data->vlan_mac.mac, data->vlan_mac.vlan);

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)) &&
		    (data->vlan_mac.is_inner_mac ==
		     pos->u.vlan_mac.is_inner_mac))
			return -EEXIST;

	return 0;
}
/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_mac_del(struct bnx2x *bp,
			    struct bnx2x_vlan_mac_obj *o,
			    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);

	list_for_each_entry(pos, &o->head, link)
		if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_del(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_mac_del(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
	   data->vlan_mac.mac, data->vlan_mac.vlan);

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)) &&
		    (data->vlan_mac.is_inner_mac ==
		     pos->u.vlan_mac.is_inner_mac))
			return pos;

	return NULL;
}
/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *src_o,
			     struct bnx2x_vlan_mac_obj *dst_o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(bp, src_o, data);

	/* check if configuration can be added */
	rc = dst_o->check_add(bp, dst_o, data);

	/* If this classification can not be added (is already set)
	 * or can't be deleted - return an error.
	 */
	if (rc || !pos)
		return false;

	return true;
}

static bool bnx2x_check_move_always_err(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}
static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	u8 rx_tx_flag = 0;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}
void bnx2x_set_mac_in_nig(struct bnx2x *bp,
			  bool add, unsigned char *dev_addr, int index)
{
	u32 wb_data[2];
	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
		return;

	if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
	   (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register */
		reg_offset += 8*index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) |  dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
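/* Worked example of the packing above (illustrative): for dev_addr
 * 00:11:22:33:44:55 the two write-back words become
 *
 *	wb_data[0] = 0x22334455;	// bytes 2..5
 *	wb_data[1] = 0x00000011;	// bytes 0..1
 *
 * i.e. the MAC is laid out as a single big-endian u64 in the LLH CAM entry.
 */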
/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue for which we want to configure this rule
 * @add:	if true the command is an ADD command, DEL otherwise
 * @opcode:	CLASSIFY_RULE_OPCODE_XXX
 * @hdr:	pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
	struct eth_classify_cmd_header *hdr)
{
	struct bnx2x_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |=
		bnx2x_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:	connection id
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @hdr:	pointer to header to setup
 * @rule_cnt:	number of rules in the ramrod data
 *
 * currently we always configure one rule and echo field to contain a CID and an
 * opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
	hdr->rule_cnt = (u8)rule_cnt;
}
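/* Illustrative decoding of the echo field set above (a sketch of what the
 * completion path can do, not a verbatim driver snippet): the low bits carry
 * the SW connection id and the bits from BNX2X_SWCID_SHIFT up carry the
 * BNX2X_FILTER_XXX_PENDING type, so both halves are recoverable:
 *
 *	cid  = le32_to_cpu(hdr->echo) & BNX2X_SWCID_MASK;
 *	type = le32_to_cpu(hdr->echo) >> BNX2X_SWCID_SHIFT;
 */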
/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of 8 per function MAC
	 * entries in the LLH register. There is also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
	   (add ? "add" : "delete"), mac, raw->cl_id);

	/* Set a MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);
	rule_entry->mac.inner_mac =
		cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set a MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
		rule_entry->mac.inner_mac =
			cpu_to_le16(elem->cmd_data.vlan_mac.
				    u.mac.is_inner_mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	 * writing
	 */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @cam_offset:	offset in cam memory
 * @hdr:	pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct bnx2x_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (u8)cam_offset;
	hdr->client_id = cpu_to_le16(0xff);
	hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
}
static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
	struct bnx2x_raw_obj *r = &o->raw;
	u32 cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = cpu_to_le16(vlan_id);

	if (add) {
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
		SET_FLAG(cfg_entry->flags,
			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

		/* Set a MAC in a ramrod data */
		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
}
static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
	struct bnx2x_raw_obj *raw = &o->raw;

	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
					 &config->hdr);
	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
					 cfg_entry);

	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
	   (add ? "setting" : "clearing"),
	   mac, raw->cl_id, cam_offset);
}
/**
 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/* 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}
static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
	   vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = cpu_to_le16(vlan);

	/* MOVE: Add a rule that will add this VLAN to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_VLAN,
					      &rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = cpu_to_le16(vlan);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	 * writing
	 */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
				      struct bnx2x_vlan_mac_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = cpu_to_le16(vlan);
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);
	rule_entry->pair.inner_mac =
		cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);

	/* MOVE: Add a rule that will add this VLAN-MAC pair to the target
	 * Queue.
	 */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_PAIR,
					      &rule_entry->pair.header);

		/* Set VLAN and MAC themselves */
		rule_entry->pair.vlan = cpu_to_le16(vlan);
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
		rule_entry->pair.inner_mac =
			cpu_to_le16(elem->cmd_data.vlan_mac.u.
				    vlan_mac.is_inner_mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	 * writing
	 */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
/**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset:	cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
				       struct bnx2x_vlan_mac_obj *o,
				       struct bnx2x_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/* 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}
#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)
/**
 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @bp:		device handle
 * @p:		command parameters
 * @ppos:	pointer to the cookie
 *
 * reconfigure next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
 * into account
 *
 * pointer to the cookie - that should be given back in the next call to make
 * function handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If returned *ppos == NULL this means that the last element has been
 * handled.
 *
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
			   struct bnx2x_vlan_mac_ramrod_params *p,
			   struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}
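/* Illustrative sketch of driving the restore iterator (an assumed caller
 * pattern; 'p' is prepared with the object and ramrod flags as described
 * above):
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *	} while (!rc && pos);	// stop on error or after the last element
 */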
/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_mac_ramrod_data *data =
		&elem->cmd_data.vlan_mac.u.vlan_mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}
/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
		return rc;
	}

	/* Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
		return -EEXIST;
	}

	/* TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return -EINVAL;

	return 0;
}
/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp:		device handle
 * @qo:		qable object to check
 * @elem:	element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return -EEXIST.
	 */
	pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
		return -EEXIST;
	}

	/* Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		BNX2X_ERR("There is a pending MOVE command already\n");
		return -EINVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
		return -EEXIST;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		BNX2X_ERR("Failed to return a credit\n");
		return -EINVAL;
	}

	return 0;
}
/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp:		device handle
 * @qo:		qable object to check (source)
 * @elem:	element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/* Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(bp, src_o, dest_o,
			       &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
		return -EINVAL;
	}

	/* Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
		return -EINVAL;
	}

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return -EINVAL;

	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}
static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		return -EINVAL;
	}
}
static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
				 union bnx2x_qable_obj *qo,
				 struct bnx2x_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return 0;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
	case BNX2X_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case BNX2X_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return -EINVAL;
	}

	if (rc != true)
		return -EINVAL;

	return 0;
}
/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 *
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(bp, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 2000);
		else
			return 0;
	}

	return -EBUSY;
}
static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
					 struct bnx2x_vlan_mac_obj *o,
					 unsigned long *ramrod_flags)
{
	int rc = 0;

	spin_lock_bh(&o->exe_queue.lock);

	DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
	rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);

	if (rc != 0) {
		__bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);

		/* Calling function should not differentiate between this case
		 * and the case in which there is already a pending ramrod
		 */
		rc = 1;
	} else {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
	}
	spin_unlock_bh(&o->exe_queue.lock);

	return rc;
}
/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp:			device handle
 * @o:			bnx2x_vlan_mac_obj
 * @cqe:		completion element
 * @ramrod_flags:	if RAMROD_CONT is set the next execution chunk
 *			will be scheduled
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Clearing the pending list & raw state should be made
	 * atomically (as execution flow assumes they represent the same).
	 */
	spin_lock_bh(&o->exe_queue.lock);

	/* Reset pending list */
	__bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	spin_unlock_bh(&o->exe_queue.lock);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}
/**
 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem query, *pos;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

	memcpy(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
		break;
	case BNX2X_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {

		/* Return the credit of the optimized command */
		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
				BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
				return -EINVAL;
			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
				BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
				return -EINVAL;
			}
		}

		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
		   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		   "ADD" : "DEL");

		list_del(&pos->link);
		bnx2x_exe_queue_free_elem(bp, pos);
		return 1;
	}

	return 0;
}
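/* Illustrative scenario for the optimization above: if a DEL for MAC X is
 * submitted while an ADD for MAC X is still queued (not yet sent to FW), the
 * queued ADD is removed, its CAM credit is returned, and the new DEL is then
 * freed by bnx2x_exe_queue_add() - the two commands cancel out without any
 * ramrod being sent. The symmetric case optimizes an ADD against a queued
 * DEL.
 */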
/**
 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 * @elem:	execution queue element
 * @restore:	if true, look up an existing registry element instead
 * @re:		the resulting registry element is returned here
 *
 * prepare a registry element according to the current command request.
 */
static inline int bnx2x_vlan_mac_get_registry_elem(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o,
	struct bnx2x_exeq_elem *elem,
	bool restore,
	struct bnx2x_vlan_mac_registry_elem **re)
{
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	struct bnx2x_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element if needed. */
	if (!restore &&
	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
		if (!reg_elem)
			return -ENOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/* This shall never happen, because we have checked the
			 * CAM availability in the 'validate'.
			 */
			WARN_ON(1);
			kfree(reg_elem);
			return -EINVAL;
		}

		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
		       sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
			elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else /* DEL, RESTORE */
		reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);

	*re = reg_elem;
	return 0;
}
/**
 * bnx2x_execute_vlan_mac - execute vlan mac command
 *
 * @bp:			device handle
 * @qo:			bnx2x_qable_obj
 * @exe_chunk:		chunk of commands to execute
 * @ramrod_flags:	execution flags
 *
 * go and send a ramrod!
 */
static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
				  union bnx2x_qable_obj *qo,
				  struct list_head *exe_chunk,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc, idx = 0;
	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
	struct bnx2x_vlan_mac_registry_elem *reg_elem;
	enum bnx2x_vlan_mac_cmd cmd;

	/* If DRIVER_ONLY execution is requested, cleanup a registry
	 * and exit. Otherwise send a ramrod to FW.
	 */
	if (!drv_only) {
		WARN_ON(r->check_pending(r));

		/* Set pending */
		r->set_pending(r);

		/* Fill the ramrod data */
		list_for_each_entry(elem, exe_chunk, link) {
			cmd = elem->cmd_data.vlan_mac.cmd;
			/* We will add to the target object in MOVE command, so
			 * change the object for a CAM search.
			 */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			WARN_ON(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
			    (cmd == BNX2X_VLAN_MAC_MOVE)))
				list_add(&reg_elem->link, &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(bp, o, elem, idx,
					reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/* No need for an explicit memory barrier here as long we would
		 * need to ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read and we will have to put a full memory barrier there
		 * (inside bnx2x_sp_post()).
		 */

		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
				   U64_HI(r->rdata_mapping),
				   U64_LO(r->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(bp, o,
						&elem->cmd_data.vlan_mac.u);

			WARN_ON(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			list_del(&reg_elem->link);
			kfree(reg_elem);
		}
	}

	if (!drv_only)
		return 1;
	else
		return 0;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == BNX2X_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(bp, cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				list_del(&reg_elem->link);
				kfree(reg_elem);
			}
		}
	}

	return rc;
}
static inline int bnx2x_vlan_mac_push_new_cmd(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);

	/* Allocate the execution queue element */
	elem = bnx2x_exe_queue_alloc_elem(bp);
	if (!elem)
		return -ENOMEM;

	/* Set the command 'length' */
	switch (p->user_req.cmd) {
	case BNX2X_VLAN_MAC_MOVE:
		elem->cmd_len = 2;
		break;
	default:
		elem->cmd_len = 1;
	}

	/* Fill the object specific info */
	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));

	/* Try to add a new command to the pending list */
	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
}
/**
 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @bp:	  device handle
 * @p:	  command parameters
 *
 */
int bnx2x_config_vlan_mac(struct bnx2x *bp,
			  struct bnx2x_vlan_mac_ramrod_params *p)
{
	int rc = 0;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
	struct bnx2x_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
		if (rc)
			return rc;
	}

	/* If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		rc = 1;

	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
						   &p->ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then user want to wait until the last command is done.
	 */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/* Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(bp, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = __bnx2x_vlan_mac_execute_step(bp,
							   p->vlan_mac_obj,
							   &p->ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return 0;
	}

	return rc;
}
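/* Illustrative usage sketch (an assumed caller pattern, close to what the
 * driver does when setting a unicast MAC; 'mac_obj' and 'addr' are
 * hypothetical here):
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = bnx2x_config_vlan_mac(bp, &p);	// sleeps until completed
 */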
/**
 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @bp:			device handle
 * @o:			vlan object info
 * @vlan_mac_flags:	vlan_mac_flags to match against the registry elements
 * @ramrod_flags:	execution flags to be used for this deletion
 *
 * Returns zero if the last operation has completed successfully and there are
 * no more elements left, a positive value if the last operation has completed
 * successfully and there are more previously configured elements, and a
 * negative value if the current operation has failed.
 */
static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
	struct bnx2x_vlan_mac_ramrod_params p;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
	int read_lock;
	int rc = 0;

	/* Clear pending commands first */

	spin_lock_bh(&exeq->lock);

	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
		    *vlan_mac_flags) {
			rc = exeq->remove(bp, exeq->owner, exeq_pos);
			if (rc) {
				BNX2X_ERR("Failed to remove command\n");
				spin_unlock_bh(&exeq->lock);
				return rc;
			}
			list_del(&exeq_pos->link);
			bnx2x_exe_queue_free_elem(bp, exeq_pos);
		}
	}

	spin_unlock_bh(&exeq->lock);

	/* Prepare a command request */
	memset(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	/* Add all but the last VLAN-MAC to the execution queue without actually
	 * executing anything.
	 */
	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
	__clear_bit(RAMROD_CONT, &p.ramrod_flags);

	DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
	read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
	if (read_lock != 0)
		return read_lock;

	list_for_each_entry(pos, &o->head, link) {
		if (pos->vlan_mac_flags == *vlan_mac_flags) {
			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
			rc = bnx2x_config_vlan_mac(bp, &p);
			if (rc < 0) {
				BNX2X_ERR("Failed to add a new DEL command\n");
				bnx2x_vlan_mac_h_read_unlock(bp, o);
				return rc;
			}
		}
	}

	DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
	bnx2x_vlan_mac_h_read_unlock(bp, o);

	p.ramrod_flags = *ramrod_flags;
	__set_bit(RAMROD_CONT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}
static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
	unsigned long *pstate, bnx2x_obj_type type)
{
	raw->func_id = func_id;
	raw->cid = cid;
	raw->cl_id = cl_id;
	raw->rdata = rdata;
	raw->rdata_mapping = rdata_mapping;
	raw->state = state;
	raw->pstate = pstate;
	raw->obj_type = type;
	raw->check_pending = bnx2x_raw_check_pending;
	raw->clear_pending = bnx2x_raw_clear_pending;
	raw->set_pending = bnx2x_raw_set_pending;
	raw->wait_comp = bnx2x_raw_wait;
}

static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
	int state, unsigned long *pstate, bnx2x_obj_type type,
	struct bnx2x_credit_pool_obj *macs_pool,
	struct bnx2x_credit_pool_obj *vlans_pool)
{
	INIT_LIST_HEAD(&o->head);
	o->head_reader = 0;
	o->head_exe_request = false;
	o->saved_ramrod_flags = 0;

	o->macs_pool = macs_pool;
	o->vlans_pool = vlans_pool;

	o->delete_all = bnx2x_vlan_mac_del_all;
	o->restore = bnx2x_vlan_mac_restore;
	o->complete = bnx2x_complete_vlan_mac;
	o->wait = bnx2x_wait_vlan_mac;

	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
			   state, pstate, type);
}
void bnx2x_init_mac_obj(struct bnx2x *bp,
			struct bnx2x_vlan_mac_obj *mac_obj,
			u8 cl_id, u32 cid, u8 func_id, void *rdata,
			dma_addr_t rdata_mapping, int state,
			unsigned long *pstate, bnx2x_obj_type type,
			struct bnx2x_credit_pool_obj *macs_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;

	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, NULL);

	/* CAM credit pool handling */
	mac_obj->get_credit = bnx2x_get_credit_mac;
	mac_obj->put_credit = bnx2x_put_credit_mac;
	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;

	if (CHIP_IS_E1x(bp)) {
		mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
		mac_obj->check_del = bnx2x_check_mac_del;
		mac_obj->check_add = bnx2x_check_mac_add;
		mac_obj->check_move = bnx2x_check_move_always_err;
		mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, 1, qable_obj,
				     bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
	} else {
		mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
		mac_obj->check_del = bnx2x_check_mac_del;
		mac_obj->check_add = bnx2x_check_mac_add;
		mac_obj->check_move = bnx2x_check_move;
		mac_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
		mac_obj->get_n_elements = bnx2x_get_n_elements;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
	}
}
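/* Illustrative sketch of wiring a MAC object at init time (an assumed call
 * site; the real ones live in bnx2x_main.c, and the rdata buffer names are
 * hypothetical here):
 *
 *	bnx2x_init_mac_obj(bp, &sp_obj->mac_obj, fp->cl_id, fp->cid,
 *			   BP_FUNC(bp), rdata, rdata_mapping,
 *			   BNX2X_FILTER_MAC_PENDING, &bp->sp_state,
 *			   BNX2X_OBJ_TYPE_RX_TX, &bp->macs_pool);
 */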
void bnx2x_init_vlan_obj(struct bnx2x *bp,
			 struct bnx2x_vlan_mac_obj *vlan_obj,
			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
			 dma_addr_t rdata_mapping, int state,
			 unsigned long *pstate, bnx2x_obj_type type,
			 struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;

	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type, NULL,
				   vlans_pool);

	vlan_obj->get_credit = bnx2x_get_credit_vlan;
	vlan_obj->put_credit = bnx2x_put_credit_vlan;
	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Do not support chips other than E2 and newer\n");
		BUG();
	} else {
		vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
		vlan_obj->check_del = bnx2x_check_vlan_del;
		vlan_obj->check_add = bnx2x_check_vlan_add;
		vlan_obj->check_move = bnx2x_check_move;
		vlan_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
		vlan_obj->get_n_elements = bnx2x_get_n_elements;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan);
	}
}
2243 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2244 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2245 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2246 dma_addr_t rdata_mapping, int state,
2247 unsigned long *pstate, bnx2x_obj_type type,
2248 struct bnx2x_credit_pool_obj *macs_pool,
2249 struct bnx2x_credit_pool_obj *vlans_pool)
2251 union bnx2x_qable_obj *qable_obj =
2252 (union bnx2x_qable_obj *)vlan_mac_obj;
2254 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2255 rdata_mapping, state, pstate, type,
2256 macs_pool, vlans_pool);
2258 /* CAM pool handling */
2259 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2260 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
	/* CAM offset is relevant only for 57710 and 57711 chips, which have
	 * a single CAM for both MACs and VLAN-MAC pairs. So the offset
	 * will be taken from the MACs' pool object only.
	 */
2265 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2266 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2268 if (CHIP_IS_E1(bp)) {
		BNX2X_ERR("E1 chips are not supported\n");
2271 } else if (CHIP_IS_E1H(bp)) {
2272 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
2273 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2274 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2275 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
2276 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2279 bnx2x_exe_queue_init(bp,
2280 &vlan_mac_obj->exe_queue, 1, qable_obj,
2281 bnx2x_validate_vlan_mac,
2282 bnx2x_remove_vlan_mac,
2283 bnx2x_optimize_vlan_mac,
2284 bnx2x_execute_vlan_mac,
2285 bnx2x_exeq_get_vlan_mac);
2287 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
2288 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2289 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2290 vlan_mac_obj->check_move = bnx2x_check_move;
2291 vlan_mac_obj->ramrod_cmd =
2292 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2295 bnx2x_exe_queue_init(bp,
2296 &vlan_mac_obj->exe_queue,
2297 CLASSIFY_RULES_COUNT,
2298 qable_obj, bnx2x_validate_vlan_mac,
2299 bnx2x_remove_vlan_mac,
2300 bnx2x_optimize_vlan_mac,
2301 bnx2x_execute_vlan_mac,
2302 bnx2x_exeq_get_vlan_mac);
2306 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2307 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2308 struct tstorm_eth_mac_filter_config *mac_filters,
2311 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2313 u32 addr = BAR_TSTRORM_INTMEM +
2314 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2316 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2319 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2320 struct bnx2x_rx_mode_ramrod_params *p)
2322 /* update the bp MAC filter structure */
2323 u32 mask = (1 << p->cl_id);
2325 struct tstorm_eth_mac_filter_config *mac_filters =
2326 (struct tstorm_eth_mac_filter_config *)p->rdata;
2328 /* initial setting is drop-all */
2329 u8 drop_all_ucast = 1, drop_all_mcast = 1;
2330 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2331 u8 unmatched_unicast = 0;
	/* On E1x only the Rx accept flags are taken into account, since
	 * Tx switching is not enabled.
	 */
2335 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2336 /* accept matched ucast */
2339 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2340 /* accept matched mcast */
2343 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
2348 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2349 /* accept all mcast */
2353 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2354 /* accept (all) bcast */
2356 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2357 /* accept unmatched unicasts */
2358 unmatched_unicast = 1;
2360 mac_filters->ucast_drop_all = drop_all_ucast ?
2361 mac_filters->ucast_drop_all | mask :
2362 mac_filters->ucast_drop_all & ~mask;
2364 mac_filters->mcast_drop_all = drop_all_mcast ?
2365 mac_filters->mcast_drop_all | mask :
2366 mac_filters->mcast_drop_all & ~mask;
2368 mac_filters->ucast_accept_all = accp_all_ucast ?
2369 mac_filters->ucast_accept_all | mask :
2370 mac_filters->ucast_accept_all & ~mask;
2372 mac_filters->mcast_accept_all = accp_all_mcast ?
2373 mac_filters->mcast_accept_all | mask :
2374 mac_filters->mcast_accept_all & ~mask;
2376 mac_filters->bcast_accept_all = accp_all_bcast ?
2377 mac_filters->bcast_accept_all | mask :
2378 mac_filters->bcast_accept_all & ~mask;
2380 mac_filters->unmatched_unicast = unmatched_unicast ?
2381 mac_filters->unmatched_unicast | mask :
2382 mac_filters->unmatched_unicast & ~mask;
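	/* Worked example (illustrative, not part of the original code):
	 * each client owns one bit in every filter word, with
	 * mask == 1 << p->cl_id. For cl_id 3 the mask is 0x8, so accepting
	 * unicast on that client clears bit 3 of ucast_drop_all while
	 * leaving the other clients' bits untouched.
	 */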
	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2386 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2387 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2388 mac_filters->bcast_accept_all);
	/* Write the MAC filter structure */
2391 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2393 /* The operation is completed */
2394 clear_bit(p->state, p->pstate);
2395 smp_mb__after_clear_bit();
2400 /* Setup ramrod data */
2401 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2402 struct eth_classify_header *hdr,
2405 hdr->echo = cpu_to_le32(cid);
2406 hdr->rule_cnt = rule_cnt;
2409 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2410 unsigned long *accept_flags,
2411 struct eth_filter_rules_cmd *cmd,
2412 bool clear_accept_all)
2416 /* start with 'drop-all' */
2417 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2418 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2420 if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2421 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2423 if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2424 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2426 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2427 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2428 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2431 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2432 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2433 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2436 if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2437 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2439 if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2440 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2441 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2444 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2445 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2447 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2448 if (clear_accept_all) {
2449 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2450 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2451 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2452 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2455 cmd->state = cpu_to_le16(state);
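	/* Illustrative example (values assumed, not from the original
	 * source): for a queue with only BNX2X_ACCEPT_UNICAST and
	 * BNX2X_ACCEPT_BROADCAST set, the resulting state is
	 * MCAST_DROP_ALL | BCAST_ACCEPT_ALL - matched unicast passes,
	 * multicast is dropped and all broadcasts are accepted.
	 */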
2458 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2459 struct bnx2x_rx_mode_ramrod_params *p)
2461 struct eth_filter_rules_ramrod_data *data = p->rdata;
2465 /* Reset the ramrod data buffer */
2466 memset(data, 0, sizeof(*data));
2468 /* Setup ramrod data */
2470 /* Tx (internal switching) */
2471 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2472 data->rules[rule_idx].client_id = p->cl_id;
2473 data->rules[rule_idx].func_id = p->func_id;
2475 data->rules[rule_idx].cmd_general_data =
2476 ETH_FILTER_RULES_CMD_TX_CMD;
2478 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2479 &(data->rules[rule_idx++]),
2484 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2485 data->rules[rule_idx].client_id = p->cl_id;
2486 data->rules[rule_idx].func_id = p->func_id;
2488 data->rules[rule_idx].cmd_general_data =
2489 ETH_FILTER_RULES_CMD_RX_CMD;
2491 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2492 &(data->rules[rule_idx++]),
2496 /* If FCoE Queue configuration has been requested configure the Rx and
2497 * internal switching modes for this queue in separate rules.
 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
 * MCAST_ALL, UCAST_ALL, BCAST_ALL or UNMATCHED.
2502 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2503 /* Tx (internal switching) */
2504 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2505 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2506 data->rules[rule_idx].func_id = p->func_id;
2508 data->rules[rule_idx].cmd_general_data =
2509 ETH_FILTER_RULES_CMD_TX_CMD;
2511 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2512 &(data->rules[rule_idx]),
2518 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2519 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2520 data->rules[rule_idx].func_id = p->func_id;
2522 data->rules[rule_idx].cmd_general_data =
2523 ETH_FILTER_RULES_CMD_RX_CMD;
2525 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2526 &(data->rules[rule_idx]),
	/* Set the ramrod header (most importantly - the number of rules to
	 * configure).
	 */
2535 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2537 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2538 data->header.rule_cnt, p->rx_accept_flags,
2539 p->tx_accept_flags);
	/* No explicit memory barrier is needed here: ordering between
	 * writing the SPQ element and updating the SPQ producer (which
	 * involves a memory read) is guaranteed by the full memory
	 * barrier inside bnx2x_sp_post().
	 */
2549 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2550 U64_HI(p->rdata_mapping),
2551 U64_LO(p->rdata_mapping),
2552 ETH_CONNECTION_TYPE);
2556 /* Ramrod completion is pending */
2560 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2561 struct bnx2x_rx_mode_ramrod_params *p)
2563 return bnx2x_state_wait(bp, p->state, p->pstate);
2566 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2567 struct bnx2x_rx_mode_ramrod_params *p)
2573 int bnx2x_config_rx_mode(struct bnx2x *bp,
2574 struct bnx2x_rx_mode_ramrod_params *p)
2578 /* Configure the new classification in the chip */
2579 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2583 /* Wait for a ramrod completion if was requested */
2584 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2585 rc = p->rx_mode_obj->wait_comp(bp, p);
2593 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2594 struct bnx2x_rx_mode_obj *o)
2596 if (CHIP_IS_E1x(bp)) {
2597 o->wait_comp = bnx2x_empty_rx_mode_wait;
2598 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2600 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2601 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2605 /********************* Multicast verbs: SET, CLEAR ****************************/
2606 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2608 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
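/* Example (illustrative): for the multicast MAC 01:00:5e:00:00:01 the
 * CRC32c over its six bytes is computed and bits 31:24 of the result are
 * used as the bin index, mapping every MAC into one of 256 bins.
 */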
2611 struct bnx2x_mcast_mac_elem {
2612 struct list_head link;
	u8 mac[ETH_ALEN];
	u8 pad[2]; /* For a natural alignment of the following buffer */
2617 struct bnx2x_pending_mcast_cmd {
2618 struct list_head link;
	int type; /* BNX2X_MCAST_CMD_X */
	union {
		struct list_head macs_head;
		u32 macs_num; /* Needed for DEL command */
		int next_bin; /* Needed for RESTORE flow with aprox match */
	} data;
	bool done; /* Set to true when the command has been handled. In
		    * practice it is used in the 57712 handling only, where
		    * one pending command may be handled in a few operations.
		    * Since on other chips every operation handling is
		    * completed in a single ramrod, there is no need to
		    * utilize this field there.
		    */
2634 static int bnx2x_mcast_wait(struct bnx2x *bp,
2635 struct bnx2x_mcast_obj *o)
2637 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2638 o->raw.wait_comp(bp, &o->raw))
2644 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2645 struct bnx2x_mcast_obj *o,
2646 struct bnx2x_mcast_ramrod_params *p,
2647 enum bnx2x_mcast_cmd cmd)
2650 struct bnx2x_pending_mcast_cmd *new_cmd;
2651 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2652 struct bnx2x_mcast_list_elem *pos;
2653 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2654 p->mcast_list_len : 0);
2656 /* If the command is empty ("handle pending commands only"), break */
2657 if (!p->mcast_list_len)
2660 total_sz = sizeof(*new_cmd) +
2661 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2663 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2664 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2669 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2670 cmd, macs_list_len);
2672 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2674 new_cmd->type = cmd;
2675 new_cmd->done = false;
2678 case BNX2X_MCAST_CMD_ADD:
2679 cur_mac = (struct bnx2x_mcast_mac_elem *)
2680 ((u8 *)new_cmd + sizeof(*new_cmd));
		/* Push the MACs of the current command into the pending
		 * command MACs list: FIFO
		 */
2685 list_for_each_entry(pos, &p->mcast_list, link) {
2686 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2687 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2693 case BNX2X_MCAST_CMD_DEL:
2694 new_cmd->data.macs_num = p->mcast_list_len;
2697 case BNX2X_MCAST_CMD_RESTORE:
2698 new_cmd->data.next_bin = 0;
2703 BNX2X_ERR("Unknown command: %d\n", cmd);
2707 /* Push the new pending command to the tail of the pending list: FIFO */
2708 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
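	/* Layout sketch of the single allocation above (illustrative):
	 *
	 *   +---------------------------+
	 *   | bnx2x_pending_mcast_cmd   |  <- new_cmd
	 *   +---------------------------+
	 *   | bnx2x_mcast_mac_elem [0]  |  <- cur_mac for an ADD command
	 *   | bnx2x_mcast_mac_elem [1]  |
	 *   | ...                       |
	 *   +---------------------------+
	 *
	 * so one GFP_ATOMIC block carries both the command header and its
	 * MAC list entries.
	 */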
2716 * bnx2x_mcast_get_next_bin - get the next set bin (index)
 * @last: index to start looking from (inclusive)
2721 * returns the next found (set) bin or a negative value if none is found.
2723 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2725 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2727 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2728 if (o->registry.aprox_match.vec[i])
2729 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2730 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
			if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.vec,
					       cur_bit))
				return cur_bit;
2744 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2748 * returns the index of the found bin or -1 if none is found
2750 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2752 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2755 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
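/* Example (illustrative): with vec[0] == 0x50, bins 4 and 6 are set.
 * bnx2x_mcast_get_next_bin(o, 0) returns 4, and clear_first_bin() then
 * clears bit 4 so that the next call would find bin 6.
 */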
2760 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2762 struct bnx2x_raw_obj *raw = &o->raw;
2765 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2766 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2767 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2769 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2770 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2771 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2776 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2777 struct bnx2x_mcast_obj *o, int idx,
2778 union bnx2x_mcast_config_data *cfg_data,
2779 enum bnx2x_mcast_cmd cmd)
2781 struct bnx2x_raw_obj *r = &o->raw;
2782 struct eth_multicast_rules_ramrod_data *data =
2783 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2784 u8 func_id = r->func_id;
2785 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2788 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2789 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2791 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
	/* Get a bin and update the bins' vector */
2795 case BNX2X_MCAST_CMD_ADD:
2796 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2797 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2800 case BNX2X_MCAST_CMD_DEL:
		/* If there were no more bins to clear
		 * (bnx2x_mcast_clear_first_bin() returned -1), then we would
		 * clear any (0xff) bin.
		 * See bnx2x_mcast_validate_e2() for an explanation of when
		 * this may happen.
		 */
2807 bin = bnx2x_mcast_clear_first_bin(o);
2810 case BNX2X_MCAST_CMD_RESTORE:
2811 bin = cfg_data->bin;
2815 BNX2X_ERR("Unknown command: %d\n", cmd);
2819 DP(BNX2X_MSG_SP, "%s bin %d\n",
2820 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2821 "Setting" : "Clearing"), bin);
2823 data->rules[idx].bin_id = (u8)bin;
2824 data->rules[idx].func_id = func_id;
2825 data->rules[idx].engine_id = o->engine_id;
2829 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2831 * @bp: device handle
 * @start_bin: index in the registry to start from (inclusive)
2834 * @rdata_idx: index in the ramrod data to start from
2836 * returns last handled bin index or -1 if all bins have been handled
2838 static inline int bnx2x_mcast_handle_restore_cmd_e2(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
2842 int cur_bin, cnt = *rdata_idx;
2843 union bnx2x_mcast_config_data cfg_data = {NULL};
2845 /* go through the registry and configure the bins from it */
2846 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2847 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2849 cfg_data.bin = (u8)cur_bin;
2850 o->set_one_rule(bp, o, cnt, &cfg_data,
2851 BNX2X_MCAST_CMD_RESTORE);
2855 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
		/* Break if we reached the maximum number of rules */
2860 if (cnt >= o->max_cmd_len)
2869 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2870 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2873 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2874 int cnt = *line_idx;
2875 union bnx2x_mcast_config_data cfg_data = {NULL};
2877 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2880 cfg_data.mac = &pmac_pos->mac[0];
2881 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2885 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2888 list_del(&pmac_pos->link);
		/* Break if we reached the maximum number of rules */
2893 if (cnt >= o->max_cmd_len)
2899 /* if no more MACs to configure - we are done */
2900 if (list_empty(&cmd_pos->data.macs_head))
2901 cmd_pos->done = true;
2904 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2905 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2908 int cnt = *line_idx;
2910 while (cmd_pos->data.macs_num) {
2911 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2915 cmd_pos->data.macs_num--;
		DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
		   cmd_pos->data.macs_num, cnt);
		/* Break if we reached the maximum number of rules */
2923 if (cnt >= o->max_cmd_len)
	/* If we cleared all MACs - we are done */
2930 if (!cmd_pos->data.macs_num)
2931 cmd_pos->done = true;
2934 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2935 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2938 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2941 if (cmd_pos->data.next_bin < 0)
		/* If o->hdl_restore returned -1 we are done */
2943 cmd_pos->done = true;
2945 /* Start from the next bin next time */
2946 cmd_pos->data.next_bin++;
2949 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2950 struct bnx2x_mcast_ramrod_params *p)
2952 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2954 struct bnx2x_mcast_obj *o = p->mcast_obj;
2956 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2958 switch (cmd_pos->type) {
2959 case BNX2X_MCAST_CMD_ADD:
2960 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2963 case BNX2X_MCAST_CMD_DEL:
2964 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2967 case BNX2X_MCAST_CMD_RESTORE:
2968 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2973 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2977 /* If the command has been completed - remove it from the list
2978 * and free the memory
2980 if (cmd_pos->done) {
2981 list_del(&cmd_pos->link);
2985 /* Break if we reached the maximum number of rules */
2986 if (cnt >= o->max_cmd_len)
2993 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2994 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2997 struct bnx2x_mcast_list_elem *mlist_pos;
2998 union bnx2x_mcast_config_data cfg_data = {NULL};
2999 int cnt = *line_idx;
3001 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3002 cfg_data.mac = mlist_pos->mac;
3003 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
3007 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3014 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
3015 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3018 int cnt = *line_idx, i;
3020 for (i = 0; i < p->mcast_list_len; i++) {
3021 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
3025 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
3026 p->mcast_list_len - i - 1);
 * bnx2x_mcast_handle_current_cmd - handle the current command
3035 * @bp: device handle
3038 * @start_cnt: first line in the ramrod data that may be used
 * This function is called iff there is enough room for the current command in
 * the ramrod data.
3042 * Returns number of lines filled in the ramrod data in total.
3044 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
3045 struct bnx2x_mcast_ramrod_params *p,
3046 enum bnx2x_mcast_cmd cmd,
3049 struct bnx2x_mcast_obj *o = p->mcast_obj;
3050 int cnt = start_cnt;
3052 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3055 case BNX2X_MCAST_CMD_ADD:
3056 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
3059 case BNX2X_MCAST_CMD_DEL:
3060 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
3063 case BNX2X_MCAST_CMD_RESTORE:
3064 o->hdl_restore(bp, o, 0, &cnt);
3068 BNX2X_ERR("Unknown command: %d\n", cmd);
3072 /* The current command has been handled */
3073 p->mcast_list_len = 0;
3078 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
3079 struct bnx2x_mcast_ramrod_params *p,
3080 enum bnx2x_mcast_cmd cmd)
3082 struct bnx2x_mcast_obj *o = p->mcast_obj;
3083 int reg_sz = o->get_registry_size(o);
3086 /* DEL command deletes all currently configured MACs */
3087 case BNX2X_MCAST_CMD_DEL:
3088 o->set_registry_size(o, 0);
3091 /* RESTORE command will restore the entire multicast configuration */
3092 case BNX2X_MCAST_CMD_RESTORE:
		/* Here we set the approximate amount of work to do, which in
		 * fact may be less, as some MACs in postponed ADD
3095 * command(s) scheduled before this command may fall into
3096 * the same bin and the actual number of bins set in the
3097 * registry would be less than we estimated here. See
3098 * bnx2x_mcast_set_one_rule_e2() for further details.
3100 p->mcast_list_len = reg_sz;
3103 case BNX2X_MCAST_CMD_ADD:
3104 case BNX2X_MCAST_CMD_CONT:
3105 /* Here we assume that all new MACs will fall into new bins.
3106 * However we will correct the real registry size after we
3107 * handle all pending commands.
3109 o->set_registry_size(o, reg_sz + p->mcast_list_len);
3113 BNX2X_ERR("Unknown command: %d\n", cmd);
3117 /* Increase the total number of MACs pending to be configured */
3118 o->total_pending_num += p->mcast_list_len;
3123 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
3124 struct bnx2x_mcast_ramrod_params *p,
3127 struct bnx2x_mcast_obj *o = p->mcast_obj;
3129 o->set_registry_size(o, old_num_bins);
3130 o->total_pending_num -= p->mcast_list_len;
 * bnx2x_mcast_set_rdata_hdr_e2 - sets the header values
3136 * @bp: device handle
3138 * @len: number of rules to handle
3140 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
3141 struct bnx2x_mcast_ramrod_params *p,
3144 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3145 struct eth_multicast_rules_ramrod_data *data =
3146 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
3148 data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3149 (BNX2X_FILTER_MCAST_PENDING <<
3150 BNX2X_SWCID_SHIFT));
3151 data->header.rule_cnt = len;
3155 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
3157 * @bp: device handle
 * Recalculates the actual number of set bins in the registry using Brian
 * Kernighan's algorithm: its execution time is proportional to the number
 * of set bins.
 *
 * returns 0 to comply with bnx2x_mcast_refresh_registry_e1().
3165 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
3166 struct bnx2x_mcast_obj *o)
3171 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
3172 elem = o->registry.aprox_match.vec[i];
3177 o->set_registry_size(o, cnt);
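/* A sketch of the Kernighan counting idiom named above (illustrative):
 * each "elem &= elem - 1" step clears exactly one set bit, so e.g.
 * elem == 0xb0 (bits 4, 5 and 7 set) is counted in three iterations.
 */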
3182 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
3183 struct bnx2x_mcast_ramrod_params *p,
3184 enum bnx2x_mcast_cmd cmd)
3186 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
3187 struct bnx2x_mcast_obj *o = p->mcast_obj;
3188 struct eth_multicast_rules_ramrod_data *data =
3189 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
3192 /* Reset the ramrod data buffer */
3193 memset(data, 0, sizeof(*data));
3195 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
3197 /* If there are no more pending commands - clear SCHEDULED state */
3198 if (list_empty(&o->pending_cmds_head))
	/* The below may be true iff there was enough room in the ramrod
	 * data for all pending commands and for the current
	 * command. Otherwise the current command would have been added
	 * to the pending commands and p->mcast_list_len would have been
	 * zeroed.
	 */
3207 if (p->mcast_list_len > 0)
3208 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
	/* We've pulled out some MACs - update the total number of
	 * outstanding MACs.
	 */
3213 o->total_pending_num -= cnt;
3216 WARN_ON(o->total_pending_num < 0);
3217 WARN_ON(cnt > o->max_cmd_len);
3219 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
	/* Update the registry size if there are no more pending operations.
3223 * We don't want to change the value of the registry size if there are
3224 * pending operations because we want it to always be equal to the
3225 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
3226 * set bins after the last requested operation in order to properly
3227 * evaluate the size of the next DEL/RESTORE operation.
3229 * Note that we update the registry itself during command(s) handling
3230 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3231 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3232 * with a limited amount of update commands (per MAC/bin) and we don't
3233 * know in this scope what the actual state of bins configuration is
3234 * going to be after this ramrod.
3236 if (!o->total_pending_num)
3237 bnx2x_mcast_refresh_registry_e2(bp, o);
3239 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3240 * RAMROD_PENDING status immediately.
3242 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3243 raw->clear_pending(raw);
		/* No explicit memory barrier is needed here: ordering between
		 * writing the SPQ element and updating the SPQ producer
		 * (which involves a memory read) is guaranteed by the full
		 * memory barrier inside bnx2x_sp_post().
		 */
3254 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3255 raw->cid, U64_HI(raw->rdata_mapping),
3256 U64_LO(raw->rdata_mapping),
3257 ETH_CONNECTION_TYPE);
3261 /* Ramrod completion is pending */
3266 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3267 struct bnx2x_mcast_ramrod_params *p,
3268 enum bnx2x_mcast_cmd cmd)
	/* Mark that there is work to do */
3271 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3272 p->mcast_list_len = 1;
3277 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3278 struct bnx2x_mcast_ramrod_params *p,
3284 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3286 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
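/* Example (illustrative): for bit == 70, (bit) >> 5 == 2 and
 * (bit) & 0x1f == 6, so BNX2X_57711_SET_MC_FILTER(filter, 70) sets bit 6
 * of filter[2] - the 256 bins are spread over eight 32-bit words.
 */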
3289 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3290 struct bnx2x_mcast_obj *o,
3291 struct bnx2x_mcast_ramrod_params *p,
3294 struct bnx2x_mcast_list_elem *mlist_pos;
3297 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3298 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3299 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3301 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3302 mlist_pos->mac, bit);
3304 /* bookkeeping... */
3305 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3310 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3311 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3316 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3318 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3319 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3320 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
/* On 57711 we write the multicast MACs' approximate match
 * table directly into the TSTORM's internal RAM, so we don't
 * really need any tricks to make it work.
 */
3328 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3329 struct bnx2x_mcast_ramrod_params *p,
3330 enum bnx2x_mcast_cmd cmd)
3333 struct bnx2x_mcast_obj *o = p->mcast_obj;
3334 struct bnx2x_raw_obj *r = &o->raw;
	/* If CLEAR_ONLY has been requested, skip programming the filter
	 * and only clear the registry and the pending bit below.
	 */
3339 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3340 u32 mc_filter[MC_HASH_SIZE] = {0};
3342 /* Set the multicast filter bits before writing it into
3343 * the internal memory.
3346 case BNX2X_MCAST_CMD_ADD:
3347 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3350 case BNX2X_MCAST_CMD_DEL:
3352 "Invalidating multicast MACs configuration\n");
3354 /* clear the registry */
3355 memset(o->registry.aprox_match.vec, 0,
3356 sizeof(o->registry.aprox_match.vec));
3359 case BNX2X_MCAST_CMD_RESTORE:
3360 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3364 BNX2X_ERR("Unknown command: %d\n", cmd);
3368 /* Set the mcast filter in the internal memory */
3369 for (i = 0; i < MC_HASH_SIZE; i++)
3370 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3372 /* clear the registry */
3373 memset(o->registry.aprox_match.vec, 0,
3374 sizeof(o->registry.aprox_match.vec));
3377 r->clear_pending(r);
3382 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3383 struct bnx2x_mcast_ramrod_params *p,
3384 enum bnx2x_mcast_cmd cmd)
3386 struct bnx2x_mcast_obj *o = p->mcast_obj;
3387 int reg_sz = o->get_registry_size(o);
3390 /* DEL command deletes all currently configured MACs */
3391 case BNX2X_MCAST_CMD_DEL:
3392 o->set_registry_size(o, 0);
3395 /* RESTORE command will restore the entire multicast configuration */
3396 case BNX2X_MCAST_CMD_RESTORE:
3397 p->mcast_list_len = reg_sz;
3398 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3399 cmd, p->mcast_list_len);
3402 case BNX2X_MCAST_CMD_ADD:
3403 case BNX2X_MCAST_CMD_CONT:
		/* Multicast MACs on 57710 are configured as unicast MACs and
		 * there is only a limited number of CAM entries for that
		 * matter.
		 */
			BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
				  o->max_cmd_len);
			return -EINVAL;
		/* Every configured MAC should be cleared if DEL command is
		 * called. Only the last ADD command is relevant, since each
		 * ADD command overrides the previous configuration.
		 */
3417 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3418 if (p->mcast_list_len > 0)
3419 o->set_registry_size(o, p->mcast_list_len);
3424 BNX2X_ERR("Unknown command: %d\n", cmd);
3428 /* We want to ensure that commands are executed one by one for 57710.
 * Therefore each non-empty command will consume o->max_cmd_len.
3431 if (p->mcast_list_len)
3432 o->total_pending_num += o->max_cmd_len;
3437 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3438 struct bnx2x_mcast_ramrod_params *p,
3441 struct bnx2x_mcast_obj *o = p->mcast_obj;
3443 o->set_registry_size(o, old_num_macs);
	/* If the current command hasn't been handled yet and we are here,
	 * it means that it's meant to be dropped and we have to update
	 * the number of outstanding MACs accordingly.
	 */
3449 if (p->mcast_list_len)
3450 o->total_pending_num -= o->max_cmd_len;
3453 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3454 struct bnx2x_mcast_obj *o, int idx,
3455 union bnx2x_mcast_config_data *cfg_data,
3456 enum bnx2x_mcast_cmd cmd)
3458 struct bnx2x_raw_obj *r = &o->raw;
3459 struct mac_configuration_cmd *data =
3460 (struct mac_configuration_cmd *)(r->rdata);
3463 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3464 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3465 &data->config_table[idx].middle_mac_addr,
3466 &data->config_table[idx].lsb_mac_addr,
3469 data->config_table[idx].vlan_id = 0;
3470 data->config_table[idx].pf_id = r->func_id;
3471 data->config_table[idx].clients_bit_vector =
3472 cpu_to_le32(1 << r->cl_id);
3474 SET_FLAG(data->config_table[idx].flags,
3475 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3476 T_ETH_MAC_COMMAND_SET);
3481 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3483 * @bp: device handle
3485 * @len: number of rules to handle
3487 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3488 struct bnx2x_mcast_ramrod_params *p,
3491 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3492 struct mac_configuration_cmd *data =
3493 (struct mac_configuration_cmd *)(r->rdata);
3495 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3496 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3497 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3499 data->hdr.offset = offset;
3500 data->hdr.client_id = cpu_to_le16(0xff);
3501 data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3502 (BNX2X_FILTER_MCAST_PENDING <<
3503 BNX2X_SWCID_SHIFT));
3504 data->hdr.length = len;
3508 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3510 * @bp: device handle
3512 * @start_idx: index in the registry to start from
3513 * @rdata_idx: index in the ramrod data to start from
 * The restore command for 57710 is, like all other commands, always a
 * standalone command - start_idx and rdata_idx will always be 0. This
 * function will always succeed and
 * returns -1 to comply with the 57712 variant.
3520 static inline int bnx2x_mcast_handle_restore_cmd_e1(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
3524 struct bnx2x_mcast_mac_elem *elem;
3526 union bnx2x_mcast_config_data cfg_data = {NULL};
3528 /* go through the registry and configure the MACs from it. */
3529 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3530 cfg_data.mac = &elem->mac[0];
3531 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3535 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3544 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3545 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3547 struct bnx2x_pending_mcast_cmd *cmd_pos;
3548 struct bnx2x_mcast_mac_elem *pmac_pos;
3549 struct bnx2x_mcast_obj *o = p->mcast_obj;
3550 union bnx2x_mcast_config_data cfg_data = {NULL};
3553 /* If nothing to be done - return */
3554 if (list_empty(&o->pending_cmds_head))
3557 /* Handle the first command */
3558 cmd_pos = list_first_entry(&o->pending_cmds_head,
3559 struct bnx2x_pending_mcast_cmd, link);
3561 switch (cmd_pos->type) {
3562 case BNX2X_MCAST_CMD_ADD:
3563 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3564 cfg_data.mac = &pmac_pos->mac[0];
3565 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3569 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3574 case BNX2X_MCAST_CMD_DEL:
3575 cnt = cmd_pos->data.macs_num;
3576 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3579 case BNX2X_MCAST_CMD_RESTORE:
3580 o->hdl_restore(bp, o, 0, &cnt);
3584 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3588 list_del(&cmd_pos->link);
 * bnx2x_get_fw_mac_addr - revert bnx2x_set_fw_mac_addr().
3602 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3603 __le16 *fw_lo, u8 *mac)
3605 mac[1] = ((u8 *)fw_hi)[0];
3606 mac[0] = ((u8 *)fw_hi)[1];
3607 mac[3] = ((u8 *)fw_mid)[0];
3608 mac[2] = ((u8 *)fw_mid)[1];
3609 mac[5] = ((u8 *)fw_lo)[0];
3610 mac[4] = ((u8 *)fw_lo)[1];
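/* Example (illustrative, assumed values): if fw_hi == cpu_to_le16(0x0011),
 * fw_mid == cpu_to_le16(0x2233) and fw_lo == cpu_to_le16(0x4455), the byte
 * swizzling above reassembles mac[] as 00:11:22:33:44:55.
 */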
 * bnx2x_mcast_refresh_registry_e1 - update the exact match registry
3616 * @bp: device handle
 * Check the first entry flag in the ramrod data to see if it was a DELETE
 * or an ADD command and update the registry accordingly: if ADD - allocate
 * memory and add the entries to the registry (list); if DELETE - clear the
 * registry and free the memory.
3624 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3625 struct bnx2x_mcast_obj *o)
3627 struct bnx2x_raw_obj *raw = &o->raw;
3628 struct bnx2x_mcast_mac_elem *elem;
3629 struct mac_configuration_cmd *data =
3630 (struct mac_configuration_cmd *)(raw->rdata);
3632 /* If first entry contains a SET bit - the command was ADD,
3633 * otherwise - DEL_ALL
3635 if (GET_FLAG(data->config_table[0].flags,
3636 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3637 int i, len = data->hdr.length;
3639 /* Break if it was a RESTORE command */
3640 if (!list_empty(&o->registry.exact_match.macs))
3643 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3645 BNX2X_ERR("Failed to allocate registry memory\n");
3649 for (i = 0; i < len; i++, elem++) {
3650 bnx2x_get_fw_mac_addr(
3651 &data->config_table[i].msb_mac_addr,
3652 &data->config_table[i].middle_mac_addr,
3653 &data->config_table[i].lsb_mac_addr,
3655 DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3657 list_add_tail(&elem->link,
3658 &o->registry.exact_match.macs);
3661 elem = list_first_entry(&o->registry.exact_match.macs,
3662 struct bnx2x_mcast_mac_elem, link);
3663 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3665 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3671 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3672 struct bnx2x_mcast_ramrod_params *p,
3673 enum bnx2x_mcast_cmd cmd)
3675 struct bnx2x_mcast_obj *o = p->mcast_obj;
3676 struct bnx2x_raw_obj *raw = &o->raw;
3677 struct mac_configuration_cmd *data =
3678 (struct mac_configuration_cmd *)(raw->rdata);
3681 /* Reset the ramrod data buffer */
3682 memset(data, 0, sizeof(*data));
3684 /* First set all entries as invalid */
	for (i = 0; i < o->max_cmd_len; i++)
3686 SET_FLAG(data->config_table[i].flags,
3687 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3688 T_ETH_MAC_COMMAND_INVALIDATE);
3690 /* Handle pending commands first */
3691 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3693 /* If there are no more pending commands - clear SCHEDULED state */
3694 if (list_empty(&o->pending_cmds_head))
3697 /* The below may be true iff there were no pending commands */
3699 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3701 /* For 57710 every command has o->max_cmd_len length to ensure that
3702 * commands are done one at a time.
3704 o->total_pending_num -= o->max_cmd_len;
3708 WARN_ON(cnt > o->max_cmd_len);
3710 /* Set ramrod header (in particular, a number of entries to update) */
3711 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
	/* Update the registry: its contents must always be up to date in
	 * order to be able to execute a RESTORE opcode. Here we use the
	 * fact that for 57710 we send one command at a time, hence we may
	 * take the registry update out of the command handling and do it
	 * in a simpler way here.
	 */
3719 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3723 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3724 * RAMROD_PENDING status immediately.
3726 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3727 raw->clear_pending(raw);
		/* No explicit memory barrier is needed here: ordering between
		 * writing the SPQ element and updating the SPQ producer
		 * (which involves a memory read) is guaranteed by the full
		 * memory barrier inside bnx2x_sp_post().
		 */
3738 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3739 U64_HI(raw->rdata_mapping),
3740 U64_LO(raw->rdata_mapping),
3741 ETH_CONNECTION_TYPE);
3745 /* Ramrod completion is pending */
3750 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3752 return o->registry.exact_match.num_macs_set;
3755 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3757 return o->registry.aprox_match.num_bins_set;
3760 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3763 o->registry.exact_match.num_macs_set = n;
3766 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3769 o->registry.aprox_match.num_bins_set = n;
3772 int bnx2x_config_mcast(struct bnx2x *bp,
3773 struct bnx2x_mcast_ramrod_params *p,
3774 enum bnx2x_mcast_cmd cmd)
3776 struct bnx2x_mcast_obj *o = p->mcast_obj;
3777 struct bnx2x_raw_obj *r = &o->raw;
3778 int rc = 0, old_reg_size;
	/* This is needed to recover the number of currently configured
	 * mcast macs in case of failure.
	 */
3783 old_reg_size = o->get_registry_size(o);
3785 /* Do some calculations and checks */
3786 rc = o->validate(bp, p, cmd);
3790 /* Return if there is no work to do */
3791 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3794 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3795 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3797 /* Enqueue the current command to the pending list if we can't complete
3798 * it in the current iteration
3800 if (r->check_pending(r) ||
3801 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3802 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
		/* Since the current command is now on a command list, we
		 * don't need to handle it separately.
		 */
3809 p->mcast_list_len = 0;
3812 if (!r->check_pending(r)) {
3814 /* Set 'pending' state */
3817 /* Configure the new classification in the chip */
3818 rc = o->config_mcast(bp, p, cmd);
3822 /* Wait for a ramrod completion if was requested */
3823 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3824 rc = o->wait_comp(bp, o);
3830 r->clear_pending(r);
3833 o->revert(bp, p, old_reg_size);
3838 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3840 smp_mb__before_clear_bit();
3841 clear_bit(o->sched_state, o->raw.pstate);
3842 smp_mb__after_clear_bit();
3845 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3847 smp_mb__before_clear_bit();
3848 set_bit(o->sched_state, o->raw.pstate);
3849 smp_mb__after_clear_bit();
3852 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3854 return !!test_bit(o->sched_state, o->raw.pstate);
3857 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3859 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3862 void bnx2x_init_mcast_obj(struct bnx2x *bp,
3863 struct bnx2x_mcast_obj *mcast_obj,
3864 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3865 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3866 int state, unsigned long *pstate, bnx2x_obj_type type)
3868 memset(mcast_obj, 0, sizeof(*mcast_obj));
3870 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3871 rdata, rdata_mapping, state, pstate, type);
3873 mcast_obj->engine_id = engine_id;
3875 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3877 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3878 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3879 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3880 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3882 if (CHIP_IS_E1(bp)) {
3883 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3884 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3885 mcast_obj->hdl_restore =
3886 bnx2x_mcast_handle_restore_cmd_e1;
3887 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3889 if (CHIP_REV_IS_SLOW(bp))
3890 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3892 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3894 mcast_obj->wait_comp = bnx2x_mcast_wait;
3895 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3896 mcast_obj->validate = bnx2x_mcast_validate_e1;
3897 mcast_obj->revert = bnx2x_mcast_revert_e1;
3898 mcast_obj->get_registry_size =
3899 bnx2x_mcast_get_registry_size_exact;
3900 mcast_obj->set_registry_size =
3901 bnx2x_mcast_set_registry_size_exact;
		/* 57710 is the only chip that uses the exact match for mcast
		 * at the moment.
		 */
3906 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3908 } else if (CHIP_IS_E1H(bp)) {
3909 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3910 mcast_obj->enqueue_cmd = NULL;
3911 mcast_obj->hdl_restore = NULL;
3912 mcast_obj->check_pending = bnx2x_mcast_check_pending;
		/* 57711 doesn't send a ramrod, so it has unlimited credit
		 * for one command.
		 */
3917 mcast_obj->max_cmd_len = -1;
3918 mcast_obj->wait_comp = bnx2x_mcast_wait;
3919 mcast_obj->set_one_rule = NULL;
3920 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3921 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3922 mcast_obj->get_registry_size =
3923 bnx2x_mcast_get_registry_size_aprox;
3924 mcast_obj->set_registry_size =
3925 bnx2x_mcast_set_registry_size_aprox;
3927 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3928 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3929 mcast_obj->hdl_restore =
3930 bnx2x_mcast_handle_restore_cmd_e2;
3931 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3932 /* TODO: There should be a proper HSI define for this number!!!
3934 mcast_obj->max_cmd_len = 16;
3935 mcast_obj->wait_comp = bnx2x_mcast_wait;
3936 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3937 mcast_obj->validate = bnx2x_mcast_validate_e2;
3938 mcast_obj->revert = bnx2x_mcast_revert_e2;
3939 mcast_obj->get_registry_size =
3940 bnx2x_mcast_get_registry_size_aprox;
3941 mcast_obj->set_registry_size =
3942 bnx2x_mcast_set_registry_size_aprox;
3946 /*************************** Credit handling **********************************/
 * __atomic_add_ifless - add if the result is less than a given value.
3951 * @v: pointer of type atomic_t
3952 * @a: the amount to add to v...
3953 * @u: ...if (v + a) is less than u.
3955 * returns true if (v + a) was less than u, and false otherwise.
3958 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3964 if (unlikely(c + a >= u))
3967 old = atomic_cmpxchg((v), c, c + a);
3968 if (likely(old == c))
 * __atomic_dec_ifmoe - decrement if the result is greater than or equal to
 * a given value.
 *
 * @v: pointer of type atomic_t
 * @a: the amount to subtract from v...
 * @u: ...if (v - a) is greater than or equal to u.
 *
 * returns true if (v - a) was greater than or equal to u, and false
 * otherwise.
3986 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3992 if (unlikely(c - a < u))
3995 old = atomic_cmpxchg((v), c, c - a);
3996 if (likely(old == c))
4004 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
4009 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
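	/* Usage note (illustrative): with credit == 5, taking cnt == 3
	 * succeeds and leaves 2, while taking another 3 fails and leaves
	 * the counter untouched - the cmpxchg loop in the helper above
	 * retries only when another CPU races the update.
	 */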
4015 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
	/* Don't allow the refill if credit + cnt > pool_sz */
4022 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
4029 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
4034 cur_credit = atomic_read(&o->credit);
4039 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
4045 static bool bnx2x_credit_pool_get_entry(
4046 struct bnx2x_credit_pool_obj *o,
4053 /* Find "internal cam-offset" then add to base for this object... */
4054 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
4056 /* Skip the current vector if there are no free entries in it */
4057 if (!o->pool_mirror[vec])
4060 /* If we've got here we are going to find a free entry */
4061 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
4062 i < BIT_VEC64_ELEM_SZ; idx++, i++)
4064 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
4066 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
4067 *offset = o->base_pool_offset + idx;
4075 static bool bnx2x_credit_pool_put_entry(
4076 struct bnx2x_credit_pool_obj *o,
4079 if (offset < o->base_pool_offset)
4082 offset -= o->base_pool_offset;
4084 if (offset >= o->pool_sz)
4087 /* Return the entry to the pool */
4088 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
4093 static bool bnx2x_credit_pool_put_entry_always_true(
4094 struct bnx2x_credit_pool_obj *o,
4100 static bool bnx2x_credit_pool_get_entry_always_true(
4101 struct bnx2x_credit_pool_obj *o,
4108 * bnx2x_init_credit_pool - initialize credit pool internals.
4111 * @base: Base entry in the CAM to use.
4112 * @credit: pool size.
 * If base is negative, no CAM entries handling will be performed.
 * If credit is negative, pool operations will always succeed (unlimited
 * pool).
4118 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
4119 int base, int credit)
4121 /* Zero the object first */
4122 memset(p, 0, sizeof(*p));
4124 /* Set the table to all 1s */
4125 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
4127 /* Init a pool as full */
4128 atomic_set(&p->credit, credit);
	/* The total pool size */
4131 p->pool_sz = credit;
4133 p->base_pool_offset = base;
4135 /* Commit the change */
4138 p->check = bnx2x_credit_pool_check;
4140 /* if pool credit is negative - disable the checks */
4142 p->put = bnx2x_credit_pool_put;
4143 p->get = bnx2x_credit_pool_get;
4144 p->put_entry = bnx2x_credit_pool_put_entry;
4145 p->get_entry = bnx2x_credit_pool_get_entry;
4147 p->put = bnx2x_credit_pool_always_true;
4148 p->get = bnx2x_credit_pool_always_true;
4149 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
4150 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
4153 /* If base is negative - disable entries handling */
4155 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
4156 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
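/* Usage sketch (illustrative, hypothetical values):
 *
 *	bnx2x_init_credit_pool(p, 0, 8);   8 credits, CAM entries 0..7
 *	bnx2x_init_credit_pool(p, -1, 8);  8 credits, no CAM bookkeeping
 *	bnx2x_init_credit_pool(p, 0, -1);  unlimited pool, get/put always
 *					   succeed
 */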
4160 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
4161 struct bnx2x_credit_pool_obj *p, u8 func_id,
4164 /* TODO: this will be defined in consts as well... */
4165 #define BNX2X_CAM_SIZE_EMUL 5
4169 if (CHIP_IS_E1(bp)) {
		/* In E1, multicast is saved in the CAM... */
4171 if (!CHIP_REV_IS_SLOW(bp))
4172 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
4174 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
4176 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
4178 } else if (CHIP_IS_E1H(bp)) {
		/* CAM credit is equally divided between all active functions
		 * on the port.
		 */
		if (func_num > 0) {
4183 if (!CHIP_REV_IS_SLOW(bp))
4184 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
4186 cam_sz = BNX2X_CAM_SIZE_EMUL;
4187 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
4189 /* this should never happen! Block MAC operations. */
4190 bnx2x_init_credit_pool(p, 0, 0);
		/* CAM credit is equally divided between all active functions
		 * on the path.
		 */
		if (func_num > 0) {
4199 if (!CHIP_REV_IS_SLOW(bp))
4200 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
4202 cam_sz = BNX2X_CAM_SIZE_EMUL;
			/* No need for CAM entries handling for 57712 and
			 * newer.
			 */
4207 bnx2x_init_credit_pool(p, -1, cam_sz);
4209 /* this should never happen! Block MAC operations. */
4210 bnx2x_init_credit_pool(p, 0, 0);
4215 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4216 struct bnx2x_credit_pool_obj *p,
4220 if (CHIP_IS_E1x(bp)) {
		/* There is no VLAN credit in HW on 57710 and 57711; only
		 * MAC / MAC-VLAN can be set.
		 */
4224 bnx2x_init_credit_pool(p, 0, -1);
		/* CAM credit is equally divided between all active functions
		 * on the path.
		 */
4230 int credit = MAX_VLAN_CREDIT_E2 / func_num;
4231 bnx2x_init_credit_pool(p, func_id * credit, credit);
4233 /* this should never happen! Block VLAN operations. */
4234 bnx2x_init_credit_pool(p, 0, 0);
4238 /****************** RSS Configuration ******************/
4240 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4242 * @bp: driver handle
4243 * @p: pointer to rss configuration
4245 * Prints it when NETIF_MSG_IFUP debug level is configured.
4247 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4248 struct bnx2x_config_rss_params *p)
4252 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4253 DP(BNX2X_MSG_SP, "0x0000: ");
4254 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4255 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4257 /* Print 4 bytes in a line */
4258 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4259 (((i + 1) & 0x3) == 0)) {
4260 DP_CONT(BNX2X_MSG_SP, "\n");
4261 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4265 DP_CONT(BNX2X_MSG_SP, "\n");
4269 * bnx2x_setup_rss - configure RSS
4271 * @bp: device handle
4272 * @p: rss configuration
 * sends an UPDATE ramrod for that matter.
4276 static int bnx2x_setup_rss(struct bnx2x *bp,
4277 struct bnx2x_config_rss_params *p)
4279 struct bnx2x_rss_config_obj *o = p->rss_obj;
4280 struct bnx2x_raw_obj *r = &o->raw;
4281 struct eth_rss_update_ramrod_data *data =
4282 (struct eth_rss_update_ramrod_data *)(r->rdata);
4286 memset(data, 0, sizeof(*data));
4288 DP(BNX2X_MSG_SP, "Configuring RSS\n");
4290 /* Set an echo field */
4291 data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
4292 (r->state << BNX2X_SWCID_SHIFT));
4295 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4296 rss_mode = ETH_RSS_MODE_DISABLED;
4297 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4298 rss_mode = ETH_RSS_MODE_REGULAR;
4300 data->rss_mode = rss_mode;
4302 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4304 /* RSS capabilities */
4305 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4306 data->capabilities |=
4307 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4309 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4310 data->capabilities |=
4311 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4313 if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4314 data->capabilities |=
4315 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4317 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4318 data->capabilities |=
4319 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4321 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4322 data->capabilities |=
4323 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4325 if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4326 data->capabilities |=
4327 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4330 data->rss_result_mask = p->rss_result_mask;
4333 data->rss_engine_id = o->engine_id;
4335 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4337 /* Indirection table */
4338 memcpy(data->indirection_table, p->ind_table,
4339 T_ETH_INDIRECTION_TABLE_SIZE);
4341 /* Remember the last configuration */
4342 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4344 /* Print the indirection table */
4345 if (netif_msg_ifup(bp))
4346 bnx2x_debug_print_ind_table(bp, p);
4349 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4350 memcpy(&data->rss_key[0], &p->rss_key[0],
4351 sizeof(data->rss_key));
4352 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
	/* No explicit memory barrier is needed here: ordering between
	 * writing the SPQ element and updating the SPQ producer (which
	 * involves a memory read) is guaranteed by the full memory
	 * barrier inside bnx2x_sp_post().
	 */
4363 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4364 U64_HI(r->rdata_mapping),
4365 U64_LO(r->rdata_mapping),
4366 ETH_CONNECTION_TYPE);
4374 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4377 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4380 int bnx2x_config_rss(struct bnx2x *bp,
4381 struct bnx2x_config_rss_params *p)
4384 struct bnx2x_rss_config_obj *o = p->rss_obj;
4385 struct bnx2x_raw_obj *r = &o->raw;
4387 /* Do nothing if only driver cleanup was requested */
4388 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4393 rc = o->config_rss(bp, p);
4395 r->clear_pending(r);
4399 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4400 rc = r->wait_comp(bp, r);
4405 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4406 struct bnx2x_rss_config_obj *rss_obj,
4407 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4408 void *rdata, dma_addr_t rdata_mapping,
4409 int state, unsigned long *pstate,
4410 bnx2x_obj_type type)
4412 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4413 rdata_mapping, state, pstate, type);
4415 rss_obj->engine_id = engine_id;
4416 rss_obj->config_rss = bnx2x_setup_rss;
4419 int validate_vlan_mac(struct bnx2x *bp,
4420 struct bnx2x_vlan_mac_obj *vlan_mac)
4422 if (!vlan_mac->get_n_elements) {
		BNX2X_ERR("vlan mac object was not initialized\n");
4429 /********************** Queue state object ***********************************/
4432 * bnx2x_queue_state_change - perform Queue state change transition
4434 * @bp: device handle
4435 * @params: parameters to perform the transition
 * returns 0 in case of a successfully completed transition, negative error
 * code in case of failure, positive (EBUSY) value if there is a completion
 * that is still pending (possible only if RAMROD_COMP_WAIT is
 * not set in params->ramrod_flags for asynchronous commands).
4443 int bnx2x_queue_state_change(struct bnx2x *bp,
4444 struct bnx2x_queue_state_params *params)
4446 struct bnx2x_queue_sp_obj *o = params->q_obj;
4447 int rc, pending_bit;
4448 unsigned long *pending = &o->pending;
4450 /* Check that the requested transition is legal */
4451 rc = o->check_transition(bp, o, params);
4453 BNX2X_ERR("check transition returned an error. rc %d\n", rc);
4457 /* Set "pending" bit */
4458 DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
4459 pending_bit = o->set_pending(o, params);
4460 DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
4462 /* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4464 o->complete_cmd(bp, o, pending_bit);
4467 rc = o->send_cmd(bp, params);
4469 o->next_state = BNX2X_Q_STATE_MAX;
4470 clear_bit(pending_bit, pending);
4471 smp_mb__after_clear_bit();
4475 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4476 rc = o->wait_comp(bp, o, pending_bit);
4484 return !!test_bit(pending_bit, pending);
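/* Usage sketch (illustrative only): halting a queue synchronously
 * through this entry point; 'q_obj' is assumed to be an initialized
 * queue object and error handling is elided:
 *
 *	struct bnx2x_queue_state_params qp = {NULL};
 *
 *	qp.q_obj = &q_obj;
 *	qp.cmd = BNX2X_Q_CMD_HALT;
 *	__set_bit(RAMROD_COMP_WAIT, &qp.ramrod_flags);
 *	rc = bnx2x_queue_state_change(bp, &qp);
 */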
4487 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4488 struct bnx2x_queue_state_params *params)
4490 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4492 /* ACTIVATE and DEACTIVATE commands are implemented on top of the UPDATE command */
4495 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4496 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4497 bit = BNX2X_Q_CMD_UPDATE;
4501 set_bit(bit, &obj->pending);
4505 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4506 struct bnx2x_queue_sp_obj *o,
4507 enum bnx2x_queue_cmd cmd)
4509 return bnx2x_state_wait(bp, cmd, &o->pending);
4513 * bnx2x_queue_comp_cmd - complete the state change command.
4515 * @bp: device handle
4519 * Checks that the arrived completion is expected.
4521 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4522 struct bnx2x_queue_sp_obj *o,
4523 enum bnx2x_queue_cmd cmd)
4525 unsigned long cur_pending = o->pending;
4527 if (!test_and_clear_bit(cmd, &cur_pending)) {
4528 BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4529 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
4530 o->state, cur_pending, o->next_state);
4534 if (o->next_tx_only >= o->max_cos)
4535 /* >= because the number of tx-only connections must always be
4536 * smaller than max_cos, since the primary connection supports COS 0
4538 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d\n",
4539 o->next_tx_only, o->max_cos);
4542 "Completing command %d for queue %d, setting state to %d\n",
4543 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4545 if (o->next_tx_only) /* print num tx-only if any exist */
4546 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4547 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4549 o->state = o->next_state;
4550 o->num_tx_only = o->next_tx_only;
4551 o->next_state = BNX2X_Q_STATE_MAX;
4553 /* It's important that o->state and o->next_state are
4554 * updated before o->pending.
4558 clear_bit(cmd, &o->pending);
4559 smp_mb__after_clear_bit();
4564 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4565 struct bnx2x_queue_state_params *cmd_params,
4566 struct client_init_ramrod_data *data)
4568 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4572 /* IPv6 TPA supported for E2 and above only */
4573 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4574 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4577 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4578 struct bnx2x_queue_sp_obj *o,
4579 struct bnx2x_general_setup_params *params,
4580 struct client_init_general_data *gen_data,
4581 unsigned long *flags)
4583 gen_data->client_id = o->cl_id;
4585 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4586 gen_data->statistics_counter_id =
4587 params->stat_id;
4588 gen_data->statistics_en_flg = 1;
4589 gen_data->statistics_zero_flg =
4590 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4592 gen_data->statistics_counter_id =
4593 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4595 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4596 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4597 gen_data->sp_client_id = params->spcl_id;
4598 gen_data->mtu = cpu_to_le16(params->mtu);
4599 gen_data->func_id = o->func_id;
4601 gen_data->cos = params->cos;
4603 gen_data->traffic_type =
4604 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4605 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4607 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4608 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4611 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4612 struct bnx2x_txq_setup_params *params,
4613 struct client_init_tx_data *tx_data,
4614 unsigned long *flags)
4616 tx_data->enforce_security_flg =
4617 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4618 tx_data->default_vlan =
4619 cpu_to_le16(params->default_vlan);
4620 tx_data->default_vlan_flg =
4621 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4622 tx_data->tx_switching_flg =
4623 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4624 tx_data->anti_spoofing_flg =
4625 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4626 tx_data->force_default_pri_flg =
4627 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4629 tx_data->tunnel_lso_inc_ip_id =
4630 test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4631 tx_data->tunnel_non_lso_pcsum_location =
4632 test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
4633 PCSUM_ON_BD;
4635 tx_data->tx_status_block_id = params->fw_sb_id;
4636 tx_data->tx_sb_index_number = params->sb_cq_index;
4637 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4639 tx_data->tx_bd_page_base.lo =
4640 cpu_to_le32(U64_LO(params->dscr_map));
4641 tx_data->tx_bd_page_base.hi =
4642 cpu_to_le32(U64_HI(params->dscr_map));
4644 /* Don't configure any Tx switching mode during queue SETUP */
4648 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4649 struct rxq_pause_params *params,
4650 struct client_init_rx_data *rx_data)
4652 /* flow control data */
4653 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4654 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4655 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4656 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4657 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4658 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4659 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4662 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4663 struct bnx2x_rxq_setup_params *params,
4664 struct client_init_rx_data *rx_data,
4665 unsigned long *flags)
4667 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4668 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4669 rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4670 CLIENT_INIT_RX_DATA_TPA_MODE;
4671 rx_data->vmqueue_mode_en_flg = 0;
4673 rx_data->cache_line_alignment_log_size =
4674 params->cache_line_log;
4675 rx_data->enable_dynamic_hc =
4676 test_bit(BNX2X_Q_FLG_DHC, flags);
4677 rx_data->max_sges_for_packet = params->max_sges_pkt;
4678 rx_data->client_qzone_id = params->cl_qzone_id;
4679 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4681 /* Always start in DROP_ALL mode */
4682 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4683 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4685 /* We don't set drop flags */
4686 rx_data->drop_ip_cs_err_flg = 0;
4687 rx_data->drop_tcp_cs_err_flg = 0;
4688 rx_data->drop_ttl0_flg = 0;
4689 rx_data->drop_udp_cs_err_flg = 0;
4690 rx_data->inner_vlan_removal_enable_flg =
4691 test_bit(BNX2X_Q_FLG_VLAN, flags);
4692 rx_data->outer_vlan_removal_enable_flg =
4693 test_bit(BNX2X_Q_FLG_OV, flags);
4694 rx_data->status_block_id = params->fw_sb_id;
4695 rx_data->rx_sb_index_number = params->sb_cq_index;
4696 rx_data->max_tpa_queues = params->max_tpa_queues;
4697 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4698 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4699 rx_data->bd_page_base.lo =
4700 cpu_to_le32(U64_LO(params->dscr_map));
4701 rx_data->bd_page_base.hi =
4702 cpu_to_le32(U64_HI(params->dscr_map));
4703 rx_data->sge_page_base.lo =
4704 cpu_to_le32(U64_LO(params->sge_map));
4705 rx_data->sge_page_base.hi =
4706 cpu_to_le32(U64_HI(params->sge_map));
4707 rx_data->cqe_page_base.lo =
4708 cpu_to_le32(U64_LO(params->rcq_map));
4709 rx_data->cqe_page_base.hi =
4710 cpu_to_le32(U64_HI(params->rcq_map));
4711 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4713 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4714 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4715 rx_data->is_approx_mcast = 1;
4718 rx_data->rss_engine_id = params->rss_engine_id;
4720 /* silent vlan removal */
4721 rx_data->silent_vlan_removal_flg =
4722 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4723 rx_data->silent_vlan_value =
4724 cpu_to_le16(params->silent_removal_value);
4725 rx_data->silent_vlan_mask =
4726 cpu_to_le16(params->silent_removal_mask);
4729 /* initialize the general, tx and rx parts of a queue object */
4730 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4731 struct bnx2x_queue_state_params *cmd_params,
4732 struct client_init_ramrod_data *data)
4734 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4735 &cmd_params->params.setup.gen_params,
4736 &data->general,
4737 &cmd_params->params.setup.flags);
4739 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4740 &cmd_params->params.setup.txq_params,
4741 &data->tx,
4742 &cmd_params->params.setup.flags);
4744 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4745 &cmd_params->params.setup.rxq_params,
4746 &data->rx,
4747 &cmd_params->params.setup.flags);
4749 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4750 &cmd_params->params.setup.pause_params,
4751 &data->rx);
4754 /* initialize the general and tx parts of a tx-only queue object */
4755 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4756 struct bnx2x_queue_state_params *cmd_params,
4757 struct tx_queue_init_ramrod_data *data)
4759 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4760 &cmd_params->params.tx_only.gen_params,
4761 &data->general,
4762 &cmd_params->params.tx_only.flags);
4764 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4765 &cmd_params->params.tx_only.txq_params,
4766 &data->tx,
4767 &cmd_params->params.tx_only.flags);
4769 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
4770 cmd_params->q_obj->cids[0],
4771 data->tx.tx_bd_page_base.lo,
4772 data->tx.tx_bd_page_base.hi);
4776 * bnx2x_q_init - init HW/FW queue
4778 * @bp: device handle
4781 * HW/FW initial Queue configuration:
4783 * - CDU context validation
4786 static inline int bnx2x_q_init(struct bnx2x *bp,
4787 struct bnx2x_queue_state_params *params)
4789 struct bnx2x_queue_sp_obj *o = params->q_obj;
4790 struct bnx2x_queue_init_params *init = &params->params.init;
4794 /* Tx HC configuration */
4795 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4796 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4797 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4799 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4800 init->tx.sb_cq_index,
4801 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4802 hc_usec);
4805 /* Rx HC configuration */
4806 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4807 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4808 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4810 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4811 init->rx.sb_cq_index,
4812 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4813 hc_usec);
4816 /* Set CDU context validation values */
4817 for (cos = 0; cos < o->max_cos; cos++) {
4818 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4820 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4821 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4824 /* As no ramrod is sent, complete the command immediately */
4825 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
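/* Usage sketch (illustrative only): an INIT request is issued through
 * bnx2x_queue_state_change(); the hc_rate value below is a placeholder
 * (hc_usec above is derived as 1000000 / hc_rate):
 *
 *	struct bnx2x_queue_state_params qp = {NULL};
 *
 *	qp.q_obj = &q_obj;
 *	qp.cmd = BNX2X_Q_CMD_INIT;
 *	qp.params.init.tx.hc_rate = 50000;
 *	__set_bit(BNX2X_Q_FLG_HC, &qp.params.init.tx.flags);
 *	rc = bnx2x_queue_state_change(bp, &qp);
 */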
4833 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4834 struct bnx2x_queue_state_params *params)
4836 struct bnx2x_queue_sp_obj *o = params->q_obj;
4837 struct client_init_ramrod_data *rdata =
4838 (struct client_init_ramrod_data *)o->rdata;
4839 dma_addr_t data_mapping = o->rdata_mapping;
4840 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4842 /* Clear the ramrod data */
4843 memset(rdata, 0, sizeof(*rdata));
4845 /* Fill the ramrod data */
4846 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4848 /* No need for an explicit memory barrier here: the ordering of
4849 * writing the SPQ element and updating the SPQ producer has to
4850 * be ensured in any case, and bnx2x_sp_post() contains a full
4851 * memory barrier that enforces exactly that ordering.
4852 */
4855 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4856 U64_HI(data_mapping),
4857 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4860 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4861 struct bnx2x_queue_state_params *params)
4863 struct bnx2x_queue_sp_obj *o = params->q_obj;
4864 struct client_init_ramrod_data *rdata =
4865 (struct client_init_ramrod_data *)o->rdata;
4866 dma_addr_t data_mapping = o->rdata_mapping;
4867 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4869 /* Clear the ramrod data */
4870 memset(rdata, 0, sizeof(*rdata));
4872 /* Fill the ramrod data */
4873 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4874 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4876 /* No need for an explicit memory barrier here: the ordering of
4877 * writing the SPQ element and updating the SPQ producer has to
4878 * be ensured in any case, and bnx2x_sp_post() contains a full
4879 * memory barrier that enforces exactly that ordering.
4880 */
4883 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4884 U64_HI(data_mapping),
4885 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4888 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4889 struct bnx2x_queue_state_params *params)
4891 struct bnx2x_queue_sp_obj *o = params->q_obj;
4892 struct tx_queue_init_ramrod_data *rdata =
4893 (struct tx_queue_init_ramrod_data *)o->rdata;
4894 dma_addr_t data_mapping = o->rdata_mapping;
4895 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4896 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4897 &params->params.tx_only;
4898 u8 cid_index = tx_only_params->cid_index;
4900 if (cid_index >= o->max_cos) {
4901 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4902 o->cl_id, cid_index);
4906 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4907 tx_only_params->gen_params.cos,
4908 tx_only_params->gen_params.spcl_id);
4910 /* Clear the ramrod data */
4911 memset(rdata, 0, sizeof(*rdata));
4913 /* Fill the ramrod data */
4914 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4916 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4917 o->cids[cid_index], rdata->general.client_id,
4918 rdata->general.sp_client_id, rdata->general.cos);
4920 /* No need for an explicit memory barrier here: the ordering of
4921 * writing the SPQ element and updating the SPQ producer has to
4922 * be ensured in any case, and bnx2x_sp_post() contains a full
4923 * memory barrier that enforces exactly that ordering.
4924 */
4927 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4928 U64_HI(data_mapping),
4929 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4932 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4933 struct bnx2x_queue_sp_obj *obj,
4934 struct bnx2x_queue_update_params *params,
4935 struct client_update_ramrod_data *data)
4937 /* Client ID of the client to update */
4938 data->client_id = obj->cl_id;
4940 /* Function ID of the client to update */
4941 data->func_id = obj->func_id;
4943 /* Default VLAN value */
4944 data->default_vlan = cpu_to_le16(params->def_vlan);
4946 /* Inner VLAN stripping */
4947 data->inner_vlan_removal_enable_flg =
4948 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4949 data->inner_vlan_removal_change_flg =
4950 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4951 &params->update_flags);
4953 /* Outer VLAN stripping */
4954 data->outer_vlan_removal_enable_flg =
4955 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4956 data->outer_vlan_removal_change_flg =
4957 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4958 &params->update_flags);
4960 /* Drop packets whose source MAC doesn't belong to this Queue */
4963 data->anti_spoofing_enable_flg =
4964 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4965 data->anti_spoofing_change_flg =
4966 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4968 /* Activate/Deactivate */
4969 data->activate_flg =
4970 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4971 data->activate_change_flg =
4972 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4974 /* Enable default VLAN */
4975 data->default_vlan_enable_flg =
4976 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4977 data->default_vlan_change_flg =
4978 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4979 &params->update_flags);
4981 /* silent vlan removal */
4982 data->silent_vlan_change_flg =
4983 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4984 &params->update_flags);
4985 data->silent_vlan_removal_flg =
4986 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4987 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4988 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4991 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4992 struct bnx2x_queue_state_params *params)
4994 struct bnx2x_queue_sp_obj *o = params->q_obj;
4995 struct client_update_ramrod_data *rdata =
4996 (struct client_update_ramrod_data *)o->rdata;
4997 dma_addr_t data_mapping = o->rdata_mapping;
4998 struct bnx2x_queue_update_params *update_params =
4999 &params->params.update;
5000 u8 cid_index = update_params->cid_index;
5002 if (cid_index >= o->max_cos) {
5003 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
5004 o->cl_id, cid_index);
5008 /* Clear the ramrod data */
5009 memset(rdata, 0, sizeof(*rdata));
5011 /* Fill the ramrod data */
5012 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
5014 /* No need for an explicit memory barrier here: the ordering of
5015 * writing the SPQ element and updating the SPQ producer has to
5016 * be ensured in any case, and bnx2x_sp_post() contains a full
5017 * memory barrier that enforces exactly that ordering.
5018 */
5021 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
5022 o->cids[cid_index], U64_HI(data_mapping),
5023 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
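/* Usage sketch (illustrative only): enabling a default VLAN via the
 * UPDATE command. Note the pattern above: every attribute has a value
 * flag plus a matching *_CHNG flag that marks it as changed. The VLAN
 * id is a placeholder:
 *
 *	struct bnx2x_queue_state_params qp = {NULL};
 *	struct bnx2x_queue_update_params *up = &qp.params.update;
 *
 *	qp.q_obj = &q_obj;
 *	qp.cmd = BNX2X_Q_CMD_UPDATE;
 *	up->def_vlan = 100;
 *	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &up->update_flags);
 *	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, &up->update_flags);
 *	rc = bnx2x_queue_state_change(bp, &qp);
 */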
5027 * bnx2x_q_send_deactivate - send DEACTIVATE command
5029 * @bp: device handle
5032 * implemented using the UPDATE command.
5034 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
5035 struct bnx2x_queue_state_params *params)
5037 struct bnx2x_queue_update_params *update = &params->params.update;
5039 memset(update, 0, sizeof(*update));
5041 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5043 return bnx2x_q_send_update(bp, params);
5047 * bnx2x_q_send_activate - send ACTIVATE command
5049 * @bp: device handle
5052 * implemented using the UPDATE command.
5054 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
5055 struct bnx2x_queue_state_params *params)
5057 struct bnx2x_queue_update_params *update = &params->params.update;
5059 memset(update, 0, sizeof(*update));
5061 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
5062 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5064 return bnx2x_q_send_update(bp, params);
5067 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
5068 struct bnx2x_queue_state_params *params)
5070 /* TODO: Not implemented yet. */
5074 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
5075 struct bnx2x_queue_state_params *params)
5077 struct bnx2x_queue_sp_obj *o = params->q_obj;
5079 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
5080 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
5081 ETH_CONNECTION_TYPE);
5084 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
5085 struct bnx2x_queue_state_params *params)
5087 struct bnx2x_queue_sp_obj *o = params->q_obj;
5088 u8 cid_idx = params->params.cfc_del.cid_index;
5090 if (cid_idx >= o->max_cos) {
5091 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
5096 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
5097 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
5100 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
5101 struct bnx2x_queue_state_params *params)
5103 struct bnx2x_queue_sp_obj *o = params->q_obj;
5104 u8 cid_index = params->params.terminate.cid_index;
5106 if (cid_index >= o->max_cos) {
5107 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
5108 o->cl_id, cid_index);
5112 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
5113 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
5116 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
5117 struct bnx2x_queue_state_params *params)
5119 struct bnx2x_queue_sp_obj *o = params->q_obj;
5121 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
5122 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
5123 ETH_CONNECTION_TYPE);
5126 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
5127 struct bnx2x_queue_state_params *params)
5129 switch (params->cmd) {
5130 case BNX2X_Q_CMD_INIT:
5131 return bnx2x_q_init(bp, params);
5132 case BNX2X_Q_CMD_SETUP_TX_ONLY:
5133 return bnx2x_q_send_setup_tx_only(bp, params);
5134 case BNX2X_Q_CMD_DEACTIVATE:
5135 return bnx2x_q_send_deactivate(bp, params);
5136 case BNX2X_Q_CMD_ACTIVATE:
5137 return bnx2x_q_send_activate(bp, params);
5138 case BNX2X_Q_CMD_UPDATE:
5139 return bnx2x_q_send_update(bp, params);
5140 case BNX2X_Q_CMD_UPDATE_TPA:
5141 return bnx2x_q_send_update_tpa(bp, params);
5142 case BNX2X_Q_CMD_HALT:
5143 return bnx2x_q_send_halt(bp, params);
5144 case BNX2X_Q_CMD_CFC_DEL:
5145 return bnx2x_q_send_cfc_del(bp, params);
5146 case BNX2X_Q_CMD_TERMINATE:
5147 return bnx2x_q_send_terminate(bp, params);
5148 case BNX2X_Q_CMD_EMPTY:
5149 return bnx2x_q_send_empty(bp, params);
5151 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5156 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
5157 struct bnx2x_queue_state_params *params)
5159 switch (params->cmd) {
5160 case BNX2X_Q_CMD_SETUP:
5161 return bnx2x_q_send_setup_e1x(bp, params);
5162 case BNX2X_Q_CMD_INIT:
5163 case BNX2X_Q_CMD_SETUP_TX_ONLY:
5164 case BNX2X_Q_CMD_DEACTIVATE:
5165 case BNX2X_Q_CMD_ACTIVATE:
5166 case BNX2X_Q_CMD_UPDATE:
5167 case BNX2X_Q_CMD_UPDATE_TPA:
5168 case BNX2X_Q_CMD_HALT:
5169 case BNX2X_Q_CMD_CFC_DEL:
5170 case BNX2X_Q_CMD_TERMINATE:
5171 case BNX2X_Q_CMD_EMPTY:
5172 return bnx2x_queue_send_cmd_cmn(bp, params);
5174 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5179 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
5180 struct bnx2x_queue_state_params *params)
5182 switch (params->cmd) {
5183 case BNX2X_Q_CMD_SETUP:
5184 return bnx2x_q_send_setup_e2(bp, params);
5185 case BNX2X_Q_CMD_INIT:
5186 case BNX2X_Q_CMD_SETUP_TX_ONLY:
5187 case BNX2X_Q_CMD_DEACTIVATE:
5188 case BNX2X_Q_CMD_ACTIVATE:
5189 case BNX2X_Q_CMD_UPDATE:
5190 case BNX2X_Q_CMD_UPDATE_TPA:
5191 case BNX2X_Q_CMD_HALT:
5192 case BNX2X_Q_CMD_CFC_DEL:
5193 case BNX2X_Q_CMD_TERMINATE:
5194 case BNX2X_Q_CMD_EMPTY:
5195 return bnx2x_queue_send_cmd_cmn(bp, params);
5197 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5203 * bnx2x_queue_chk_transition - check state machine of a regular Queue
5205 * @bp: device handle
5210 * It both checks if the requested command is legal in a current
5211 * state and, if it's legal, sets a `next_state' in the object
5212 * that will be used in the completion flow to set the `state'
5215 * returns 0 if a requested command is a legal transition,
5216 * -EINVAL otherwise.
5218 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5219 struct bnx2x_queue_sp_obj *o,
5220 struct bnx2x_queue_state_params *params)
5222 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5223 enum bnx2x_queue_cmd cmd = params->cmd;
5224 struct bnx2x_queue_update_params *update_params =
5225 &params->params.update;
5226 u8 next_tx_only = o->num_tx_only;
5228 /* Forget all pending for completion commands if a driver only state
5229 * transition has been requested.
5231 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5233 o->next_state = BNX2X_Q_STATE_MAX;
5236 /* Don't allow a next state transition if we are in the middle of the previous one */
5240 BNX2X_ERR("Blocking transition since pending was %lx\n",
5246 case BNX2X_Q_STATE_RESET:
5247 if (cmd == BNX2X_Q_CMD_INIT)
5248 next_state = BNX2X_Q_STATE_INITIALIZED;
5251 case BNX2X_Q_STATE_INITIALIZED:
5252 if (cmd == BNX2X_Q_CMD_SETUP) {
5253 if (test_bit(BNX2X_Q_FLG_ACTIVE,
5254 &params->params.setup.flags))
5255 next_state = BNX2X_Q_STATE_ACTIVE;
5257 next_state = BNX2X_Q_STATE_INACTIVE;
5261 case BNX2X_Q_STATE_ACTIVE:
5262 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5263 next_state = BNX2X_Q_STATE_INACTIVE;
5265 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5266 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5267 next_state = BNX2X_Q_STATE_ACTIVE;
5269 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5270 next_state = BNX2X_Q_STATE_MULTI_COS;
5274 else if (cmd == BNX2X_Q_CMD_HALT)
5275 next_state = BNX2X_Q_STATE_STOPPED;
5277 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5278 /* If "active" state change is requested, update the
5279 * state accordingly.
5281 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5282 &update_params->update_flags) &&
5283 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5284 &update_params->update_flags))
5285 next_state = BNX2X_Q_STATE_INACTIVE;
5287 next_state = BNX2X_Q_STATE_ACTIVE;
5291 case BNX2X_Q_STATE_MULTI_COS:
5292 if (cmd == BNX2X_Q_CMD_TERMINATE)
5293 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5295 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5296 next_state = BNX2X_Q_STATE_MULTI_COS;
5297 next_tx_only = o->num_tx_only + 1;
5300 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5301 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5302 next_state = BNX2X_Q_STATE_MULTI_COS;
5304 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5305 /* If "active" state change is requested, update the
5306 * state accordingly.
5308 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5309 &update_params->update_flags) &&
5310 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5311 &update_params->update_flags))
5312 next_state = BNX2X_Q_STATE_INACTIVE;
5314 next_state = BNX2X_Q_STATE_MULTI_COS;
5318 case BNX2X_Q_STATE_MCOS_TERMINATED:
5319 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5320 next_tx_only = o->num_tx_only - 1;
5321 if (next_tx_only == 0)
5322 next_state = BNX2X_Q_STATE_ACTIVE;
5324 next_state = BNX2X_Q_STATE_MULTI_COS;
5328 case BNX2X_Q_STATE_INACTIVE:
5329 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5330 next_state = BNX2X_Q_STATE_ACTIVE;
5332 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5333 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5334 next_state = BNX2X_Q_STATE_INACTIVE;
5336 else if (cmd == BNX2X_Q_CMD_HALT)
5337 next_state = BNX2X_Q_STATE_STOPPED;
5339 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5340 /* If "active" state change is requested, update the
5341 * state accordingly.
5343 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5344 &update_params->update_flags) &&
5345 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5346 &update_params->update_flags)) {
5347 if (o->num_tx_only == 0)
5348 next_state = BNX2X_Q_STATE_ACTIVE;
5349 else /* tx only queues exist for this queue */
5350 next_state = BNX2X_Q_STATE_MULTI_COS;
5352 next_state = BNX2X_Q_STATE_INACTIVE;
5356 case BNX2X_Q_STATE_STOPPED:
5357 if (cmd == BNX2X_Q_CMD_TERMINATE)
5358 next_state = BNX2X_Q_STATE_TERMINATED;
5361 case BNX2X_Q_STATE_TERMINATED:
5362 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5363 next_state = BNX2X_Q_STATE_RESET;
5367 BNX2X_ERR("Illegal state: %d\n", state);
5370 /* Transition is assured */
5371 if (next_state != BNX2X_Q_STATE_MAX) {
5372 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5373 state, cmd, next_state);
5374 o->next_state = next_state;
5375 o->next_tx_only = next_tx_only;
5379 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5384 void bnx2x_init_queue_obj(struct bnx2x *bp,
5385 struct bnx2x_queue_sp_obj *obj,
5386 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5388 dma_addr_t rdata_mapping, unsigned long type)
5390 memset(obj, 0, sizeof(*obj));
5392 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5393 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5395 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5396 obj->max_cos = cid_cnt;
5398 obj->func_id = func_id;
5400 obj->rdata_mapping = rdata_mapping;
5402 obj->next_state = BNX2X_Q_STATE_MAX;
5404 if (CHIP_IS_E1x(bp))
5405 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5407 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5409 obj->check_transition = bnx2x_queue_chk_transition;
5411 obj->complete_cmd = bnx2x_queue_comp_cmd;
5412 obj->wait_comp = bnx2x_queue_wait_comp;
5413 obj->set_pending = bnx2x_queue_set_pending;
5416 /* return a queue object's logical state */
5417 int bnx2x_get_q_logical_state(struct bnx2x *bp,
5418 struct bnx2x_queue_sp_obj *obj)
5420 switch (obj->state) {
5421 case BNX2X_Q_STATE_ACTIVE:
5422 case BNX2X_Q_STATE_MULTI_COS:
5423 return BNX2X_Q_LOGICAL_STATE_ACTIVE;
5424 case BNX2X_Q_STATE_RESET:
5425 case BNX2X_Q_STATE_INITIALIZED:
5426 case BNX2X_Q_STATE_MCOS_TERMINATED:
5427 case BNX2X_Q_STATE_INACTIVE:
5428 case BNX2X_Q_STATE_STOPPED:
5429 case BNX2X_Q_STATE_TERMINATED:
5430 case BNX2X_Q_STATE_FLRED:
5431 return BNX2X_Q_LOGICAL_STATE_STOPPED;
5437 /********************** Function state object *********************************/
5438 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5439 struct bnx2x_func_sp_obj *o)
5441 /* in the middle of a transaction - return INVALID state */
5443 return BNX2X_F_STATE_MAX;
5445 /* ensure the ordering of the reads of o->pending and o->state:
5446 * o->pending must be read first
5453 static int bnx2x_func_wait_comp(struct bnx2x *bp,
5454 struct bnx2x_func_sp_obj *o,
5455 enum bnx2x_func_cmd cmd)
5457 return bnx2x_state_wait(bp, cmd, &o->pending);
5461 * bnx2x_func_state_change_comp - complete the state machine transition
5463 * @bp: device handle
5467 * Called on state change transition. Completes the state
5468 * machine transition only - no HW interaction.
5470 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5471 struct bnx2x_func_sp_obj *o,
5472 enum bnx2x_func_cmd cmd)
5474 unsigned long cur_pending = o->pending;
5476 if (!test_and_clear_bit(cmd, &cur_pending)) {
5477 BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5478 cmd, BP_FUNC(bp), o->state,
5479 cur_pending, o->next_state);
5484 "Completing command %d for func %d, setting state to %d\n",
5485 cmd, BP_FUNC(bp), o->next_state);
5487 o->state = o->next_state;
5488 o->next_state = BNX2X_F_STATE_MAX;
5490 /* It's important that o->state and o->next_state are
5491 * updated before o->pending.
5495 clear_bit(cmd, &o->pending);
5496 smp_mb__after_clear_bit();
5502 * bnx2x_func_comp_cmd - complete the state change command
5504 * @bp: device handle
5508 * Checks that the arrived completion is expected.
5510 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5511 struct bnx2x_func_sp_obj *o,
5512 enum bnx2x_func_cmd cmd)
5514 /* Complete the state machine part first, check if it's a legal completion */
5517 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5522 * bnx2x_func_chk_transition - perform function state machine transition
5524 * @bp: device handle
5528 * It both checks if the requested command is legal in a current
5529 * state and, if it's legal, sets a `next_state' in the object
5530 * that will be used in the completion flow to set the `state'
5533 * returns 0 if a requested command is a legal transition,
5534 * -EINVAL otherwise.
5536 static int bnx2x_func_chk_transition(struct bnx2x *bp,
5537 struct bnx2x_func_sp_obj *o,
5538 struct bnx2x_func_state_params *params)
5540 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5541 enum bnx2x_func_cmd cmd = params->cmd;
5543 /* Forget all pending for completion commands if a driver only state
5544 * transition has been requested.
5546 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5548 o->next_state = BNX2X_F_STATE_MAX;
5551 /* Don't allow a next state transition if we are in the middle of the previous one */
5558 case BNX2X_F_STATE_RESET:
5559 if (cmd == BNX2X_F_CMD_HW_INIT)
5560 next_state = BNX2X_F_STATE_INITIALIZED;
5563 case BNX2X_F_STATE_INITIALIZED:
5564 if (cmd == BNX2X_F_CMD_START)
5565 next_state = BNX2X_F_STATE_STARTED;
5567 else if (cmd == BNX2X_F_CMD_HW_RESET)
5568 next_state = BNX2X_F_STATE_RESET;
5571 case BNX2X_F_STATE_STARTED:
5572 if (cmd == BNX2X_F_CMD_STOP)
5573 next_state = BNX2X_F_STATE_INITIALIZED;
5574 * afex ramrods can be sent only in the started state, and only
5575 * if a function_stop ramrod completion is not pending;
5576 * for these events the next state remains STARTED.
5578 else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5579 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5580 next_state = BNX2X_F_STATE_STARTED;
5582 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5583 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5584 next_state = BNX2X_F_STATE_STARTED;
5586 /* Switch_update ramrod can be sent in either started or
5587 * tx_stopped state, and it doesn't change the state.
5589 else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5590 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5591 next_state = BNX2X_F_STATE_STARTED;
5593 else if (cmd == BNX2X_F_CMD_TX_STOP)
5594 next_state = BNX2X_F_STATE_TX_STOPPED;
5597 case BNX2X_F_STATE_TX_STOPPED:
5598 if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5599 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5600 next_state = BNX2X_F_STATE_TX_STOPPED;
5602 else if (cmd == BNX2X_F_CMD_TX_START)
5603 next_state = BNX2X_F_STATE_STARTED;
5607 BNX2X_ERR("Unknown state: %d\n", state);
5610 /* Transition is assured */
5611 if (next_state != BNX2X_F_STATE_MAX) {
5612 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5613 state, cmd, next_state);
5614 o->next_state = next_state;
5618 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5625 * bnx2x_func_init_func - performs HW init at function stage
5627 * @bp: device handle
5630 * Init HW when the current phase is
5631 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize FUNCTION-only HW blocks.
5634 static inline int bnx2x_func_init_func(struct bnx2x *bp,
5635 const struct bnx2x_func_sp_drv_ops *drv)
5637 return drv->init_hw_func(bp);
5641 * bnx2x_func_init_port - performs HW init at port stage
5643 * @bp: device handle
5646 * Init HW when the current phase is
5647 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5648 * FUNCTION-only HW blocks.
5651 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5652 const struct bnx2x_func_sp_drv_ops *drv)
5654 int rc = drv->init_hw_port(bp);
5658 return bnx2x_func_init_func(bp, drv);
5662 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5664 * @bp: device handle
5667 * Init HW when the current phase is
5668 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5669 * PORT-only and FUNCTION-only HW blocks.
5671 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5672 const struct bnx2x_func_sp_drv_ops *drv)
5674 int rc = drv->init_hw_cmn_chip(bp);
5678 return bnx2x_func_init_port(bp, drv);
5682 * bnx2x_func_init_cmn - performs HW init at common stage
5684 * @bp: device handle
5687 * Init HW when the current phase is
5688 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5689 * PORT-only and FUNCTION-only HW blocks.
5691 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5692 const struct bnx2x_func_sp_drv_ops *drv)
5694 int rc = drv->init_hw_cmn(bp);
5698 return bnx2x_func_init_port(bp, drv);
5701 static int bnx2x_func_hw_init(struct bnx2x *bp,
5702 struct bnx2x_func_state_params *params)
5704 u32 load_code = params->params.hw_init.load_phase;
5705 struct bnx2x_func_sp_obj *o = params->f_obj;
5706 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5709 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5710 BP_ABS_FUNC(bp), load_code);
5712 /* Prepare buffers for unzipping the FW */
5713 rc = drv->gunzip_init(bp);
5718 rc = drv->init_fw(bp);
5720 BNX2X_ERR("Error loading firmware\n");
5724 /* Handle the beginning of COMMON_XXX phases separately... */
5725 switch (load_code) {
5726 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5727 rc = bnx2x_func_init_cmn_chip(bp, drv);
5732 case FW_MSG_CODE_DRV_LOAD_COMMON:
5733 rc = bnx2x_func_init_cmn(bp, drv);
5738 case FW_MSG_CODE_DRV_LOAD_PORT:
5739 rc = bnx2x_func_init_port(bp, drv);
5744 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5745 rc = bnx2x_func_init_func(bp, drv);
5751 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5756 drv->gunzip_end(bp);
5758 /* In case of success, complete the command immediately: no ramrods have been sent */
5762 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5768 * bnx2x_func_reset_func - reset HW at function stage
5770 * @bp: device handle
5773 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5774 * FUNCTION-only HW blocks.
5776 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5777 const struct bnx2x_func_sp_drv_ops *drv)
5779 drv->reset_hw_func(bp);
5783 * bnx2x_func_reset_port - reset HW at port stage
5785 * @bp: device handle
5788 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5789 * FUNCTION-only and PORT-only HW blocks.
5793 * It's important to call reset_port before reset_func(): the last thing
5794 * reset_func() does is pf_disable(), which disables PGLUE_B and
5795 * thereby makes any further DMAE transactions impossible.
5797 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5798 const struct bnx2x_func_sp_drv_ops *drv)
5800 drv->reset_hw_port(bp);
5801 bnx2x_func_reset_func(bp, drv);
5805 * bnx2x_func_reset_cmn - reset HW at common stage
5807 * @bp: device handle
5810 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5811 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5812 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5814 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5815 const struct bnx2x_func_sp_drv_ops *drv)
5817 bnx2x_func_reset_port(bp, drv);
5818 drv->reset_hw_cmn(bp);
5821 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5822 struct bnx2x_func_state_params *params)
5824 u32 reset_phase = params->params.hw_reset.reset_phase;
5825 struct bnx2x_func_sp_obj *o = params->f_obj;
5826 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5828 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5831 switch (reset_phase) {
5832 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5833 bnx2x_func_reset_cmn(bp, drv);
5835 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5836 bnx2x_func_reset_port(bp, drv);
5838 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5839 bnx2x_func_reset_func(bp, drv);
5842 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5847 /* Complete the command immediately: no ramrods have been sent. */
5848 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5853 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5854 struct bnx2x_func_state_params *params)
5856 struct bnx2x_func_sp_obj *o = params->f_obj;
5857 struct function_start_data *rdata =
5858 (struct function_start_data *)o->rdata;
5859 dma_addr_t data_mapping = o->rdata_mapping;
5860 struct bnx2x_func_start_params *start_params = &params->params.start;
5862 memset(rdata, 0, sizeof(*rdata));
5864 /* Fill the ramrod data with provided parameters */
5865 rdata->function_mode = (u8)start_params->mf_mode;
5866 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5867 rdata->path_id = BP_PATH(bp);
5868 rdata->network_cos_mode = start_params->network_cos_mode;
5869 rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
5870 rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
5872 /* No need for an explicit memory barrier here: the ordering of
5873 * writing the SPQ element and updating the SPQ producer has to
5874 * be ensured in any case, and bnx2x_sp_post() contains a full
5875 * memory barrier that enforces exactly that ordering.
5876 */
5879 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5880 U64_HI(data_mapping),
5881 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5884 static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5885 struct bnx2x_func_state_params *params)
5887 struct bnx2x_func_sp_obj *o = params->f_obj;
5888 struct function_update_data *rdata =
5889 (struct function_update_data *)o->rdata;
5890 dma_addr_t data_mapping = o->rdata_mapping;
5891 struct bnx2x_func_switch_update_params *switch_update_params =
5892 &params->params.switch_update;
5894 memset(rdata, 0, sizeof(*rdata));
5896 /* Fill the ramrod data with provided parameters */
5897 rdata->tx_switch_suspend_change_flg = 1;
5898 rdata->tx_switch_suspend = switch_update_params->suspend;
5899 rdata->echo = SWITCH_UPDATE;
5901 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5902 U64_HI(data_mapping),
5903 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5906 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5907 struct bnx2x_func_state_params *params)
5909 struct bnx2x_func_sp_obj *o = params->f_obj;
5910 struct function_update_data *rdata =
5911 (struct function_update_data *)o->afex_rdata;
5912 dma_addr_t data_mapping = o->afex_rdata_mapping;
5913 struct bnx2x_func_afex_update_params *afex_update_params =
5914 &params->params.afex_update;
5916 memset(rdata, 0, sizeof(*rdata));
5918 /* Fill the ramrod data with provided parameters */
5919 rdata->vif_id_change_flg = 1;
5920 rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5921 rdata->afex_default_vlan_change_flg = 1;
5922 rdata->afex_default_vlan =
5923 cpu_to_le16(afex_update_params->afex_default_vlan);
5924 rdata->allowed_priorities_change_flg = 1;
5925 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5926 rdata->echo = AFEX_UPDATE;
5928 /* No need for an explicit memory barrier here: the ordering of
5929 * writing the SPQ element and updating the SPQ producer has to
5930 * be ensured in any case, and bnx2x_sp_post() contains a full
5931 * memory barrier that enforces exactly that ordering.
5932 */
5935 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5937 rdata->afex_default_vlan, rdata->allowed_priorities);
5939 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5940 U64_HI(data_mapping),
5941 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5945 inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5946 struct bnx2x_func_state_params *params)
5948 struct bnx2x_func_sp_obj *o = params->f_obj;
5949 struct afex_vif_list_ramrod_data *rdata =
5950 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5951 struct bnx2x_func_afex_viflists_params *afex_vif_params =
5952 &params->params.afex_viflists;
5953 u64 *p_rdata = (u64 *)rdata;
5955 memset(rdata, 0, sizeof(*rdata));
5957 /* Fill the ramrod data with provided parameters */
5958 rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
5959 rdata->func_bit_map = afex_vif_params->func_bit_map;
5960 rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5961 rdata->func_to_clear = afex_vif_params->func_to_clear;
5963 /* send the sub-command type in the echo field */
5964 rdata->echo = afex_vif_params->afex_vif_list_command;
5966 /* No need for an explicit memory barrier here: the ordering of
5967 * writing the SPQ element and updating the SPQ producer has to
5968 * be ensured in any case, and bnx2x_sp_post() contains a full
5969 * memory barrier that enforces exactly that ordering.
5970 */
5973 DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5974 rdata->afex_vif_list_command, rdata->vif_list_index,
5975 rdata->func_bit_map, rdata->func_to_clear);
5977 /* this ramrod sends data directly and not through DMA mapping */
5978 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5979 U64_HI(*p_rdata), U64_LO(*p_rdata),
5980 NONE_CONNECTION_TYPE);
5983 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5984 struct bnx2x_func_state_params *params)
5986 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5987 NONE_CONNECTION_TYPE);
5990 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5991 struct bnx2x_func_state_params *params)
5993 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5994 NONE_CONNECTION_TYPE);
5996 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5997 struct bnx2x_func_state_params *params)
5999 struct bnx2x_func_sp_obj *o = params->f_obj;
6000 struct flow_control_configuration *rdata =
6001 (struct flow_control_configuration *)o->rdata;
6002 dma_addr_t data_mapping = o->rdata_mapping;
6003 struct bnx2x_func_tx_start_params *tx_start_params =
6004 &params->params.tx_start;
6007 memset(rdata, 0, sizeof(*rdata));
6009 rdata->dcb_enabled = tx_start_params->dcb_enabled;
6010 rdata->dcb_version = tx_start_params->dcb_version;
6011 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
6013 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
6014 rdata->traffic_type_to_priority_cos[i] =
6015 tx_start_params->traffic_type_to_priority_cos[i];
6017 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
6018 U64_HI(data_mapping),
6019 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
6022 static int bnx2x_func_send_cmd(struct bnx2x *bp,
6023 struct bnx2x_func_state_params *params)
6025 switch (params->cmd) {
6026 case BNX2X_F_CMD_HW_INIT:
6027 return bnx2x_func_hw_init(bp, params);
6028 case BNX2X_F_CMD_START:
6029 return bnx2x_func_send_start(bp, params);
6030 case BNX2X_F_CMD_STOP:
6031 return bnx2x_func_send_stop(bp, params);
6032 case BNX2X_F_CMD_HW_RESET:
6033 return bnx2x_func_hw_reset(bp, params);
6034 case BNX2X_F_CMD_AFEX_UPDATE:
6035 return bnx2x_func_send_afex_update(bp, params);
6036 case BNX2X_F_CMD_AFEX_VIFLISTS:
6037 return bnx2x_func_send_afex_viflists(bp, params);
6038 case BNX2X_F_CMD_TX_STOP:
6039 return bnx2x_func_send_tx_stop(bp, params);
6040 case BNX2X_F_CMD_TX_START:
6041 return bnx2x_func_send_tx_start(bp, params);
6042 case BNX2X_F_CMD_SWITCH_UPDATE:
6043 return bnx2x_func_send_switch_update(bp, params);
6045 BNX2X_ERR("Unknown command: %d\n", params->cmd);
6050 void bnx2x_init_func_obj(struct bnx2x *bp,
6051 struct bnx2x_func_sp_obj *obj,
6052 void *rdata, dma_addr_t rdata_mapping,
6053 void *afex_rdata, dma_addr_t afex_rdata_mapping,
6054 struct bnx2x_func_sp_drv_ops *drv_iface)
6056 memset(obj, 0, sizeof(*obj));
6058 mutex_init(&obj->one_pending_mutex);
6060 obj->rdata = rdata;
6061 obj->rdata_mapping = rdata_mapping;
6062 obj->afex_rdata = afex_rdata;
6063 obj->afex_rdata_mapping = afex_rdata_mapping;
6064 obj->send_cmd = bnx2x_func_send_cmd;
6065 obj->check_transition = bnx2x_func_chk_transition;
6066 obj->complete_cmd = bnx2x_func_comp_cmd;
6067 obj->wait_comp = bnx2x_func_wait_comp;
6069 obj->drv = drv_iface;
6073 * bnx2x_func_state_change - perform Function state change transition
6075 * @bp: device handle
6076 * @params: parameters to perform the transition
6078 * returns 0 in case of successfully completed transition,
6079 * negative error code in case of failure, positive
6080 * (EBUSY) value if there is a completion that is
6081 * still pending (possible only if RAMROD_COMP_WAIT is
6082 * not set in params->ramrod_flags for asynchronous
6085 int bnx2x_func_state_change(struct bnx2x *bp,
6086 struct bnx2x_func_state_params *params)
6088 struct bnx2x_func_sp_obj *o = params->f_obj;
6090 enum bnx2x_func_cmd cmd = params->cmd;
6091 unsigned long *pending = &o->pending;
6093 mutex_lock(&o->one_pending_mutex);
6095 /* Check that the requested transition is legal */
6096 rc = o->check_transition(bp, o, params);
6097 if ((rc == -EBUSY) &&
6098 (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
6099 while ((rc == -EBUSY) && (--cnt > 0)) {
6100 mutex_unlock(&o->one_pending_mutex);
6102 mutex_lock(&o->one_pending_mutex);
6103 rc = o->check_transition(bp, o, params);
6106 mutex_unlock(&o->one_pending_mutex);
6107 BNX2X_ERR("timeout waiting for previous ramrod completion\n");
6111 mutex_unlock(&o->one_pending_mutex);
6115 /* Set "pending" bit */
6116 set_bit(cmd, pending);
6118 /* Don't send a command if only driver cleanup was requested */
6119 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
6120 bnx2x_func_state_change_comp(bp, o, cmd);
6121 mutex_unlock(&o->one_pending_mutex);
6124 rc = o->send_cmd(bp, params);
6126 mutex_unlock(&o->one_pending_mutex);
6129 o->next_state = BNX2X_F_STATE_MAX;
6130 clear_bit(cmd, pending);
6131 smp_mb__after_clear_bit();
6135 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
6136 rc = o->wait_comp(bp, o, cmd);
6144 return !!test_bit(cmd, pending);
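/* Usage sketch (illustrative only): stopping Tx traffic synchronously
 * through the function state machine; 'func_obj' is assumed to have
 * been set up with bnx2x_init_func_obj() and error handling is elided:
 *
 *	struct bnx2x_func_state_params fp = {NULL};
 *
 *	fp.f_obj = &func_obj;
 *	fp.cmd = BNX2X_F_CMD_TX_STOP;
 *	__set_bit(RAMROD_COMP_WAIT, &fp.ramrod_flags);
 *	rc = bnx2x_func_state_change(bp, &fp);
 */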