const u8 *macaddr, s16 vlan)
{
struct i40e_mac_filter *f;
+ u64 key;
if (!vsi || !macaddr)
return NULL;
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ key = i40e_addr_to_hkey(macaddr);
+ hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
if ((ether_addr_equal(macaddr, f->macaddr)) &&
(vlan == f->vlan))
return f;
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
struct i40e_mac_filter *f;
+ u64 key;
if (!vsi || !macaddr)
return NULL;
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ key = i40e_addr_to_hkey(macaddr);
+ hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
if ((ether_addr_equal(macaddr, f->macaddr)))
return f;
}
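The hash key helper i40e_addr_to_hkey() used by both lookup paths above is not shown in this excerpt; presumably it just packs the six MAC address bytes into a u64, which hash_add()/hash_for_each_possible() then fold down to the table size. A minimal sketch, assuming it lives in i40e.h:

/* Sketch of the assumed key helper: copy the 6-byte MAC into the low
 * bytes of a zeroed u64 so equal MAC addresses always map to the same
 * hash bucket.
 */
static inline u64 i40e_addr_to_hkey(const u8 *macaddr)
{
	u64 key = 0;

	ether_addr_copy((u8 *)&key, macaddr);
	return key;
}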
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt;
/* Only -1 for all the filters denotes not in vlan mode
* so we have to go through all the list in order to make sure
*/
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->vlan >= 0 || vsi->info.pvid)
return true;
}
*
* Returns ptr to the filter object or NULL when no memory available.
*
- * NOTE: This function is expected to be called with mac_filter_list_lock
+ * NOTE: This function is expected to be called with mac_filter_hash_lock
* being held.
**/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
const u8 *macaddr, s16 vlan)
{
struct i40e_mac_filter *f;
+ u64 key;
if (!vsi || !macaddr)
return NULL;
f->state = I40E_FILTER_FAILED;
else
f->state = I40E_FILTER_NEW;
- INIT_LIST_HEAD(&f->list);
- list_add_tail(&f->list, &vsi->mac_filter_list);
+ INIT_HLIST_NODE(&f->hlist);
+
+ key = i40e_addr_to_hkey(macaddr);
+ hash_add(vsi->mac_filter_hash, &f->hlist, key);
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
* the exact filter you will remove already, such as via i40e_find_filter or
* i40e_find_mac.
*
- * NOTE: This function is expected to be called with mac_filter_list_lock
+ * NOTE: This function is expected to be called with mac_filter_hash_lock
* being held.
* ANOTHER NOTE: This function MUST be called from within the context of
* the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
/* this one never got added by the FW. Just remove it,
* no need to sync anything.
*/
- list_del(&f->list);
+ hash_del(&f->hlist);
kfree(f);
} else {
f->state = I40E_FILTER_REMOVE;
* @macaddr: the MAC address
* @vlan: the VLAN
*
- * NOTE: This function is expected to be called with mac_filter_list_lock
+ * NOTE: This function is expected to be called with mac_filter_hash_lock
* being held.
* ANOTHER NOTE: This function MUST be called from within the context of
* the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
const u8 *macaddr)
{
struct i40e_mac_filter *f, *add = NULL;
+ struct hlist_node *h;
+ int bkt;
if (vsi->info.pvid)
return i40e_add_filter(vsi, macaddr,
le16_to_cpu(vsi->info.pvid));
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->state == I40E_FILTER_REMOVE)
continue;
add = i40e_add_filter(vsi, macaddr, f->vlan);
**/
int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr)
{
- struct i40e_mac_filter *f, *ftmp;
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
bool found = false;
+ int bkt;
- WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
- "Missing mac_filter_list_lock\n");
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
+ "Missing mac_filter_hash_lock\n");
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (ether_addr_equal(macaddr, f->macaddr)) {
__i40e_del_filter(vsi, f);
found = true;
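One caveat with the spin_is_locked() assertion carried over above: on uniprocessor builds spin_is_locked() can always return 0, so the WARN may fire even while the lock is held. Where lockdep coverage is acceptable, a hypothetical alternative (not part of this patch) would be:

	/* Hypothetical alternative to the WARN above: a no-op without
	 * lockdep, and independent of spin_is_locked() semantics on UP.
	 */
	lockdep_assert_held(&vsi->mac_filter_hash_lock);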
else
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_all_vlan(vsi, netdev->dev_addr);
i40e_put_mac_in_vlan(vsi, addr->sa_data);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* check for other flag changes */
if (vsi->current_netdev_flags != vsi->netdev->flags) {
* MAC filter entries from list were slated to be removed from device.
**/
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
- struct list_head *from)
+ struct hlist_head *from)
{
- struct i40e_mac_filter *f, *ftmp;
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+
+ hlist_for_each_entry_safe(f, h, from, hlist) {
+ u64 key = i40e_addr_to_hkey(f->macaddr);
- list_for_each_entry_safe(f, ftmp, from, list) {
/* Move the element back into MAC filter list*/
- list_move_tail(&f->list, &vsi->mac_filter_list);
+ hlist_del(&f->hlist);
+ hash_add(vsi->mac_filter_hash, &f->hlist, key);
}
}
/* Everything's good, mark all filters active. */
for (i = 0; i < count ; i++) {
add_head->state = I40E_FILTER_ACTIVE;
- add_head = list_next_entry(add_head, list);
+ add_head = hlist_entry(add_head->hlist.next,
+ struct i40e_mac_filter, hlist);
}
} else if (aq_err == I40E_AQ_RC_ENOSPC) {
/* Device ran out of filter space. Check the return value
add_head->state = I40E_FILTER_ACTIVE;
retval++;
}
- add_head = list_next_entry(add_head, list);
+ add_head = hlist_entry(add_head->hlist.next,
+ struct i40e_mac_filter, hlist);
}
} else {
/* Some other horrible thing happened, fail all filters */
retval = 0;
for (i = 0; i < count ; i++) {
add_head->state = I40E_FILTER_FAILED;
- add_head = list_next_entry(add_head, list);
+ add_head = hlist_entry(add_head->hlist.next,
+ struct i40e_mac_filter, hlist);
}
}
return retval;
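The open-coded hlist_entry() advance above is repeated once per outcome branch; if desired it could be folded into a small helper along these lines (hypothetical, not introduced by this patch):

/* Hypothetical helper mirroring the open-coded walk above: step to the
 * next filter on the temporary add list. Like the open-coded version,
 * it relies on the caller bounding the walk by count.
 */
static struct i40e_mac_filter *i40e_next_filter(struct i40e_mac_filter *f)
{
	return hlist_entry(f->hlist.next, struct i40e_mac_filter, hlist);
}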
**/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
- struct i40e_mac_filter *f, *ftmp, *add_head = NULL;
- struct list_head tmp_add_list, tmp_del_list;
+ struct i40e_mac_filter *f, *add_head = NULL;
+ struct hlist_head tmp_add_list, tmp_del_list;
struct i40e_hw *hw = &vsi->back->hw;
bool promisc_changed = false;
char vsi_name[16] = "PF";
int filter_list_len = 0;
u32 changed_flags = 0;
i40e_status aq_ret = 0;
+ struct hlist_node *h;
int retval = 0;
struct i40e_pf *pf;
int num_add = 0;
u16 cmd_flags;
int list_size;
int fcnt;
+ int bkt;
/* empty array typed pointers, kcalloc later */
struct i40e_aqc_add_macvlan_element_data *add_list;
vsi->current_netdev_flags = vsi->netdev->flags;
}
- INIT_LIST_HEAD(&tmp_add_list);
- INIT_LIST_HEAD(&tmp_del_list);
+ INIT_HLIST_HEAD(&tmp_add_list);
+ INIT_HLIST_HEAD(&tmp_del_list);
if (vsi->type == I40E_VSI_SRIOV)
snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* Create a list of filters to delete. */
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->state == I40E_FILTER_REMOVE) {
/* Move the element into temporary del_list */
- list_move_tail(&f->list, &tmp_del_list);
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, &tmp_del_list);
vsi->active_filters--;
}
if (f->state == I40E_FILTER_NEW) {
/* Move the element into temporary add_list */
- list_move_tail(&f->list, &tmp_add_list);
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, &tmp_add_list);
}
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
/* Now process 'del_list' outside the lock */
- if (!list_empty(&tmp_del_list)) {
+ if (!hlist_empty(&tmp_del_list)) {
filter_list_len = hw->aq.asq_buf_size /
sizeof(struct i40e_aqc_remove_macvlan_element_data);
list_size = filter_list_len *
del_list = kzalloc(list_size, GFP_ATOMIC);
if (!del_list) {
/* Undo VSI's MAC filter entry element updates */
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_undo_del_filter_entries(vsi, &tmp_del_list);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
retval = -ENOMEM;
goto out;
}
- list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
+ hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
cmd_flags = 0;
/* add to delete list */
/* Release memory for MAC filter entries which were
* synced up with HW.
*/
- list_del(&f->list);
+ hlist_del(&f->hlist);
kfree(f);
}
del_list = NULL;
}
- if (!list_empty(&tmp_add_list)) {
+ if (!hlist_empty(&tmp_add_list)) {
/* Do all the adds now. */
filter_list_len = hw->aq.asq_buf_size /
sizeof(struct i40e_aqc_add_macvlan_element_data);
goto out;
}
num_add = 0;
- list_for_each_entry(f, &tmp_add_list, list) {
+ hlist_for_each_entry(f, &tmp_add_list, hlist) {
if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state)) {
f->state = I40E_FILTER_FAILED;
/* Now move all of the filters from the temp add list back to
* the VSI's list.
*/
- spin_lock_bh(&vsi->mac_filter_list_lock);
- list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
- list_move_tail(&f->list, &vsi->mac_filter_list);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) {
+ u64 key = i40e_addr_to_hkey(f->macaddr);
+
+ hlist_del(&f->hlist);
+ hash_add(vsi->mac_filter_hash, &f->hlist, key);
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
kfree(add_list);
add_list = NULL;
}
/* See if we have any failed filters. We can't drop out of
* promiscuous until these have all been deleted.
*/
- spin_lock_bh(&vsi->mac_filter_list_lock);
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->state == I40E_FILTER_FAILED)
failed_count++;
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
if (!failed_count) {
dev_info(&pf->pdev->dev,
"filter logjam cleared on %s, leaving overflow promiscuous mode\n",
**/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
- struct i40e_mac_filter *f, *ftmp, *add_f, *del_f;
+ struct i40e_mac_filter *f, *add_f, *del_f;
+ struct hlist_node *h;
+ int bkt;
/* Locked once because all functions invoked below iterates list*/
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
if (vsi->netdev) {
add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid);
dev_info(&vsi->back->pdev->dev,
"Could not add vlan filter %d for %pM\n",
vid, vsi->netdev->dev_addr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
return -ENOMEM;
}
}
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->state == I40E_FILTER_REMOVE)
continue;
add_f = i40e_add_filter(vsi, f->macaddr, vid);
dev_info(&vsi->back->pdev->dev,
"Could not add vlan filter %d for %pM\n",
vid, f->macaddr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
return -ENOMEM;
}
}
dev_info(&vsi->back->pdev->dev,
"Could not add filter 0 for %pM\n",
vsi->netdev->dev_addr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
return -ENOMEM;
}
}
/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
if (vid > 0 && !vsi->info.pvid) {
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->state == I40E_FILTER_REMOVE)
continue;
del_f = i40e_find_filter(vsi, f->macaddr,
dev_info(&vsi->back->pdev->dev,
"Could not add filter 0 for %pM\n",
f->macaddr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
return -ENOMEM;
}
}
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* schedule our worker thread which will take care of
* applying the new filter changes
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
struct net_device *netdev = vsi->netdev;
- struct i40e_mac_filter *f, *ftmp, *add_f;
+ struct i40e_mac_filter *f, *add_f;
+ struct hlist_node *h;
int filter_count = 0;
+ int bkt;
/* Locked once because all functions invoked below iterates list */
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
if (vsi->netdev)
i40e_del_filter(vsi, netdev->dev_addr, vid);
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->vlan == vid)
__i40e_del_filter(vsi, f);
}
* be replaced with -1. This signifies that we should from now
* on accept any traffic (with any tag present, or untagged)
*/
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (vsi->netdev) {
if (f->vlan &&
ether_addr_equal(netdev->dev_addr, f->macaddr))
dev_info(&vsi->back->pdev->dev,
"Could not add filter %d for %pM\n",
I40E_VLAN_ANY, netdev->dev_addr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
return -ENOMEM;
}
}
if (!filter_count) {
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (!f->vlan)
__i40e_del_filter(vsi, f);
add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY);
dev_info(&vsi->back->pdev->dev,
"Could not add filter %d for %pM\n",
I40E_VLAN_ANY, f->macaddr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
return -ENOMEM;
}
}
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* schedule our worker thread which will take care of
* applying the new filter changes
pf->rss_table_size : 64;
vsi->netdev_registered = false;
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
- INIT_LIST_HEAD(&vsi->mac_filter_list);
+ hash_init(vsi->mac_filter_hash);
vsi->irqs_ready = false;
ret = i40e_set_num_rings_in_vsi(vsi);
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
/* Initialize VSI lock */
- spin_lock_init(&vsi->mac_filter_list_lock);
+ spin_lock_init(&vsi->mac_filter_hash_lock);
pf->vsi[vsi_idx] = vsi;
ret = vsi_idx;
goto unlock_pf;
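Both hash_init() and the renamed lock assume matching struct i40e_vsi changes that are not shown in this excerpt; presumably something like the following replaces the old mac_filter_list/mac_filter_list_lock pair in i40e.h (the 8-bit table size is an assumption):

	/* Assumed struct i40e_vsi fields (i40e.h), not shown in this excerpt */
	spinlock_t mac_filter_hash_lock; /* protects mac_filter_hash */
	DECLARE_HASHTABLE(mac_filter_hash, 8); /* 256 buckets, keyed on the MAC */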
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, hw->mac.perm_addr);
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
pf->vsi[pf->lan_vsi]->netdev->name);
random_ether_addr(mac_addr);
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
ether_addr_copy(netdev->dev_addr, mac_addr);
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_vsi_context ctxt;
- struct i40e_mac_filter *f, *ftmp;
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt;
u8 enabled_tc = 0x1; /* TC0 enabled */
int f_count = 0;
vsi->active_filters = 0;
clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* If macvlan filters already exist, force them to get loaded */
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
f->state = I40E_FILTER_NEW;
f_count++;
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
if (f_count) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
**/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
- struct i40e_mac_filter *f, *ftmp;
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
struct i40e_veb *veb = NULL;
struct i40e_pf *pf;
u16 uplink_seid;
- int i, n;
+ int i, n, bkt;
pf = vsi->back;
i40e_vsi_disable_irq(vsi);
}
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* clear the sync flag on all filters */
if (vsi->netdev) {
}
/* make sure any remaining filters are marked for deletion */
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
__i40e_del_filter(vsi, f);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
i40e_sync_vsi_filters(vsi);
if (vf->port_vlan_id)
i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
vf->port_vlan_id ?
"Could not add MAC filter %pM for VF %d\n",
vf->default_lan_addr.addr, vf->vf_id);
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id),
(u32)hena);
i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
- int num_vlans = 0;
+ int num_vlans = 0, bkt;
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
num_vlans++;
}
struct i40e_vsi *vsi;
bool alluni = false;
int aq_err = 0;
+ int bkt;
vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
vf->port_vlan_id,
NULL);
} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
continue;
aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
vf->port_vlan_id,
NULL);
} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
aq_ret = 0;
if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) {
aq_ret =
/* Lock once, because all function inside for loop accesses VSI's
* MAC filter list which needs to be protected using same lock.
*/
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* add new addresses to the list */
for (i = 0; i < al->num_elements; i++) {
"Unable to add MAC filter %pM for VF %d\n",
al->list[i].addr, vf->vf_id);
ret = I40E_ERR_PARAM;
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
} else {
vf->num_mac++;
}
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* program the updated filter list */
ret = i40e_sync_vsi_filters(vsi);
}
vsi = pf->vsi[vf->lan_vsi_idx];
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete addresses from the list */
for (i = 0; i < al->num_elements; i++)
if (i40e_del_mac_all_vlan(vsi, al->list[i].addr)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
} else {
vf->num_mac--;
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* program the updated filter list */
ret = i40e_sync_vsi_filters(vsi);
struct i40e_mac_filter *f;
struct i40e_vf *vf;
int ret = 0;
+ struct hlist_node *h;
+ int bkt;
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
}
/* Lock once because below invoked function add/del_filter requires
- * mac_filter_list_lock to be held
+ * mac_filter_hash_lock to be held
*/
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete the temporary mac address */
if (!is_zero_ether_addr(vf->default_lan_addr.addr))
/* Delete all the filters for this VSI - we're going to kill it
* anyway.
*/
- list_for_each_entry(f, &vsi->mac_filter_list, list)
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
i40e_del_filter(vsi, f->macaddr, f->vlan);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */
/* duplicate request, so just return success */
goto error_pvid;
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) {
dev_err(&pf->pdev->dev,