From 350a714960eb8a980c913c9be5a96bb18b2fe9da Mon Sep 17 00:00:00 2001
From: Eddie Wai
Date: Mon, 19 Sep 2016 03:58:09 -0400
Subject: [PATCH] bnxt_en: Fixed the VF link status after a link state change

The VF link state can be changed via the 'ip link set' cmd.  Currently,
the new link state does not take effect immediately.

The fix is for the PF to send a link change async event to the
designated VF after a VF link state change.  This async event will
trigger the VF to update the link status.

Signed-off-by: Eddie Wai
Signed-off-by: Michael Chan
Signed-off-by: David S. Miller
---
 .../net/ethernet/broadcom/bnxt/bnxt_sriov.c   | 84 +++++++++----------
 1 file changed, 42 insertions(+), 42 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 50d2007a2640..8be718508600 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -19,6 +19,45 @@
 #include "bnxt_ethtool.h"
 
 #ifdef CONFIG_BNXT_SRIOV
+static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
+					   struct bnxt_vf_info *vf, u16 event_id)
+{
+	struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_fwd_async_event_cmpl_input req = {0};
+	struct hwrm_async_event_cmpl *async_cmpl;
+	int rc = 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+	if (vf)
+		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+	else
+		/* broadcast this async event to all VFs */
+		req.encap_async_event_target_id = cpu_to_le16(0xffff);
+	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+	async_cmpl->type =
+		cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+	async_cmpl->event_id = cpu_to_le16(event_id);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+	if (rc) {
+		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
+			   rc);
+		goto fwd_async_event_cmpl_exit;
+	}
+
+	if (resp->error_code) {
+		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
+			   resp->error_code);
+		rc = -1;
+	}
+
+fwd_async_event_cmpl_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
 static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
 {
 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
@@ -243,8 +282,9 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
 		rc = -EINVAL;
 		break;
 	}
-	/* CHIMP TODO: send msg to VF to update new link state */
-
+	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
+		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
+			HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
 	return rc;
 }
 
@@ -525,46 +565,6 @@ err_out1:
 	return rc;
 }
 
-static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
-					   struct bnxt_vf_info *vf,
-					   u16 event_id)
-{
-	int rc = 0;
-	struct hwrm_fwd_async_event_cmpl_input req = {0};
-	struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_async_event_cmpl *async_cmpl;
-
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
-	if (vf)
-		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
-	else
-		/* broadcast this async event to all VFs */
-		req.encap_async_event_target_id = cpu_to_le16(0xffff);
-	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
-	async_cmpl->type =
-		cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
-	async_cmpl->event_id = cpu_to_le16(event_id);
-
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
-	if (rc) {
-		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
-			   rc);
-		goto fwd_async_event_cmpl_exit;
-	}
-
-	if (resp->error_code) {
-		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
-			   resp->error_code);
-		rc = -1;
-	}
-
-fwd_async_event_cmpl_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
-	return rc;
-}
-
 void bnxt_sriov_disable(struct bnxt *bp)
 {
 	u16 num_vfs = pci_num_vf(bp->pdev);
-- 
2.39.5
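
For readers tracing the behavior rather than the diff itself, the sketch below is a small, self-contained C model of the flow the commit message describes: the PF forwards a link-status-change async completion to the designated VF (or broadcasts it with target id 0xffff), and the VF reacts by refreshing the link state it reports. It is not driver code; apart from the HWRM event name it mimics, every identifier is a hypothetical stand-in for illustration.

/*
 * Standalone model of the PF -> VF link notification flow added by this
 * patch.  Illustration only: not bnxt driver code, and all identifiers
 * are hypothetical (e.g. EVENT_ID_LINK_STATUS_CHANGE stands in for
 * HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EVENT_ID_LINK_STATUS_CHANGE	1
#define TARGET_ID_ALL_VFS		0xffff

enum vf_link_state { VF_LINK_AUTO, VF_LINK_UP, VF_LINK_DOWN };

struct model_vf {
	uint16_t fw_fid;		/* function id used as the forward target */
	enum vf_link_state admin;	/* state set via 'ip link set ... vf N state ...' */
	bool link_up;			/* link status the VF currently reports */
};

/* VF side: work triggered by the forwarded async completion. */
static void vf_handle_async_event(struct model_vf *vf, uint16_t event_id)
{
	if (event_id != EVENT_ID_LINK_STATUS_CHANGE)
		return;
	/* Re-read and apply the link state instead of waiting for a poll. */
	vf->link_up = (vf->admin != VF_LINK_DOWN);
	printf("VF %u: link refreshed, now %s\n",
	       (unsigned)vf->fw_fid, vf->link_up ? "up" : "down");
}

/* PF side: forward the async event to one VF, or to all VFs via 0xffff. */
static void pf_fwd_async_event(struct model_vf *vfs, int num_vfs,
			       uint16_t target_id, uint16_t event_id)
{
	for (int i = 0; i < num_vfs; i++)
		if (target_id == TARGET_ID_ALL_VFS || vfs[i].fw_fid == target_id)
			vf_handle_async_event(&vfs[i], event_id);
}

/* PF side: change a VF's admin link state, then notify that VF at once. */
static void pf_set_vf_link_state(struct model_vf *vfs, int num_vfs, int vf_id,
				 enum vf_link_state state)
{
	vfs[vf_id].admin = state;
	pf_fwd_async_event(vfs, num_vfs, vfs[vf_id].fw_fid,
			   EVENT_ID_LINK_STATUS_CHANGE);
}

int main(void)
{
	struct model_vf vfs[2] = { { .fw_fid = 1 }, { .fw_fid = 2 } };

	pf_set_vf_link_state(vfs, 2, 0, VF_LINK_DOWN);	/* like 'state disable' */
	pf_set_vf_link_state(vfs, 2, 1, VF_LINK_UP);	/* like 'state enable'  */
	return 0;
}

In the real driver the forwarding is of course an HWRM message handed to firmware rather than a direct function call, and the VF applies the change from its own async-event handling path, but the ordering the patch establishes is the same: update the state first, then notify the affected VF immediately.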