L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+ ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
+ M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+ S: Maintained
+ F: arch/arm/mach-keystone/
+
ARM/LOGICPD PXA270 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
ARM/Ux500 ARM ARCHITECTURE
- M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
INTEL WIRELESS WIFI LINK (iwlwifi)
M: Johannes Berg <johannes.berg@intel.com>
-M: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+M: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
M: Intel Linux Wireless <ilw@linux.intel.com>
L: linux-wireless@vger.kernel.org
W: http://intellinuxwireless.org
F: drivers/media/tuners/mxl5007t.*
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
- M: Andrew Gallatin <gallatin@myri.com>
+ M: Hyong-Youb Kim <hykim@myri.com>
L: netdev@vger.kernel.org
- W: http://www.myri.com/scs/download-Myri10GE.html
+ W: https://www.myricom.com/support/downloads/myri10ge.html
S: Supported
F: drivers/net/ethernet/myricom/myri10ge/
S: Maintained
F: Documentation/networking/sctp.txt
F: include/linux/sctp.h
+F: include/uapi/linux/sctp.h
F: include/net/sctp/
F: net/sctp/
S: Maintained
F: sound/usb/midi.*
+ USB NETWORKING DRIVERS
+ L: linux-usb@vger.kernel.org
+ S: Odd Fixes
+ F: drivers/net/usb/
+
USB OHCI DRIVER
M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org
BNX2X_SP_RTNL_ENABLE_SRIOV,
BNX2X_SP_RTNL_VFPF_MCAST,
BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
- BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ BNX2X_SP_RTNL_RX_MODE,
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
};
#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
#define IS_VF_FLAG (1 << 22)
#define INTERRUPTS_ENABLED_FLAG (1 << 23)
+ #define BC_SUPPORTS_RMMOD_CMD (1 << 24)
#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
int fp_array_size;
u32 dump_preset_idx;
+ bool stats_started;
+ struct semaphore stats_sema;
};
/* Tx queues may be less or equal to Rx queues */
BNX2X_PCI_LINK_SPEED_5000 = 5000,
BNX2X_PCI_LINK_SPEED_8000 = 8000
};
+
+ void bnx2x_set_local_cmng(struct bnx2x *bp);
#endif /* bnx2x.h */
input.port_rate = bp->link_vars.line_speed;
- if (cmng_type == CMNG_FNS_MINMAX) {
+ if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
int vn;
/* read mf conf from shmem */
}
}
+ /* init cmng mode in HW according to local configuration */
+ void bnx2x_set_local_cmng(struct bnx2x *bp)
+ {
+ int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+ if (cmng_fns != CMNG_FNS_NONE) {
+ bnx2x_cmng_fns_init(bp, false, cmng_fns);
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+ } else {
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "single function mode without fairness\n");
+ }
+ }
+
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
}
- if (bp->link_vars.link_up && bp->link_vars.line_speed) {
- int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
-
- if (cmng_fns != CMNG_FNS_NONE) {
- bnx2x_cmng_fns_init(bp, false, cmng_fns);
- storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
- } else
- /* rate shaping and fairness are disabled */
- DP(NETIF_MSG_IFUP,
- "single function mode without fairness\n");
- }
+ if (bp->link_vars.link_up && bp->link_vars.line_speed)
+ bnx2x_set_local_cmng(bp);
__bnx2x_link_report(bp);
}
}
- if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
- &bp->sp_rtnl_state)) {
- DP(BNX2X_MSG_SP,
- "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
- bnx2x_vfpf_storm_rx_mode(bp);
+ if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
+ bnx2x_set_rx_mode_inner(bp);
}
if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
+
+ bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
+ BC_SUPPORTS_RMMOD_CMD : 0;
+
boot_mode = SHMEM_RD(bp,
dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
mutex_init(&bp->port.phy_mutex);
mutex_init(&bp->fw_mb_mutex);
spin_lock_init(&bp->stats_lock);
+ sema_init(&bp->stats_sema, 1);
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
void bnx2x_set_rx_mode(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- u32 rx_mode = BNX2X_RX_MODE_NORMAL;
if (bp->state != BNX2X_STATE_OPEN) {
DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
return;
+ } else {
+ /* Schedule an SP task to handle rest of change */
+ DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
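+ /* Full barriers around set_bit() below ensure the sp-task, once it
+ * runs, observes the new RX_MODE bit.
+ */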
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
+}
+
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
+{
+ u32 rx_mode = BNX2X_RX_MODE_NORMAL;
DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
- if (dev->flags & IFF_PROMISC)
+ netif_addr_lock_bh(bp->dev);
+
+ if (bp->dev->flags & IFF_PROMISC) {
rx_mode = BNX2X_RX_MODE_PROMISC;
- else if ((dev->flags & IFF_ALLMULTI) ||
- ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
- CHIP_IS_E1(bp)))
+ } else if ((bp->dev->flags & IFF_ALLMULTI) ||
+ ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp))) {
rx_mode = BNX2X_RX_MODE_ALLMULTI;
- else {
+ } else {
if (IS_PF(bp)) {
/* some multicasts */
if (bnx2x_set_mc_list(bp) < 0)
rx_mode = BNX2X_RX_MODE_ALLMULTI;
+ /* release bh lock, as bnx2x_set_uc_list might sleep */
+ netif_addr_unlock_bh(bp->dev);
if (bnx2x_set_uc_list(bp) < 0)
rx_mode = BNX2X_RX_MODE_PROMISC;
+ netif_addr_lock_bh(bp->dev);
} else {
/* configuring mcast to a vf involves sleeping (when we
- * wait for the pf's response). Since this function is
- * called from non sleepable context we must schedule
- * a work item for this purpose
+ * wait for the pf's response).
*/
smp_mb__before_clear_bit();
set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
/* Schedule the rx_mode command */
if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ netif_addr_unlock_bh(bp->dev);
return;
}
if (IS_PF(bp)) {
bnx2x_set_storm_rx_mode(bp);
+ netif_addr_unlock_bh(bp->dev);
} else {
- /* configuring rx mode to storms in a vf involves sleeping (when
- * we wait for the pf's response). Since this function is
- * called from non sleepable context we must schedule
- * a work item for this purpose
+ /* VF will need to request the PF to make this change, and so
+ * the VF needs to release the bottom-half lock prior to the
+ * request (as it will likely require sleep on the VF side)
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ netif_addr_unlock_bh(bp->dev);
+ bnx2x_vfpf_storm_rx_mode(bp);
}
}
static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
int cnic_cnt, bool is_vf)
{
- int pos, index;
+ int index;
u16 control = 0;
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-
/*
* If MSI-X is not supported - return number of SBs needed to support
* one fast path queue: one FP queue + SB for CNIC
*/
- if (!pos) {
+ if (!pdev->msix_cap) {
dev_info(&pdev->dev, "no msix capability found\n");
return 1 + cnic_cnt;
}
* without the default SB.
* For VFs there is no default SB, so we return (index+1).
*/
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
index = control & PCI_MSIX_FLAGS_QSIZE;
bnx2x_dcbnl_update_applist(bp, true);
#endif
+ if (IS_PF(bp) &&
+ !BP_NOMCP(bp) &&
+ (bp->flags & BC_SUPPORTS_RMMOD_CMD))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
+
/* Close the interface - either directly or implicitly */
if (remove_netdev) {
unregister_netdev(dev);
} else {
rtnl_lock();
- if (netif_running(dev))
- bnx2x_close(dev);
+ dev_close(dev);
rtnl_unlock();
}
* and a valid credit counter
*/
if (!vfop->rc && args->credit) {
- int cnt = 0;
struct list_head *pos;
+ int read_lock;
+ int cnt = 0;
+
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
+ if (read_lock)
+ DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
list_for_each(pos, &obj->head)
cnt++;
+ if (!read_lock)
+ bnx2x_vlan_mac_h_read_unlock(bp, obj);
+
atomic_set(args->credit, cnt);
}
}
alloc_mem_err:
BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
sizeof(struct bnx2x_vf_mbx_msg));
- BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+ BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
sizeof(union pf_vf_bulletin));
return -ENOMEM;
}
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 132
+#define TG3_MIN_NUM 133
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "May 21, 2013"
+#define DRV_MODULE_RELDATE "Jul 29, 2013"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
static void tg3_power_down(struct tg3 *tp)
{
- tg3_power_down_prepare(tp);
-
pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
pci_set_power_state(tp->pdev, PCI_D3hot);
}
/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
- tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
+ u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
+
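+ /* Read-modify-write: preserve the other control bits (e.g. the TSYNC
+ * output enables) while toggling STOP/RESUME.
+ */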
+ tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
- tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
+ tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
+ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+ u32 clock_ctl;
+ int rval = 0;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ if (rq->perout.index != 0)
+ return -EINVAL;
+
+ tg3_full_lock(tp, 0);
+ clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
+ clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
+
+ if (on) {
+ u64 nsec;
+
+ nsec = rq->perout.start.sec * 1000000000ULL +
+ rq->perout.start.nsec;
+
+ if (rq->perout.period.sec || rq->perout.period.nsec) {
+ netdev_warn(tp->dev,
+ "Device supports only a one-shot timesync output, period must be 0\n");
+ rval = -EINVAL;
+ goto err_out;
+ }
+
+ if (nsec & (1ULL << 63)) {
+ netdev_warn(tp->dev,
+ "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
+ rval = -EINVAL;
+ goto err_out;
+ }
+
+ tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
+ tw32(TG3_EAV_WATCHDOG0_MSB,
+ TG3_EAV_WATCHDOG0_EN |
+ ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
+
+ tw32(TG3_EAV_REF_CLCK_CTL,
+ clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
+ } else {
+ tw32(TG3_EAV_WATCHDOG0_MSB, 0);
+ tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
+ }
+
+err_out:
+ tg3_full_unlock(tp);
+ return rval;
+
+ default:
+ break;
+ }
+
return -EOPNOTSUPP;
}
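For reference only, not part of the patch: a minimal userspace sketch of arming
the one-shot TSYNC output that tg3_ptp_enable() now implements, via the
standard PTP character device. The /dev/ptp0 path is an assumption (it depends
on probe order); per the driver, the period must stay 0 and the start time
must fit in 63 bits.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	static int arm_oneshot(long long sec, unsigned int nsec)
	{
		struct ptp_perout_request req;
		int fd = open("/dev/ptp0", O_RDWR);	/* assumed tg3 PHC node */

		if (fd < 0)
			return -1;
		memset(&req, 0, sizeof(req));
		req.index = 0;			/* tg3 exposes a single channel */
		req.start.sec = sec;		/* absolute PHC time of the pulse */
		req.start.nsec = nsec;
		/* req.period stays 0: the driver rejects periodic requests */
		if (ioctl(fd, PTP_PEROUT_REQUEST, &req) < 0)
			perror("PTP_PEROUT_REQUEST");
		close(fd);
		return 0;
	}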
.max_adj = 250000000,
.n_alarm = 0,
.n_ext_ts = 0,
- .n_per_out = 0,
+ .n_per_out = 1,
.pps = 0,
.adjfreq = tg3_ptp_adjfreq,
.adjtime = tg3_ptp_adjtime,
if (tg3_flag(tp, 5755_PLUS))
tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
+ if (tg3_asic_rev(tp) == ASIC_REV_5762)
+ tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
+
if (tg3_flag(tp, ENABLE_RSS))
tp->rx_mode |= RX_MODE_RSS_ENABLE |
RX_MODE_RSS_ITBL_HASH_BITS_7 |
memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
- tg3_power_down(tp);
+ tg3_power_down_prepare(tp);
tg3_carrier_off(tp);
if (tg3_flag(tp, NO_NVRAM))
return -EINVAL;
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
- return -EAGAIN;
-
offset = eeprom->offset;
len = eeprom->len;
eeprom->len = 0;
u8 *buf;
__be32 start, end;
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
- return -EAGAIN;
-
if (tg3_flag(tp, NO_NVRAM) ||
eeprom->magic != TG3_EEPROM_MAGIC)
return -EINVAL;
tg3_phy_start(tp);
}
if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
- tg3_power_down(tp);
+ tg3_power_down_prepare(tp);
}
*/
if (tg3_flag(tp, 5780_CLASS)) {
tg3_flag_set(tp, 40BIT_DMA_BUG);
- tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
+ tp->msi_cap = tp->pdev->msi_cap;
} else {
struct pci_dev *bridge = NULL;
tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_flag_set(tp, PTP_CAPABLE);
- if (tg3_flag(tp, 5717_PLUS)) {
- /* Resume a low-power mode */
- tg3_frob_aux_power(tp, false);
- }
-
tg3_timer_init(tp);
tg3_carrier_off(tp);
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+static void tg3_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(dev);
+
+ rtnl_lock();
+ netif_device_detach(dev);
+
+ if (netif_running(dev))
+ dev_close(dev);
+
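+ /* Fully power down only on a real power-off; reboot and kexec paths
+ * keep the device accessible.
+ */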
+ if (system_state == SYSTEM_POWER_OFF)
+ tg3_power_down(tp);
+
+ rtnl_unlock();
+}
+
/**
* tg3_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
done:
if (state == pci_channel_io_perm_failure) {
- tg3_napi_enable(tp);
- dev_close(netdev);
+ if (netdev) {
+ tg3_napi_enable(tp);
+ dev_close(netdev);
+ }
err = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_disable_device(pdev);
rtnl_lock();
if (pci_enable_device(pdev)) {
- netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
goto done;
}
pci_restore_state(pdev);
pci_save_state(pdev);
- if (!netif_running(netdev)) {
+ if (!netdev || !netif_running(netdev)) {
rc = PCI_ERS_RESULT_RECOVERED;
goto done;
}
rc = PCI_ERS_RESULT_RECOVERED;
done:
- if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) {
+ if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
tg3_napi_enable(tp);
dev_close(netdev);
}
.remove = tg3_remove_one,
.err_handler = &tg3_err_handler,
.driver.pm = &tg3_pm_ops,
+ .shutdown = tg3_shutdown,
};
module_pci_driver(tg3_driver);
(struct be_async_event_grp5_pvid_state *)evt);
break;
default:
- dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
+ dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
+ event_type);
break;
}
}
adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
break;
default:
- dev_warn(&adapter->pdev->dev, "Unknown debug event\n");
+ dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
+ event_type);
break;
}
}
return len_encoded;
}
-int be_cmd_mccq_ext_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_ext_create *req;
return status;
}
-int be_cmd_mccq_org_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+static int be_cmd_mccq_org_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_create *req;
if (!status) {
struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
*if_handle = le32_to_cpu(resp->interface_id);
+
+ /* Hack to retrieve VF's pmac-id on BE3 */
+ if (BE3_chip(adapter) && !be_physfn(adapter))
+ adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
}
err:
return 1000;
case PHY_LINK_SPEED_10GBPS:
return 10000;
+ case PHY_LINK_SPEED_20GBPS:
+ return 20000;
+ case PHY_LINK_SPEED_25GBPS:
+ return 25000;
+ case PHY_LINK_SPEED_40GBPS:
+ return 40000;
}
return 0;
}
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_cntl_addnl_attribs *req;
- int status;
+ int status = 0;
spin_lock_bh(&adapter->mcc_lock);
le16_to_cpu(resp_phy_info->fixed_speeds_supported);
adapter->phy.misc_params =
le32_to_cpu(resp_phy_info->misc_params);
+
+ if (BE2_chip(adapter)) {
+ adapter->phy.fixed_speeds_supported =
+ BE_SUPPORTED_SPEED_10GBPS |
+ BE_SUPPORTED_SPEED_1GBPS;
+ }
}
pci_free_consistent(adapter->pdev, cmd.size,
cmd.va, cmd.dma);
return status;
}
-/* Uses synchronous MCCQ */
+/* Set privilege(s) for a function */
+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
+ u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_fn_privileges *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
+ wrb, NULL);
+ req->hdr.domain = domain;
+ if (lancer_chip(adapter))
+ req->privileges_lancer = cpu_to_le32(privileges);
+ else
+ req->privileges = cpu_to_le32(privileges);
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* pmac_id_valid: true => pmac_id is supplied and the MAC address for that
+ * id is returned.
+ * pmac_id_valid: false => the list is searched for an active pmac_id; if
+ * one is found it is returned and pmac_id_valid is set to true, otherwise
+ * the first MAC address in the list is returned.
+ */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
- bool *pmac_id_active, u32 *pmac_id, u8 domain)
+ bool *pmac_id_valid, u32 *pmac_id, u8 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_mac_list *req;
get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
req->hdr.domain = domain;
req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
- req->perm_override = 1;
+ if (*pmac_id_valid) {
+ req->mac_id = cpu_to_le32(*pmac_id);
+ req->iface_id = cpu_to_le16(adapter->if_handle);
+ req->perm_override = 0;
+ } else {
+ req->perm_override = 1;
+ }
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_mac_list *resp =
get_mac_list_cmd.va;
+
+ if (*pmac_id_valid) {
+ memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
+ ETH_ALEN);
+ goto out;
+ }
+
mac_count = resp->true_mac_count + resp->pseudo_mac_count;
/* Mac list returned could contain one or more active mac_ids
* or one or more true or pseudo permanent mac addresses.
* is 6 bytes
*/
if (mac_addr_size == sizeof(u32)) {
- *pmac_id_active = true;
+ *pmac_id_valid = true;
mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
*pmac_id = le32_to_cpu(mac_id);
goto out;
}
}
/* If no active mac_id found, return first mac addr */
- *pmac_id_active = false;
+ *pmac_id_valid = false;
memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
ETH_ALEN);
}
return status;
}
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac)
+{
+ bool active = true;
+
+ if (BEx_chip(adapter))
+ return be_cmd_mac_addr_query(adapter, mac, false,
+ adapter->if_handle, curr_pmac_id);
+ else
+ /* Fetch the MAC address using pmac_id */
+ return be_cmd_get_mac_from_list(adapter, mac, &active,
+ &curr_pmac_id, 0);
+}
+
+int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
+{
+ int status;
+ bool pmac_valid = false;
+
+ memset(mac, 0, ETH_ALEN);
+
+ if (BEx_chip(adapter)) {
+ if (be_physfn(adapter))
+ status = be_cmd_mac_addr_query(adapter, mac, true, 0,
+ 0);
+ else
+ status = be_cmd_mac_addr_query(adapter, mac, false,
+ adapter->if_handle, 0);
+ } else {
+ status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
+ NULL, 0);
+ }
+
+ return status;
+}
+
/* Uses synchronous MCCQ */
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
u8 mac_count, u32 domain)
return status;
}
+/* Wrapper to delete any active MACs and provision the new mac.
+ * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
+ * current list are active.
+ */
+int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
+{
+ bool active_mac = false;
+ u8 old_mac[ETH_ALEN];
+ u32 pmac_id;
+ int status;
+
+ status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
+ &pmac_id, dom);
+ if (!status && active_mac)
+ be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
+
+ return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
+}
+
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
u32 domain, u16 intf_id)
{
adapter->max_event_queues = le16_to_cpu(desc->eq_count);
adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
+
+ /* Clear flags that driver is not interested in */
+ adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT;
}
err:
mutex_unlock(&adapter->mbox_lock);
}
/* Uses mbox */
-int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
}
/* Uses sync mcc */
-int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
#define OPCODE_COMMON_GET_PORT_NAME 77
#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89
+#define OPCODE_COMMON_SET_FN_PRIVILEGES 100
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
BE_IF_FLAGS_MULTICAST = 0x1000
};
+ #define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\
+ BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\
+ BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\
+ BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
+ BE_IF_FLAGS_UNTAGGED)
+
/* An RX interface is an object with one or more MAC addresses and
* filtering capabilities. */
struct be_cmd_req_if_create {
PHY_LINK_SPEED_10MBPS = 0x1,
PHY_LINK_SPEED_100MBPS = 0x2,
PHY_LINK_SPEED_1GBPS = 0x3,
- PHY_LINK_SPEED_10GBPS = 0x4
+ PHY_LINK_SPEED_10GBPS = 0x4,
+ PHY_LINK_SPEED_20GBPS = 0x5,
+ PHY_LINK_SPEED_25GBPS = 0x6,
+ PHY_LINK_SPEED_40GBPS = 0x7
};
struct be_cmd_resp_link_status {
u32 privilege_mask;
};
+struct be_cmd_req_set_fn_privileges {
+ struct be_cmd_req_hdr hdr;
+ u32 privileges; /* Used by BE3, SH-R */
+ u32 privileges_lancer; /* Used by Lancer */
+};
/******************** GET/SET_MACLIST **************************/
#define BE_MAX_MAC 64
extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
u32 *privilege, u32 domain);
+extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
+ u32 privileges, u32 vf_num);
extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
bool *pmac_id_active, u32 *pmac_id,
u8 domain);
+extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id,
+ u8 *mac);
+extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
u8 mac_count, u32 domain);
+extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id,
+ u32 dom);
extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
u32 domain, u16 intf_id);
extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
.get_mac_address = qlcnic_83xx_get_mac_address,
.setup_intr = qlcnic_83xx_setup_intr,
.alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
- .mbx_cmd = qlcnic_83xx_mbx_op,
+ .mbx_cmd = qlcnic_83xx_issue_cmd,
.get_func_no = qlcnic_83xx_get_func_no,
.api_lock = qlcnic_83xx_cam_lock,
.api_unlock = qlcnic_83xx_cam_unlock,
struct qlcnic_cmd_args *cmd)
{
int i;
+
+ if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+ return;
+
for (i = 0; i < cmd->rsp.num; i++)
cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i));
}
return IRQ_HANDLED;
}
+static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
+{
+ atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+ complete(&mbx->completion);
+}
+
static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
{
- u32 resp, event;
+ u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
unsigned long flags;
- spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
-
+ spin_lock_irqsave(&mbx->aen_lock, flags);
resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
if (!(resp & QLCNIC_SET_OWNER))
goto out;
event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
- if (event & QLCNIC_MBX_ASYNC_EVENT)
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
-
+ } else {
+ if (atomic_read(&mbx->rsp_status) != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ }
out:
qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
}
irqreturn_t qlcnic_83xx_intr(int irq, void *data)
}
/* Enable mailbox interrupt */
- qlcnic_83xx_enable_mbx_intrpt(adapter);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
return err;
}
ahw->max_uc_count = count;
}
-void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter)
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter)
{
u32 val;
{
int i;
+ if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+ return;
+
dev_info(&adapter->pdev->dev,
"Host MBX regs(%d)\n", cmd->req.num);
for (i = 0; i < cmd->req.num; i++) {
pr_info("\n");
}
-/* Mailbox response for mac rcode */
-u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
+static inline void
+qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
- u32 fw_data;
- u8 mac_cmd_rcode;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int opcode = LSW(cmd->req.arg[0]);
+ unsigned long max_loops;
- fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
- mac_cmd_rcode = (u8)fw_data;
- if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
- mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
- mac_cmd_rcode == QLC_83XX_MAC_ABSENT)
- return QLCNIC_RCODE_SUCCESS;
- return 1;
-}
+ max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP;
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time)
-{
- u32 data;
- struct qlcnic_hardware_context *ahw = adapter->ahw;
- /* wait for mailbox completion */
- do {
- data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
- if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) {
- data = QLCNIC_RCODE_TIMEOUT;
- break;
- }
- mdelay(1);
- } while (!data);
- return data;
+ for (; max_loops; max_loops--) {
+ if (atomic_read(&cmd->rsp_status) ==
+ QLC_83XX_MBX_RESPONSE_ARRIVED)
+ return;
+
+ udelay(1);
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode);
+ flush_workqueue(ahw->mailbox->work_q);
+ return;
}
-int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
- struct qlcnic_cmd_args *cmd)
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
- int i;
- u16 opcode;
- u8 mbx_err_code;
- unsigned long flags;
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0;
+ int cmd_type, err, opcode;
+ unsigned long timeout;
opcode = LSW(cmd->req.arg[0]);
- if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
- dev_info(&adapter->pdev->dev,
- "Mailbox cmd attempted, 0x%x\n", opcode);
- dev_info(&adapter->pdev->dev, "Mailbox detached\n");
- return 0;
+ cmd_type = cmd->type;
+ err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd->type, ahw->pci_func,
+ ahw->op_mode);
+ return err;
}
- spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
- mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-
- if (mbx_val) {
- QLCDB(adapter, DRV,
- "Mailbox cmd attempted, 0x%x\n", opcode);
- QLCDB(adapter, DRV,
- "Mailbox not available, 0x%x, collect FW dump\n",
- mbx_val);
- cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
- return cmd->rsp.arg[0];
- }
-
- /* Fill in mailbox registers */
- mbx_cmd = cmd->req.arg[0];
- writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
- for (i = 1; i < cmd->req.num; i++)
- writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
-
- /* Signal FW about the impending command */
- QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
-poll:
- rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
- if (rsp != QLCNIC_RCODE_TIMEOUT) {
- /* Get the FW response data */
- fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
- if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
- __qlcnic_83xx_process_aen(adapter);
- goto poll;
- }
- mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
- rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
- opcode = QLCNIC_MBX_RSP(fw_data);
- qlcnic_83xx_get_mbx_data(adapter, cmd);
-
- switch (mbx_err_code) {
- case QLCNIC_MBX_RSP_OK:
- case QLCNIC_MBX_PORT_RSP_OK:
- rsp = QLCNIC_RCODE_SUCCESS;
- break;
- default:
- if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
- rsp = qlcnic_83xx_mac_rcode(adapter);
- if (!rsp)
- goto out;
- }
+ switch (cmd_type) {
+ case QLC_83XX_MBX_CMD_WAIT:
+ if (!wait_for_completion_timeout(&cmd->completion, timeout)) {
dev_err(&adapter->pdev->dev,
- "MBX command 0x%x failed with err:0x%x\n",
- opcode, mbx_err_code);
- rsp = mbx_err_code;
- qlcnic_dump_mbx(adapter, cmd);
- break;
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd_type, ahw->pci_func,
+ ahw->op_mode);
+ flush_workqueue(mbx->work_q);
}
- goto out;
+ break;
+ case QLC_83XX_MBX_CMD_NO_WAIT:
+ return 0;
+ case QLC_83XX_MBX_CMD_BUSY_WAIT:
+ qlcnic_83xx_poll_for_mbx_completion(adapter, cmd);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd_type, ahw->pci_func,
+ ahw->op_mode);
+ qlcnic_83xx_detach_mailbox_work(adapter);
}
- dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
- QLCNIC_MBX_RSP(mbx_cmd));
- rsp = QLCNIC_RCODE_TIMEOUT;
-out:
- /* clear fw mbx control register */
- QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
- return rsp;
+ return cmd->rsp_opcode;
}
int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
u32 temp;
const struct qlcnic_mailbox_metadata *mbx_tbl;
+ memset(mbx, 0, sizeof(struct qlcnic_cmd_args));
mbx_tbl = qlcnic_83xx_mbx_tbl;
size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
for (i = 0; i < size; i++) {
memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
temp = adapter->ahw->fw_hal_version << 29;
mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
+ mbx->cmd_op = type;
return 0;
}
}
static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
{
+ u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u32 resp, event;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
unsigned long flags;
- spin_lock_irqsave(&ahw->mbx_lock, flags);
-
+ spin_lock_irqsave(&mbx->aen_lock, flags);
resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
if (resp & QLCNIC_SET_OWNER) {
event = readl(QLCNIC_MBX_FW(ahw, 0));
- if (event & QLCNIC_MBX_ASYNC_EVENT)
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
+ } else {
+ if (atomic_read(&mbx->rsp_status) != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ }
}
-
- spin_unlock_irqrestore(&ahw->mbx_lock, flags);
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
}
static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
return;
INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0);
}
void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter)
if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
/* disable and free mailbox interrupt */
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ qlcnic_83xx_enable_mbx_poll(adapter);
qlcnic_83xx_free_mbx_intr(adapter);
+ }
adapter->ahw->loopback_state = 0;
adapter->ahw->hw_ops->setup_link_event(adapter, 1);
}
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_83xx_disable_intr(adapter, sds_ring);
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_enable_mbx_poll(adapter);
}
}
if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
err = qlcnic_83xx_setup_mbx_intr(adapter);
+ qlcnic_83xx_disable_mbx_poll(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"%s: failed to setup mbx interrupt\n",
if (netif_running(netdev))
__qlcnic_up(adapter, netdev);
+
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
+ !(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_disable_mbx_poll(adapter);
out:
netif_device_attach(netdev);
}
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
{
- int err;
+ struct qlcnic_cmd_args *cmd = NULL;
u32 temp = 0;
- struct qlcnic_cmd_args cmd;
+ int err;
if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
return -EIO;
- err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter,
QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
if (err)
- return err;
+ goto out;
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
- cmd.req.arg[1] = (mode ? 1 : 0) | temp;
- err = qlcnic_issue_cmd(adapter, &cmd);
- if (err)
- dev_info(&adapter->pdev->dev,
- "Promiscous mode config failed\n");
+ cmd->req.arg[1] = (mode ? 1 : 0) | temp;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err)
+ return err;
- qlcnic_free_mbx_args(&cmd);
+ qlcnic_free_mbx_args(cmd);
+
+out:
+ kfree(cmd);
return err;
}
if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(netdev,
"Loopback test not supported in non privileged mode\n");
- return ret;
+ return -ENOTSUPP;
}
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
/* Poll for link up event before running traffic */
do {
msleep(QLC_83XX_LB_MSLEEP_COUNT);
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_process_aen(adapter);
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
netdev_info(netdev,
"Device is resetting, free LB test resources\n");
- ret = -EIO;
+ ret = -EBUSY;
goto free_diag_res;
}
if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
netdev_info(netdev,
"Firmware didn't sent link up event to loopback request\n");
- ret = -QLCNIC_FW_NOT_RESPOND;
+ ret = -ETIMEDOUT;
qlcnic_83xx_clear_lb_mode(adapter, mode);
goto free_diag_res;
}
return status;
config = ahw->port_config;
+
+ /* Check if port is already in loopback mode */
+ if ((config & QLC_83XX_CFG_LOOPBACK_HSS) ||
+ (config & QLC_83XX_CFG_LOOPBACK_EXT)) {
+ netdev_err(netdev,
+ "Port already in Loopback mode.\n");
+ return -EINPROGRESS;
+ }
+
set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
if (mode == QLCNIC_ILB_MODE)
/* Wait for Link and IDC Completion AEN */
do {
msleep(QLC_83XX_LB_MSLEEP_COUNT);
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_process_aen(adapter);
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
netdev_info(netdev,
"Device is resetting, free LB test resources\n");
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
- return -EIO;
+ return -EBUSY;
}
if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
netdev_err(netdev,
"Did not receive IDC completion AEN\n");
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
qlcnic_83xx_clear_lb_mode(adapter, mode);
- return -EIO;
+ return -ETIMEDOUT;
}
} while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
/* Wait for Link and IDC Completion AEN */
do {
msleep(QLC_83XX_LB_MSLEEP_COUNT);
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_process_aen(adapter);
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
netdev_info(netdev,
"Device is resetting, free LB test resources\n");
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
- return -EIO;
+ return -EBUSY;
}
if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
netdev_err(netdev,
"Did not receive IDC completion AEN\n");
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
- return -EIO;
+ return -ETIMEDOUT;
}
} while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
u16 vlan_id, u8 op)
{
- int err;
- u32 *buf, temp = 0;
- struct qlcnic_cmd_args cmd;
+ struct qlcnic_cmd_args *cmd = NULL;
struct qlcnic_macvlan_mbx mv;
+ u32 *buf, temp = 0;
+ int err;
if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
return -EIO;
- err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
if (err)
- return err;
+ goto out;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
if (vlan_id)
op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
- cmd.req.arg[1] = op | (1 << 8);
+ cmd->req.arg[1] = op | (1 << 8);
qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
- cmd.req.arg[1] |= temp;
+ cmd->req.arg[1] |= temp;
mv.vlan = vlan_id;
mv.mac_addr0 = addr[0];
mv.mac_addr1 = addr[1];
mv.mac_addr3 = addr[3];
mv.mac_addr4 = addr[4];
mv.mac_addr5 = addr[5];
- buf = &cmd.req.arg[2];
+ buf = &cmd->req.arg[2];
memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
- err = qlcnic_issue_cmd(adapter, &cmd);
- if (err)
- dev_err(&adapter->pdev->dev,
- "MAC-VLAN %s to CAM failed, err=%d.\n",
- ((op == 1) ? "add " : "delete "), err);
- qlcnic_free_mbx_args(&cmd);
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err)
+ return err;
+
+ qlcnic_free_mbx_args(cmd);
+out:
+ kfree(cmd);
return err;
}
irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
{
struct qlcnic_adapter *adapter = data;
- unsigned long flags;
+ struct qlcnic_mailbox *mbx;
u32 mask, resp, event;
+ unsigned long flags;
- spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+ mbx = adapter->ahw->mailbox;
+ spin_lock_irqsave(&mbx->aen_lock, flags);
resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
if (!(resp & QLCNIC_SET_OWNER))
goto out;
event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
if (event & QLCNIC_MBX_ASYNC_EVENT)
__qlcnic_83xx_process_aen(adapter);
+ else
+ qlcnic_83xx_notify_mbx_response(mbx);
+
out:
mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
writel(0, adapter->ahw->pci_base0 + mask);
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
-
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
return IRQ_HANDLED;
}
u8 val;
int ret, max_sds_rings = adapter->max_sds_rings;
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ netdev_info(netdev, "Device is resetting\n");
+ return -EBUSY;
+ }
+
if (qlcnic_get_diag_lock(adapter)) {
netdev_info(netdev, "Device in diagnostics mode\n");
return -EBUSY;
idc->delay);
return err;
}
+
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
+{
+ INIT_COMPLETION(mbx->completion);
+ set_bit(QLC_83XX_MBX_READY, &mbx->status);
+}
+
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
+{
+ destroy_workqueue(mbx->work_q);
+ kfree(mbx);
+}
+
+static inline void
+qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ qlcnic_free_mbx_args(cmd);
+ kfree(cmd);
+ return;
+ }
+ complete(&cmd->completion);
+}
+
+static inline void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ struct list_head *head = &mbx->cmd_q;
+ struct qlcnic_cmd_args *cmd = NULL;
+
+ spin_lock(&mbx->queue_lock);
+
+ while (!list_empty(head)) {
+ cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+ dev_info(&adapter->pdev->dev, "%s: Mailbox command 0x%x\n",
+ __func__, cmd->cmd_op);
+ list_del(&cmd->list);
+ mbx->num_cmds--;
+ qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+ }
+
+ spin_unlock(&mbx->queue_lock);
+}
+
+static inline int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ u32 host_mbx_ctrl;
+
+ if (!test_bit(QLC_83XX_MBX_READY, &mbx->status))
+ return -EBUSY;
+
+ host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
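+ /* A non-zero host control register means firmware has not consumed
+ * the previous request; treat this as a firmware hang and ask the
+ * IDC machinery for a dump.
+ */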
+ if (host_mbx_ctrl) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ ahw->idc.collect_dump = 1;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter,
+ u8 issue_cmd)
+{
+ if (issue_cmd)
+ QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
+ else
+ QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
+
+static inline void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ spin_lock(&mbx->queue_lock);
+
+ list_del(&cmd->list);
+ mbx->num_cmds--;
+
+ spin_unlock(&mbx->queue_lock);
+
+ qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+}
+
+static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int i, j;
+
+ if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) {
+ mbx_cmd = cmd->req.arg[0];
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+ for (i = 1; i < cmd->req.num; i++)
+ writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
+ } else {
+ fw_hal_version = ahw->fw_hal_version;
+ hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32);
+ total_size = cmd->pay_size + hdr_size;
+ tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16;
+ mbx_cmd = tmp | fw_hal_version << 29;
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+
+ /* Back channel specific operations bits */
+ mbx_cmd = 0x1 | 1 << 4;
+
+ if (qlcnic_sriov_pf_check(adapter))
+ mbx_cmd |= cmd->func_num << 5;
+
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
+
+ for (i = 2, j = 0; j < hdr_size; i++, j++)
+ writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i));
+ for (j = 0; j < cmd->pay_size; j++, i++)
+ writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i));
+ }
+}
+
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ complete(&mbx->completion);
+ cancel_work_sync(&mbx->work);
+ flush_workqueue(mbx->work_q);
+ qlcnic_83xx_flush_mbx_queue(adapter);
+}
+
+static inline int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd,
+ unsigned long *timeout)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
+ atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+ init_completion(&cmd->completion);
+ cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
+
+ spin_lock(&mbx->queue_lock);
+
+ list_add_tail(&cmd->list, &mbx->cmd_q);
+ mbx->num_cmds++;
+ cmd->total_cmds = mbx->num_cmds;
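+ /* Scale the caller's wait with the queue depth so commands queued
+ * ahead of this one have time to drain.
+ */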
+ *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
+ queue_work(mbx->work_q, &mbx->work);
+
+ spin_unlock(&mbx->queue_lock);
+
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+static inline int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ u8 mac_cmd_rcode;
+ u32 fw_data;
+
+ if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) {
+ fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
+ mac_cmd_rcode = (u8)fw_data;
+ if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
+ mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
+ mac_cmd_rcode == QLC_83XX_MAC_ABSENT) {
+ cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+ return QLCNIC_RCODE_SUCCESS;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ u8 mbx_err_code;
+ u32 fw_data;
+
+ fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+ mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
+
+ switch (mbx_err_code) {
+ case QLCNIC_MBX_RSP_OK:
+ case QLCNIC_MBX_PORT_RSP_OK:
+ cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+ break;
+ default:
+ if (!qlcnic_83xx_check_mac_rcode(adapter, cmd))
+ break;
+
+ dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n",
+ __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+ ahw->op_mode, mbx_err_code);
+ cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED;
+ qlcnic_dump_mbx(adapter, cmd);
+ }
+
+ return;
+}
+
+static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+{
+ struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
+ work);
+ struct qlcnic_adapter *adapter = mbx->adapter;
+ struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+ struct device *dev = &adapter->pdev->dev;
+ atomic_t *rsp_status = &mbx->rsp_status;
+ struct list_head *head = &mbx->cmd_q;
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_cmd_args *cmd = NULL;
+
+ ahw = adapter->ahw;
+
+ while (true) {
+ if (qlcnic_83xx_check_mbx_status(adapter)) {
+ qlcnic_83xx_flush_mbx_queue(adapter);
+ return;
+ }
+
+ atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+
+ spin_lock(&mbx->queue_lock);
+
+ if (list_empty(head)) {
+ spin_unlock(&mbx->queue_lock);
+ return;
+ }
+ cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+
+ spin_unlock(&mbx->queue_lock);
+
+ mbx_ops->encode_cmd(adapter, cmd);
+ mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
+
+ if (wait_for_completion_timeout(&mbx->completion,
+ QLC_83XX_MBX_TIMEOUT)) {
+ mbx_ops->decode_resp(adapter, cmd);
+ mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_COMPLETION);
+ } else {
+ dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n",
+ __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+ ahw->op_mode);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_dump_mbx(adapter, cmd);
+ qlcnic_83xx_idc_request_reset(adapter,
+ QLCNIC_FORCE_FW_DUMP_KEY);
+ cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT;
+ }
+ mbx_ops->dequeue_cmd(adapter, cmd);
+ }
+}
+
+static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
+ .enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd,
+ .dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd,
+ .decode_resp = qlcnic_83xx_decode_mbx_rsp,
+ .encode_cmd = qlcnic_83xx_encode_mbx_cmd,
+ .nofity_fw = qlcnic_83xx_signal_mbx_cmd,
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx;
+
+ ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL);
+ if (!ahw->mailbox)
+ return -ENOMEM;
+
+ mbx = ahw->mailbox;
+ mbx->ops = &qlcnic_83xx_mbx_ops;
+ mbx->adapter = adapter;
+
+ spin_lock_init(&mbx->queue_lock);
+ spin_lock_init(&mbx->aen_lock);
+ INIT_LIST_HEAD(&mbx->cmd_q);
+ init_completion(&mbx->completion);
+
+ mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox");
+ if (mbx->work_q == NULL) {
+ kfree(mbx);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker);
+ set_bit(QLC_83XX_MBX_READY, &mbx->status);
+ return 0;
+}
struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
+ qlcnic_83xx_detach_mailbox_work(adapter);
/* Disable mailbox interrupt */
qlcnic_83xx_disable_mbx_intr(adapter);
{
int err;
+ qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+
/* register for NIC IDC AEN Events */
qlcnic_83xx_register_nic_idc_func(adapter, 1);
if (err)
return err;
- qlcnic_83xx_enable_mbx_intrpt(adapter);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
if (qlcnic_83xx_configure_opmode(adapter)) {
qlcnic_83xx_idc_enter_failed_state(adapter, 1);
return -EIO;
}
- qlcnic_set_drv_version(adapter);
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
qlcnic_83xx_idc_attach_driver(adapter);
return 0;
struct qlcnic_hardware_context *ahw = adapter->ahw;
qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
- set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
**/
static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
{
- u32 val;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
int ret = 0;
+ u32 val;
/* Perform NIC configuration based ready state entry actions */
if (ahw->idc.state_entry(adapter))
dev_err(&adapter->pdev->dev,
"Error: device temperature %d above limits\n",
adapter->ahw->temp);
- clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
set_bit(__QLCNIC_RESETTING, &adapter->state);
qlcnic_83xx_idc_detach_driver(adapter);
qlcnic_83xx_idc_enter_failed_state(adapter, 1);
if (ret) {
adapter->flags |= QLCNIC_FW_HANG;
if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
- clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
set_bit(__QLCNIC_RESETTING, &adapter->state);
qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
}
}
if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+
/* Move to need reset state and prepare for reset */
qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
return ret;
**/
static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
int ret = 0;
if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
set_bit(__QLCNIC_RESETTING, &adapter->state);
- clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
qlcnic_83xx_disable_vnic_mode(adapter, 1);
adapter->ahw->idc.name = (char **)qlc_83xx_idc_states;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
/* Check if reset recovery is disabled */
{
u32 val;
+ if (qlcnic_sriov_vf_check(adapter))
+ return;
+
if (qlcnic_83xx_lock_driver(adapter)) {
dev_err(&adapter->pdev->dev,
"%s:failed, please retry\n", __func__);
int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err = 0;
- if (qlcnic_sriov_vf_check(adapter))
- return qlcnic_sriov_vf_init(adapter, pci_using_dac);
+ ahw->msix_supported = !!qlcnic_use_msi_x;
+ err = qlcnic_83xx_init_mailbox_work(adapter);
+ if (err)
+ goto exit;
- if (qlcnic_83xx_check_hw_status(adapter))
- return -EIO;
+ if (qlcnic_sriov_vf_check(adapter)) {
+ err = qlcnic_sriov_vf_init(adapter, pci_using_dac);
+ if (err)
+ goto detach_mbx;
+ else
+ return err;
+ }
+
+ err = qlcnic_83xx_check_hw_status(adapter);
+ if (err)
+ goto detach_mbx;
+
+ if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
+ qlcnic_83xx_read_flash_mfg_id(adapter);
+
+ err = qlcnic_83xx_idc_init(adapter);
+ if (err)
+ goto detach_mbx;
+
+ err = qlcnic_setup_intr(adapter, 0);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
+ goto disable_intr;
+ }
- /* Initilaize 83xx mailbox spinlock */
- spin_lock_init(&ahw->mbx_lock);
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err)
+ goto disable_mbx_intr;
- set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
qlcnic_83xx_clear_function_resources(adapter);
INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
/* register for NIC IDC AEN Events */
qlcnic_83xx_register_nic_idc_func(adapter, 1);
- if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
- qlcnic_83xx_read_flash_mfg_id(adapter);
-
- if (qlcnic_83xx_idc_init(adapter))
- return -EIO;
-
/* Configure default, SR-IOV or Virtual NIC mode of operation */
- if (qlcnic_83xx_configure_opmode(adapter))
- return -EIO;
+ err = qlcnic_83xx_configure_opmode(adapter);
+ if (err)
+ goto disable_mbx_intr;
/* Perform operating mode specific initialization */
- if (adapter->nic_ops->init_driver(adapter))
- return -EIO;
+ err = adapter->nic_ops->init_driver(adapter);
+ if (err)
+ goto disable_mbx_intr;
/* Periodically monitor device status */
qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
+ return 0;
+
+disable_mbx_intr:
+ qlcnic_83xx_free_mbx_intr(adapter);
- return adapter->ahw->idc.err_code;
+disable_intr:
+ qlcnic_teardown_intr(adapter);
+
+detach_mbx:
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_free_mailbox(ahw->mailbox);
+exit:
+ return err;
}
dev_warn(&pdev->dev,
"Device does not support MSI interrupts\n");
- err = qlcnic_setup_intr(adapter, 0);
- if (err) {
- dev_err(&pdev->dev, "Failed to setup interrupt\n");
- goto err_out_disable_msi;
- }
-
- if (qlcnic_83xx_check(adapter)) {
- err = qlcnic_83xx_setup_mbx_intr(adapter);
- if (err)
+ if (qlcnic_82xx_check(adapter)) {
+ err = qlcnic_setup_intr(adapter, 0);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
+ }
}
err = qlcnic_get_act_pci_func(adapter);
if (err)
goto err_out_disable_mbx_intr;
- qlcnic_set_drv_version(adapter);
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
pci_set_drvdata(pdev, adapter);
qlcnic_sriov_cleanup(adapter);
if (qlcnic_83xx_check(adapter)) {
- qlcnic_83xx_free_mbx_intr(adapter);
qlcnic_83xx_register_nic_idc_func(adapter, 0);
cancel_delayed_work_sync(&adapter->idc_aen_work);
+ qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_free_mailbox(ahw->mailbox);
}
qlcnic_detach(adapter);
adapter->fw_fail_cnt = 0;
adapter->flags &= ~QLCNIC_FW_HANG;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- qlcnic_set_drv_version(adapter);
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
if (!qlcnic_clr_drv_state(adapter))
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
- netif_rx(skb);
+ netif_receive_skb(skb);
stats->rx_bytes += pkt_len;
stats->rx_packets++;
return ret;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * velocity_poll_controller - Velocity Poll controller function
+ * @dev: network device
+ *
+ * Used by NETCONSOLE and other diagnostic tools to allow network I/O
+ * with interrupts disabled.
+ */
+static void velocity_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ velocity_intr(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
/**
* velocity_mii_ioctl - MII ioctl handler
* @dev: network device
.ndo_do_ioctl = velocity_ioctl,
.ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = velocity_poll_controller,
+#endif
};
/**
return ret;
err_iounmap:
+ netif_napi_del(&vptr->napi);
iounmap(regs);
err_free_dev:
free_netdev(netdev);
struct velocity_info *vptr = netdev_priv(netdev);
unregister_netdev(netdev);
+ netif_napi_del(&vptr->napi);
iounmap(vptr->mac_regs);
free_netdev(netdev);
velocity_nics--;
if (!vlan->port->passthru)
return -EOPNOTSUPP;
+ if (flags & NLM_F_REPLACE)
+ return -EOPNOTSUPP;
+
if (is_unicast_ether_addr(addr))
err = dev_uc_add_excl(dev, addr);
else if (is_multicast_ether_addr(addr))
return -EADDRNOTAVAIL;
}
+ if (data && data[IFLA_MACVLAN_FLAGS] &&
+ nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
+ return -EINVAL;
+
if (data && data[IFLA_MACVLAN_MODE]) {
switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
case MACVLAN_MODE_PRIVATE:
linear = len;
skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
- err);
+ err, 0);
if (!skb)
return NULL;
return skb;
}
-/* set skb frags from iovec, this can move to core network code for reuse */
-static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
- int offset, size_t count)
-{
- int len = iov_length(from, count) - offset;
- int copy = skb_headlen(skb);
- int size, offset1 = 0;
- int i = 0;
-
- /* Skip over from offset */
- while (count && (offset >= from->iov_len)) {
- offset -= from->iov_len;
- ++from;
- --count;
- }
-
- /* copy up to skb headlen */
- while (count && (copy > 0)) {
- size = min_t(unsigned int, copy, from->iov_len - offset);
- if (copy_from_user(skb->data + offset1, from->iov_base + offset,
- size))
- return -EFAULT;
- if (copy > size) {
- ++from;
- --count;
- offset = 0;
- } else
- offset += size;
- copy -= size;
- offset1 += size;
- }
-
- if (len == offset1)
- return 0;
-
- while (count--) {
- struct page *page[MAX_SKB_FRAGS];
- int num_pages;
- unsigned long base;
- unsigned long truesize;
-
- len = from->iov_len - offset;
- if (!len) {
- offset = 0;
- ++from;
- continue;
- }
- base = (unsigned long)from->iov_base + offset;
- size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
- if (i + size > MAX_SKB_FRAGS)
- return -EMSGSIZE;
- num_pages = get_user_pages_fast(base, size, 0, &page[i]);
- if (num_pages != size) {
- int j;
-
- for (j = 0; j < num_pages; j++)
- put_page(page[i + j]);
- return -EFAULT;
- }
- truesize = size * PAGE_SIZE;
- skb->data_len += len;
- skb->len += len;
- skb->truesize += truesize;
- atomic_add(truesize, &skb->sk->sk_wmem_alloc);
- while (len) {
- int off = base & ~PAGE_MASK;
- int size = min_t(int, len, PAGE_SIZE - off);
- __skb_fill_page_desc(skb, i, page[i], off, size);
- skb_shinfo(skb)->nr_frags++;
- /* increase sk_wmem_alloc */
- base += size;
- len -= size;
- i++;
- }
- offset = 0;
- ++from;
- }
- return 0;
-}
-
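macvtap and tun carried byte-identical copies of this helper (the tun copy is removed further below), so the deletion reads as consolidation into shared core code. A call-site sketch, assuming the shared helper keeps the signature removed here:

        /* Pin user pages into skb frags for zerocopy TX, starting past
         * the virtio-net header; fall back to copying otherwise. */
        if (zerocopy)
                err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
        else
                err = skb_copy_datagram_from_iovec(skb, 0, iv,
                                                   vnet_hdr_len, len);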
/*
* macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
* be shared with the tun/tap driver.
return 0;
}
-static unsigned long iov_pages(const struct iovec *iv, int offset,
- unsigned long nr_segs)
-{
- unsigned long seg, base;
- int pages = 0, len, size;
-
- while (nr_segs && (offset >= iv->iov_len)) {
- offset -= iv->iov_len;
- ++iv;
- --nr_segs;
- }
-
- for (seg = 0; seg < nr_segs; seg++) {
- base = (unsigned long)iv[seg].iov_base + offset;
- len = iv[seg].iov_len - offset;
- size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
- pages += size;
- offset = 0;
- }
-
- return pages;
-}
-
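The removed iov_pages() rounded each segment up to whole pages, counting partial head and tail pages. A worked instance, assuming PAGE_SIZE == 4096 (PAGE_SHIFT == 12):

        /* A 5000-byte segment starting at in-page offset 100 gives
         * (100 + 5000 + 4095) >> 12 == 2, matching the two pages the
         * segment actually touches (bytes 100..5099). */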
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
const struct iovec *iv, unsigned long total_len,
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
}
- if (vlan)
+ if (vlan) {
+ local_bh_disable();
macvlan_start_xmit(skb, vlan->dev);
- else
+ local_bh_enable();
+ } else {
kfree_skb(skb);
+ }
rcu_read_unlock();
return total_len;
done:
rcu_read_lock();
vlan = rcu_dereference(q->vlan);
- if (vlan)
+ if (vlan) {
+ preempt_disable();
macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
+ preempt_enable();
+ }
rcu_read_unlock();
return ret ? ret : copied;
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
+#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
>= dev->tx_queue_len / tun->numqueues)
goto drop;
+ if (skb->sk) {
+ sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
+ sw_tx_timestamp(skb);
+ }
+
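skb->sk is only set on locally originated packets; the note below summarizes the intended flow as far as the surrounding code shows it:

        /* sock_tx_timestamp() copies the sender's SO_TIMESTAMPING request
         * into shinfo tx_flags; sw_tx_timestamp() then queues a software
         * timestamp to the sender's error queue right here, since tun has
         * no later TX-completion event to hook. */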
/* Orphan the skb - required as we might hang on to it
* for indefinite time. */
if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
linear = len;
skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
- &err);
+ &err, 0);
if (!skb)
return ERR_PTR(err);
return skb;
}
-/* set skb frags from iovec, this can move to core network code for reuse */
-static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
- int offset, size_t count)
-{
- int len = iov_length(from, count) - offset;
- int copy = skb_headlen(skb);
- int size, offset1 = 0;
- int i = 0;
-
- /* Skip over from offset */
- while (count && (offset >= from->iov_len)) {
- offset -= from->iov_len;
- ++from;
- --count;
- }
-
- /* copy up to skb headlen */
- while (count && (copy > 0)) {
- size = min_t(unsigned int, copy, from->iov_len - offset);
- if (copy_from_user(skb->data + offset1, from->iov_base + offset,
- size))
- return -EFAULT;
- if (copy > size) {
- ++from;
- --count;
- offset = 0;
- } else
- offset += size;
- copy -= size;
- offset1 += size;
- }
-
- if (len == offset1)
- return 0;
-
- while (count--) {
- struct page *page[MAX_SKB_FRAGS];
- int num_pages;
- unsigned long base;
- unsigned long truesize;
-
- len = from->iov_len - offset;
- if (!len) {
- offset = 0;
- ++from;
- continue;
- }
- base = (unsigned long)from->iov_base + offset;
- size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
- if (i + size > MAX_SKB_FRAGS)
- return -EMSGSIZE;
- num_pages = get_user_pages_fast(base, size, 0, &page[i]);
- if (num_pages != size) {
- int j;
-
- for (j = 0; j < num_pages; j++)
- put_page(page[i + j]);
- return -EFAULT;
- }
- truesize = size * PAGE_SIZE;
- skb->data_len += len;
- skb->len += len;
- skb->truesize += truesize;
- atomic_add(truesize, &skb->sk->sk_wmem_alloc);
- while (len) {
- int off = base & ~PAGE_MASK;
- int size = min_t(int, len, PAGE_SIZE - off);
- __skb_fill_page_desc(skb, i, page[i], off, size);
- skb_shinfo(skb)->nr_frags++;
- /* increase sk_wmem_alloc */
- base += size;
- len -= size;
- i++;
- }
- offset = 0;
- ++from;
- }
- return 0;
-}
-
-static unsigned long iov_pages(const struct iovec *iv, int offset,
- unsigned long nr_segs)
-{
- unsigned long seg, base;
- int pages = 0, len, size;
-
- while (nr_segs && (offset >= iv->iov_len)) {
- offset -= iv->iov_len;
- ++iv;
- --nr_segs;
- }
-
- for (seg = 0; seg < nr_segs; seg++) {
- base = (unsigned long)iv[seg].iov_base + offset;
- len = iv[seg].iov_len - offset;
- size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
- pages += size;
- offset = 0;
- }
-
- return pages;
-}
-
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
void *msg_control, const struct iovec *iv,
u32 rxhash;
if (!(tun->flags & TUN_NO_PI)) {
- if ((len -= sizeof(pi)) > total_len)
+ if (len < sizeof(pi))
return -EINVAL;
+ len -= sizeof(pi);
if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
return -EFAULT;
}
if (tun->flags & TUN_VNET_HDR) {
- if ((len -= tun->vnet_hdr_sz) > total_len)
+ if (len < tun->vnet_hdr_sz)
return -EINVAL;
+ len -= tun->vnet_hdr_sz;
if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
return -EFAULT;
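For reference, the replaced checks could never reject anything: len never exceeds total_len on entry, so (len -= hdr) > total_len is unsatisfiable, and a buffer shorter than the header silently drove len negative.

        /* Old form, shown for comparison only:
         *      if ((len -= sizeof(pi)) > total_len)
         *              return -EINVAL;
         * The new code validates the length before subtracting. */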
{
struct tun_pi pi = { 0, skb->protocol };
ssize_t total = 0;
+ int vlan_offset = 0;
if (!(tun->flags & TUN_NO_PI)) {
if ((len -= sizeof(pi)) < 0)
total += tun->vnet_hdr_sz;
}
- len = min_t(int, skb->len, len);
+ if (!vlan_tx_tag_present(skb)) {
+ len = min_t(int, skb->len, len);
+ } else {
+ int copy, ret;
+ struct {
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ } veth;
+
+ veth.h_vlan_proto = skb->vlan_proto;
+ veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+
+ vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
+ len = min_t(int, skb->len + VLAN_HLEN, len);
+
+ copy = min_t(int, vlan_offset, len);
+ ret = skb_copy_datagram_const_iovec(skb, 0, iv, total, copy);
+ len -= copy;
+ total += copy;
+ if (ret || !len)
+ goto done;
+
+ copy = min_t(int, sizeof(veth), len);
+ ret = memcpy_toiovecend(iv, (void *)&veth, total, copy);
+ len -= copy;
+ total += copy;
+ if (ret || !len)
+ goto done;
+ }
- skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
- total += skb->len;
+ skb_copy_datagram_const_iovec(skb, vlan_offset, iv, total, len);
+ total += len;
+done:
tun->dev->stats.tx_packets++;
tun->dev->stats.tx_bytes += len;
return ret;
}
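Since the VLAN tag lives in skb metadata rather than the payload, the header is rebuilt inline during copy-out. The resulting byte layout, for reference:

        /* Copy-out layout when a tag is present (VLAN_HLEN == 4):
         *      bytes  0..11    dst MAC + src MAC, copied from the skb
         *      bytes 12..15    h_vlan_proto + h_vlan_TCI, rebuilt in veth
         *      bytes 16..      remainder, copied from skb offset 12
         * hence len = min(skb->len + VLAN_HLEN, len) above. */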
-
static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len,
int flags)
if (!tun)
return -EBADFD;
- if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
+ if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
ret = -EINVAL;
goto out;
}
+ if (flags & MSG_ERRQUEUE) {
+ ret = sock_recv_errqueue(sock->sk, m, total_len,
+ SOL_PACKET, TUN_TX_TIMESTAMP);
+ goto out;
+ }
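Assuming sock_recv_errqueue() follows the usual error-queue convention, the dequeued packet should arrive with a control message identifying its origin:

        /* The payload is the timestamped packet; a cmsg of level
         * SOL_PACKET and type TUN_TX_TIMESTAMP carries the
         * sock_extended_err, while the timestamp itself is delivered
         * via the normal SCM_TIMESTAMPING path. */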
ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
flags & MSG_DONTWAIT);
if (ret > total_len) {
tun_flow_init(tun);
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
- TUN_USER_FEATURES;
+ TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
dev->features = dev->hw_features;
dev->vlan_features = dev->features;
.get_msglevel = tun_get_msglevel,
.set_msglevel = tun_set_msglevel,
.get_link = ethtool_op_get_link,
+ .get_ts_info = ethtool_op_get_ts_info,
};
/* First remote destination for a forwarding entry.
* Guaranteed to be non-NULL because remotes are never deleted.
*/
-static inline struct vxlan_rdst *first_remote(struct vxlan_fdb *fdb)
+static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
- return list_first_or_null_rcu(&fdb->remotes, struct vxlan_rdst, list);
+ return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
+}
+
+static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
+{
+ return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
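The split accessors encode the required locking context in the name: readers use the _rcu variant under rcu_read_lock(), while configuration paths holding RTNL use the _rtnl one. A hedged read-side sketch (consume_remote() is a placeholder):

        rcu_read_lock();
        f = vxlan_find_mac(vxlan, mac);
        if (f)
                consume_remote(first_remote_rcu(f)); /* valid only here */
        rcu_read_unlock();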
/* Find VXLAN socket based on network namespace and UDP port */
if (skb == NULL)
goto errout;
- err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, first_remote(fdb));
+ err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0,
+ first_remote_rtnl(fdb));
if (err < 0) {
/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
return NULL;
}
+/* Replace destination of unicast mac */
+static int vxlan_fdb_replace(struct vxlan_fdb *f,
+ __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
+{
+ struct vxlan_rdst *rd;
+
+ rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
+ if (rd)
+ return 0;
+
+ rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
+ if (!rd)
+ return 0;
+ rd->remote_ip = ip;
+ rd->remote_port = port;
+ rd->remote_vni = vni;
+ rd->remote_ifindex = ifindex;
+ return 1;
+}
+
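Replace rewrites the single remote of an existing unicast entry in place instead of appending another. A hedged userspace equivalent via iproute2, with example addresses:

        /* bridge fdb replace 00:11:22:33:44:55 dev vxlan0 dst 10.0.0.2
         * NLM_F_REPLACE is accepted only for unicast, non-zero MACs;
         * multicast and zero entries return -EOPNOTSUPP below. */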
/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
__be32 ip, __be16 port, __u32 vni, __u32 ifindex)
f->updated = jiffies;
notify = 1;
}
+ if ((flags & NLM_F_REPLACE)) {
+ /* Only change unicasts */
+ if (!(is_multicast_ether_addr(f->eth_addr) ||
+ is_zero_ether_addr(f->eth_addr))) {
+ int rc = vxlan_fdb_replace(f, ip, port, vni,
+ ifindex);
+
+ if (rc < 0)
+ return rc;
+ notify |= rc;
+ } else
+ return -EOPNOTSUPP;
+ }
if ((flags & NLM_F_APPEND) &&
(is_multicast_ether_addr(f->eth_addr) ||
is_zero_ether_addr(f->eth_addr))) {
if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
return -ENOSPC;
+ /* Disallow replace to add a multicast entry */
+ if ((flags & NLM_F_REPLACE) &&
+ (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
+ return -EOPNOTSUPP;
+
netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
f = kmalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
f = vxlan_find_mac(vxlan, src_mac);
if (likely(f)) {
- struct vxlan_rdst *rdst = first_remote(f);
+ struct vxlan_rdst *rdst = first_remote_rcu(f);
if (likely(rdst->remote_ip == src_ip))
return false;
}
f = vxlan_find_mac(vxlan, n->ha);
- if (f && first_remote(f)->remote_ip == htonl(INADDR_ANY)) {
+ if (f && first_remote_rcu(f)->remote_ip == htonl(INADDR_ANY)) {
/* bridge-local neighbor */
neigh_release(n);
goto out;
return -ENOTCONN;
if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
- ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
+ vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
vxlan_sock_hold(vs);
dev_hold(dev);
queue_work(vxlan_wq, &vxlan->igmp_join);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
- flush_workqueue(vxlan_wq);
-
spin_lock(&vn->sock_lock);
hlist_del_rcu(&vxlan->hlist);
spin_unlock(&vn->sock_lock);
#define PACKET_RCVD 0
#define PACKET_REJECT 1
-#define IP_TNL_HASH_BITS 10
+#define IP_TNL_HASH_BITS 7
#define IP_TNL_HASH_SIZE (1 << IP_TNL_HASH_BITS)
struct ip_tunnel_net {
- struct hlist_head *tunnels;
struct net_device *fb_tunnel_dev;
+ struct hlist_head tunnels[IP_TNL_HASH_SIZE];
};
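Dropping the hash from 10 to 7 bits makes the table cheap enough to embed, removing one allocation per netns:

        /* 1 << 7 == 128 hlist heads; on 64-bit that is 128 *
         * sizeof(struct hlist_head) == 1 KiB embedded per netns,
         * versus 8 KiB separately allocated for the old 10 bits. */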
#ifdef CONFIG_INET
int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);
-void ip_tunnel_delete_net(struct ip_tunnel_net *itn);
+void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
return INET_ECN_encapsulate(tos, inner);
}
- static inline void tunnel_ip_select_ident(struct sk_buff *skb,
- const struct iphdr *old_iph,
- struct dst_entry *dst)
- {
- struct iphdr *iph = ip_hdr(skb);
-
- /* Use inner packet iph-id if possible. */
- if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
- iph->id = old_iph->id;
- else
- __ip_select_ident(iph, dst,
- (skb_shinfo(skb)->gso_segs ?: 1) - 1);
- }
-
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
int iptunnel_xmit(struct net *net, struct rtable *rt,
struct sk_buff *skb,
return NULL;
}
-extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
-extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
-extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
-extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
-extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
-
-extern void dev_init_scheduler(struct net_device *dev);
-extern void dev_shutdown(struct net_device *dev);
-extern void dev_activate(struct net_device *dev);
-extern void dev_deactivate(struct net_device *dev);
-extern void dev_deactivate_many(struct list_head *head);
-extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
- struct Qdisc *qdisc);
-extern void qdisc_reset(struct Qdisc *qdisc);
-extern void qdisc_destroy(struct Qdisc *qdisc);
-extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
-extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
- struct Qdisc_ops *ops);
-extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
- struct Qdisc_ops *ops, u32 parentid);
-extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
- const struct qdisc_size_table *stab);
-extern void tcf_destroy(struct tcf_proto *tp);
-extern void tcf_destroy_chain(struct tcf_proto **fl);
+int qdisc_class_hash_init(struct Qdisc_class_hash *);
+void qdisc_class_hash_insert(struct Qdisc_class_hash *,
+ struct Qdisc_class_common *);
+void qdisc_class_hash_remove(struct Qdisc_class_hash *,
+ struct Qdisc_class_common *);
+void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
+void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
+
+void dev_init_scheduler(struct net_device *dev);
+void dev_shutdown(struct net_device *dev);
+void dev_activate(struct net_device *dev);
+void dev_deactivate(struct net_device *dev);
+void dev_deactivate_many(struct list_head *head);
+struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+ struct Qdisc *qdisc);
+void qdisc_reset(struct Qdisc *qdisc);
+void qdisc_destroy(struct Qdisc *qdisc);
+void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
+struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ struct Qdisc_ops *ops);
+struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
+ struct Qdisc_ops *ops, u32 parentid);
+void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+ const struct qdisc_size_table *stab);
+void tcf_destroy(struct tcf_proto *tp);
+void tcf_destroy_chain(struct tcf_proto **fl);
/* Reset all TX qdiscs greater then index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
u64 rate_bytes_ps; /* bytes per second */
u32 mult;
u16 overhead;
+ u8 linklayer;
u8 shift;
};
static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
unsigned int len)
{
- return ((u64)(len + r->overhead) * r->mult) >> r->shift;
+ len += r->overhead;
+
+ if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
+ return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;
+
+ return ((u64)len * r->mult) >> r->shift;
}
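ATM carries 48 payload bytes per 53-byte cell, so transmission time is computed over whole cells. A worked instance:

        /* A 1000-byte frame (overhead included) needs
         * DIV_ROUND_UP(1000, 48) == 21 cells, i.e. 21 * 53 == 1113
         * bytes on the wire, which then feeds the mult/shift scaling. */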
-extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
+void psched_ratecfg_precompute(struct psched_ratecfg *r,
+ const struct tc_ratespec *conf);
static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
const struct psched_ratecfg *r)
memset(res, 0, sizeof(*res));
res->rate = r->rate_bytes_ps;
res->overhead = r->overhead;
+ res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}
#endif
IPSTATS_MIB_INBCASTOCTETS, /* InBcastOctets */
IPSTATS_MIB_OUTBCASTOCTETS, /* OutBcastOctets */
IPSTATS_MIB_CSUMERRORS, /* InCsumErrors */
+ IPSTATS_MIB_NOECTPKTS, /* InNoECTPkts */
+ IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
+ IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
+ IPSTATS_MIB_CEPKTS, /* InCEPkts */
__IPSTATS_MIB_MAX
};
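The four counters map one-to-one onto the two ECN bits of the IP header (RFC 3168):

        /* 00 Not-ECT -> IPSTATS_MIB_NOECTPKTS
         * 01 ECT(1)  -> IPSTATS_MIB_ECT1PKTS
         * 10 ECT(0)  -> IPSTATS_MIB_ECT0PKTS
         * 11 CE      -> IPSTATS_MIB_CEPKTS */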
LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */
LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */
LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */
- LINUX_MIB_LOWLATENCYRXPACKETS, /* LowLatencyRxPackets */
+ LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */
__LINUX_MIB_MAX
};
nhoff += sizeof(struct ipv6hdr);
break;
}
+ case __constant_htons(ETH_P_8021AD):
case __constant_htons(ETH_P_8021Q): {
const struct vlan_hdr *vlan;
struct vlan_hdr _vlan;
break;
}
case IPPROTO_IPIP:
- goto again;
+ proto = htons(ETH_P_IP);
+ goto ip;
+ case IPPROTO_IPV6:
+ proto = htons(ETH_P_IPV6);
+ goto ipv6;
default:
break;
}
+ rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
+ rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ rtnl_link_get_size(dev) /* IFLA_LINKINFO */
- + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
+ + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
+ + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */
}
static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
return 0;
}
+static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
+{
+ int err;
+ struct netdev_phys_port_id ppid;
+
+ err = dev_get_phys_port_id(dev, &ppid);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ return 0;
+ return err;
+ }
+
+ if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
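On the driver side the attribute is backed by an ndo_get_phys_port_id hook. A minimal sketch, assuming a hypothetical foo_priv with a stable port_id[] field:

        static int foo_get_phys_port_id(struct net_device *dev,
                                        struct netdev_phys_port_id *ppid)
        {
                struct foo_priv *priv = netdev_priv(dev); /* hypothetical */

                ppid->id_len = sizeof(priv->port_id); /* <= MAX_PHYS_PORT_ID_LEN */
                memcpy(ppid->id, priv->port_id, ppid->id_len);
                return 0;
        }

The hook is wired up through .ndo_get_phys_port_id in the driver's net_device_ops; returning -EOPNOTSUPP, or leaving the hook unset, makes rtnl_phys_port_id_fill() above skip the attribute.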
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
int type, u32 pid, u32 seq, u32 change,
unsigned int flags, u32 ext_filter_mask)
goto nla_put_failure;
}
+ if (rtnl_phys_port_id_fill(skb, dev))
+ goto nla_put_failure;
+
attr = nla_reserve(skb, IFLA_STATS,
sizeof(struct rtnl_link_stats));
if (attr == NULL)
[IFLA_PROMISCUITY] = { .type = NLA_U32 },
[IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
[IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
+ [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_PORT_ID_LEN },
};
EXPORT_SYMBOL(ifla_policy);
else
err = register_netdevice(dev);
- if (err < 0 && !IS_ERR(dev))
+ if (err < 0) {
free_netdev(dev);
- if (err < 0)
goto out;
+ }
err = rtnl_configure_link(dev, ifm);
if (err < 0)
/* If aging addresses are supported device will need to
* implement its own handler for this.
*/
- if (ndm->ndm_state & NUD_PERMANENT) {
+ if (!(ndm->ndm_state & NUD_PERMANENT)) {
pr_info("%s: FDB only supports static addresses\n", dev->name);
return -EINVAL;
}
struct nlattr *extfilt;
u32 filter_mask = 0;
- extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
+ extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
IFLA_EXT_MASK);
if (extfilt)
filter_mask = nla_get_u32(extfilt);
if (daddr)
memcpy(&iph->daddr, daddr, 4);
if (iph->daddr)
- return t->hlen;
+ return t->hlen + sizeof(*iph);
return -(t->hlen + sizeof(*iph));
}
static void __net_exit ipgre_exit_net(struct net *net)
{
struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);
- ip_tunnel_delete_net(itn);
+ ip_tunnel_delete_net(itn, &ipgre_link_ops);
}
static struct pernet_operations ipgre_net_ops = {
static void __net_exit ipgre_tap_exit_net(struct net *net)
{
struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);
- ip_tunnel_delete_net(itn);
+ ip_tunnel_delete_net(itn, &ipgre_tap_ops);
}
static struct pernet_operations ipgre_tap_net_ops = {
SNMP_MIB_SENTINEL
};
-/* Following RFC4293 items are displayed in /proc/net/netstat */
+/* Following items are displayed in /proc/net/netstat */
static const struct snmp_mib snmp4_ipextstats_list[] = {
SNMP_MIB_ITEM("InNoRoutes", IPSTATS_MIB_INNOROUTES),
SNMP_MIB_ITEM("InTruncatedPkts", IPSTATS_MIB_INTRUNCATEDPKTS),
SNMP_MIB_ITEM("OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS),
SNMP_MIB_ITEM("InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
SNMP_MIB_ITEM("OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
+ /* Non RFC4293 fields */
SNMP_MIB_ITEM("InCsumErrors", IPSTATS_MIB_CSUMERRORS),
+ SNMP_MIB_ITEM("InNoECTPkts", IPSTATS_MIB_NOECTPKTS),
+ SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
+ SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
+ SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
SNMP_MIB_SENTINEL
};
SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
- SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS),
+ SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
SNMP_MIB_SENTINEL
};
* node.
*/
-static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
- int addrlen, int plen,
+static struct fib6_node *fib6_add_1(struct fib6_node *root,
+ struct in6_addr *addr, int plen,
int offset, int allow_create,
int replace_required)
{
but if it is >= plen, the value is ignored in any case.
*/
- bit = __ipv6_addr_diff(addr, &key->addr, addrlen);
+ bit = __ipv6_addr_diff(addr, &key->addr, sizeof(*addr));
/*
* (intermediate)[in]
if (!allow_create && !replace_required)
pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
- fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
- rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst),
- allow_create, replace_required);
+ fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
+ offsetof(struct rt6_info, rt6i_dst), allow_create,
+ replace_required);
if (IS_ERR(fn)) {
err = PTR_ERR(fn);
/* Now add the first leaf node to new subtree */
sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
- sizeof(struct in6_addr), rt->rt6i_src.plen,
+ rt->rt6i_src.plen,
offsetof(struct rt6_info, rt6i_src),
allow_create, replace_required);
fn->subtree = sfn;
} else {
sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
- sizeof(struct in6_addr), rt->rt6i_src.plen,
+ rt->rt6i_src.plen,
offsetof(struct rt6_info, rt6i_src),
allow_create, replace_required);
if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
#ifdef CONFIG_IPV6_SUBTREES
- if (fn->subtree)
- fn = fib6_lookup_1(fn->subtree, args + 1);
+ if (fn->subtree) {
+ struct fib6_node *sfn;
+ sfn = fib6_lookup_1(fn->subtree,
+ args + 1);
+ if (!sfn)
+ goto backtrack;
+ fn = sfn;
+ }
#endif
- if (!fn || fn->fn_flags & RTN_RTINFO)
+ if (fn->fn_flags & RTN_RTINFO)
return fn;
}
}
-
+ #ifdef CONFIG_IPV6_SUBTREES
+ backtrack:
+ #endif
if (fn->fn_flags & RTN_ROOT)
break;
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Daisy Chang <daisyc@us.ibm.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
else
spc_state = SCTP_ADDR_AVAILABLE;
/* Don't inform ULP about transition from PF to
- * active state and set cwnd to 1, see SCTP
+ * active state and set cwnd to 1 MTU, see SCTP
* Quick failover draft section 5.1, point 5
*/
if (transport->state == SCTP_PF) {
ulp_notify = false;
- transport->cwnd = 1;
+ transport->cwnd = asoc->pathmtu;
}
transport->state = SCTP_ACTIVE;
break;
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Hui Huang <hui.huang@nokia.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
return;
}
- call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
-
sctp_packet_free(&transport->packet);
if (transport->asoc)
sctp_association_put(transport->asoc);
+
+ call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
}
/* Start T3_rtx timer if it is not already running and update the heartbeat
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
-
-#include "af_vsock.h"
+#include <net/af_vsock.h>
static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
struct vsock_sock *vsk;
list_for_each_entry(vsk, &vsock_connected_table[i],
- connected_table);
+ connected_table)
fn(sk_vsock(vsk));
}