*
*************************************************************************/
-static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
-static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
+static void efx_soft_enable_interrupts(struct efx_nic *efx);
+static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
efx_for_each_channel(channel, efx)
channel->type->get_name(channel,
- efx->channel_name[channel->channel],
- sizeof(efx->channel_name[0]));
+ efx->msi_context[channel->channel].name,
+ sizeof(efx->msi_context[0].name));
}
static int efx_probe_channels(struct efx_nic *efx)
efx_device_detach_sync(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, true);
+ efx_soft_disable_interrupts(efx);
/* Clone channels (where possible) */
memset(other_channel, 0, sizeof(other_channel));
}
}
- efx_start_interrupts(efx, true);
+ efx_soft_enable_interrupts(efx);
efx_start_all(efx);
netif_device_attach(efx->net_dev);
return rc;
return 0;
}
-/* Enable interrupts, then probe and start the event queues */
-static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
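+/* Soft-enable interrupts: let the IRQ handlers do real work, then
+ * initialise (where needed) and start the per-channel event queues.
+ */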
+static void efx_soft_enable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
BUG_ON(efx->state == STATE_DISABLED);
- if (efx->eeh_disabled_legacy_irq) {
- enable_irq(efx->legacy_irq);
- efx->eeh_disabled_legacy_irq = false;
- }
- if (efx->legacy_irq)
- efx->legacy_irq_enabled = true;
- efx_nic_enable_interrupts(efx);
+ efx->irq_soft_enabled = true;
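+ /* Ensure the flag is visible before any event queue is started below */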
+ smp_wmb();
efx_for_each_channel(channel, efx) {
- if (!channel->type->keep_eventq || !may_keep_eventq)
+ if (!channel->type->keep_eventq)
efx_init_eventq(channel);
efx_start_eventq(channel);
}
efx_mcdi_mode_event(efx);
}
-static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
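+/* Soft-disable interrupts: tell the IRQ handlers to stand down, wait for
+ * any that are already running, then stop the event queues.
+ */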
+static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_mcdi_mode_poll(efx);
- efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq) {
+ efx->irq_soft_enabled = false;
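+ /* Make the cleared flag visible before waiting on in-flight handlers */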
+ smp_wmb();
+
+ if (efx->legacy_irq)
synchronize_irq(efx->legacy_irq);
- efx->legacy_irq_enabled = false;
- }
efx_for_each_channel(channel, efx) {
if (channel->irq)
synchronize_irq(channel->irq);
efx_stop_eventq(channel);
- if (!channel->type->keep_eventq || !may_keep_eventq)
+ if (!channel->type->keep_eventq)
efx_fini_eventq(channel);
}
}
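+/* Fully enable interrupts: undo any EEH disable of the legacy IRQ,
+ * enable interrupts at the NIC, bring up the event queues that are kept
+ * across soft-disable (keep_eventq), then soft-enable.
+ */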
+static void efx_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ if (efx->eeh_disabled_legacy_irq) {
+ enable_irq(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = false;
+ }
+
+ efx_nic_enable_interrupts(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq)
+ efx_init_eventq(channel);
+ }
+
+ efx_soft_enable_interrupts(efx);
+}
+
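+/* Fully disable interrupts: soft-disable first, tear down the kept
+ * event queues, then disable interrupts at the NIC.
+ */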
+static void efx_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_soft_disable_interrupts(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx_nic_disable_interrupts(efx);
+}
+
static void efx_remove_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
EFX_ASSERT_RESET_SERIALISED(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
mutex_lock(&efx->mac_lock);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
efx->type->reconfigure_mac(efx);
- efx_start_interrupts(efx, false);
+ efx_enable_interrupts(efx);
efx_restore_filters(efx);
efx_sriov_reset(efx);
efx->channel[i] = efx_alloc_channel(efx, i, NULL);
if (!efx->channel[i])
goto fail;
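+ /* The MSI context, unlike the channel, is never
+ * reallocated, so IRQ handlers can use it safely */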
+ efx->msi_context[i].efx = efx;
+ efx->msi_context[i].index = i;
}
EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
BUG_ON(efx->state == STATE_READY);
cancel_work_sync(&efx->reset_work);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
efx_nic_fini_interrupt(efx);
efx_fini_port(efx);
efx->type->fini(efx);
/* Mark the NIC as fini, then stop the interface */
rtnl_lock();
dev_close(efx->net_dev);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
rtnl_unlock();
efx_sriov_fini(efx);
rc = efx_nic_init_interrupt(efx);
if (rc)
goto fail5;
- efx_start_interrupts(efx, false);
+ efx_enable_interrupts(efx);
return 0;
efx_device_detach_sync(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
}
rtnl_unlock();
rtnl_lock();
if (efx->state != STATE_DISABLED) {
- efx_start_interrupts(efx, false);
+ efx_enable_interrupts(efx);
mutex_lock(&efx->mac_lock);
efx->phy_op->reconfigure(efx);
efx_device_detach_sync(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
status = PCI_ERS_RESULT_NEED_RESET;
} else {
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
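+ /* If soft-disabled, take no action beyond claiming the interrupt */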
+ if (unlikely(!ACCESS_ONCE(efx->irq_soft_enabled)))
+ return IRQ_HANDLED;
+
/* Check to see if we have a serious error condition */
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
};
+/**
+ * struct efx_msi_context - Context for each MSI
+ * @efx: The associated NIC
+ * @index: Index of the channel/IRQ
+ * @name: Name of the channel/IRQ
+ *
+ * Unlike &struct efx_channel, this is never reallocated and is always
+ * safe for the IRQ handler to access.
+ */
+struct efx_msi_context {
+ struct efx_nic *efx;
+ unsigned int index;
+ char name[IFNAMSIZ + 6];
+};
+
/**
* struct efx_channel_type - distinguishes traffic and extra channels
* @handle_no_channel: Handle failure to allocate an extra channel
* @pci_dev: The PCI device
* @type: Controller type attributes
* @legacy_irq: IRQ number
- * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
* @workqueue: Workqueue for port reconfigures and the HW monitor.
* Work items do not hold and must not acquire RTNL.
* @workqueue_name: Name of workqueue
* @tx_queue: TX DMA queues
* @rx_queue: RX DMA queues
* @channel: Channels
- * @channel_name: Names for channels and their IRQs
+ * @msi_context: Context for each MSI
* @extra_channel_types: Types of extra (non-traffic) channels that
* should be allocated for this NIC
* @rxq_entries: Size of receive queues requested by user.
* @rx_scatter: Scatter mode enabled for receives
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
+ * @irq_soft_enabled: Are IRQs soft-enabled? If not, the IRQ handler will
+ * acknowledge the interrupt but do nothing else.
* @irq_status: Interrupt status buffer
* @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
* @irq_level: IRQ level/index for IRQs not triggered by an event queue
unsigned int port_num;
const struct efx_nic_type *type;
int legacy_irq;
- bool legacy_irq_enabled;
bool eeh_disabled_legacy_irq;
struct workqueue_struct *workqueue;
char workqueue_name[16];
unsigned long reset_pending;
struct efx_channel *channel[EFX_MAX_CHANNELS];
- char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
+ struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
const struct efx_channel_type *
extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
unsigned int_error_count;
unsigned long int_error_expire;
+ bool irq_soft_enabled;
struct efx_buffer irq_status;
unsigned irq_zero_count;
unsigned irq_level;
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
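+ /* Read the soft-enable flag once so all checks in this handler agree */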
+ bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
efx_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE;
struct efx_channel *channel;
u32 queues;
int syserr;
- /* Could this be ours? If interrupts are disabled then the
- * channel state may not be valid.
- */
- if (!efx->legacy_irq_enabled)
- return result;
-
/* Read the ISR which also ACKs the interrupts */
efx_readd(efx, &reg, FR_BZ_INT_ISR0);
queues = EFX_EXTRACT_DWORD(reg, 0, 31);
}
/* Handle non-event-queue sources */
- if (queues & (1U << efx->irq_level)) {
+ if (queues & (1U << efx->irq_level) && soft_enabled) {
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
return efx_nic_fatal_interrupt(efx);
efx->irq_zero_count = 0;
/* Schedule processing of any interrupting queues */
- efx_for_each_channel(channel, efx) {
- if (queues & 1)
- efx_schedule_channel_irq(channel);
- queues >>= 1;
+ if (likely(soft_enabled)) {
+ efx_for_each_channel(channel, efx) {
+ if (queues & 1)
+ efx_schedule_channel_irq(channel);
+ queues >>= 1;
+ }
}
result = IRQ_HANDLED;
result = IRQ_HANDLED;
/* Ensure we schedule or rearm all event queues */
- efx_for_each_channel(channel, efx) {
- event = efx_event(channel, channel->eventq_read_ptr);
- if (efx_event_present(event))
- efx_schedule_channel_irq(channel);
- else
- efx_nic_eventq_read_ack(channel);
+ if (likely(soft_enabled)) {
+ efx_for_each_channel(channel, efx) {
+ event = efx_event(channel,
+ channel->eventq_read_ptr);
+ if (efx_event_present(event))
+ efx_schedule_channel_irq(channel);
+ else
+ efx_nic_eventq_read_ack(channel);
+ }
}
}
*/
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
- struct efx_channel *channel = *(struct efx_channel **)dev_id;
- struct efx_nic *efx = channel->efx;
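+ /* dev_id is the stable MSI context, not the channel,
+ * which may be reallocated while the IRQ is active */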
+ struct efx_msi_context *context = dev_id;
+ struct efx_nic *efx = context->efx;
efx_oword_t *int_ker = efx->irq_status.addr;
int syserr;
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
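+ /* If soft-disabled, take no action beyond claiming the interrupt */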
+ if (unlikely(!ACCESS_ONCE(efx->irq_soft_enabled)))
+ return IRQ_HANDLED;
+
/* Handle non-event-queue sources */
- if (channel->channel == efx->irq_level) {
+ if (context->index == efx->irq_level) {
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
return efx_nic_fatal_interrupt(efx);
}
/* Schedule processing of the channel */
- efx_schedule_channel_irq(channel);
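+ /* Look up the current channel through the stable index */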
+ efx_schedule_channel_irq(efx->channel[context->index]);
return IRQ_HANDLED;
}
efx_for_each_channel(channel, efx) {
rc = request_irq(channel->irq, efx_msi_interrupt,
IRQF_PROBE_SHARED, /* Not shared */
- efx->channel_name[channel->channel],
- &efx->channel[channel->channel]);
+ efx->msi_context[channel->channel].name,
+ &efx->msi_context[channel->channel]);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to hook IRQ %d\n", channel->irq);
efx_for_each_channel(channel, efx) {
if (n_irqs-- == 0)
break;
- free_irq(channel->irq, &efx->channel[channel->channel]);
+ free_irq(channel->irq, &efx->msi_context[channel->channel]);
}
fail1:
return rc;
/* Disable MSI/MSI-X interrupts */
efx_for_each_channel(channel, efx)
- free_irq(channel->irq, &efx->channel[channel->channel]);
+ free_irq(channel->irq, &efx->msi_context[channel->channel]);
/* ACK legacy interrupt */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)