u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
struct fjes_device_command_param param;
struct ep_share_mem_info *buf_pair;
+ unsigned long flags;
size_t mem_size;
int result;
int epidx;
if (result)
return result;
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx, mac,
fjes_support_mtu[0]);
fjes_hw_setup_epbuf(&buf_pair->rx, mac,
fjes_support_mtu[0]);
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
}
}
INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);
mutex_init(&hw->hw_info.lock);
+ spin_lock_init(&hw->rx_status_lock);
hw->max_epid = fjes_hw_get_max_epid(hw);
hw->my_epid = fjes_hw_get_my_epid(hw);
void fjes_hw_raise_epstop(struct fjes_hw *hw)
{
enum ep_partner_status status;
+ unsigned long flags;
int epidx;
for (epidx = 0; epidx < hw->max_epid; epidx++) {
set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
set_bit(epidx, &hw->txrx_stop_req_bit);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
FJES_RX_STOP_REQ_REQUEST;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
}
}
struct fjes_adapter *adapter;
struct net_device *netdev;
+ unsigned long flags;
ulong unshare_bit = 0;
ulong share_bit = 0;
continue;
if (test_bit(epidx, &share_bit)) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
netdev->dev_addr, netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
mutex_lock(&hw->hw_info.lock);
mutex_unlock(&hw->hw_info.lock);
- if (ret == 0)
+ if (ret == 0) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(
&hw->ep_shm_info[epidx].tx,
netdev->dev_addr, netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock,
+ flags);
+ }
}
if (test_bit(epidx, &irq_bit)) {
REG_ICTL_MASK_TXRX_STOP_REQ);
set_bit(epidx, &hw->txrx_stop_req_bit);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epidx].tx.
info->v1i.rx_status |=
FJES_RX_STOP_REQ_REQUEST;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
}
}
{
struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;
+ unsigned long flags;
ulong remain_bit;
int epid_bit;
while ((remain_bit = hw->epstop_req_bit)) {
for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
if (remain_bit & 1) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epid_bit].
tx.info->v1i.rx_status |=
FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock,
+ flags);
clear_bit(epid_bit, &hw->epstop_req_bit);
set_bit(epid_bit,
{
struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
+ unsigned long flags;
int epidx;
netif_tx_stop_all_queues(netdev);
napi_disable(&adapter->napi);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
- adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status &=
- ~FJES_RX_POLL_WORK;
+ if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+ EP_PARTNER_SHARED)
+ adapter->hw.ep_shm_info[epidx]
+ .tx.info->v1i.rx_status &=
+ ~FJES_RX_POLL_WORK;
}
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
fjes_free_irq(adapter);
struct net_device *netdev = adapter->netdev;
struct ep_share_mem_info *buf_pair;
struct fjes_hw *hw = &adapter->hw;
+ unsigned long flags;
int result;
int epidx;
buf_pair = &hw->ep_shm_info[epidx];
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
if (fjes_hw_epid_is_same_zone(hw, epidx)) {
mutex_lock(&hw->hw_info.lock);
struct ep_share_mem_info *buf_pair;
struct fjes_hw *hw = &adapter->hw;
bool reset_flag = false;
+ unsigned long flags;
int result;
int epidx;
buf_pair = &hw->ep_shm_info[epidx];
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx,
netdev->dev_addr, netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(epidx, &hw->txrx_stop_req_bit);
}
struct fjes_adapter *adapter = netdev_priv(netdev);
bool running = netif_running(netdev);
struct fjes_hw *hw = &adapter->hw;
+ unsigned long flags;
int ret = -EINVAL;
int idx, epidx;
return ret;
if (running) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
~FJES_RX_MTU_CHANGING_DONE;
}
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
+
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
cancel_work_sync(&adapter->tx_stall_task);
netdev->mtu = new_mtu;
if (running) {
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
- local_irq_disable();
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
netdev->dev_addr,
netdev->mtu);
- local_irq_enable();
hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
FJES_RX_MTU_CHANGING_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
}
netif_tx_wake_all_queues(netdev);
netif_carrier_on(netdev);
napi_enable(&adapter->napi);
+ napi_schedule(&adapter->napi);
}
return ret;
{
struct fjes_hw *hw = &adapter->hw;
enum ep_partner_status status;
+ unsigned long flags;
status = fjes_hw_get_partner_ep_status(hw, src_epid);
switch (status) {
break;
case EP_PARTNER_WAITING:
if (src_epid < hw->my_epid) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(src_epid, &hw->txrx_stop_req_bit);
set_bit(src_epid, &adapter->unshare_watch_bitmask);
{
struct fjes_hw *hw = &adapter->hw;
enum ep_partner_status status;
+ unsigned long flags;
set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
status = fjes_hw_get_partner_ep_status(hw, src_epid);
switch (status) {
case EP_PARTNER_WAITING:
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(src_epid, &hw->txrx_stop_req_bit);
/* fall through */
case EP_PARTNER_UNSHARE:
size_t frame_len;
void *frame;
+ spin_lock(&hw->rx_status_lock);
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
- adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status |=
- FJES_RX_POLL_WORK;
+ if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+ EP_PARTNER_SHARED)
+ adapter->hw.ep_shm_info[epidx]
+ .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
}
+ spin_unlock(&hw->rx_status_lock);
while (work_done < budget) {
prefetch(&adapter->hw);
if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
napi_reschedule(napi);
} else {
+ spin_lock(&hw->rx_status_lock);
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
- adapter->hw.ep_shm_info[epidx]
- .tx.info->v1i.rx_status &=
+ if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+ EP_PARTNER_SHARED)
+ adapter->hw.ep_shm_info[epidx].tx
+ .info->v1i.rx_status &=
~FJES_RX_POLL_WORK;
}
+ spin_unlock(&hw->rx_status_lock);
fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
}
int max_epid, my_epid, epidx;
int stop_req, stop_req_done;
ulong unshare_watch_bitmask;
+ unsigned long flags;
int wait_time = 0;
int is_shared;
int ret;
}
mutex_unlock(&hw->hw_info.lock);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
netdev->dev_addr, netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(epidx, &hw->txrx_stop_req_bit);
clear_bit(epidx, &unshare_watch_bitmask);
}
mutex_unlock(&hw->hw_info.lock);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(
&hw->ep_shm_info[epidx].tx,
netdev->dev_addr, netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock,
+ flags);
clear_bit(epidx, &hw->txrx_stop_req_bit);
clear_bit(epidx, &unshare_watch_bitmask);
}
if (test_bit(epidx, &unshare_watch_bitmask)) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
~FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock,
+ flags);
}
}
}
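
For context, the hunks above all follow one convention: a shared rx_status word is only updated while rx_status_lock is held, using the irqsave variant in process context (open/close, MTU change, zone/unshare tasks) and the plain spin_lock()/spin_unlock() pair in the NAPI poll path, which already runs in softirq context. The sketch below is a minimal, self-contained illustration of that pattern under assumed names (demo_hw, demo_* helpers); it is not code from the driver.

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_hw {
	spinlock_t rx_status_lock;	/* guards rx_status updates */
	u32 rx_status;			/* stand-in for tx.info->v1i.rx_status */
};

static void demo_init(struct demo_hw *hw)
{
	spin_lock_init(&hw->rx_status_lock);
}

/* Process context (open/close/MTU change): mask local IRQs while locked. */
static void demo_set_flag(struct demo_hw *hw, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&hw->rx_status_lock, flags);
	hw->rx_status |= flag;
	spin_unlock_irqrestore(&hw->rx_status_lock, flags);
}

/* NAPI poll (softirq) context: the plain lock/unlock pair is sufficient. */
static void demo_clear_flag(struct demo_hw *hw, u32 flag)
{
	spin_lock(&hw->rx_status_lock);
	hw->rx_status &= ~flag;
	spin_unlock(&hw->rx_status_lock);
}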