struct ixgbe_tx_buffer *buf =
&(tx_ring->tx_buffer_info[i]);
if (buf->dma)
- pci_unmap_single(pdev, buf->dma, buf->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, buf->dma,
+ buf->length, DMA_TO_DEVICE);
if (buf->skb)
dev_kfree_skb(buf->skb);
}
struct ixgbe_rx_buffer *buf =
&(rx_ring->rx_buffer_info[i]);
if (buf->dma)
- pci_unmap_single(pdev, buf->dma,
+ dma_unmap_single(&pdev->dev, buf->dma,
IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (buf->skb)
dev_kfree_skb(buf->skb);
}
}
if (tx_ring->desc) {
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
if (rx_ring->desc) {
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
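
(For reference: the legacy pci_* wrappers were thin veneers over the generic DMA API, so each call converts mechanically. The device argument becomes &pdev->dev and the PCI_DMA_* direction flags become the DMA_* enums. A minimal sketch of the teardown half of that correspondence; the helper and parameter names are illustrative, not from the patch:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Illustrative helper, not part of the driver. */
static void demo_teardown(struct pci_dev *pdev,
                          dma_addr_t buf_dma, size_t buf_len,
                          void *desc, dma_addr_t desc_dma, size_t ring_size)
{
        /* was: pci_unmap_single(pdev, buf_dma, buf_len, PCI_DMA_TODEVICE); */
        dma_unmap_single(&pdev->dev, buf_dma, buf_len, DMA_TO_DEVICE);

        /* was: pci_free_consistent(pdev, ring_size, desc, desc_dma); */
        dma_free_coherent(&pdev->dev, ring_size, desc, desc_dma);
}

dma_unmap_page()/dma_map_page() convert the same way, as the later hunks show.)
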
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma))) {
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!(tx_ring->desc)) {
ret_val = 2;
goto err_nomem;
}
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[i].length = skb->len;
tx_ring->tx_buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
desc->read.buffer_addr =
cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
desc->read.cmd_type_len = cpu_to_le32(skb->len);
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma))) {
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ if (!(rx_ring->desc)) {
ret_val = 5;
goto err_nomem;
}
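
(The allocation side gains one visible change: dma_alloc_coherent() takes an explicit gfp_t, whereas pci_alloc_consistent() hard-coded GFP_ATOMIC. Passing GFP_KERNEL in these setup paths, as above, is safe because they may sleep. A hedged sketch with an assumed ring structure mirroring the fields used here:

#include <linux/dma-mapping.h>

/* Hypothetical ring for illustration only. */
struct demo_ring {
        void *desc;
        dma_addr_t dma;
        unsigned int size;
};

static int demo_ring_alloc(struct device *dev, struct demo_ring *ring)
{
        ring->desc = dma_alloc_coherent(dev, ring->size, &ring->dma,
                                        GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;
        return 0;
}

The matching release is dma_free_coherent() with the same size, CPU address, and DMA handle.)
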
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->rx_buffer_info[i].skb = skb;
rx_ring->rx_buffer_info[i].dma =
- pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ dma_map_single(&pdev->dev, skb->data,
+ IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
rx_desc->read.pkt_addr =
cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
memset(skb->data, 0x00, skb->len);
ixgbe_create_lbtest_frame(
tx_ring->tx_buffer_info[k].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
+ dma_sync_single_for_device(&pdev->dev,
tx_ring->tx_buffer_info[k].dma,
tx_ring->tx_buffer_info[k].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (unlikely(++k == tx_ring->count))
k = 0;
}
good_cnt = 0;
do {
/* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
+ dma_sync_single_for_cpu(&pdev->dev,
rx_ring->rx_buffer_info[l].dma,
IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
ret_val = ixgbe_check_lbtest_frame(
rx_ring->rx_buffer_info[l].skb, 1024);
if (!ret_val) {
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev,
+ dma_unmap_single(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->dma = 0;
}
if (tx_buffer_info->skb) {
bi->page_offset ^= (PAGE_SIZE / 2);
}
- bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_dma = dma_map_page(&pdev->dev, bi->page,
bi->page_offset,
(PAGE_SIZE / 2),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
if (!bi->skb) {
bi->skb = skb;
- bi->dma = pci_map_single(pdev, skb->data,
+ bi->dma = dma_map_single(&pdev->dev, skb->data,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
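
(Both RX mapping flavours convert the same way: dma_map_page() for the half-page data buffers, with an explicit page offset, and dma_map_single() for the linear skb buffer. A sketch under assumed names, with the mapping-error checks that any streaming map should carry:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative RX mapping; page, offset, skb, and buf_len are assumptions. */
static int demo_map_rx(struct device *dev, struct page *page,
                       unsigned int page_offset, struct sk_buff *skb,
                       unsigned int buf_len,
                       dma_addr_t *page_dma, dma_addr_t *skb_dma)
{
        /* half-page buffer: was pci_map_page(pdev, ...) */
        *page_dma = dma_map_page(dev, page, page_offset,
                                 PAGE_SIZE / 2, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *page_dma))
                return -ENOMEM;

        /* linear skb data: was pci_map_single(pdev, ...) */
        *skb_dma = dma_map_single(dev, skb->data, buf_len,
                                  DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *skb_dma)) {
                dma_unmap_page(dev, *page_dma, PAGE_SIZE / 2,
                               DMA_FROM_DEVICE);
                return -ENOMEM;
        }
        return 0;
}
)
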
/* Refresh the desc even if buffer_addrs didn't change because
 * each write-back erases this info. */
IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
else
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev,
+ rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
skb_put(skb, len);
}
if (upper_len) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer_info->page,
skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
if (IXGBE_RSC_CB(skb)->dma) {
- pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
+ dma_unmap_single(&pdev->dev,
+ IXGBE_RSC_CB(skb)->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
IXGBE_RSC_CB(skb)->dma = 0;
}
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
}
if (rx_buffer_info->skb) {
do {
struct sk_buff *this = skb;
if (IXGBE_RSC_CB(this)->dma) {
- pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma,
+ dma_unmap_single(&pdev->dev,
+ IXGBE_RSC_CB(this)->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
IXGBE_RSC_CB(this)->dma = 0;
}
skb = skb->prev;
if (!rx_buffer_info->page)
continue;
if (rx_buffer_info->page_dma) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
}
put_page(rx_buffer_info->page);
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
DPRINTK(PROBE, ERR,
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = pci_map_single(pdev,
+ tx_buffer_info->dma = dma_map_single(&pdev->dev,
skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
frag->page,
offset, size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
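
(Note that the error check moves with the API: dma_mapping_error() takes the struct device rather than the pci_dev, and every streaming map should be followed by it, as the two hunks above do before jumping to dma_error. A minimal sketch of the check-and-unwind pattern, names assumed:

#include <linux/dma-mapping.h>

/* Illustrative TX map with the mandatory error check. */
static int demo_map_tx(struct device *dev, void *data, size_t size,
                       dma_addr_t *dma)
{
        *dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *dma)) {
                /* caller unwinds any earlier mappings and drops the skb */
                return -ENOMEM;
        }
        return 0;
}
)
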
if (err)
return err;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");