	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
for (i = 0; i < packet->page_buf_cnt; i++) {
- data = kmap_atomic(pfn_to_page(packet->page_buf[i].pfn),
- KM_IRQ1);
+ data = kmap_atomic(pfn_to_page(packet->page_buf[i].pfn));
data = (void *)(unsigned long)data +
packet->page_buf[i].offset;
		memcpy(skb_put(skb, packet->page_buf[i].len), data,
			packet->page_buf[i].len);
kunmap_atomic((void *)((unsigned long)data -
- packet->page_buf[i].offset), KM_IRQ1);
+ packet->page_buf[i].offset));
}
local_irq_restore(flags);
}
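
The hunk above is netvsc's receive path: each page buffer of the incoming packet is mapped one page at a time, copied out, and unmapped again. Since kmap_atomic() went stack-based, the per-CPU slot is picked implicitly and the KM_IRQ1 argument is dead weight, so the calls simply drop it. A minimal sketch of the same map-copy-unmap pattern against the one-argument API (copy_page_buf() and its parameters are illustrative, not driver code):

#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/types.h>

/* Map one page of a packet buffer, copy it out, unmap it again. */
static void copy_page_buf(struct page *page, u32 offset, u32 len, void *buf)
{
	void *data = kmap_atomic(page);	/* slot chosen implicitly */

	memcpy(buf, data + offset, len);
	kunmap_atomic(data);		/* pass back what kmap_atomic() returned */
}

Leaving the mapping pointer untouched and indexing with data + offset also avoids the subtract-the-offset-back step the driver needs before its kunmap_atomic() call.
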
rndis_hdr = (struct rndis_message *)kmap_atomic(
- pfn_to_page(pkt->page_buf[0].pfn), KM_IRQ0);
+ pfn_to_page(pkt->page_buf[0].pfn));
rndis_hdr = (void *)((unsigned long)rndis_hdr +
pkt->page_buf[0].offset);
	memcpy(&rndis_msg, rndis_hdr,
		(rndis_hdr->msg_len > sizeof(struct rndis_message)) ?
			sizeof(struct rndis_message) :
			rndis_hdr->msg_len);
- kunmap_atomic(rndis_hdr - pkt->page_buf[0].offset, KM_IRQ0);
+ kunmap_atomic(rndis_hdr - pkt->page_buf[0].offset);
dump_rndis_message(dev, &rndis_msg);
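
This hunk is the RNDIS filter peeking at a received message header: the first page buffer is mapped, at most sizeof(struct rndis_message) bytes are copied out, and the page is unmapped. One thing worth a look in the unmap line: rndis_hdr is a typed pointer, so rndis_hdr - pkt->page_buf[0].offset subtracts in units of sizeof(struct rndis_message), not bytes. A sketch of the bounded header peek that sidesteps this by keeping the raw mapping address around (struct hdr and peek_hdr() are hypothetical stand-ins for the RNDIS types):

#include <linux/highmem.h>
#include <linux/kernel.h>	/* min_t() */
#include <linux/string.h>
#include <linux/types.h>

struct hdr {			/* stand-in for struct rndis_message */
	u32 msg_type;
	u32 msg_len;
};

static void peek_hdr(struct page *page, u32 offset, struct hdr *out)
{
	void *base = kmap_atomic(page);	/* byte-granular pointer */
	struct hdr *h = base + offset;

	memcpy(out, h, min_t(u32, h->msg_len, sizeof(*h)));
	kunmap_atomic(base);		/* no offset arithmetic needed */
}
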
local_irq_save(flags);
for (i = 0; i < orig_sgl_count; i++) {
- dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
- KM_IRQ0) + orig_sgl[i].offset;
+ dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])))
+ + orig_sgl[i].offset;
dest = dest_addr;
destlen = orig_sgl[i].length;
if (bounce_addr == 0)
bounce_addr =
- (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
- KM_IRQ0);
+ (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])));
while (destlen) {
src = bounce_addr + bounce_sgl[j].offset;
if (bounce_sgl[j].offset == bounce_sgl[j].length) {
/* full */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+ kunmap_atomic((void *)bounce_addr);
j++;
/* if we need to use another bounce buffer */
if (destlen || i != orig_sgl_count - 1)
bounce_addr =
(unsigned long)kmap_atomic(
- sg_page((&bounce_sgl[j])), KM_IRQ0);
+ sg_page((&bounce_sgl[j])));
} else if (destlen == 0 && i == orig_sgl_count - 1) {
/* unmap the last bounce that is < PAGE_SIZE */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+ kunmap_atomic((void *)bounce_addr);
}
}
- kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
- KM_IRQ0);
+ kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset));
}
local_irq_restore(flags);
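
This loop is storvsc's copy_from_bounce_buffer(): it drains the page-sized bounce buffers into the original scatterlist, keeping both the current destination page and the current bounce page mapped at once. The stacked kmap_atomic() permits nested mappings, but the documented rule is strict LIFO: unmap in the reverse order of the maps. A minimal sketch of the two-mapping pattern, assuming whole-page copies:

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy one page into another while holding two atomic mappings. */
static void copy_one_page(struct page *dst_page, struct page *src_page)
{
	void *dst = kmap_atomic(dst_page);
	void *src = kmap_atomic(src_page);

	memcpy(dst, src, PAGE_SIZE);

	kunmap_atomic(src);	/* mapped last, unmapped first */
	kunmap_atomic(dst);
}

Note that the driver loop is looser than that: bounce_addr can stay mapped across the kunmap_atomic() of dest_addr at the bottom of the outer loop, which is worth a second look against the LIFO rule.
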
local_irq_save(flags);
for (i = 0; i < orig_sgl_count; i++) {
- src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
- KM_IRQ0) + orig_sgl[i].offset;
+ src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])))
+ + orig_sgl[i].offset;
src = src_addr;
srclen = orig_sgl[i].length;
if (bounce_addr == 0)
bounce_addr =
- (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
- KM_IRQ0);
+ (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])));
while (srclen) {
/* assume bounce offset always == 0 */
if (bounce_sgl[j].length == PAGE_SIZE) {
/* full..move to next entry */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+ kunmap_atomic((void *)bounce_addr);
j++;
/* if we need to use another bounce buffer */
if (srclen || i != orig_sgl_count - 1)
bounce_addr =
(unsigned long)kmap_atomic(
- sg_page((&bounce_sgl[j])), KM_IRQ0);
+ sg_page((&bounce_sgl[j])));
} else if (srclen == 0 && i == orig_sgl_count - 1) {
/* unmap the last bounce that is < PAGE_SIZE */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+ kunmap_atomic((void *)bounce_addr);
}
}
- kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
+ kunmap_atomic((void *)(src_addr - orig_sgl[i].offset));
}
local_irq_restore(flags);
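
copy_to_bounce_buffer() above is the mirror image, filling the bounce pages from the source scatterlist, and gets the same mechanical conversion. As a more general illustration of the pattern both loops implement, here is a hypothetical helper (sg_copy_out() is not a kernel API) that walks a scatterlist one mapped page at a time; in-tree code can often use sg_copy_to_buffer() or the sg mapping iterator instead:

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/*
 * Copy up to 'len' bytes out of a scatterlist, assuming each entry
 * fits inside a single page (true for the page-sized bounce buffers
 * above, not for scatterlists in general).
 */
static size_t sg_copy_out(struct scatterlist *sgl, int nents,
			  void *buf, size_t len)
{
	struct scatterlist *sg;
	size_t copied = 0;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		size_t n = min_t(size_t, sg->length, len - copied);
		void *p = kmap_atomic(sg_page(sg));

		memcpy(buf + copied, p + sg->offset, n);
		kunmap_atomic(p);

		copied += n;
		if (copied == len)
			break;
	}
	return copied;
}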