return;
BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
- gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, &netbk->grant_copy_op,
+ npo.copy_prod);
+ BUG_ON(ret != 0);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
sco = (struct skb_cb_overlay *)skb->cb;
static void xen_netbk_tx_action(struct xen_netbk *netbk)
{
unsigned nr_gops;
+ int ret;
nr_gops = xen_netbk_tx_build_gops(netbk);
if (nr_gops == 0)
return;
-
- gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
+ netbk->tx_copy_ops, nr_gops);
+ BUG_ON(ret);
xen_netbk_tx_submit(netbk);
}
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
-#include <linux/delay.h>
#include <linux/hardirq.h>
#include <xen/xen.h>
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
-/* Handling of paged out grant targets (GNTST_eagain) */
-#define MAX_DELAY 256
-static inline void
-gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
- const char *func)
-{
- unsigned delay = 1;
-
- do {
- BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
- if (*status == GNTST_eagain)
- msleep(delay++);
- } while ((*status == GNTST_eagain) && (delay < MAX_DELAY));
-
- if (delay >= MAX_DELAY) {
- printk(KERN_ERR "%s: %s eagain grant\n", func, current->comm);
- *status = GNTST_bad_page;
- }
-}
-
-void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
-{
- struct gnttab_map_grant_ref *op;
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
- BUG();
- for (op = batch; op < batch + count; op++)
- if (op->status == GNTST_eagain)
- gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
- &op->status, __func__);
-}
-EXPORT_SYMBOL_GPL(gnttab_batch_map);
-
-void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
-{
- struct gnttab_copy *op;
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
- BUG();
- for (op = batch; op < batch + count; op++)
- if (op->status == GNTST_eagain)
- gnttab_retry_eagain_gop(GNTTABOP_copy, op,
- &op->status, __func__);
-}
-EXPORT_SYMBOL_GPL(gnttab_batch_copy);
-
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count)
if (ret)
return ret;
- /* Retry eagain maps */
- for (i = 0; i < count; i++)
- if (map_ops[i].status == GNTST_eagain)
- gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
- &map_ops[i].status, __func__);
-
if (xen_feature(XENFEAT_auto_translated_physmap))
return ret;
op.host_addr = arbitrary_virt_to_machine(pte).maddr;
- gnttab_batch_map(&op, 1);
+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
+ BUG();
if (op.status != GNTST_okay) {
free_vm_area(area);
gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
dev->otherend_id);
- gnttab_batch_map(&op, 1);
+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
+ BUG();
if (op.status != GNTST_okay) {
xenbus_dev_fatal(dev, op.status,
struct gnttab_map_grant_ref *kunmap_ops,
struct page **pages, unsigned int count);
-/* Perform a batch of grant map/copy operations. Retry every batch slot
- * for which the hypervisor returns GNTST_eagain. This is typically due
- * to paged out target frames.
- *
- * Will retry for 1, 2, ... 255 ms, i.e. 256 times during 32 seconds.
- *
- * Return value in each iand every status field of the batch guaranteed
- * to not be GNTST_eagain.
- */
-void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
-void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
-
#endif /* __ASM_GNTTAB_H__ */