An old inefficiency of the TX path is that we grant map the first slot and
then memcpy the header part into the linear area. Instead, doing a grant
copy for that header straight away is more reasonable, especially because
there are ongoing efforts to make Xen avoid the TLB flush after unmap when
the page was not touched in Dom0. Under the old scheme the memcpy ruined
that.
The key changes:
- the vif has a tx_copy_ops array again
- xenvif_tx_build_gops sets up the grant copy operations
- we don't have to figure out whether the header and first frag are on the same
grant mapped page or not
Note that we only grant copy PKT_PROT_LEN bytes from the first slot; the
rest (if any) will be on the first frag, which is grant mapped. If the
first slot is smaller than PKT_PROT_LEN, then we grant copy only that much,
and later __pskb_pull_tail will pull more from the frags (if any), as
sketched below.
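For reference, the short-header case relies on the pull that
xenvif_tx_submit already does; roughly (a sketch of the pre-existing
logic, which this patch does not change):

	if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
		/* Linearize up to PKT_PROT_LEN bytes from the grant
		 * mapped frags into the header.
		 */
		int target = min_t(int, skb->len, PKT_PROT_LEN);
		__pskb_pull_tail(skb, target - skb_headlen(skb));
	}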
Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
+ struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
/* passed to gnttab_[un]map_refs with pages under (un)mapping */
static int xenvif_tx_check_gop(struct xenvif *vif,
struct sk_buff *skb,
- struct gnttab_map_grant_ref **gopp_map)
+ struct gnttab_map_grant_ref **gopp_map,
+ struct gnttab_copy **gopp_copy)
{
struct gnttab_map_grant_ref *gop_map = *gopp_map;
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
struct skb_shared_info *shinfo = skb_shinfo(skb);
- struct pending_tx_info *tx_info;
int nr_frags = shinfo->nr_frags;
struct sk_buff *first_skb = NULL;
/* Check status of header. */
- err = gop_map->status;
- if (unlikely(err))
+ err = (*gopp_copy)->status;
+ if (unlikely(err)) {
+ if (net_ratelimit())
+ netdev_dbg(vif->dev,
+ "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
+ (*gopp_copy)->status,
+ pending_idx,
+ (*gopp_copy)->source.u.ref);
xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+ }
+ (*gopp_copy)++;
- else
- xenvif_grant_handle_set(vif, pending_idx , gop_map->handle);
-
- /* Skip first skb fragment if it is on same page as header fragment. */
- start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
- for (i = start; i < nr_frags; i++) {
+ for (i = 0; i < nr_frags; i++, gop_map++) {
int j, newerr;
pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
- tx_info = &vif->pending_tx_info[pending_idx];
/* Check error status: if okay then remember grant handle. */
- newerr = (++gop_map)->status;
+ newerr = gop_map->status;
if (likely(!newerr)) {
xenvif_grant_handle_set(vif,
}
/* Error on this fragment: respond to client with an error. */
+ if (net_ratelimit())
+ netdev_dbg(vif->dev,
+ "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
+ i,
+ gop_map->status,
+ pending_idx,
+ gop_map->ref);
xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
/* Not the first error? Preceding frags already invalidated. */
if (err)
continue;
- /* First error: invalidate header and preceding fragments. */
- if (!first_skb)
- pending_idx = XENVIF_TX_CB(skb)->pending_idx;
- else
- pending_idx = XENVIF_TX_CB(first_skb)->pending_idx;
- xenvif_idx_unmap(vif, pending_idx);
- for (j = start; j < i; j++) {
+ /* First error: invalidate preceding fragments. */
+ for (j = 0; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xenvif_idx_unmap(vif, pending_idx);
}
skb = shinfo->frag_list;
shinfo = skb_shinfo(skb);
nr_frags = shinfo->nr_frags;
if (first_skb && err) {
int j;
shinfo = skb_shinfo(first_skb);
- pending_idx = XENVIF_TX_CB(skb)->pending_idx;
- start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
- for (j = start; j < shinfo->nr_frags; j++) {
+ for (j = 0; j < shinfo->nr_frags; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xenvif_idx_unmap(vif, pending_idx);
}
}
- *gopp_map = gop_map + 1;
+ *gopp_map = gop_map;
int i;
u16 prev_pending_idx = INVALID_PENDING_IDX;
- if (skb_shinfo(skb)->destructor_arg)
- prev_pending_idx = XENVIF_TX_CB(skb)->pending_idx;
-
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = shinfo->frags + i;
struct xen_netif_tx_request *txp;
pending_idx = frag_get_pending_idx(frag);
/* If this is not the first frag, chain it to the previous*/
- if (unlikely(prev_pending_idx == INVALID_PENDING_IDX))
+ if (prev_pending_idx == INVALID_PENDING_IDX)
skb_shinfo(skb)->destructor_arg =
&callback_param(vif, pending_idx);
- else if (likely(pending_idx != prev_pending_idx))
+ else
callback_param(vif, prev_pending_idx).ctx =
&callback_param(vif, pending_idx);
-static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
+static void xenvif_tx_build_gops(struct xenvif *vif,
+ int budget,
+ unsigned *copy_ops,
+ unsigned *map_ops)
{
struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop;
struct sk_buff *skb;
- xenvif_tx_create_map_op(vif, pending_idx, &txreq, gop);
-
- gop++;
-
XENVIF_TX_CB(skb)->pending_idx = pending_idx;
__skb_put(skb, data_len);
+ vif->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
+ vif->tx_copy_ops[*copy_ops].source.domid = vif->domid;
+ vif->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
+
+ vif->tx_copy_ops[*copy_ops].dest.u.gmfn =
+ virt_to_mfn(skb->data);
+ vif->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
+ vif->tx_copy_ops[*copy_ops].dest.offset =
+ offset_in_page(skb->data);
+
+ vif->tx_copy_ops[*copy_ops].len = data_len;
+ vif->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
+
+ (*copy_ops)++;
skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
+ xenvif_tx_create_map_op(vif, pending_idx, &txreq, gop);
+ gop++;
} else {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
INVALID_PENDING_IDX);
+ memcpy(&vif->pending_tx_info[pending_idx].req, &txreq,
+ sizeof(txreq));
- if ((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops))
+ if (((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops)) ||
+ (*copy_ops >= ARRAY_SIZE(vif->tx_copy_ops)))
break;
- return gop - vif->tx_map_ops;
+ (*map_ops) = gop - vif->tx_map_ops;
+ return;
}
/* Consolidate skb with a frag_list into a brand new one with local pages on
static int xenvif_tx_submit(struct xenvif *vif)
{
struct gnttab_map_grant_ref *gop_map = vif->tx_map_ops;
+ struct gnttab_copy *gop_copy = vif->tx_copy_ops;
struct sk_buff *skb;
int work_done = 0;
txp = &vif->pending_tx_info[pending_idx].req;
/* Check the remap error code. */
- if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map))) {
- netdev_dbg(vif->dev, "netback grant failed.\n");
+ if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map, &gop_copy))) {
skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
continue;
}
data_len = skb->len;
- memcpy(skb->data,
- (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
- data_len);
callback_param(vif, pending_idx).ctx = NULL;
if (data_len < txp->size) {
/* Append the packet payload as a fragment. */
txp->offset += data_len;
txp->size -= data_len;
- skb_shinfo(skb)->destructor_arg =
- &callback_param(vif, pending_idx);
} else {
/* Schedule a response immediately. */
- xenvif_idx_unmap(vif, pending_idx);
+ xenvif_idx_release(vif, pending_idx,
+ XEN_NETIF_RSP_OKAY);
}
if (txp->flags & XEN_NETTXF_csum_blank)
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif *vif, int budget)
{
+ unsigned nr_mops, nr_cops = 0;
int work_done, ret;
if (unlikely(!tx_work_todo(vif)))
return 0;
- nr_mops = xenvif_tx_build_gops(vif, budget);
+ xenvif_tx_build_gops(vif, budget, &nr_cops, &nr_mops);
- ret = gnttab_map_refs(vif->tx_map_ops,
- NULL,
- vif->pages_to_map,
- nr_mops);
- BUG_ON(ret);
+ gnttab_batch_copy(vif->tx_copy_ops, nr_cops);
+ if (nr_mops != 0) {
+ ret = gnttab_map_refs(vif->tx_map_ops,
+ NULL,
+ vif->pages_to_map,
+ nr_mops);
+ BUG_ON(ret);
+ }
work_done = xenvif_tx_submit(vif);