AKA inline WQE.
A TX latency optimization that saves data gather DMA reads by copying the packet headers (or, below the copybreak threshold, the whole linear part) into the send WQE.
Controlled by the ETHTOOL_TX_COPYBREAK tunable.
Signed-off-by: Achiad Shochat <achiad@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
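
For reference, the tunable is exercised through the generic ethtool tunable ioctl; a minimal userspace sketch (the device name "eth0" and the value 128 are hypothetical, error handling trimmed) that would be routed to the new mlx5e_set_tunable() handler added below looks roughly like this:

/* Hypothetical userspace sketch (not part of this patch): set the TX
 * copybreak via the generic ETHTOOL_STUNABLE ioctl.  Requires CAP_NET_ADMIN.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct {
		struct ethtool_tunable hdr;
		__u32 val;		/* tunable payload follows the header */
	} req = {
		.hdr = {
			.cmd     = ETHTOOL_STUNABLE,
			.id      = ETHTOOL_TX_COPYBREAK,
			.type_id = ETHTOOL_TUNABLE_U32,
			.len     = sizeof(__u32),
		},
		.val = 128,		/* example: inline up to 128 bytes */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* hypothetical netdev */
	ifr.ifr_data = (void *)&req;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_STUNABLE");

	close(fd);
	return 0;
}

The same request layout with ETHTOOL_GTUNABLE reads the current value back through mlx5e_get_tunable().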
bool lro_en;
u32 lro_wqe_sz;
u8 rss_hfunc;
+ u16 tx_max_inline;
};
enum {
}
extern const struct ethtool_ops mlx5e_ethtool_ops;
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
return err;
}
+static int mlx5e_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+{
+ const struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_TX_COPYBREAK:
+ *(u32 *)data = priv->params.tx_max_inline;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int mlx5e_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_params new_params;
+ u32 val;
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_TX_COPYBREAK:
+ val = *(u32 *)data;
+ if (val > mlx5e_get_max_inline_cap(mdev)) {
+ err = -EINVAL;
+ break;
+ }
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->params;
+ new_params.tx_max_inline = val;
+ err = mlx5e_update_priv_params(priv, &new_params);
+ mutex_unlock(&priv->state_lock);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
const struct ethtool_ops mlx5e_ethtool_ops = {
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.set_settings = mlx5e_set_settings,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
+ .get_tunable = mlx5e_get_tunable,
+ .set_tunable = mlx5e_set_tunable,
};
struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
+ u16 max_inline;
};
struct mlx5e_cq_param {
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
sq->uar_map = sq->uar.map;
sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+ sq->max_inline = param->max_inline;
err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
if (err)
MLX5_SET(wq, wq, pd, priv->pdn);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+ param->max_inline = priv->params.tx_max_inline;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
return 0;
}
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
+{
+ int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+ return bf_buf_size -
+ sizeof(struct mlx5e_tx_wqe) +
+ 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
+}
+
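
To make the cap above concrete: with a hypothetical log_bf_reg_size of 9 (a 512-byte blue-flame register) and assuming sizeof(struct mlx5e_tx_wqe) is 32 bytes (control segment plus eth segment), the default tx_max_inline works out as below; the numbers are illustrative only and depend on the device capabilities.

/* Illustrative arithmetic only; log_bf_reg_size and the WQE size below are
 * assumed example values, not guaranteed hardware constants.
 */
#include <stdio.h>

int main(void)
{
	int log_bf_reg_size = 9;			/* assumed: 512B BF register */
	int bf_buf_size = (1 << log_bf_reg_size) / 2;	/* 256B, as in the driver */
	int wqe_sz = 32;				/* assumed sizeof(struct mlx5e_tx_wqe) */
	int inline_hdr_start_sz = 2;			/* bytes already inside the eth segment */

	/* mirrors mlx5e_get_max_inline_cap(): 256 - 32 + 2 = 226 */
	printf("tx_max_inline = %d\n", bf_buf_size - wqe_sz + inline_hdr_start_sz);
	return 0;
}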
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
int num_comp_vectors)
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
priv->params.tx_cq_moderation_pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
priv->params.min_rx_wqes =
MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
priv->params.rx_hash_log_tbl_sz =
static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
struct sk_buff *skb)
{
-#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+ /* Some NIC TX decisions, e.g. loopback, are based on the packet
+ * headers and occur before the data gather.
+ * Therefore these headers must be copied into the WQE.
+ */
+#define MLX5E_MIN_INLINE (ETH_HLEN + 2) /* eth header with vlan (w/o next ethertype) */
+
+ if (skb_headlen(skb) <= sq->max_inline)
+ return skb_headlen(skb);
+
return MLX5E_MIN_INLINE;
}
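
For context (the corresponding xmit hunk is not shown in this excerpt), a simplified sketch of how the send path consumes the size returned above, assuming the driver's existing definitions from en.h: the chosen number of bytes is copied into the eth segment's inline header area and pulled from the skb, so the DMA gather entries only describe what remains.

/* Simplified sketch, not the verbatim driver code; assumes the mlx5e
 * definitions from en.h (mlx5e_sq, mlx5_wqe_eth_seg) are in scope.
 */
static void mlx5e_inline_headers_sketch(struct mlx5e_sq *sq,
					struct sk_buff *skb,
					struct mlx5_wqe_eth_seg *eseg)
{
	u16 ihs = mlx5e_get_inline_hdr_size(sq, skb);

	/* Copy the headers (or the whole linear part when it is under the
	 * copybreak) into the WQE so the NIC can make its TX decisions
	 * without issuing a data gather DMA read first.
	 */
	skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
	eseg->inline_hdr_sz = cpu_to_be16(ihs);

	/* The inlined bytes must not be described again by the DMA gather
	 * entries that follow the eth segment.
	 */
	skb_pull_inline(skb, ihs);
}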