Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/resource_tracker.c')
 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 1960 ++++++++++------
 1 file changed, 1648 insertions(+), 312 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index f984a89c27df..771b92019af1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -46,12 +46,25 @@
#include "mlx4.h"
#include "fw.h"
+#include "mlx4_stats.h"
#define MLX4_MAC_VALID (1ull << 63)
+#define MLX4_PF_COUNTERS_PER_PORT 2
+#define MLX4_VF_COUNTERS_PER_PORT 1
struct mac_res {
struct list_head list;
u64 mac;
+ int ref_count;
+ u8 smac_index;
+ u8 port;
+};
+
+struct vlan_res {
+ struct list_head list;
+ u16 vlan;
+ int ref_count;
+ int vlan_index;
u8 port;
};
@@ -64,6 +77,7 @@ struct res_common {
int from_state;
int to_state;
int removing;
+ const char *func_name;
};
enum {
@@ -102,7 +116,14 @@ struct res_qp {
int local_qpn;
atomic_t ref_count;
u32 qpc_flags;
+ /* saved qp params before VST enforcement in order to restore on VGT */
u8 sched_queue;
+ __be32 param3;
+ u8 vlan_control;
+ u8 fvl_rx;
+ u8 pri_path_fl;
+ u8 vlan_index;
+ u8 feup;
};
enum res_mtt_states {
@@ -202,6 +223,13 @@ enum res_fs_rule_states {
struct res_fs_rule {
struct res_common com;
int qpn;
+ /* VF DMFS mbox with port flipped */
+ void *mirr_mbox;
+ /* > 0 --> apply mirror when getting into HA mode */
+ /* = 0 --> un-apply mirror when getting out of HA mode */
+ u32 mirr_mbox_size;
+ struct list_head mirr_list;
+ u64 mirr_rule_id;
};
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
@@ -209,8 +237,8 @@ static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
struct rb_node *node = root->rb_node;
while (node) {
- struct res_common *res = container_of(node, struct res_common,
- node);
+ struct res_common *res = rb_entry(node, struct res_common,
+ node);
if (res_id < res->res_id)
node = node->rb_left;
@@ -228,8 +256,8 @@ static int res_tracker_insert(struct rb_root *root, struct res_common *res)
/* Figure out where to put new node */
while (*new) {
- struct res_common *this = container_of(*new, struct res_common,
- node);
+ struct res_common *this = rb_entry(*new, struct res_common,
+ node);
parent = *new;
if (res->res_id < this->res_id)
@@ -257,7 +285,7 @@ enum qp_transition {
};
/* For Debug uses */
-static const char *ResourceType(enum mlx4_resource rt)
+static const char *resource_str(enum mlx4_resource rt)
{
switch (rt) {
case RES_QP: return "RES_QP";
@@ -266,22 +294,218 @@ static const char *ResourceType(enum mlx4_resource rt)
case RES_MPT: return "RES_MPT";
case RES_MTT: return "RES_MTT";
case RES_MAC: return "RES_MAC";
+ case RES_VLAN: return "RES_VLAN";
case RES_EQ: return "RES_EQ";
case RES_COUNTER: return "RES_COUNTER";
case RES_FS_RULE: return "RES_FS_RULE";
case RES_XRCD: return "RES_XRCD";
default: return "Unknown resource type !!!";
- };
+ }
+}
+
+static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
+static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource res_type, int count,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct resource_allocator *res_alloc =
+ &priv->mfunc.master.res_tracker.res_alloc[res_type];
+ int err = -EDQUOT;
+ int allocated, free, reserved, guaranteed, from_free;
+ int from_rsvd;
+
+ if (slave > dev->persist->num_vfs)
+ return -EINVAL;
+
+ spin_lock(&res_alloc->alloc_lock);
+ allocated = (port > 0) ?
+ res_alloc->allocated[(port - 1) *
+ (dev->persist->num_vfs + 1) + slave] :
+ res_alloc->allocated[slave];
+ free = (port > 0) ? res_alloc->res_port_free[port - 1] :
+ res_alloc->res_free;
+ reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
+ res_alloc->res_reserved;
+ guaranteed = res_alloc->guaranteed[slave];
+
+ if (allocated + count > res_alloc->quota[slave]) {
+ mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
+ slave, port, resource_str(res_type), count,
+ allocated, res_alloc->quota[slave]);
+ goto out;
+ }
+
+ if (allocated + count <= guaranteed) {
+ err = 0;
+ from_rsvd = count;
+ } else {
+ /* portion may need to be obtained from free area */
+ if (guaranteed - allocated > 0)
+ from_free = count - (guaranteed - allocated);
+ else
+ from_free = count;
+
+ from_rsvd = count - from_free;
+
+ if (free - from_free >= reserved)
+ err = 0;
+ else
+ mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
+ slave, port, resource_str(res_type), free,
+ from_free, reserved);
+ }
+
+ if (!err) {
+ /* grant the request */
+ if (port > 0) {
+ res_alloc->allocated[(port - 1) *
+ (dev->persist->num_vfs + 1) + slave] += count;
+ res_alloc->res_port_free[port - 1] -= count;
+ res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
+ } else {
+ res_alloc->allocated[slave] += count;
+ res_alloc->res_free -= count;
+ res_alloc->res_reserved -= from_rsvd;
+ }
+ }
+
+out:
+ spin_unlock(&res_alloc->alloc_lock);
+ return err;
+}
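
The grant/release helpers above implement a two-tier allocator: each function (PF or VF) has a hard per-slave quota, a guaranteed reservation carved out of the pool, and access to a shared free region that may never be drained below the sum of everyone else's outstanding guarantees (for MACs and VLANs the same counters are kept per port). A minimal standalone sketch of the same arithmetic, with a hypothetical try_grant() helper (illustrative, not the driver's code):

static int try_grant(int allocated, int count, int quota,
                     int guaranteed, int free_cnt, int reserved,
                     int *from_rsvd)
{
        int from_free;

        if (allocated + count > quota)
                return -1;              /* per-slave quota exceeded */

        if (allocated + count <= guaranteed) {
                *from_rsvd = count;     /* fully covered by the guarantee */
                return 0;
        }

        /* part (or all) of the request must come from the shared free pool */
        from_free = (guaranteed > allocated) ?
                count - (guaranteed - allocated) : count;
        *from_rsvd = count - from_free;

        /* never drain the pool below other slaves' remaining guarantees */
        return (free_cnt - from_free >= reserved) ? 0 : -1;
}

mlx4_release_resource() below runs the same computation in reverse, returning to the reserved region whatever portion of the freed count falls back under the slave's guarantee.
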
+
+static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource res_type, int count,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct resource_allocator *res_alloc =
+ &priv->mfunc.master.res_tracker.res_alloc[res_type];
+ int allocated, guaranteed, from_rsvd;
+
+ if (slave > dev->persist->num_vfs)
+ return;
+
+ spin_lock(&res_alloc->alloc_lock);
+
+ allocated = (port > 0) ?
+ res_alloc->allocated[(port - 1) *
+ (dev->persist->num_vfs + 1) + slave] :
+ res_alloc->allocated[slave];
+ guaranteed = res_alloc->guaranteed[slave];
+
+ if (allocated - count >= guaranteed) {
+ from_rsvd = 0;
+ } else {
+ /* portion may need to be returned to reserved area */
+ if (allocated - guaranteed > 0)
+ from_rsvd = count - (allocated - guaranteed);
+ else
+ from_rsvd = count;
+ }
+
+ if (port > 0) {
+ res_alloc->allocated[(port - 1) *
+ (dev->persist->num_vfs + 1) + slave] -= count;
+ res_alloc->res_port_free[port - 1] += count;
+ res_alloc->res_port_rsvd[port - 1] += from_rsvd;
+ } else {
+ res_alloc->allocated[slave] -= count;
+ res_alloc->res_free += count;
+ res_alloc->res_reserved += from_rsvd;
+ }
+
+ spin_unlock(&res_alloc->alloc_lock);
+ return;
+}
+
+static inline void initialize_res_quotas(struct mlx4_dev *dev,
+ struct resource_allocator *res_alloc,
+ enum mlx4_resource res_type,
+ int vf, int num_instances)
+{
+ res_alloc->guaranteed[vf] = num_instances /
+ (2 * (dev->persist->num_vfs + 1));
+ res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
+ if (vf == mlx4_master_func_num(dev)) {
+ res_alloc->res_free = num_instances;
+ if (res_type == RES_MTT) {
+ /* reserved mtts will be taken out of the PF allocation */
+ res_alloc->res_free += dev->caps.reserved_mtts;
+ res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
+ res_alloc->quota[vf] += dev->caps.reserved_mtts;
+ }
+ }
+}
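
The quota formula gives every function (the PF plus each VF) a guarantee of num_instances / (2 * (num_vfs + 1)) and a quota of half the pool plus that guarantee. For example, with 1000 assignable QPs and 4 VFs (5 functions in total), each function is guaranteed 1000 / 10 = 100 QPs and capped at 500 + 100 = 600: the guarantees together claim exactly half the pool, and the other half is contended for on a first-come basis up to each function's quota. For RES_MTT the PF additionally absorbs the reserved MTTs on top of these figures, since those are accounted against its allocation.
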
+
+void mlx4_init_quotas(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int pf;
+
+ /* quotas for VFs are initialized in mlx4_slave_cap */
+ if (mlx4_is_slave(dev))
+ return;
+
+ if (!mlx4_is_mfunc(dev)) {
+ dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
+ mlx4_num_reserved_sqps(dev);
+ dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
+ dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
+ dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
+ dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
+ return;
+ }
+
+ pf = mlx4_master_func_num(dev);
+ dev->quotas.qp =
+ priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
+ dev->quotas.cq =
+ priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
+ dev->quotas.srq =
+ priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
+ dev->quotas.mtt =
+ priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
+ dev->quotas.mpt =
+ priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
+}
+
+static int
+mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
+ struct resource_allocator *res_alloc,
+ int vf)
+{
+ struct mlx4_active_ports actv_ports;
+ int ports, counters_guaranteed;
+
+ /* For master, only allocate according to the number of phys ports */
+ if (vf == mlx4_master_func_num(dev))
+ return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
+
+ /* calculate real number of ports for the VF */
+ actv_ports = mlx4_get_active_ports(dev, vf);
+ ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+ counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
+
+ /* If we do not have enough counters for this VF, do not
+ * allocate any for it. '-1' to reduce the sink counter.
+ */
+ if ((res_alloc->res_reserved + counters_guaranteed) >
+ (dev->caps.max_counters - 1))
+ return 0;
+
+ return counters_guaranteed;
}
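
A worked example of the counter guarantees, assuming a hypothetical dual-port device with max_counters = 8: one counter is the sink, leaving 7 assignable. The PF takes 2 * 2 = 4 guaranteed. The first VF active on both ports asks for 2, and 4 + 2 <= 7 holds, so it gets them; for the next such VF the check 6 + 2 > 7 fails, so its guarantee is 0 and it will compete for leftovers (or fall back to the sink counter) when its QPs attach counters.
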
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int i;
+ int i, j;
int t;
priv->mfunc.master.res_tracker.slave_list =
- kzalloc(dev->num_slaves * sizeof(struct slave_list),
+ kcalloc(dev->num_slaves, sizeof(struct slave_list),
GFP_KERNEL);
if (!priv->mfunc.master.res_tracker.slave_list)
return -ENOMEM;
@@ -298,8 +522,134 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
+ for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
+ struct resource_allocator *res_alloc =
+ &priv->mfunc.master.res_tracker.res_alloc[i];
+ res_alloc->quota = kmalloc_array(dev->persist->num_vfs + 1,
+ sizeof(int),
+ GFP_KERNEL);
+ res_alloc->guaranteed = kmalloc_array(dev->persist->num_vfs + 1,
+ sizeof(int),
+ GFP_KERNEL);
+ if (i == RES_MAC || i == RES_VLAN)
+ res_alloc->allocated =
+ kcalloc(MLX4_MAX_PORTS *
+ (dev->persist->num_vfs + 1),
+ sizeof(int), GFP_KERNEL);
+ else
+ res_alloc->allocated =
+ kcalloc(dev->persist->num_vfs + 1,
+ sizeof(int), GFP_KERNEL);
+ /* Reduce the sink counter */
+ if (i == RES_COUNTER)
+ res_alloc->res_free = dev->caps.max_counters - 1;
+
+ if (!res_alloc->quota || !res_alloc->guaranteed ||
+ !res_alloc->allocated)
+ goto no_mem_err;
+
+ spin_lock_init(&res_alloc->alloc_lock);
+ for (t = 0; t < dev->persist->num_vfs + 1; t++) {
+ struct mlx4_active_ports actv_ports =
+ mlx4_get_active_ports(dev, t);
+ switch (i) {
+ case RES_QP:
+ initialize_res_quotas(dev, res_alloc, RES_QP,
+ t, dev->caps.num_qps -
+ dev->caps.reserved_qps -
+ mlx4_num_reserved_sqps(dev));
+ break;
+ case RES_CQ:
+ initialize_res_quotas(dev, res_alloc, RES_CQ,
+ t, dev->caps.num_cqs -
+ dev->caps.reserved_cqs);
+ break;
+ case RES_SRQ:
+ initialize_res_quotas(dev, res_alloc, RES_SRQ,
+ t, dev->caps.num_srqs -
+ dev->caps.reserved_srqs);
+ break;
+ case RES_MPT:
+ initialize_res_quotas(dev, res_alloc, RES_MPT,
+ t, dev->caps.num_mpts -
+ dev->caps.reserved_mrws);
+ break;
+ case RES_MTT:
+ initialize_res_quotas(dev, res_alloc, RES_MTT,
+ t, dev->caps.num_mtts -
+ dev->caps.reserved_mtts);
+ break;
+ case RES_MAC:
+ if (t == mlx4_master_func_num(dev)) {
+ int max_vfs_pport = 0;
+ /* Calculate the max vfs per port for */
+ /* both ports. */
+ for (j = 0; j < dev->caps.num_ports;
+ j++) {
+ struct mlx4_slaves_pport slaves_pport =
+ mlx4_phys_to_slaves_pport(dev, j + 1);
+ unsigned current_slaves =
+ bitmap_weight(slaves_pport.slaves,
+ dev->caps.num_ports) - 1;
+ if (max_vfs_pport < current_slaves)
+ max_vfs_pport =
+ current_slaves;
+ }
+ res_alloc->quota[t] =
+ MLX4_MAX_MAC_NUM -
+ 2 * max_vfs_pport;
+ res_alloc->guaranteed[t] = 2;
+ for (j = 0; j < MLX4_MAX_PORTS; j++)
+ res_alloc->res_port_free[j] =
+ MLX4_MAX_MAC_NUM;
+ } else {
+ res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
+ res_alloc->guaranteed[t] = 2;
+ }
+ break;
+ case RES_VLAN:
+ if (t == mlx4_master_func_num(dev)) {
+ res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
+ res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
+ for (j = 0; j < MLX4_MAX_PORTS; j++)
+ res_alloc->res_port_free[j] =
+ res_alloc->quota[t];
+ } else {
+ res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
+ res_alloc->guaranteed[t] = 0;
+ }
+ break;
+ case RES_COUNTER:
+ res_alloc->quota[t] = dev->caps.max_counters;
+ res_alloc->guaranteed[t] =
+ mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
+ break;
+ default:
+ break;
+ }
+ if (i == RES_MAC || i == RES_VLAN) {
+ for (j = 0; j < dev->caps.num_ports; j++)
+ if (test_bit(j, actv_ports.ports))
+ res_alloc->res_port_rsvd[j] +=
+ res_alloc->guaranteed[t];
+ } else {
+ res_alloc->res_reserved += res_alloc->guaranteed[t];
+ }
+ }
+ }
spin_lock_init(&priv->mfunc.master.res_tracker.lock);
- return 0 ;
+ return 0;
+
+no_mem_err:
+ for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
+ priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
+ priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
+ priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
+ }
+ return -ENOMEM;
}
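
Note the allocator split in the loop above: quota and guaranteed use kmalloc_array() because every entry is written before use, while allocated uses kcalloc() since its counters must start at zero; for the per-port resources (RES_MAC, RES_VLAN) the allocated array is sized per port per function.
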
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
@@ -309,13 +659,29 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev,
int i;
if (priv->mfunc.master.res_tracker.slave_list) {
- if (type != RES_TR_FREE_STRUCTS_ONLY)
- for (i = 0 ; i < dev->num_slaves; i++)
+ if (type != RES_TR_FREE_STRUCTS_ONLY) {
+ for (i = 0; i < dev->num_slaves; i++) {
if (type == RES_TR_FREE_ALL ||
dev->caps.function != i)
mlx4_delete_all_resources_for_slave(dev, i);
+ }
+ /* free master's vlans */
+ i = dev->caps.function;
+ mlx4_reset_roce_gids(dev, i);
+ mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+ rem_slave_vlans(dev, i);
+ mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+ }
if (type != RES_TR_FREE_SLAVES_ONLY) {
+ for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
+ priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
+ priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
+ priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
+ }
kfree(priv->mfunc.master.res_tracker.slave_list);
priv->mfunc.master.res_tracker.slave_list = NULL;
}
@@ -343,18 +709,43 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+ int port;
- if (MLX4_QP_ST_UD == ts)
- qp_ctx->pri_path.mgid_index = 0x80 | slave;
-
- if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
- if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
- qp_ctx->pri_path.mgid_index = slave & 0x7F;
- if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
- qp_ctx->alt_path.mgid_index = slave & 0x7F;
+ if (MLX4_QP_ST_UD == ts) {
+ port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+ if (mlx4_is_eth(dev, port))
+ qp_ctx->pri_path.mgid_index =
+ mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
+ else
+ qp_ctx->pri_path.mgid_index = slave | 0x80;
+
+ } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
+ if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
+ port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+ if (mlx4_is_eth(dev, port)) {
+ qp_ctx->pri_path.mgid_index +=
+ mlx4_get_base_gid_ix(dev, slave, port);
+ qp_ctx->pri_path.mgid_index &= 0x7f;
+ } else {
+ qp_ctx->pri_path.mgid_index = slave & 0x7F;
+ }
+ }
+ if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
+ port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
+ if (mlx4_is_eth(dev, port)) {
+ qp_ctx->alt_path.mgid_index +=
+ mlx4_get_base_gid_ix(dev, slave, port);
+ qp_ctx->alt_path.mgid_index &= 0x7f;
+ } else {
+ qp_ctx->alt_path.mgid_index = slave & 0x7F;
+ }
+ }
}
}
+static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
+ u8 slave, int port);
+
static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *inbox,
u8 slave, u32 qpn)
@@ -363,31 +754,47 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_priv *priv;
u32 qp_type;
- int port;
+ int port, err = 0;
port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
priv = mlx4_priv(dev);
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
- if (MLX4_VGT != vp_oper->state.default_vlan) {
- qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
- if (MLX4_QP_ST_RC == qp_type ||
- (MLX4_QP_ST_UD == qp_type &&
- !mlx4_is_qp_reserved(dev, qpn)))
- return -EINVAL;
+ err = handle_counter(dev, qpc, slave, port);
+ if (err)
+ goto out;
+ if (MLX4_VGT != vp_oper->state.default_vlan) {
/* the reserved QPs (special, proxy, tunnel)
* do not operate over vlans
*/
if (mlx4_is_qp_reserved(dev, qpn))
return 0;
- /* force strip vlan by clear vsd */
- qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+ /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
+ if (qp_type == MLX4_QP_ST_UD ||
+ (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
+ if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
+ *(__be32 *)inbox->buf =
+ cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
+ MLX4_QP_OPTPAR_VLAN_STRIPPING);
+ qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+ } else {
+ struct mlx4_update_qp_params params = {.flags = 0};
+
+ err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+ if (err)
+ goto out;
+ }
+ }
+ /* preserve IF_COUNTER flag */
+ qpc->pri_path.vlan_control &=
+ MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
- qpc->pri_path.vlan_control =
+ qpc->pri_path.vlan_control |=
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
@@ -395,28 +802,46 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
} else if (0 != vp_oper->state.default_vlan) {
- qpc->pri_path.vlan_control =
- MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
- MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
- MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
+ /* vst QinQ should block untagged on TX,
+ * but cvlan is in payload and phv is set so
+ * hw sees it as untagged. Block tagged instead.
+ */
+ qpc->pri_path.vlan_control |=
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ } else { /* vst 802.1Q */
+ qpc->pri_path.vlan_control |=
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ }
} else { /* priority tagged */
- qpc->pri_path.vlan_control =
+ qpc->pri_path.vlan_control |=
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
}
qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
qpc->pri_path.vlan_index = vp_oper->vlan_idx;
- qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
+ if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
+ qpc->pri_path.fl |= MLX4_FL_SV;
+ else
+ qpc->pri_path.fl |= MLX4_FL_CV;
qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
qpc->pri_path.sched_queue &= 0xC7;
qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
+ qpc->qos_vport = vp_oper->state.qos_vport;
}
if (vp_oper->state.spoofchk) {
qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
}
- return 0;
+out:
+ return err;
}
static int mpt_mask(struct mlx4_dev *dev)
@@ -424,6 +849,36 @@ static int mpt_mask(struct mlx4_dev *dev)
return dev->caps.num_mpts - 1;
}
+static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
+{
+ switch (t) {
+ case RES_QP:
+ return "QP";
+ case RES_CQ:
+ return "CQ";
+ case RES_SRQ:
+ return "SRQ";
+ case RES_XRCD:
+ return "XRCD";
+ case RES_MPT:
+ return "MPT";
+ case RES_MTT:
+ return "MTT";
+ case RES_MAC:
+ return "MAC";
+ case RES_VLAN:
+ return "VLAN";
+ case RES_COUNTER:
+ return "COUNTER";
+ case RES_FS_RULE:
+ return "FS_RULE";
+ case RES_EQ:
+ return "EQ";
+ default:
+ return "INVALID RESOURCE";
+ }
+}
+
static void *find_res(struct mlx4_dev *dev, u64 res_id,
enum mlx4_resource type)
{
@@ -433,9 +888,9 @@ static void *find_res(struct mlx4_dev *dev, u64 res_id,
res_id);
}
-static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
- enum mlx4_resource type,
- void *res)
+static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
+ enum mlx4_resource type,
+ void *res, const char *func_name)
{
struct res_common *r;
int err = 0;
@@ -448,6 +903,10 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
}
if (r->state == RES_ANY_BUSY) {
+ mlx4_warn(dev,
+ "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
+ func_name, slave, res_id, mlx4_resource_type_to_str(type),
+ r->func_name);
err = -EBUSY;
goto exit;
}
@@ -459,6 +918,7 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
r->from_state = r->state;
r->state = RES_ANY_BUSY;
+ r->func_name = func_name;
if (res)
*((struct res_common **)res) = r;
@@ -468,6 +928,9 @@ exit:
return err;
}
+#define get_res(dev, slave, res_id, type, res) \
+ _get_res((dev), (slave), (res_id), (type), (res), __func__)
+
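
Routing every call through a macro that appends __func__ is a cheap way to record, per resource, which caller currently holds it, so the -EBUSY warning above can name both contenders. The pattern in isolation (a hedged sketch with hypothetical names, not driver code):

#include <stdio.h>

/* hypothetical resource carrying an owner tag */
struct my_res {
        int busy;
        const char *func_name;
};

static int _lock_res(struct my_res *res, const char *func_name)
{
        if (res->busy) {
                fprintf(stderr, "%s: busy, already taken by %s\n",
                        func_name, res->func_name);
                return -1;
        }
        res->busy = 1;
        res->func_name = func_name;     /* remembered for the next contender */
        return 0;
}

/* every call site is tagged with its own __func__ automatically */
#define lock_res(res) _lock_res((res), __func__)

put_res() further down resets r->func_name to "" when the resource is released, so a stale caller name never outlives the busy state.
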
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
enum mlx4_resource type,
u64 res_id, int *slave)
@@ -498,16 +961,97 @@ static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
spin_lock_irq(mlx4_tlock(dev));
r = find_res(dev, res_id, type);
- if (r)
+ if (r) {
r->state = r->from_state;
+ r->func_name = "";
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param, int port);
+
+static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
+ int counter_index)
+{
+ struct res_common *r;
+ struct res_counter *counter;
+ int ret = 0;
+
+ if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
+ return ret;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = find_res(dev, counter_index, RES_COUNTER);
+ if (!r || r->owner != slave) {
+ ret = -EINVAL;
+ } else {
+ counter = container_of(r, struct res_counter, com);
+ if (!counter->port)
+ counter->port = port;
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+ return ret;
+}
+
+static int handle_unexisting_counter(struct mlx4_dev *dev,
+ struct mlx4_qp_context *qpc, u8 slave,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *tmp;
+ struct res_counter *counter;
+ u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry(tmp,
+ &tracker->slave_list[slave].res_list[RES_COUNTER],
+ list) {
+ counter = container_of(tmp, struct res_counter, com);
+ if (port == counter->port) {
+ qpc->pri_path.counter_index = counter->com.res_id;
+ spin_unlock_irq(mlx4_tlock(dev));
+ return 0;
+ }
+ }
spin_unlock_irq(mlx4_tlock(dev));
+
+ /* No existing counter, need to allocate a new counter */
+ err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
+ port);
+ if (err == -ENOENT) {
+ err = 0;
+ } else if (err && err != -ENOSPC) {
+ mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
+ __func__, slave, err);
+ } else {
+ qpc->pri_path.counter_index = counter_idx;
+ mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
+ __func__, slave, qpc->pri_path.counter_index);
+ err = 0;
+ }
+
+ return err;
+}
+
+static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
+ u8 slave, int port)
+{
+ if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
+ return handle_existing_counter(dev, slave, port,
+ qpc->pri_path.counter_index);
+
+ return handle_unexisting_counter(dev, qpc, slave, port);
}
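
Taken together, handle_counter() resolves a QP's counter in three steps: if the QPC already names a real counter (anything but the sink index), handle_existing_counter() only validates that the slave owns it and binds it to the port; otherwise the driver first tries to reuse a counter this slave already holds on that port, then allocates a fresh one, and finally falls back to the default sink counter, treating -ENOENT as success so that a VF without counters can still bring up its QPs.
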
static struct res_common *alloc_qp_tr(int id)
{
struct res_qp *ret;
- ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
@@ -525,7 +1069,7 @@ static struct res_common *alloc_mtt_tr(int id, int order)
{
struct res_mtt *ret;
- ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
@@ -541,7 +1085,7 @@ static struct res_common *alloc_mpt_tr(int id, int key)
{
struct res_mpt *ret;
- ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
@@ -556,7 +1100,7 @@ static struct res_common *alloc_eq_tr(int id)
{
struct res_eq *ret;
- ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
@@ -570,7 +1114,7 @@ static struct res_common *alloc_cq_tr(int id)
{
struct res_cq *ret;
- ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
@@ -585,7 +1129,7 @@ static struct res_common *alloc_srq_tr(int id)
{
struct res_srq *ret;
- ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
@@ -596,16 +1140,17 @@ static struct res_common *alloc_srq_tr(int id)
return &ret->com;
}
-static struct res_common *alloc_counter_tr(int id)
+static struct res_common *alloc_counter_tr(int id, int port)
{
struct res_counter *ret;
- ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_COUNTER_ALLOCATED;
+ ret->port = port;
return &ret->com;
}
@@ -614,7 +1159,7 @@ static struct res_common *alloc_xrcdn_tr(int id)
{
struct res_xrcdn *ret;
- ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
@@ -628,7 +1173,7 @@ static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
struct res_fs_rule *ret;
- ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
@@ -663,10 +1208,10 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
ret = alloc_srq_tr(id);
break;
case RES_MAC:
- printk(KERN_ERR "implementation missing\n");
+ pr_err("implementation missing\n");
return NULL;
case RES_COUNTER:
- ret = alloc_counter_tr(id);
+ ret = alloc_counter_tr(id, extra);
break;
case RES_XRCD:
ret = alloc_xrcdn_tr(id);
@@ -683,6 +1228,53 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
return ret;
}
+int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
+ struct mlx4_counter *data)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *tmp;
+ struct res_counter *counter;
+ int *counters_arr;
+ int i = 0, err = 0;
+
+ memset(data, 0, sizeof(*data));
+
+ counters_arr = kmalloc_array(dev->caps.max_counters,
+ sizeof(*counters_arr), GFP_KERNEL);
+ if (!counters_arr)
+ return -ENOMEM;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry(tmp,
+ &tracker->slave_list[slave].res_list[RES_COUNTER],
+ list) {
+ counter = container_of(tmp, struct res_counter, com);
+ if (counter->port == port) {
+ counters_arr[i] = (int)tmp->res_id;
+ i++;
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+ counters_arr[i] = -1;
+
+ i = 0;
+
+ while (counters_arr[i] != -1) {
+ err = mlx4_get_counter_stats(dev, counters_arr[i], data,
+ 0);
+ if (err) {
+ memset(data, 0, sizeof(*data));
+ goto table_changed;
+ }
+ i++;
+ }
+
+table_changed:
+ kfree(counters_arr);
+ return 0;
+}
+
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
@@ -693,7 +1285,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct rb_root *root = &tracker->res_tree[type];
- res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
+ res_arr = kcalloc(count, sizeof(*res_arr), GFP_KERNEL);
if (!res_arr)
return -ENOMEM;
@@ -726,8 +1318,10 @@ static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
return 0;
undo:
- for (--i; i >= base; --i)
+ for (--i; i >= 0; --i) {
rb_erase(&res_arr[i]->node, root);
+ list_del_init(&res_arr[i]->list);
+ }
spin_unlock_irq(mlx4_tlock(dev));
@@ -757,10 +1351,10 @@ static int remove_mtt_ok(struct res_mtt *res, int order)
{
if (res->com.state == RES_MTT_BUSY ||
atomic_read(&res->ref_count)) {
- printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
- __func__, __LINE__,
- mtt_states_str(res->com.state),
- atomic_read(&res->ref_count));
+ pr_devel("%s-%d: state %s, ref_count %d\n",
+ __func__, __LINE__,
+ mtt_states_str(res->com.state),
+ atomic_read(&res->ref_count));
return -EBUSY;
} else if (res->com.state != RES_MTT_ALLOCATED)
return -EPERM;
@@ -854,7 +1448,7 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
case RES_MTT:
return remove_mtt_ok((struct res_mtt *)res, extra);
case RES_MAC:
- return -ENOSYS;
+ return -EOPNOTSUPP;
case RES_EQ:
return remove_eq_ok((struct res_eq *)res);
case RES_COUNTER:
@@ -1063,13 +1657,14 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_EQ_BUSY;
- if (eq)
- *eq = r;
}
}
spin_unlock_irq(mlx4_tlock(dev));
+ if (!err && eq)
+ *eq = r;
+
return err;
}
@@ -1083,43 +1678,29 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
- if (!r)
+ if (!r) {
err = -ENOENT;
- else if (r->com.owner != slave)
+ } else if (r->com.owner != slave) {
err = -EPERM;
- else {
- switch (state) {
- case RES_CQ_BUSY:
- err = -EBUSY;
- break;
-
- case RES_CQ_ALLOCATED:
- if (r->com.state != RES_CQ_HW)
- err = -EINVAL;
- else if (atomic_read(&r->ref_count))
- err = -EBUSY;
- else
- err = 0;
- break;
-
- case RES_CQ_HW:
- if (r->com.state != RES_CQ_ALLOCATED)
- err = -EINVAL;
- else
- err = 0;
- break;
-
- default:
+ } else if (state == RES_CQ_ALLOCATED) {
+ if (r->com.state != RES_CQ_HW)
err = -EINVAL;
- }
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ else
+ err = 0;
+ } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
+ err = -EINVAL;
+ } else {
+ err = 0;
+ }
- if (!err) {
- r->com.from_state = r->com.state;
- r->com.to_state = state;
- r->com.state = RES_CQ_BUSY;
- if (cq)
- *cq = r;
- }
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_CQ_BUSY;
+ if (cq)
+ *cq = r;
}
spin_unlock_irq(mlx4_tlock(dev));
@@ -1128,7 +1709,7 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
- enum res_cq_states state, struct res_srq **srq)
+ enum res_srq_states state, struct res_srq **srq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
@@ -1137,39 +1718,25 @@ static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
- if (!r)
+ if (!r) {
err = -ENOENT;
- else if (r->com.owner != slave)
+ } else if (r->com.owner != slave) {
err = -EPERM;
- else {
- switch (state) {
- case RES_SRQ_BUSY:
+ } else if (state == RES_SRQ_ALLOCATED) {
+ if (r->com.state != RES_SRQ_HW)
err = -EINVAL;
- break;
-
- case RES_SRQ_ALLOCATED:
- if (r->com.state != RES_SRQ_HW)
- err = -EINVAL;
- else if (atomic_read(&r->ref_count))
- err = -EBUSY;
- break;
-
- case RES_SRQ_HW:
- if (r->com.state != RES_SRQ_ALLOCATED)
- err = -EINVAL;
- break;
-
- default:
- err = -EINVAL;
- }
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
+ err = -EINVAL;
+ }
- if (!err) {
- r->com.from_state = r->com.state;
- r->com.to_state = state;
- r->com.state = RES_SRQ_BUSY;
- if (srq)
- *srq = r;
- }
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_SRQ_BUSY;
+ if (srq)
+ *srq = r;
}
spin_unlock_irq(mlx4_tlock(dev));
@@ -1224,17 +1791,29 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
int align;
int base;
int qpn;
+ u8 flags;
switch (op) {
case RES_OP_RESERVE:
- count = get_param_l(&in_param);
+ count = get_param_l(&in_param) & 0xffffff;
+ /* Turn off all unsupported QP allocation flags that the
+ * slave tries to set.
+ */
+ flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
align = get_param_h(&in_param);
- err = __mlx4_qp_reserve_range(dev, count, align, &base);
+ err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
if (err)
return err;
+ err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_QP, count, 0);
+ return err;
+ }
+
err = add_res_range(dev, slave, base, count, RES_QP, 0);
if (err) {
+ mlx4_release_resource(dev, slave, RES_QP, count, 0);
__mlx4_qp_release_range(dev, base, count);
return err;
}
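
For RES_QP reservations the 64-bit in_param is evidently packed as: allocation flags in bits 31:24 of the low dword, the QP count in bits 23:0, and the alignment in the high dword, matching the get_param_l()/get_param_h() unpacking above. A sketch of the corresponding encoder (inferred, illustrative only):

#include <stdint.h>

static uint64_t pack_qp_reserve(uint32_t count, uint8_t flags,
                                uint32_t align)
{
        uint32_t low = (count & 0xffffff) | ((uint32_t)flags << 24);

        return ((uint64_t)align << 32) | low;   /* high dword = alignment */
}

Masking the slave-supplied flags with dev->caps.alloc_res_qp_mask silently drops any allocation flag the device (or the PF) has not enabled, rather than failing the call.
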
@@ -1282,15 +1861,24 @@ static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
order = get_param_l(&in_param);
+
+ err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
+ if (err)
+ return err;
+
base = __mlx4_alloc_mtt_range(dev, order);
- if (base == -1)
+ if (base == -1) {
+ mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
return -ENOMEM;
+ }
err = add_res_range(dev, slave, base, 1, RES_MTT, order);
- if (err)
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
__mlx4_free_mtt_range(dev, base, order);
- else
+ } else {
set_param_l(out_param, base);
+ }
return err;
}
@@ -1305,13 +1893,20 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE:
+ err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
+ if (err)
+ break;
+
index = __mlx4_mpt_reserve(dev);
- if (index == -1)
+ if (index == -1) {
+ mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
break;
+ }
id = index & mpt_mask(dev);
err = add_res_range(dev, slave, id, 1, RES_MPT, index);
if (err) {
+ mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
__mlx4_mpt_release(dev, index);
break;
}
@@ -1345,12 +1940,19 @@ static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE_AND_MAP:
- err = __mlx4_cq_alloc_icm(dev, &cqn);
+ err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
if (err)
break;
+ err = __mlx4_cq_alloc_icm(dev, &cqn);
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
+ break;
+ }
+
err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
if (err) {
+ mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
__mlx4_cq_free_icm(dev, cqn);
break;
}
@@ -1373,12 +1975,19 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE_AND_MAP:
- err = __mlx4_srq_alloc_icm(dev, &srqn);
+ err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
if (err)
break;
+ err = __mlx4_srq_alloc_icm(dev, &srqn);
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
+ break;
+ }
+
err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
if (err) {
+ mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
__mlx4_srq_free_icm(dev, srqn);
break;
}
@@ -1393,17 +2002,51 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
}
-static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
+static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
+ u8 smac_index, u64 *mac)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
- struct mac_res *res;
+ struct list_head *mac_list =
+ &tracker->slave_list[slave].res_list[RES_MAC];
+ struct mac_res *res, *tmp;
- res = kzalloc(sizeof *res, GFP_KERNEL);
- if (!res)
+ list_for_each_entry_safe(res, tmp, mac_list, list) {
+ if (res->smac_index == smac_index && res->port == (u8) port) {
+ *mac = res->mac;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mac_list =
+ &tracker->slave_list[slave].res_list[RES_MAC];
+ struct mac_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, mac_list, list) {
+ if (res->mac == mac && res->port == (u8) port) {
+ /* mac found. update ref count */
+ ++res->ref_count;
+ return 0;
+ }
+ }
+
+ if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
+ return -EINVAL;
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ mlx4_release_resource(dev, slave, RES_MAC, 1, port);
return -ENOMEM;
+ }
res->mac = mac;
res->port = (u8) port;
+ res->smac_index = smac_index;
+ res->ref_count = 1;
list_add_tail(&res->list,
&tracker->slave_list[slave].res_list[RES_MAC]);
return 0;
@@ -1420,8 +2063,11 @@ static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
list_for_each_entry_safe(res, tmp, mac_list, list) {
if (res->mac == mac && res->port == (u8) port) {
- list_del(&res->list);
- kfree(res);
+ if (!--res->ref_count) {
+ list_del(&res->list);
+ mlx4_release_resource(dev, slave, RES_MAC, 1, port);
+ kfree(res);
+ }
break;
}
}
@@ -1434,49 +2080,167 @@ static void rem_slave_macs(struct mlx4_dev *dev, int slave)
struct list_head *mac_list =
&tracker->slave_list[slave].res_list[RES_MAC];
struct mac_res *res, *tmp;
+ int i;
list_for_each_entry_safe(res, tmp, mac_list, list) {
list_del(&res->list);
- __mlx4_unregister_mac(dev, res->port, res->mac);
+ /* dereference the mac the number of times the slave referenced it */
+ for (i = 0; i < res->ref_count; i++)
+ __mlx4_unregister_mac(dev, res->port, res->mac);
+ mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
kfree(res);
}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+ u64 in_param, u64 *out_param, int in_port)
{
int err = -EINVAL;
int port;
u64 mac;
+ u8 smac_index;
if (op != RES_OP_RESERVE_AND_MAP)
return err;
- port = get_param_l(out_param);
+ port = !in_port ? get_param_l(out_param) : in_port;
+ port = mlx4_slave_convert_port(
+ dev, slave, port);
+
+ if (port < 0)
+ return -EINVAL;
mac = in_param;
err = __mlx4_register_mac(dev, port, mac);
if (err >= 0) {
+ smac_index = err;
set_param_l(out_param, err);
err = 0;
}
if (!err) {
- err = mac_add_to_slave(dev, slave, mac, port);
+ err = mac_add_to_slave(dev, slave, mac, port, smac_index);
if (err)
__mlx4_unregister_mac(dev, port, mac);
}
return err;
}
-static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
+ int port, int vlan_index)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *vlan_list =
+ &tracker->slave_list[slave].res_list[RES_VLAN];
+ struct vlan_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, vlan_list, list) {
+ if (res->vlan == vlan && res->port == (u8) port) {
+ /* vlan found. update ref count */
+ ++res->ref_count;
+ return 0;
+ }
+ }
+
+ if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
+ return -EINVAL;
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
+ return -ENOMEM;
+ }
+ res->vlan = vlan;
+ res->port = (u8) port;
+ res->vlan_index = vlan_index;
+ res->ref_count = 1;
+ list_add_tail(&res->list,
+ &tracker->slave_list[slave].res_list[RES_VLAN]);
return 0;
}
+
+static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *vlan_list =
+ &tracker->slave_list[slave].res_list[RES_VLAN];
+ struct vlan_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, vlan_list, list) {
+ if (res->vlan == vlan && res->port == (u8) port) {
+ if (!--res->ref_count) {
+ list_del(&res->list);
+ mlx4_release_resource(dev, slave, RES_VLAN,
+ 1, port);
+ kfree(res);
+ }
+ break;
+ }
+ }
+}
+
+static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *vlan_list =
+ &tracker->slave_list[slave].res_list[RES_VLAN];
+ struct vlan_res *res, *tmp;
+ int i;
+
+ list_for_each_entry_safe(res, tmp, vlan_list, list) {
+ list_del(&res->list);
+ /* dereference the vlan the number of times the slave referenced it */
+ for (i = 0; i < res->ref_count; i++)
+ __mlx4_unregister_vlan(dev, res->port, res->vlan);
+ mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
+ kfree(res);
+ }
+}
+
+static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param, int in_port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ int err;
+ u16 vlan;
+ int vlan_index;
+ int port;
+
+ port = !in_port ? get_param_l(out_param) : in_port;
+
+ if (!port || op != RES_OP_RESERVE_AND_MAP)
+ return -EINVAL;
+
+ port = mlx4_slave_convert_port(
+ dev, slave, port);
+
+ if (port < 0)
+ return -EINVAL;
+ /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
+ if (!in_port && port > 0 && port <= dev->caps.num_ports) {
+ slave_state[slave].old_vlan_api = true;
+ return 0;
+ }
+
+ vlan = (u16) in_param;
+
+ err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
+ if (!err) {
+ set_param_l(out_param, (u32) vlan_index);
+ err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
+ if (err)
+ __mlx4_unregister_vlan(dev, port, vlan);
+ }
+ return err;
+}
+
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+ u64 in_param, u64 *out_param, int port)
{
u32 index;
int err;
@@ -1484,15 +2248,23 @@ static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (op != RES_OP_RESERVE)
return -EINVAL;
- err = __mlx4_counter_alloc(dev, &index);
+ err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
if (err)
return err;
- err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
- if (err)
+ err = __mlx4_counter_alloc(dev, &index);
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+ return err;
+ }
+
+ err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
+ if (err) {
__mlx4_counter_free(dev, index);
- else
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+ } else {
set_param_l(out_param, index);
+ }
return err;
}
@@ -1528,7 +2300,7 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
int err;
int alop = vhcr->op_modifier;
- switch (vhcr->in_modifier) {
+ switch (vhcr->in_modifier & 0xFF) {
case RES_QP:
err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
@@ -1556,17 +2328,19 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
case RES_MAC:
err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param,
+ (vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_VLAN:
err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param,
+ (vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_COUNTER:
err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param, 0);
break;
case RES_XRCD:
@@ -1597,6 +2371,7 @@ static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
err = rem_res_range(dev, slave, base, count, RES_QP, 0);
if (err)
break;
+ mlx4_release_resource(dev, slave, RES_QP, count, 0);
__mlx4_qp_release_range(dev, base, count);
break;
case RES_OP_MAP_ICM:
@@ -1634,8 +2409,10 @@ static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
base = get_param_l(&in_param);
order = get_param_h(&in_param);
err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
- if (!err)
+ if (!err) {
+ mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
__mlx4_free_mtt_range(dev, base, order);
+ }
return err;
}
@@ -1660,19 +2437,19 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
if (err)
break;
+ mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
__mlx4_mpt_release(dev, index);
break;
case RES_OP_MAP_ICM:
- index = get_param_l(&in_param);
- id = index & mpt_mask(dev);
- err = mr_res_start_move_to(dev, slave, id,
- RES_MPT_RESERVED, &mpt);
- if (err)
- return err;
-
- __mlx4_mpt_free_icm(dev, mpt->key);
- res_end_move(dev, slave, RES_MPT, id);
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id,
+ RES_MPT_RESERVED, &mpt);
+ if (err)
return err;
+
+ __mlx4_mpt_free_icm(dev, mpt->key);
+ res_end_move(dev, slave, RES_MPT, id);
break;
default:
err = -EINVAL;
@@ -1694,6 +2471,7 @@ static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
break;
+ mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
__mlx4_cq_free_icm(dev, cqn);
break;
@@ -1718,6 +2496,7 @@ static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
break;
+ mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
__mlx4_srq_free_icm(dev, srqn);
break;
@@ -1730,14 +2509,19 @@ static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+ u64 in_param, u64 *out_param, int in_port)
{
int port;
int err = 0;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
- port = get_param_l(out_param);
+ port = !in_port ? get_param_l(out_param) : in_port;
+ port = mlx4_slave_convert_port(
+ dev, slave, port);
+
+ if (port < 0)
+ return -EINVAL;
mac_del_from_slave(dev, slave, in_param, port);
__mlx4_unregister_mac(dev, port, in_param);
break;
@@ -1751,9 +2535,32 @@ static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
}
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+ u64 in_param, u64 *out_param, int port)
{
- return 0;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ int err = 0;
+
+ port = mlx4_slave_convert_port(
+ dev, slave, port);
+
+ if (port < 0)
+ return -EINVAL;
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ if (slave_state[slave].old_vlan_api)
+ return 0;
+ if (!port)
+ return -EINVAL;
+ vlan_del_from_slave(dev, slave, in_param, port);
+ __mlx4_unregister_vlan(dev, port, in_param);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
@@ -1766,11 +2573,15 @@ static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return -EINVAL;
index = get_param_l(&in_param);
+ if (index == MLX4_SINK_COUNTER_INDEX(dev))
+ return 0;
+
err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
if (err)
return err;
__mlx4_counter_free(dev, index);
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
return err;
}
@@ -1803,7 +2614,7 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
int err = -EINVAL;
int alop = vhcr->op_modifier;
- switch (vhcr->in_modifier) {
+ switch (vhcr->in_modifier & 0xFF) {
case RES_QP:
err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param);
@@ -1831,12 +2642,14 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
case RES_MAC:
err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param,
+ (vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_VLAN:
err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param,
+ (vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_COUNTER:
@@ -1847,6 +2660,7 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
case RES_XRCD:
err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
+ break;
default:
break;
@@ -1909,19 +2723,20 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
int log_rq_stride = qpc->rq_size_stride & 7;
int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
- int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
+ u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
+ int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
int sq_size;
int rq_size;
int total_pages;
int total_mem;
int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+ int tot;
sq_size = 1 << (log_sq_size + log_sq_sride + 4);
rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
total_mem = sq_size + rq_size;
- total_pages =
- roundup_pow_of_two((total_mem + (page_offset << 6)) >>
- page_shift);
+ tot = (total_mem + (page_offset << 6)) >> page_shift;
+ total_pages = !tot ? 1 : roundup_pow_of_two(tot);
return total_pages;
}
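
The tot guard fixes a boundary case: when the queue memory plus the page-offset contribution is smaller than one page (for instance a tiny send queue with the receive side absorbed by an SRQ), the shift yields tot = 0, and roundup_pow_of_two(0) is undefined; clamping the result to one page makes the MTT size check behave for minimal QPs.
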
@@ -1946,7 +2761,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
int err;
int index = vhcr->in_modifier;
struct res_mtt *mtt;
- struct res_mpt *mpt;
+ struct res_mpt *mpt = NULL;
int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
int phys;
int id;
@@ -1967,7 +2782,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
/* Make sure that the PD bits related to the slave id are zeros. */
pd = mr_get_pd(inbox->buf);
pd_slave = (pd >> 17) & 0x7f;
- if (pd_slave != 0 && pd_slave != slave) {
+ if (pd_slave != 0 && --pd_slave != slave) {
err = -EPERM;
goto ex_abort;
}
@@ -2068,12 +2883,34 @@ int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
if (err)
return err;
- if (mpt->com.from_state != RES_MPT_HW) {
+ if (mpt->com.from_state == RES_MPT_MAPPED) {
+ /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
+ * that, the VF must read the MPT. But since the MPT entry memory is not
+ * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
+ * entry contents. To guarantee that the MPT cannot be changed, the driver
+ * must perform HW2SW_MPT before this query and return the MPT entry to HW
+ * ownership fofollowing the change. The change here allows the VF to
+ * perform QUERY_MPT also when the entry is in SW ownership.
+ */
+ struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
+ &mlx4_priv(dev)->mr_table.dmpt_table,
+ mpt->key, NULL);
+
+ if (NULL == mpt_entry || NULL == outbox->buf) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
+
+ err = 0;
+ } else if (mpt->com.from_state == RES_MPT_HW) {
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ } else {
err = -EBUSY;
goto out;
}
- err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
put_res(dev, slave, id, RES_MPT);
@@ -2108,6 +2945,10 @@ static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
context->qkey = cpu_to_be32(qkey);
}
+static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
+ struct mlx4_qp_context *qpc,
+ struct mlx4_cmd_mailbox *inbox);
+
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -2128,13 +2969,23 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
u32 srqn = qp_get_srqn(qpc) & 0xffffff;
int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
struct res_srq *srq;
- int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+ int local_qpn = vhcr->in_modifier & 0xffffff;
+
+ err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
+ if (err)
+ return err;
err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
if (err)
return err;
qp->local_qpn = local_qpn;
qp->sched_queue = 0;
+ qp->param3 = 0;
+ qp->vlan_control = 0;
+ qp->fvl_rx = 0;
+ qp->pri_path_fl = 0;
+ qp->vlan_index = 0;
+ qp->feup = 0;
qp->qpc_flags = be32_to_cpu(qpc->flags);
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
@@ -2182,6 +3033,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
put_res(dev, slave, srqn, RES_SRQ);
qp->srq = srq;
}
+
+ /* Save param3 for dynamic changes from VST back to VGT */
+ qp->param3 = qpc->param3;
put_res(dev, slave, rcqn, RES_CQ);
put_res(dev, slave, mtt_base, RES_MTT);
res_end_move(dev, slave, RES_QP, qpn);
@@ -2244,7 +3098,7 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
{
int err;
int eqn = vhcr->in_modifier;
- int res_id = (slave << 8) | eqn;
+ int res_id = (slave << 10) | eqn;
struct mlx4_eq_context *eqc = inbox->buf;
int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
int mtt_size = eq_get_mtt_size(eqc);
@@ -2285,6 +3139,23 @@ out_add:
return err;
}
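
The EQ tracker id packs the EQ number in the low bits with the slave number above it; widening the shift from 8 to 10 bits (with the matching 0x3ff mask in mlx4_GEN_EQE() further down) leaves room for EQ numbers above 255 without two slaves' ids colliding. Roughly (illustrative):

#include <stdint.h>

static uint32_t eq_res_id(uint32_t slave, uint32_t eqn)
{
        return (slave << 10) | (eqn & 0x3ff);   /* 10 bits for the EQ number */
}
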
+int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ u8 get = vhcr->op_modifier;
+
+ if (get != 1)
+ return -EPERM;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+ return err;
+}
+
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
int len, struct res_mtt **res)
{
@@ -2310,19 +3181,31 @@ static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
}
static int verify_qp_parameters(struct mlx4_dev *dev,
+ struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
enum qp_transition transition, u8 slave)
{
u32 qp_type;
+ u32 qpn;
struct mlx4_qp_context *qp_ctx;
enum mlx4_qp_optpar optpar;
+ int port;
+ int num_gids;
qp_ctx = inbox->buf + 8;
qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
optpar = be32_to_cpu(*(__be32 *) inbox->buf);
+ if (slave != mlx4_master_func_num(dev)) {
+ qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
+ /* setting QP rate-limit is disallowed for VFs */
+ if (qp_ctx->rate_limit_params)
+ return -EPERM;
+ }
+
switch (qp_type) {
case MLX4_QP_ST_RC:
+ case MLX4_QP_ST_XRC:
case MLX4_QP_ST_UC:
switch (transition) {
case QP_TRANS_INIT2RTR:
@@ -2330,20 +3213,46 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
case QP_TRANS_RTS2RTS:
case QP_TRANS_SQD2SQD:
case QP_TRANS_SQD2RTS:
- if (slave != mlx4_master_func_num(dev))
- /* slaves have only gid index 0 */
- if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
- if (qp_ctx->pri_path.mgid_index)
+ if (slave != mlx4_master_func_num(dev)) {
+ if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
+ port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+ if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
+ num_gids = mlx4_get_slave_num_gids(dev, slave, port);
+ else
+ num_gids = 1;
+ if (qp_ctx->pri_path.mgid_index >= num_gids)
return -EINVAL;
- if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
- if (qp_ctx->alt_path.mgid_index)
+ }
+ if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
+ port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
+ if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
+ num_gids = mlx4_get_slave_num_gids(dev, slave, port);
+ else
+ num_gids = 1;
+ if (qp_ctx->alt_path.mgid_index >= num_gids)
return -EINVAL;
+ }
+ }
break;
default:
break;
}
+ break;
+ case MLX4_QP_ST_MLX:
+ qpn = vhcr->in_modifier & 0x7fffff;
+ port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+ if (transition == QP_TRANS_INIT2RTR &&
+ slave != mlx4_master_func_num(dev) &&
+ mlx4_is_qp_reserved(dev, qpn) &&
+ !mlx4_vf_smi_enabled(dev, slave, port)) {
+ /* only enabled VFs may create MLX proxy QPs */
+ mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
+ __func__, slave, port);
+ return -EPERM;
+ }
break;
+
default:
break;
}
@@ -2372,7 +3281,7 @@ int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
/* Call the SW implementation of write_mtt:
* - Prepare a dummy mtt struct
- * - Translate inbox contents to simple addresses in host endianess */
+ * - Translate inbox contents to simple addresses in host endianness */
mtt.offset = 0; /* TBD this is broken but I don't handle it since
we don't really use it */
mtt.order = 0;
@@ -2396,7 +3305,7 @@ int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd)
{
int eqn = vhcr->in_modifier;
- int res_id = eqn | (slave << 8);
+ int res_id = eqn | (slave << 10);
struct res_eq *eq;
int err;
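
Several hunks in this region widen the EQ resource id encoding from eqn | (slave << 8) to eqn | (slave << 10), reserving 10 bits for the EQ number so larger EQNs no longer overlap the slave field (the GEN_EQE in_modifier mask grows from 0xff to 0x3ff for the same reason). A sketch of the packing, assuming the 10-bit layout:

/* illustrative sketch, not part of the patch */
#include <stdint.h>

static inline uint32_t eq_res_id(int slave, int eqn)
{
	return (eqn & 0x3ff) | ((uint32_t)slave << 10);	/* low 10 bits: eqn */
}

static inline int eq_res_id_to_eqn(uint32_t id)   { return id & 0x3ff; }
static inline int eq_res_id_to_slave(uint32_t id) { return id >> 10; }
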
@@ -2440,6 +3349,12 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
if (!priv->mfunc.master.slave_state)
return -EINVAL;
+ /* check that the slave is valid, is not the PF, and is active */
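(The clause order mirrors the checks below: range, not-the-PF, then active.)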
+ if (slave < 0 || slave > dev->persist->num_vfs ||
+ slave == dev->caps.function ||
+ !priv->mfunc.master.slave_state[slave].active)
+ return 0;
+
event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
/* Create the event only if the slave is registered */
@@ -2447,7 +3362,7 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
return 0;
mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
- res_id = (slave << 8) | event_eq->eqn;
+ res_id = (slave << 10) | event_eq->eqn;
err = get_res(dev, slave, res_id, RES_EQ, &req);
if (err)
goto unlock;
@@ -2470,7 +3385,7 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
memcpy(mailbox->buf, (u8 *) eqe, 28);
- in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
+ in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
@@ -2496,7 +3411,7 @@ int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd)
{
int eqn = vhcr->in_modifier;
- int res_id = eqn | (slave << 8);
+ int res_id = eqn | (slave << 10);
struct res_eq *eq;
int err;
@@ -2526,7 +3441,7 @@ int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
int cqn = vhcr->in_modifier;
struct mlx4_cq_context *cqc = inbox->buf;
int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
- struct res_cq *cq;
+ struct res_cq *cq = NULL;
struct res_mtt *mtt;
err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
@@ -2562,7 +3477,7 @@ int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
{
int err;
int cqn = vhcr->in_modifier;
- struct res_cq *cq;
+ struct res_cq *cq = NULL;
err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
if (err)
@@ -2701,7 +3616,7 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
int err;
int srqn = vhcr->in_modifier;
struct res_mtt *mtt;
- struct res_srq *srq;
+ struct res_srq *srq = NULL;
struct mlx4_srq_context *srqc = inbox->buf;
int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
@@ -2745,7 +3660,7 @@ int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
{
int err;
int srqn = vhcr->in_modifier;
- struct res_srq *srq;
+ struct res_srq *srq = NULL;
err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
if (err)
@@ -2850,6 +3765,58 @@ int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
+static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
+ struct mlx4_qp_context *qpc,
+ struct mlx4_cmd_mailbox *inbox)
+{
+ enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
+ u8 pri_sched_queue;
+ int port = mlx4_slave_convert_port(
+ dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
+
+ if (port < 0)
+ return -EINVAL;
+
+ pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
+ ((port & 1) << 6);
+
+ if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
+ qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
+ qpc->pri_path.sched_queue = pri_sched_queue;
+ }
+
+ if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
+ port = mlx4_slave_convert_port(
+ dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
+ + 1) - 1;
+ if (port < 0)
+ return -EINVAL;
+ qpc->alt_path.sched_queue =
+ (qpc->alt_path.sched_queue & ~(1 << 6)) |
+ (port & 1) << 6;
+ }
+ return 0;
+}
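
adjust_qp_sched_queue() works because bit 6 of sched_queue selects the port (0 for port 1, 1 for port 2); translating a VF's logical port to the physical one is therefore a single-bit rewrite. A sketch of that rewrite, with convert_port() standing in for mlx4_slave_convert_port():

/* illustrative sketch, not part of the patch */
#include <stdint.h>

static int rewrite_port_bit(uint8_t *sched_queue,
			    int (*convert_port)(int logical))
{
	int logical = ((*sched_queue >> 6) & 1) + 1;
	int real = convert_port(logical);	/* 1-based, < 0 on error */

	if (real < 0)
		return -1;
	*sched_queue = (*sched_queue & ~(1 << 6)) | (((real - 1) & 1) << 6);
	return 0;
}
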
+
+static int roce_verify_mac(struct mlx4_dev *dev, int slave,
+ struct mlx4_qp_context *qpc,
+ struct mlx4_cmd_mailbox *inbox)
+{
+ u64 mac;
+ int port;
+ u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
+ u8 sched = *(u8 *)(inbox->buf + 64);
+ u8 smac_ix;
+
+ port = (sched >> 6 & 1) + 1;
+ if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
+ smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
+ if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
+ return -ENOENT;
+ }
+ return 0;
+}
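
roce_verify_mac() relies on the low 7 bits of pri_path.grh_mylmc holding the source-MAC index for Ethernet QPs; the transition is refused unless that index resolves to a MAC the slave actually owns. A sketch over a hypothetical per-slave table:

/* illustrative sketch, not part of the patch */
#include <errno.h>
#include <stdint.h>

struct slave_smac_table { int used[128]; };	/* hypothetical */

static int verify_smac_index(const struct slave_smac_table *t,
			     uint8_t grh_mylmc)
{
	uint8_t smac_ix = grh_mylmc & 0x7f;	/* low 7 bits: SMAC index */

	return t->used[smac_ix] ? 0 : -ENOENT;
}
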
+
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -2861,18 +3828,26 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
u8 orig_sched_queue;
+ u8 orig_vlan_control = qpc->pri_path.vlan_control;
+ u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
+ u8 orig_pri_path_fl = qpc->pri_path.fl;
+ u8 orig_vlan_index = qpc->pri_path.vlan_index;
+ u8 orig_feup = qpc->pri_path.feup;
- err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
+ err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
if (err)
return err;
+ err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
+ if (err)
+ return err;
+
+ if (roce_verify_mac(dev, slave, qpc, inbox))
+ return -EINVAL;
update_pkey_index(dev, slave, inbox);
update_gid(dev, inbox, (u8)slave);
adjust_proxy_tun_qkey(dev, vhcr, qpc);
orig_sched_queue = qpc->pri_path.sched_queue;
- err = update_vport_qp_param(dev, inbox, slave, qpn);
- if (err)
- return err;
err = get_res(dev, slave, qpn, RES_QP, &qp);
if (err)
@@ -2882,15 +3857,24 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
goto out;
}
+ err = update_vport_qp_param(dev, inbox, slave, qpn);
+ if (err)
+ goto out;
+
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
/* if no error, save sched queue value passed in by VF. This is
* essentially the QOS value provided by the VF. This will be useful
* if we allow dynamic changes from VST back to VGT
*/
- if (!err)
+ if (!err) {
qp->sched_queue = orig_sched_queue;
-
+ qp->vlan_control = orig_vlan_control;
+ qp->fvl_rx = orig_fvl_rx;
+ qp->pri_path_fl = orig_pri_path_fl;
+ qp->vlan_index = orig_vlan_index;
+ qp->feup = orig_feup;
+ }
put_res(dev, slave, qpn, RES_QP);
return err;
}
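
On a successful INIT2RTR the wrapper now snapshots every VF-supplied VLAN/path field, not just sched_queue, into the tracked res_qp; these saved values are what the VLAN work handler later restores when the VF is moved from VST back to VGT. A compressed sketch of the commit-on-success idea, with hypothetical types:

/* illustrative sketch, not part of the patch */
#include <stdint.h>

struct path_snapshot {	/* VF-provided values, kept for VGT restore */
	uint8_t sched_queue, vlan_control, fvl_rx, fl, vlan_index, feup;
};

static void snapshot_on_success(struct path_snapshot *dst,
				const struct path_snapshot *vf_vals, int err)
{
	if (!err)
		*dst = *vf_vals;	/* only commit if the command succeeded */
}
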
@@ -2904,7 +3888,10 @@ int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
- err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
+ err = adjust_qp_sched_queue(dev, slave, context, inbox);
+ if (err)
+ return err;
+ err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
if (err)
return err;
@@ -2923,7 +3910,10 @@ int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
- err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
+ err = adjust_qp_sched_queue(dev, slave, context, inbox);
+ if (err)
+ return err;
+ err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
if (err)
return err;
@@ -2941,6 +3931,9 @@ int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd)
{
struct mlx4_qp_context *context = inbox->buf + 8;
+ int err = adjust_qp_sched_queue(dev, slave, context, inbox);
+ if (err)
+ return err;
adjust_proxy_tun_qkey(dev, vhcr, context);
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
@@ -2954,7 +3947,10 @@ int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
- err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
+ err = adjust_qp_sched_queue(dev, slave, context, inbox);
+ if (err)
+ return err;
+ err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
if (err)
return err;
@@ -2973,7 +3969,10 @@ int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
- err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
+ err = adjust_qp_sched_queue(dev, slave, context, inbox);
+ if (err)
+ return err;
+ err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
if (err)
return err;
@@ -3033,7 +4032,7 @@ static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
struct res_gid *res;
int err;
- res = kzalloc(sizeof *res, GFP_KERNEL);
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
return -ENOMEM;
@@ -3076,16 +4075,26 @@ static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
return err;
}
-static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- int block_loopback, enum mlx4_protocol prot,
+static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
+ u8 gid[16], int block_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type type, u64 *reg_id)
{
switch (dev->caps.steering_mode) {
- case MLX4_STEERING_MODE_DEVICE_MANAGED:
- return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ int port = mlx4_slave_convert_port(dev, slave, gid[5]);
+ if (port < 0)
+ return port;
+ return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
block_loopback, prot,
reg_id);
+ }
case MLX4_STEERING_MODE_B0:
+ if (prot == MLX4_PROT_ETH) {
+ int port = mlx4_slave_convert_port(dev, slave, gid[5]);
+ if (port < 0)
+ return port;
+ gid[5] = port;
+ }
return mlx4_qp_attach_common(dev, qp, gid,
block_loopback, prot, type);
default:
@@ -3093,9 +4102,9 @@ static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
}
}
-static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- enum mlx4_protocol prot, enum mlx4_steer_type type,
- u64 reg_id)
+static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ u8 gid[16], enum mlx4_protocol prot,
+ enum mlx4_steer_type type, u64 reg_id)
{
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
@@ -3107,6 +4116,25 @@ static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
}
}
+static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
+ u8 *gid, enum mlx4_protocol prot)
+{
+ int real_port;
+
+ if (prot != MLX4_PROT_ETH)
+ return 0;
+
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
+ dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
+ if (real_port < 0)
+ return -EINVAL;
+ gid[5] = real_port;
+ }
+
+ return 0;
+}
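
mlx4_adjust_port() exploits the convention that, for Ethernet steering, byte 5 of the 16-byte GID carries the port number, so port translation is a one-byte in-place rewrite. Sketch:

/* illustrative sketch, not part of the patch */
#include <stdint.h>

static int adjust_gid_port(uint8_t gid[16],
			   int (*convert_port)(int logical))
{
	int real = convert_port(gid[5]);	/* gid[5] holds the port */

	if (real < 0)
		return -1;
	gid[5] = (uint8_t)real;
	return 0;
}
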
+
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -3132,7 +4160,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
qp.qpn = qpn;
if (attach) {
- err = qp_attach(dev, &qp, gid, block_loopback, prot,
+ err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
type, &reg_id);
if (err) {
pr_err("Fail to attach rule to qp 0x%x\n", qpn);
@@ -3142,6 +4170,10 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
if (err)
goto ex_detach;
} else {
+ err = mlx4_adjust_port(dev, slave, gid, prot);
+ if (err)
+ goto ex_put;
+
err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
if (err)
goto ex_put;
@@ -3176,7 +4208,7 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
!is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
list_for_each_entry_safe(res, tmp, rlist, list) {
be_mac = cpu_to_be64(res->mac << 16);
- if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
+ if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
return 0;
}
pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
@@ -3231,7 +4263,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
}
}
if (!be_mac) {
- pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
+ pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
port);
return -EINVAL;
}
@@ -3246,6 +4278,91 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
}
+#define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \
+ 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
+ 1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd_info)
+{
+ int err;
+ u32 qpn = vhcr->in_modifier & 0xffffff;
+ struct res_qp *rqp;
+ u64 mac;
+ unsigned port;
+ u64 pri_addr_path_mask;
+ struct mlx4_update_qp_context *cmd;
+ int smac_index;
+
+ cmd = (struct mlx4_update_qp_context *)inbox->buf;
+
+ pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
+ if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
+ (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
+ return -EPERM;
+
+ if ((pri_addr_path_mask &
+ (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
+ !(dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
+ mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
+ slave);
+ return -EOPNOTSUPP;
+ }
+
+ /* Just change the smac for the QP */
+ err = get_res(dev, slave, qpn, RES_QP, &rqp);
+ if (err) {
+ mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
+ return err;
+ }
+
+ port = (rqp->sched_queue >> 6 & 1) + 1;
+
+ if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
+ smac_index = cmd->qp_context.pri_path.grh_mylmc;
+ err = mac_find_smac_ix_in_slave(dev, slave, port,
+ smac_index, &mac);
+
+ if (err) {
+ mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+ qpn, smac_index);
+ goto err_mac;
+ }
+ }
+
+ err = mlx4_cmd(dev, inbox->dma,
+ vhcr->in_modifier, 0,
+ MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
+ goto err_mac;
+ }
+
+err_mac:
+ put_res(dev, slave, qpn, RES_QP);
+ return err;
+}
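
mlx4_UPDATE_QP_wrapper() whitelists the primary-path attributes a VF may update and rejects everything else up front: qp_mask, the secondary path, and any unlisted path bit. A sketch of the whitelist check; the bit positions here are illustrative, not the driver's real ones:

/* illustrative sketch, not part of the patch */
#include <errno.h>
#include <stdint.h>

#define UPD_PATH_SUPPORTED \
	((1ULL << 0 /* MAC index */) | (1ULL << 1 /* src check LB */))

static int check_update_masks(uint64_t qp_mask, uint64_t sec_path_mask,
			      uint64_t pri_path_mask)
{
	if (qp_mask || sec_path_mask ||
	    (pri_path_mask & ~UPD_PATH_SUPPORTED))
		return -EPERM;	/* VFs may only touch whitelisted bits */
	return 0;
}
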
+
+static u32 qp_attach_mbox_size(void *mbox)
+{
+ u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
+ struct _rule_hw *rule_header;
+
+ rule_header = (struct _rule_hw *)(mbox + size);
+
+ while (rule_header->size) {
+ size += rule_header->size * sizeof(u32);
+ rule_header += 1;
+ }
+ return size;
+}
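
qp_attach_mbox_size() sizes the saved mailbox by walking the variable-length rule headers that follow the fixed control header, each advertising its own size in 32-bit words, until a zero-size header ends the chain. A self-contained sketch of that TLV-style walk (the patch advances the pointer one header struct at a time; the generic form of the pattern, shown here, advances by the advertised size):

/* illustrative sketch, not part of the patch */
#include <stddef.h>
#include <stdint.h>

struct tlv_hdr { uint8_t size; uint8_t rsvd[3]; };	/* size in dwords */

static uint32_t total_rules_size(const void *buf, size_t ctrl_size)
{
	uint32_t size = ctrl_size;
	const struct tlv_hdr *hdr = (const void *)((const char *)buf + size);

	while (hdr->size) {		/* zero-size header ends the chain */
		size += hdr->size * sizeof(uint32_t);
		hdr = (const void *)((const char *)hdr +
				     hdr->size * sizeof(uint32_t));
	}
	return size;
}
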
+
+static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
+
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -3262,26 +4379,35 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
struct _rule_hw *rule_header;
int header_id;
+ struct res_fs_rule *rrule;
+ u32 mbox_size;
if (dev->caps.steering_mode !=
MLX4_STEERING_MODE_DEVICE_MANAGED)
return -EOPNOTSUPP;
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
+ err = mlx4_slave_convert_port(dev, slave, ctrl->port);
+ if (err <= 0)
+ return -EINVAL;
+ ctrl->port = err;
qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err) {
- pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
+ pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
return err;
}
rule_header = (struct _rule_hw *)(ctrl + 1);
header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
+ if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
+ mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
+
switch (header_id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
if (validate_eth_header_mac(slave, rule_header, rlist)) {
err = -EINVAL;
- goto err_put;
+ goto err_put_qp;
}
break;
case MLX4_NET_TRANS_RULE_ID_IB:
@@ -3289,18 +4415,18 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
case MLX4_NET_TRANS_RULE_ID_IPV4:
case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP:
- pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
+ pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
err = -EINVAL;
- goto err_put;
+ goto err_put_qp;
}
vhcr->in_modifier +=
sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
break;
default:
- pr_err("Corrupted mailbox.\n");
+ pr_err("Corrupted mailbox\n");
err = -EINVAL;
- goto err_put;
+ goto err_put_qp;
}
err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
@@ -3308,23 +4434,69 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- goto err_put;
+ goto err_put_qp;
+
err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
if (err) {
- mlx4_err(dev, "Fail to add flow steering resources.\n ");
- /* detach rule*/
+ mlx4_err(dev, "Fail to add flow steering resources\n");
+ goto err_detach;
+ }
+
+ err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
+ if (err)
+ goto err_detach;
+
+ mbox_size = qp_attach_mbox_size(inbox->buf);
+ rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
+ if (!rrule->mirr_mbox) {
+ err = -ENOMEM;
+ goto err_put_rule;
+ }
+ rrule->mirr_mbox_size = mbox_size;
+ rrule->mirr_rule_id = 0;
+ memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
+
+ /* flip the port so the saved copy mirrors the rule on the other port */
+ ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
+ if (ctrl->port == 1)
+ ctrl->port = 2;
+ else
+ ctrl->port = 1;
+
+ if (mlx4_is_bonded(dev))
+ mlx4_do_mirror_rule(dev, rrule);
+
+ atomic_inc(&rqp->ref_count);
+
+err_put_rule:
+ put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
+err_detach:
+ /* detach rule on error */
+ if (err)
mlx4_cmd(dev, vhcr->out_param, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
- goto err_put;
- }
- atomic_inc(&rqp->ref_count);
-err_put:
+err_put_qp:
put_res(dev, slave, qpn, RES_QP);
return err;
}
+static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
+{
+ int err;
+
+ err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
+ if (err) {
+ mlx4_err(dev, "Fail to remove flow steering resources\n");
+ return err;
+ }
+
+ mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+ return 0;
+}
+
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -3334,6 +4506,8 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
int err;
struct res_qp *rqp;
struct res_fs_rule *rrule;
+ u64 mirr_reg_id;
+ int qpn;
if (dev->caps.steering_mode !=
MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -3342,15 +4516,34 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
if (err)
return err;
+
+ if (!rrule->mirr_mbox) {
+ mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
+ put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
+ return -EINVAL;
+ }
+ mirr_reg_id = rrule->mirr_rule_id;
+ kfree(rrule->mirr_mbox);
+ qpn = rrule->qpn;
+
/* Release the rule from busy state before removal */
put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
- err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
+ err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err)
return err;
+ if (mirr_reg_id && mlx4_is_bonded(dev)) {
+ err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
+ if (err) {
+ mlx4_err(dev, "Fail to get resource of mirror rule\n");
+ } else {
+ put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
+ mlx4_undo_mirror_rule(dev, rrule);
+ }
+ }
err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
if (err) {
- mlx4_err(dev, "Fail to remove flow steering resources.\n ");
+ mlx4_err(dev, "Fail to remove flow steering resources\n");
goto out;
}
@@ -3360,7 +4553,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
if (!err)
atomic_dec(&rqp->ref_count);
out:
- put_res(dev, slave, rrule->qpn, RES_QP);
+ put_res(dev, slave, qpn, RES_QP);
return err;
}
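
Note the ordering in the DETACH wrapper: qpn and mirr_rule_id are copied to locals (and the mirror mailbox freed) before put_res() drops the busy reference, since the rule may be freed the moment it is released. The general shape of the fix:

/* illustrative sketch, not part of the patch */
#include <stdint.h>

struct tracked_rule { uint64_t mirr_rule_id; int qpn; };

static void detach(struct tracked_rule *rule,
		   void (*put_ref)(struct tracked_rule *),
		   void (*finish)(int qpn, uint64_t mirr_id))
{
	int qpn = rule->qpn;			/* copy out first ... */
	uint64_t mirr_id = rule->mirr_rule_id;

	put_ref(rule);		/* ... rule may be freed from here on */
	finish(qpn, mirr_id);	/* continue teardown from the copies */
}
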
@@ -3428,7 +4621,7 @@ static int _move_all_busy(struct mlx4_dev *dev, int slave,
if (print)
mlx4_dbg(dev,
"%s id 0x%llx is busy\n",
- ResourceType(type),
+ resource_str(type),
r->res_id);
++busy;
} else {
@@ -3479,8 +4672,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_QP);
if (err)
- mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
- "for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -3497,6 +4690,11 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_QP]);
list_del(&qp->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ if (!valid_reserved(dev, slave, qpn)) {
+ __mlx4_qp_release_range(dev, qpn, 1);
+ mlx4_release_resource(dev, slave,
+ RES_QP, 1, 0);
+ }
kfree(qp);
state = 0;
break;
@@ -3513,10 +4711,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_qps: failed"
- " to move slave %d qpn %d to"
- " reset\n", slave,
- qp->local_qpn);
+ mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
+ slave, qp->local_qpn);
atomic_dec(&qp->rcq->ref_count);
atomic_dec(&qp->scq->ref_count);
atomic_dec(&qp->mtt->ref_count);
@@ -3544,14 +4740,13 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
struct res_srq *tmp;
int state;
u64 in_param;
- LIST_HEAD(tlist);
int srqn;
int err;
err = move_all_busy(dev, slave, RES_SRQ);
if (err)
- mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
@@ -3568,6 +4763,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_SRQ]);
list_del(&srq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ mlx4_release_resource(dev, slave,
+ RES_SRQ, 1, 0);
kfree(srq);
state = 0;
break;
@@ -3579,9 +4776,7 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_srqs: failed"
- " to move slave %d srq %d to"
- " SW ownership\n",
+ mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
slave, srqn);
atomic_dec(&srq->mtt->ref_count);
@@ -3610,14 +4805,13 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
struct res_cq *tmp;
int state;
u64 in_param;
- LIST_HEAD(tlist);
int cqn;
int err;
err = move_all_busy(dev, slave, RES_CQ);
if (err)
- mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
@@ -3634,6 +4828,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_CQ]);
list_del(&cq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ mlx4_release_resource(dev, slave,
+ RES_CQ, 1, 0);
kfree(cq);
state = 0;
break;
@@ -3645,9 +4841,7 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_cqs: failed"
- " to move slave %d cq %d to"
- " SW ownership\n",
+ mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
slave, cqn);
atomic_dec(&cq->mtt->ref_count);
state = RES_CQ_ALLOCATED;
@@ -3673,14 +4867,13 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
struct res_mpt *tmp;
int state;
u64 in_param;
- LIST_HEAD(tlist);
int mptn;
int err;
err = move_all_busy(dev, slave, RES_MPT);
if (err)
- mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
@@ -3697,6 +4890,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_MPT]);
list_del(&mpt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ mlx4_release_resource(dev, slave,
+ RES_MPT, 1, 0);
kfree(mpt);
state = 0;
break;
@@ -3713,9 +4908,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_mrs: failed"
- " to move slave %d mpt %d to"
- " SW ownership\n",
+ mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
slave, mptn);
if (mpt->mtt)
atomic_dec(&mpt->mtt->ref_count);
@@ -3741,14 +4934,13 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
struct res_mtt *mtt;
struct res_mtt *tmp;
int state;
- LIST_HEAD(tlist);
int base;
int err;
err = move_all_busy(dev, slave, RES_MTT);
if (err)
- mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
@@ -3766,6 +4958,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_MTT]);
list_del(&mtt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ mlx4_release_resource(dev, slave, RES_MTT,
+ 1 << mtt->order, 0);
kfree(mtt);
state = 0;
break;
@@ -3780,6 +4974,92 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
spin_unlock_irq(mlx4_tlock(dev));
}
+static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+ struct res_fs_rule *mirr_rule;
+ u64 reg_id;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ if (!fs_rule->mirr_mbox) {
+ mlx4_err(dev, "rule mirroring mailbox is null\n");
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return -EINVAL;
+ }
+ memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
+ err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
+ MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ if (err)
+ goto err;
+
+ err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
+ if (err)
+ goto err_detach;
+
+ err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
+ if (err)
+ goto err_rem;
+
+ fs_rule->mirr_rule_id = reg_id;
+ mirr_rule->mirr_rule_id = 0;
+ mirr_rule->mirr_mbox_size = 0;
+ mirr_rule->mirr_mbox = NULL;
+ put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
+
+ return 0;
+err_rem:
+ rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
+err_detach:
+ mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+err:
+ return err;
+}
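
mlx4_do_mirror_rule() replays the saved attach mailbox (with the flipped port) and links the two rules by reg_id; only the original keeps a mirr_mbox, which is also what the DETACH wrapper uses to refuse explicit removal of a mirror. Sketch of the ownership convention:

/* illustrative sketch, not part of the patch */
#include <stddef.h>
#include <stdint.h>

struct fs_rule { void *mirr_mbox; uint64_t mirr_rule_id; };

/* an original carries the saved mailbox; its mirror carries none and
 * may only be removed together with the original
 */
static int is_mirror(const struct fs_rule *r)
{
	return r->mirr_mbox == NULL;
}
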
+
+static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker =
+ &priv->mfunc.master.res_tracker;
+ struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
+ struct rb_node *p;
+ struct res_fs_rule *fs_rule;
+ int err = 0;
+ LIST_HEAD(mirr_list);
+
+ for (p = rb_first(root); p; p = rb_next(p)) {
+ fs_rule = rb_entry(p, struct res_fs_rule, com.node);
+ if ((bond && fs_rule->mirr_mbox_size) ||
+ (!bond && !fs_rule->mirr_mbox_size))
+ list_add_tail(&fs_rule->mirr_list, &mirr_list);
+ }
+
+ list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
+ if (bond)
+ err += mlx4_do_mirror_rule(dev, fs_rule);
+ else
+ err += mlx4_undo_mirror_rule(dev, fs_rule);
+ }
+ return err;
+}
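
mlx4_mirror_fs_rules() first walks the whole FS_RULE tree collecting candidates onto a local list, then applies or undoes mirroring in a second pass; errors are summed, so the return value only distinguishes "all succeeded" from "something failed". A simplified collect-then-act sketch over a plain list:

/* illustrative sketch, not part of the patch */
struct rule { struct rule *next; int has_mbox; };

static int mirror_all(struct rule *head, int bond,
		      int (*do_mirror)(struct rule *),
		      int (*undo_mirror)(struct rule *))
{
	int err = 0;
	struct rule *r;

	for (r = head; r; r = r->next) {
		if (bond && r->has_mbox)	/* originals gain a mirror */
			err += do_mirror(r);
		else if (!bond && !r->has_mbox)	/* mirrors are undone */
			err += undo_mirror(r);
	}
	return err;	/* nonzero means at least one rule failed */
}
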
+
+int mlx4_bond_fs_rules(struct mlx4_dev *dev)
+{
+ return mlx4_mirror_fs_rules(dev, true);
+}
+
+int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
+{
+ return mlx4_mirror_fs_rules(dev, false);
+}
+
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -3818,6 +5098,7 @@ static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_FS_RULE]);
list_del(&fs_rule->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ kfree(fs_rule->mirr_mbox);
kfree(fs_rule);
state = 0;
break;
@@ -3842,14 +5123,12 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
struct res_eq *tmp;
int err;
int state;
- LIST_HEAD(tlist);
int eqn;
- struct mlx4_cmd_mailbox *mailbox;
err = move_all_busy(dev, slave, RES_EQ);
if (err)
- mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
@@ -3870,21 +5149,13 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
break;
case RES_EQ_HW:
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox)) {
- cond_resched();
- continue;
- }
- err = mlx4_cmd_box(dev, slave, 0,
- eqn & 0xff, 0,
- MLX4_CMD_HW2SW_EQ,
- MLX4_CMD_TIME_CLASS_A,
- MLX4_CMD_NATIVE);
+ err = mlx4_cmd(dev, slave, eqn & 0x3ff,
+ 1, MLX4_CMD_HW2SW_EQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_eqs: failed"
- " to move slave %d eqs %d to"
- " SW ownership\n", slave, eqn);
- mlx4_free_cmd_mailbox(dev, mailbox);
+ mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
+ slave, eqn & 0x3ff);
atomic_dec(&eq->mtt->ref_count);
state = RES_EQ_RESERVED;
break;
@@ -3908,25 +5179,41 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
struct res_counter *counter;
struct res_counter *tmp;
int err;
- int index;
+ int *counters_arr = NULL;
+ int i, j;
err = move_all_busy(dev, slave, RES_COUNTER);
if (err)
- mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
+ slave);
- spin_lock_irq(mlx4_tlock(dev));
- list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
- if (counter->com.owner == slave) {
- index = counter->com.res_id;
- rb_erase(&counter->com.node,
- &tracker->res_tree[RES_COUNTER]);
- list_del(&counter->com.list);
- kfree(counter);
- __mlx4_counter_free(dev, index);
+ counters_arr = kmalloc_array(dev->caps.max_counters,
+ sizeof(*counters_arr), GFP_KERNEL);
+ if (!counters_arr)
+ return;
+
+ do {
+ i = 0;
+ j = 0;
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
+ if (counter->com.owner == slave) {
+ counters_arr[i++] = counter->com.res_id;
+ rb_erase(&counter->com.node,
+ &tracker->res_tree[RES_COUNTER]);
+ list_del(&counter->com.list);
+ kfree(counter);
+ }
}
- }
- spin_unlock_irq(mlx4_tlock(dev));
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ while (j < i) {
+ __mlx4_counter_free(dev, counters_arr[j++]);
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+ }
+ } while (i);
+
+ kfree(counters_arr);
}
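
rem_slave_counters() now frees in two phases: counter ids are harvested from the tracker under the spinlock into an array, and __mlx4_counter_free() runs only after the lock is dropped, looping until no owned counters remain. The generic shape:

/* illustrative sketch, not part of the patch */
static void drain_ids(int *ids, int max,
		      int (*harvest_locked)(int *ids, int max),
		      void (*free_id)(int id))
{
	int n, j;

	do {
		n = harvest_locked(ids, max);	/* unlink under the lock */
		for (j = 0; j < n; j++)
			free_id(ids[j]);	/* may sleep: lock dropped */
	} while (n);
}
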
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
@@ -3942,8 +5229,8 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_XRCD);
if (err)
- mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
@@ -3961,9 +5248,9 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
-
+ mlx4_reset_roce_gids(dev, slave);
mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
- /*VLAN*/
+ rem_slave_vlans(dev, slave);
rem_slave_macs(dev, slave);
rem_slave_fs_rule(dev, slave);
rem_slave_qps(dev, slave);
@@ -3977,6 +5264,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
+static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
+ struct mlx4_vf_immed_vlan_work *work)
+{
+ ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
+ ctx->qp_context.qos_vport = work->qos_vport;
+}
+
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
struct mlx4_vf_immed_vlan_work *work =
@@ -3990,13 +5284,21 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
&tracker->slave_list[work->slave].res_list[RES_QP];
struct res_qp *qp;
struct res_qp *tmp;
- u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
+ u64 qp_path_mask_vlan_ctrl =
+ ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
- (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
- (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
+
+ u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
(1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
int err;
@@ -4022,15 +5324,18 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
else if (!work->vlan_id)
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
- else
+ else if (work->vlan_proto == htons(ETH_P_8021AD))
+ vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ else /* vst 802.1Q */
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
upd_context = mailbox->buf;
- upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
- upd_context->qp_context.pri_path.vlan_control = vlan_control;
- upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
+ upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4048,20 +5353,51 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
spin_lock_irq(mlx4_tlock(dev));
continue;
}
- upd_context->qp_context.pri_path.sched_queue =
- qp->sched_queue & 0xC7;
- upd_context->qp_context.pri_path.sched_queue |=
- ((work->qos & 0x7) << 3);
+ if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
+ upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
+ else
+ upd_context->primary_addr_path_mask =
+ cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
+ if (work->vlan_id == MLX4_VGT) {
+ upd_context->qp_context.param3 = qp->param3;
+ upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
+ upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
+ upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
+ upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
+ upd_context->qp_context.pri_path.feup = qp->feup;
+ upd_context->qp_context.pri_path.sched_queue =
+ qp->sched_queue;
+ } else {
+ upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
+ upd_context->qp_context.pri_path.vlan_control = vlan_control;
+ upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
+ upd_context->qp_context.pri_path.fvl_rx =
+ qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
+ upd_context->qp_context.pri_path.fl =
+ qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ if (work->vlan_proto == htons(ETH_P_8021AD))
+ upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
+ else
+ upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
+ upd_context->qp_context.pri_path.feup =
+ qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
+ upd_context->qp_context.pri_path.sched_queue =
+ qp->sched_queue & 0xC7;
+ upd_context->qp_context.pri_path.sched_queue |=
+ ((work->qos & 0x7) << 3);
+
+ if (dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_QOS_VPP)
+ update_qos_vpp(upd_context, work);
+ }
err = mlx4_cmd(dev, mailbox->dma,
qp->local_qpn & 0xffffff,
0, MLX4_CMD_UPDATE_QP,
MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (err) {
- mlx4_info(dev, "UPDATE_QP failed for slave %d, "
- "port %d, qpn %d (%d)\n",
- work->slave, port, qp->local_qpn,
- err);
+ mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
+ work->slave, port, qp->local_qpn, err);
errors++;
}
}
@@ -4080,7 +5416,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
NO_INDX != work->orig_vlan_ix)
__mlx4_unregister_vlan(&work->priv->dev, work->port,
- work->orig_vlan_ix);
+ work->orig_vlan_id);
out:
kfree(work);
return;