Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/cq.c')
 drivers/net/ethernet/mellanox/mlx4/cq.c | 106 ++++++++++++++++++++++------
 1 file changed, 84 insertions(+), 22 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index c56a511b918e..e130e7259275 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -55,11 +55,11 @@
 #define TASKLET_MAX_TIME 2
 #define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
 
-void mlx4_cq_tasklet_cb(unsigned long data)
+void mlx4_cq_tasklet_cb(struct tasklet_struct *t)
 {
 	unsigned long flags;
 	unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
-	struct mlx4_eq_tasklet *ctx = (struct mlx4_eq_tasklet *)data;
+	struct mlx4_eq_tasklet *ctx = from_tasklet(ctx, t, task);
 	struct mlx4_cq *mcq, *temp;
 
 	spin_lock_irqsave(&ctx->lock, flags);
@@ -69,7 +69,7 @@ void mlx4_cq_tasklet_cb(unsigned long data)
 	list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {
 		list_del_init(&mcq->tasklet_ctx.list);
 		mcq->tasklet_ctx.comp(mcq);
-		if (atomic_dec_and_test(&mcq->refcount))
+		if (refcount_dec_and_test(&mcq->refcount))
 			complete(&mcq->free);
 		if (time_after(jiffies, end))
 			break;
@@ -92,7 +92,7 @@ static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
 	 * still arrive.
 	 */
 	if (list_empty_careful(&cq->tasklet_ctx.list)) {
-		atomic_inc(&cq->refcount);
+		refcount_inc(&cq->refcount);
 		kick = list_empty(&tasklet_ctx->list);
 		list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
 		if (kick)
@@ -115,7 +115,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
 		return;
 	}
 
-	/* Acessing the CQ outside of rcu_read_lock is safe, because
+	/* Accessing the CQ outside of rcu_read_lock is safe, because
 	 * the CQ is freed only after interrupt handling is completed.
 	 */
 	++cq->arm_sn;
@@ -137,16 +137,16 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
 		return;
 	}
 
-	/* Acessing the CQ outside of rcu_read_lock is safe, because
+	/* Accessing the CQ outside of rcu_read_lock is safe, because
 	 * the CQ is freed only after interrupt handling is completed.
 	 */
 	cq->event(cq, event_type);
 }
 
 static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
-			 int cq_num)
+			 int cq_num, u8 opmod)
 {
-	return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
+	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod,
 			MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
 			MLX4_CMD_WRAPPED);
 }
@@ -241,13 +241,14 @@ err_out:
 	return err;
 }
 
-static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn, u8 usage)
 {
+	u32 in_modifier = RES_CQ | (((u32)usage & 3) << 30);
 	u64 out_param;
 	int err;
 
 	if (mlx4_is_mfunc(dev)) {
-		err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
+		err = mlx4_cmd_imm(dev, 0, &out_param, in_modifier,
 				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
 				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 		if (err)
@@ -286,11 +287,64 @@ static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
 		__mlx4_cq_free_icm(dev, cqn);
 }
 
+static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)
+{
+	int entries_per_copy = PAGE_SIZE / cqe_size;
+	void *init_ents;
+	int err = 0;
+	int i;
+
+	init_ents = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!init_ents)
+		return -ENOMEM;
+
+	/* Populate a list of CQ entries to reduce the number of
+	 * copy_to_user calls. 0xcc is the initialization value
+	 * required by the FW.
+	 */
+	memset(init_ents, 0xcc, PAGE_SIZE);
+
+	if (entries_per_copy < entries) {
+		for (i = 0; i < entries / entries_per_copy; i++) {
+			err = copy_to_user((void __user *)buf, init_ents, PAGE_SIZE) ?
+				-EFAULT : 0;
+			if (err)
+				goto out;
+
+			buf += PAGE_SIZE;
+		}
+	} else {
+		err = copy_to_user((void __user *)buf, init_ents,
+				   array_size(entries, cqe_size)) ?
+			-EFAULT : 0;
+	}
+
+out:
+	kfree(init_ents);
+
+	return err;
+}
+
+static void mlx4_init_kernel_cqes(struct mlx4_buf *buf,
+				  int entries,
+				  int cqe_size)
+{
+	int i;
+
+	if (buf->nbufs == 1)
+		memset(buf->direct.buf, 0xcc, entries * cqe_size);
+	else
+		for (i = 0; i < buf->npages; i++)
+			memset(buf->page_list[i].buf, 0xcc,
+			       1UL << buf->page_shift);
+}
+
 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 		  struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
 		  struct mlx4_cq *cq, unsigned vector, int collapsed,
-		  int timestamp_en)
+		  int timestamp_en, void *buf_addr, bool user_cq)
 {
+	bool sw_cq_init = dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SW_CQ_INIT;
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cq_table *cq_table = &priv->cq_table;
 	struct mlx4_cmd_mailbox *mailbox;
@@ -303,7 +357,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 
 	cq->vector = vector;
 
-	err = mlx4_cq_alloc_icm(dev, &cq->cqn);
+	err = mlx4_cq_alloc_icm(dev, &cq->cqn, cq->usage);
 	if (err)
 		return err;
 
@@ -335,7 +389,20 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
 	cq_context->db_rec_addr = cpu_to_be64(db_rec);
 
-	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
+	if (sw_cq_init) {
+		if (user_cq) {
+			err = mlx4_init_user_cqes(buf_addr, nent,
+						  dev->caps.cqe_size);
+			if (err)
+				sw_cq_init = false;
+		} else {
+			mlx4_init_kernel_cqes(buf_addr, nent,
+					      dev->caps.cqe_size);
+		}
+	}
+
+	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn, sw_cq_init);
+
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	if (err)
 		goto err_radix;
@@ -343,7 +410,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	cq->cons_index = 0;
 	cq->arm_sn = 1;
 	cq->uar = uar;
-	atomic_set(&cq->refcount, 1);
+	refcount_set(&cq->refcount, 1);
 	init_completion(&cq->free);
 	cq->comp = mlx4_add_cq_to_tasklet;
 	cq->tasklet_ctx.priv =
@@ -385,7 +452,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
 	    priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
 		synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
 
-	if (atomic_dec_and_test(&cq->refcount))
+	if (refcount_dec_and_test(&cq->refcount))
 		complete(&cq->free);
 	wait_for_completion(&cq->free);
 
@@ -396,19 +463,14 @@ EXPORT_SYMBOL_GPL(mlx4_cq_free);
 int mlx4_init_cq_table(struct mlx4_dev *dev)
 {
 	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
-	int err;
 
 	spin_lock_init(&cq_table->lock);
 	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
 	if (mlx4_is_slave(dev))
 		return 0;
 
-	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
-			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
-	if (err)
-		return err;
-
-	return 0;
+	return mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
+				dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
 }
 
 void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
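Note: the first hunk converts mlx4_cq_tasklet_cb() to the new-style tasklet API, in which the callback receives the tasklet_struct itself rather than an unsigned long cookie. The matching registration side is not part of this diff (in mlx4 it lives in mlx4_create_eq() in eq.c). The sketch below shows how tasklet_setup() pairs with the from_tasklet() lookup used above; the struct layout mirrors the driver's mlx4.h, but the init helper name is illustrative.

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct mlx4_eq_tasklet {
	struct list_head list;		/* CQs queued for completion work */
	struct list_head process_list;	/* CQs currently being processed */
	struct tasklet_struct task;
	spinlock_t lock;		/* protects 'list' */
};

void mlx4_cq_tasklet_cb(struct tasklet_struct *t);

static void example_eq_tasklet_init(struct mlx4_eq_tasklet *ctx)
{
	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->process_list);
	spin_lock_init(&ctx->lock);
	/* No 'data' cookie is needed: the callback recovers ctx with
	 * from_tasklet(), i.e. container_of() on the 'task' member.
	 */
	tasklet_setup(&ctx->task, mlx4_cq_tasklet_cb);
}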
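Note: the mlx4_cq_alloc() hunks add two parameters, buf_addr and user_cq, so the CQE ring can be pre-filled with 0xcc when the device reports MLX4_DEV_CAP_FLAG2_SW_CQ_INIT; the opmod passed to SW2HW_CQ then tells the FW that software already initialized the CQEs, and a copy_to_user() failure falls back to opmod 0. The snippet below approximates the kernel-side call as made by the Ethernet driver (mlx4_en_activate_cq() in en_cq.c); variable names are taken from that context and are illustrative here. A user-owned CQ (mlx4_ib) would instead pass the userspace buffer address with user_cq = true, routing through mlx4_init_user_cqes().

	/* Kernel-owned CQ: buf_addr is the mlx4_buf backing the CQE
	 * ring, so mlx4_init_kernel_cqes() can memset() it directly.
	 */
	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
			    cq->vector, 0, timestamp_en,
			    &cq->wqres.buf, false);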
