Diffstat (limited to 'drivers/net/ethernet/cavium/liquidio/request_manager.c')
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/request_manager.c  235
1 file changed, 158 insertions, 77 deletions
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 7b297f1f6dbe..d7cfb20eea00 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -40,15 +40,6 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);
 static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1])
 	(void *);
 
-static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
-{
-	struct octeon_instr_queue *iq =
-	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
-	return iq->iqcmd_64B;
-}
-
-#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))
-
 /* Define this to return the request status comaptible to old code */
 /*#define OCTEON_USE_OLD_REQ_STATUS*/
 
@@ -77,13 +68,6 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 		return 1;
 	}
 
-	if (num_descs & (num_descs - 1)) {
-		dev_err(&oct->pci_dev->dev,
-			"Number of descriptors for instr queue %d not in power of 2.\n",
-			iq_no);
-		return 1;
-	}
-
 	q_size = (u32)conf->instr_type * num_descs;
 
 	iq = oct->instr_queue[iq_no];
@@ -102,11 +86,10 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 	/* Initialize a list to holds requests that have been posted to Octeon
 	 * but has yet to be fetched by octeon
 	 */
-	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
-					numa_node);
+	iq->request_list = vzalloc_node(array_size(num_descs, sizeof(*iq->request_list)),
+					numa_node);
 	if (!iq->request_list)
-		iq->request_list = vmalloc(sizeof(*iq->request_list) *
-					   num_descs);
+		iq->request_list = vzalloc(array_size(num_descs, sizeof(*iq->request_list)));
 	if (!iq->request_list) {
 		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
 		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
@@ -114,10 +97,8 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 		return 1;
 	}
 
-	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);
-
-	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
-		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);
+	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n",
+		iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count);
 
 	iq->txpciq.u64 = txpciq.u64;
 	iq->fill_threshold = (u32)conf->db_min;
@@ -129,23 +110,29 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 	iq->do_auto_flush = 1;
 	iq->db_timeout = (u32)conf->db_timeout;
 	atomic_set(&iq->instr_pending, 0);
+	iq->pkts_processed = 0;
 
 	/* Initialize the spinlock for this instruction queue */
 	spin_lock_init(&iq->lock);
-	spin_lock_init(&iq->post_lock);
+	if (iq_no == 0) {
+		iq->allow_soft_cmds = true;
+		spin_lock_init(&iq->post_lock);
+	} else {
+		iq->allow_soft_cmds = false;
+	}
 
 	spin_lock_init(&iq->iq_flush_running_lock);
 
 	oct->io_qmask.iq |= BIT_ULL(iq_no);
 
 	/* Set the 32B/64B mode for each input queue */
-	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
+	oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
 	iq->iqcmd_64B = (conf->instr_type == 64);
 
 	oct->fn_list.setup_iq_regs(oct, iq_no);
 
 	oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
-						     WQ_MEM_RECLAIM,
+						     WQ_MEM_RECLAIM | WQ_PERCPU,
 						     0);
 	if (!oct->check_db_wq[iq_no].wq) {
 		vfree(iq->request_list);
@@ -190,10 +177,15 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
 		q_size = iq->max_count * desc_size;
 		lio_dma_free(oct, (u32)q_size, iq->base_addr,
 			     iq->base_addr_dma);
+		oct->io_qmask.iq &= ~(1ULL << iq_no);
+		vfree(oct->instr_queue[iq_no]);
+		oct->instr_queue[iq_no] = NULL;
+		oct->num_iqs--;
 		return 0;
 	}
 	return 1;
 }
+EXPORT_SYMBOL_GPL(octeon_delete_instr_queue);
 
 /* Return 0 on success, 1 on failure */
 int octeon_setup_iq(struct octeon_device *oct,
@@ -214,15 +206,13 @@ int octeon_setup_iq(struct octeon_device *oct,
 		return 0;
 	}
 	oct->instr_queue[iq_no] =
-	    vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
+	    vzalloc_node(sizeof(struct octeon_instr_queue), numa_node);
 	if (!oct->instr_queue[iq_no])
 		oct->instr_queue[iq_no] =
-		    vmalloc(sizeof(struct octeon_instr_queue));
+		    vzalloc(sizeof(struct octeon_instr_queue));
 	if (!oct->instr_queue[iq_no])
 		return 1;
 
-	memset(oct->instr_queue[iq_no], 0,
-	       sizeof(struct octeon_instr_queue));
 
 	oct->instr_queue[iq_no]->q_index = q_index;
 	oct->instr_queue[iq_no]->app_ctx = app_ctx;
@@ -235,8 +225,10 @@ int octeon_setup_iq(struct octeon_device *oct,
 	}
 
 	oct->num_iqs++;
-	if (oct->fn_list.enable_io_queues(oct))
+	if (oct->fn_list.enable_io_queues(oct)) {
+		octeon_delete_instr_queue(oct, iq_no);
 		return 1;
+	}
 
 	return 0;
 }
@@ -267,6 +259,7 @@ int lio_wait_for_instr_fetch(struct octeon_device *oct)
 
 	return instr_cnt;
 }
+EXPORT_SYMBOL_GPL(lio_wait_for_instr_fetch);
 
 static inline void
 ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
@@ -274,13 +267,25 @@ ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
 	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
 		writel(iq->fill_cnt, iq->doorbell_reg);
 		/* make sure doorbell write goes through */
-		mmiowb();
 		iq->fill_cnt = 0;
 		iq->last_db_time = jiffies;
 		return;
 	}
 }
 
+void
+octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no)
+{
+	struct octeon_instr_queue *iq;
+
+	iq = oct->instr_queue[iq_no];
+	spin_lock(&iq->post_lock);
+	if (iq->fill_cnt)
+		ring_doorbell(oct, iq);
+	spin_unlock(&iq->post_lock);
+}
+EXPORT_SYMBOL_GPL(octeon_ring_doorbell_locked);
+
 static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
 				      u8 *cmd)
 {
@@ -343,6 +348,7 @@ octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(octeon_register_reqtype_free_fn);
 
 static inline void
 __add_to_request_list(struct octeon_instr_queue *iq,
@@ -357,13 +363,13 @@ int
 lio_process_iq_request_list(struct octeon_device *oct,
 			    struct octeon_instr_queue *iq, u32 napi_budget)
 {
+	struct cavium_wq *cwq = &oct->dma_comp_wq;
 	int reqtype;
 	void *buf;
 	u32 old = iq->flush_index;
 	u32 inst_count = 0;
 	unsigned int pkts_compl = 0, bytes_compl = 0;
 	struct octeon_soft_command *sc;
-	struct octeon_instr_irh *irh;
 	unsigned long flags;
 
 	while (old != iq->octeon_read_index) {
@@ -385,40 +391,21 @@ lio_process_iq_request_list(struct octeon_device *oct,
 		case REQTYPE_RESP_NET:
 		case REQTYPE_SOFT_COMMAND:
 			sc = buf;
-
-			if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))
-				irh = (struct octeon_instr_irh *)
-					&sc->cmd.cmd3.irh;
-			else
-				irh = (struct octeon_instr_irh *)
-					&sc->cmd.cmd2.irh;
-			if (irh->rflag) {
-				/* We're expecting a response from Octeon.
-				 * It's up to lio_process_ordered_list() to
-				 * process sc. Add sc to the ordered soft
-				 * command response list because we expect
-				 * a response from Octeon.
-				 */
-				spin_lock_irqsave
-					(&oct->response_list
-					 [OCTEON_ORDERED_SC_LIST].lock,
-					 flags);
-				atomic_inc(&oct->response_list
-					   [OCTEON_ORDERED_SC_LIST].
-					   pending_req_count);
-				list_add_tail(&sc->node, &oct->response_list
-					      [OCTEON_ORDERED_SC_LIST].head);
-				spin_unlock_irqrestore
-					(&oct->response_list
-					 [OCTEON_ORDERED_SC_LIST].lock,
-					 flags);
-			} else {
-				if (sc->callback) {
-					/* This callback must not sleep */
-					sc->callback(oct, OCTEON_REQUEST_DONE,
-						     sc->callback_arg);
-				}
-			}
+			/* We're expecting a response from Octeon.
+			 * It's up to lio_process_ordered_list() to
+			 * process sc. Add sc to the ordered soft
+			 * command response list because we expect
+			 * a response from Octeon.
+			 */
+			spin_lock_irqsave(&oct->response_list
+					  [OCTEON_ORDERED_SC_LIST].lock, flags);
+			atomic_inc(&oct->response_list
+				   [OCTEON_ORDERED_SC_LIST].pending_req_count);
+			list_add_tail(&sc->node, &oct->response_list
+				      [OCTEON_ORDERED_SC_LIST].head);
+			spin_unlock_irqrestore(&oct->response_list
					       [OCTEON_ORDERED_SC_LIST].lock,
+					       flags);
 			break;
 		default:
 			dev_err(&oct->pci_dev->dev,
@@ -441,8 +428,13 @@ lio_process_iq_request_list(struct octeon_device *oct,
 						bytes_compl);
 	iq->flush_index = old;
 
+	if (atomic_read(&oct->response_list
+			[OCTEON_ORDERED_SC_LIST].pending_req_count))
+		queue_work(cwq->wq, &cwq->wk.work.work);
+
 	return inst_count;
 }
+EXPORT_SYMBOL_GPL(lio_process_iq_request_list);
 
 /* Can only be called from process context */
 int
@@ -475,13 +467,12 @@ octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
 				lio_process_iq_request_list(oct, iq, 0);
 
 		if (inst_processed) {
+			iq->pkts_processed += inst_processed;
 			atomic_sub(inst_processed, &iq->instr_pending);
 			iq->stats.instr_processed += inst_processed;
 		}
 
 		tot_inst_processed += inst_processed;
-		inst_processed = 0;
-
 	} while (tot_inst_processed < napi_budget);
 
 	if (napi_budget && (tot_inst_processed >= napi_budget))
@@ -546,29 +537,33 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no, u32 force_db,
 		    void *cmd, void *buf, u32 datasize,
 		    u32 reqtype)
 {
+	int xmit_stopped;
 	struct iq_post_status st;
 	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
 
 	/* Get the lock and prevent other tasks and tx interrupt handler from
 	 * running.
 	 */
-	spin_lock_bh(&iq->post_lock);
+	if (iq->allow_soft_cmds)
+		spin_lock_bh(&iq->post_lock);
 
 	st = __post_command2(iq, cmd);
 
 	if (st.status != IQ_SEND_FAILED) {
-		octeon_report_sent_bytes_to_bql(buf, reqtype);
+		xmit_stopped = octeon_report_sent_bytes_to_bql(buf, reqtype);
 		__add_to_request_list(iq, st.index, buf, reqtype);
 		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
 		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
 
-		if (force_db)
+		if (iq->fill_cnt >= MAX_OCTEON_FILL_COUNT || force_db ||
+		    xmit_stopped || st.status == IQ_SEND_STOP)
 			ring_doorbell(oct, iq);
 	} else {
 		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
 	}
 
-	spin_unlock_bh(&iq->post_lock);
+	if (iq->allow_soft_cmds)
+		spin_unlock_bh(&iq->post_lock);
 
 	/* This is only done here to expedite packets being flushed
 	 * for cases where there are no IQ completion interrupts.
@@ -576,6 +571,7 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
 
 	return st.status;
 }
+EXPORT_SYMBOL_GPL(octeon_send_command);
 
 void
 octeon_prepare_soft_command(struct octeon_device *oct,
@@ -614,7 +610,8 @@ octeon_prepare_soft_command(struct octeon_device *oct,
 		pki_ih3->tag     = LIO_CONTROL;
 		pki_ih3->tagtype = ATOMIC_TAG;
 		pki_ih3->qpg     =
-			oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
+			oct->instr_queue[sc->iq_no]->txpciq.s.ctrl_qpg;
+
 		pki_ih3->pm      = 0x7;
 		pki_ih3->sl      = 8;
 
@@ -682,15 +679,25 @@ octeon_prepare_soft_command(struct octeon_device *oct,
 		}
 	}
 }
+EXPORT_SYMBOL_GPL(octeon_prepare_soft_command);
 
 int octeon_send_soft_command(struct octeon_device *oct,
 			     struct octeon_soft_command *sc)
 {
+	struct octeon_instr_queue *iq;
 	struct octeon_instr_ih2 *ih2;
 	struct octeon_instr_ih3 *ih3;
 	struct octeon_instr_irh *irh;
 	u32 len;
 
+	iq = oct->instr_queue[sc->iq_no];
+	if (!iq->allow_soft_cmds) {
+		dev_err(&oct->pci_dev->dev, "Soft commands are not allowed on Queue %d\n",
+			sc->iq_no);
+		INCR_INSTRQUEUE_PKT_COUNT(oct, sc->iq_no, instr_dropped, 1);
+		return IQ_SEND_FAILED;
+	}
+
 	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
 		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
 		if (ih3->dlengsz) {
@@ -721,12 +728,12 @@ int octeon_send_soft_command(struct octeon_device *oct,
 		len = (u32)ih2->dlengsz;
 	}
 
-	if (sc->wait_time)
-		sc->timeout = jiffies + sc->wait_time;
+	sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);
 
 	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
 				    len, REQTYPE_SOFT_COMMAND));
 }
+EXPORT_SYMBOL_GPL(octeon_send_soft_command);
 
 int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
 {
@@ -756,12 +763,80 @@ int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(octeon_setup_sc_buffer_pool);
+
+int octeon_free_sc_done_list(struct octeon_device *oct)
+{
+	struct octeon_response_list *done_sc_list, *zombie_sc_list;
+	struct octeon_soft_command *sc;
+	struct list_head *tmp, *tmp2;
+	spinlock_t *sc_lists_lock; /* lock for response_list */
+
+	done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST];
+	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+
+	if (!atomic_read(&done_sc_list->pending_req_count))
+		return 0;
+
+	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+	spin_lock_bh(sc_lists_lock);
+
+	list_for_each_safe(tmp, tmp2, &done_sc_list->head) {
+		sc = list_entry(tmp, struct octeon_soft_command, node);
+
+		if (READ_ONCE(sc->caller_is_done)) {
+			list_del(&sc->node);
+			atomic_dec(&done_sc_list->pending_req_count);
+
+			if (*sc->status_word == COMPLETION_WORD_INIT) {
+				/* timeout; move sc to zombie list */
+				list_add_tail(&sc->node, &zombie_sc_list->head);
+				atomic_inc(&zombie_sc_list->pending_req_count);
+			} else {
+				octeon_free_soft_command(oct, sc);
+			}
+		}
+	}
+
+	spin_unlock_bh(sc_lists_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(octeon_free_sc_done_list);
+
+int octeon_free_sc_zombie_list(struct octeon_device *oct)
+{
+	struct octeon_response_list *zombie_sc_list;
+	struct octeon_soft_command *sc;
+	struct list_head *tmp, *tmp2;
+	spinlock_t *sc_lists_lock; /* lock for response_list */
+
+	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+	spin_lock_bh(sc_lists_lock);
+
+	list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) {
+		list_del(tmp);
+		atomic_dec(&zombie_sc_list->pending_req_count);
+		sc = list_entry(tmp, struct octeon_soft_command, node);
+		octeon_free_soft_command(oct, sc);
+	}
+
+	spin_unlock_bh(sc_lists_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(octeon_free_sc_zombie_list);
 
 int octeon_free_sc_buffer_pool(struct octeon_device *oct)
 {
 	struct list_head *tmp, *tmp2;
 	struct octeon_soft_command *sc;
 
+	octeon_free_sc_zombie_list(oct);
+
 	spin_lock_bh(&oct->sc_buf_pool.lock);
 
 	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
@@ -778,6 +853,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(octeon_free_sc_buffer_pool);
 
 struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
 						      u32 datasize,
@@ -790,6 +866,9 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
 	struct octeon_soft_command *sc = NULL;
 	struct list_head *tmp;
 
+	if (!rdatasize)
+		rdatasize = 16;
+
 	WARN_ON((offset + datasize + rdatasize + ctxsize) >
 		SOFT_COMMAND_BUFFER_SIZE);
 
@@ -846,6 +925,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
 
 	return sc;
 }
+EXPORT_SYMBOL_GPL(octeon_alloc_soft_command);
 
 void octeon_free_soft_command(struct octeon_device *oct,
 			      struct octeon_soft_command *sc)
@@ -858,3 +938,4 @@ void octeon_free_soft_command(struct octeon_device *oct,
 
 	spin_unlock_bh(&oct->sc_buf_pool.lock);
 }
+EXPORT_SYMBOL_GPL(octeon_free_soft_command);
