Diffstat (limited to 'drivers/crypto/chelsio/chcr_core.c')
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c  223
1 file changed, 139 insertions, 84 deletions
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index b6dd9cbe815f..39c70e6255f9 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -1,4 +1,4 @@
-/**
+/*
  * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
  *
  * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
@@ -26,13 +26,10 @@
 #include "chcr_core.h"
 #include "cxgb4_uld.h"
 
-static LIST_HEAD(uld_ctx_list);
-static DEFINE_MUTEX(dev_mutex);
-static atomic_t dev_count;
-static struct uld_ctx *ctx_rr;
+static struct chcr_driver_data drv_data;
 
-typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
-static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
+typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
+static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
 static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
 static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
 
@@ -43,13 +40,36 @@ static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
 static struct cxgb4_uld_info chcr_uld_info = {
 	.name = DRV_MODULE_NAME,
 	.nrxq = MAX_ULD_QSETS,
-	.ntxq = MAX_ULD_QSETS,
+	/* Max ntxq will be derived from fw config file*/
 	.rxq_size = 1024,
 	.add = chcr_uld_add,
 	.state_change = chcr_uld_state_change,
 	.rx_handler = chcr_uld_rx_handler,
 };
 
+static void detach_work_fn(struct work_struct *work)
+{
+	struct chcr_dev *dev;
+
+	dev = container_of(work, struct chcr_dev, detach_work.work);
+
+	if (atomic_read(&dev->inflight)) {
+		dev->wqretry--;
+		if (dev->wqretry) {
+			pr_debug("Request Inflight Count %d\n",
+				 atomic_read(&dev->inflight));
+
+			schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+		} else {
+			WARN(1, "CHCR:%d request Still Pending\n",
+			     atomic_read(&dev->inflight));
+			complete(&dev->detach_comp);
+		}
+	} else {
+		complete(&dev->detach_comp);
+	}
+}
+
 struct uld_ctx *assign_chcr_device(void)
 {
 	struct uld_ctx *u_ctx = NULL;
@@ -60,67 +80,78 @@ struct uld_ctx *assign_chcr_device(void)
 	 * Although One session must use the same device to
 	 * maintain request-response ordering.
 	 */
-	mutex_lock(&dev_mutex);
-	if (!list_empty(&uld_ctx_list)) {
-		u_ctx = ctx_rr;
-		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
-			ctx_rr = list_first_entry(&uld_ctx_list,
-						  struct uld_ctx,
-						  entry);
+	mutex_lock(&drv_data.drv_mutex);
+	if (!list_empty(&drv_data.act_dev)) {
+		u_ctx = drv_data.last_dev;
+		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
+						  struct uld_ctx, entry);
 		else
-			ctx_rr = list_next_entry(ctx_rr, entry);
+			drv_data.last_dev =
+				list_next_entry(drv_data.last_dev, entry);
 	}
-	mutex_unlock(&dev_mutex);
+	mutex_unlock(&drv_data.drv_mutex);
 	return u_ctx;
 }
 
-static int chcr_dev_add(struct uld_ctx *u_ctx)
+static void chcr_dev_add(struct uld_ctx *u_ctx)
 {
 	struct chcr_dev *dev;
 
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-	if (!dev)
-		return -ENXIO;
+	dev = &u_ctx->dev;
+	dev->state = CHCR_ATTACH;
+	atomic_set(&dev->inflight, 0);
+	mutex_lock(&drv_data.drv_mutex);
+	list_move(&u_ctx->entry, &drv_data.act_dev);
+	if (!drv_data.last_dev)
+		drv_data.last_dev = u_ctx;
+	mutex_unlock(&drv_data.drv_mutex);
+}
 
+static void chcr_dev_init(struct uld_ctx *u_ctx)
+{
+	struct chcr_dev *dev;
+
+	dev = &u_ctx->dev;
 	spin_lock_init(&dev->lock_chcr_dev);
-	u_ctx->dev = dev;
-	dev->u_ctx = u_ctx;
-	atomic_inc(&dev_count);
-	mutex_lock(&dev_mutex);
-	list_add_tail(&u_ctx->entry, &uld_ctx_list);
-	if (!ctx_rr)
-		ctx_rr = u_ctx;
-	mutex_unlock(&dev_mutex);
-	return 0;
+	INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
+	init_completion(&dev->detach_comp);
+	dev->state = CHCR_INIT;
+	dev->wqretry = WQ_RETRY;
+	atomic_inc(&drv_data.dev_count);
+	atomic_set(&dev->inflight, 0);
+	mutex_lock(&drv_data.drv_mutex);
+	list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
+	mutex_unlock(&drv_data.drv_mutex);
 }
 
-static int chcr_dev_remove(struct uld_ctx *u_ctx)
+static int chcr_dev_move(struct uld_ctx *u_ctx)
 {
-	if (ctx_rr == u_ctx) {
-		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
-			ctx_rr = list_first_entry(&uld_ctx_list,
-						  struct uld_ctx,
-						  entry);
+	mutex_lock(&drv_data.drv_mutex);
+	if (drv_data.last_dev == u_ctx) {
+		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
						  struct uld_ctx, entry);
 		else
-			ctx_rr = list_next_entry(ctx_rr, entry);
+			drv_data.last_dev =
+				list_next_entry(drv_data.last_dev, entry);
 	}
-	list_del(&u_ctx->entry);
-	if (list_empty(&uld_ctx_list))
-		ctx_rr = NULL;
-	kfree(u_ctx->dev);
-	u_ctx->dev = NULL;
-	atomic_dec(&dev_count);
+	list_move(&u_ctx->entry, &drv_data.inact_dev);
+	if (list_empty(&drv_data.act_dev))
+		drv_data.last_dev = NULL;
+	atomic_dec(&drv_data.dev_count);
+	mutex_unlock(&drv_data.drv_mutex);
+	return 0;
 }
 
-static int cpl_fw6_pld_handler(struct chcr_dev *dev,
+static int cpl_fw6_pld_handler(struct adapter *adap,
 			       unsigned char *input)
 {
 	struct crypto_async_request *req;
 	struct cpl_fw6_pld *fw6_pld;
 	u32 ack_err_status = 0;
 	int error_status = 0;
-	struct adapter *adap = padap(dev);
 
 	fw6_pld = (struct cpl_fw6_pld *)input;
 	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
@@ -128,12 +159,8 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
 
 	ack_err_status =
 		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
-	if (ack_err_status) {
-		if (CHK_MAC_ERR_BIT(ack_err_status) ||
-		    CHK_PAD_ERR_BIT(ack_err_status))
-			error_status = -EBADMSG;
-		atomic_inc(&adap->chcr_stats.error);
-	}
+	if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
+		error_status = -EBADMSG;
 	/* call completion callback with failure status */
 	if (req) {
 		error_status = chcr_handle_resp(req, input, error_status);
@@ -141,6 +168,9 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
 		pr_err("Incorrect request address from the firmware\n");
 		return -EFAULT;
 	}
+	if (error_status)
+		atomic_inc(&adap->chcr_stats.error);
+
 	return 0;
 }
 
@@ -154,16 +184,18 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
 	struct uld_ctx *u_ctx;
 
 	/* Create the device and add it in the device list */
+	pr_info_once("%s\n", DRV_DESC);
+	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	/* Create the device and add it in the device list */
 	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
 	if (!u_ctx) {
 		u_ctx = ERR_PTR(-ENOMEM);
 		goto out;
 	}
-	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) {
-		u_ctx = ERR_PTR(-ENOMEM);
-		goto out;
-	}
 	u_ctx->lldi = *lld;
+	chcr_dev_init(u_ctx);
 out:
 	return u_ctx;
 }
@@ -172,21 +204,40 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
 			const struct pkt_gl *pgl)
 {
 	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
-	struct chcr_dev *dev = u_ctx->dev;
+	struct chcr_dev *dev = &u_ctx->dev;
+	struct adapter *adap = padap(dev);
 	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;
 
-	if (rpl->opcode != CPL_FW6_PLD) {
-		pr_err("Unsupported opcode\n");
+	if (!work_handlers[rpl->opcode]) {
+		pr_err("Unsupported opcode %d received\n", rpl->opcode);
 		return 0;
 	}
 
 	if (!pgl)
-		work_handlers[rpl->opcode](dev, (unsigned char *)&rsp[1]);
+		work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
 	else
-		work_handlers[rpl->opcode](dev, pgl->va);
+		work_handlers[rpl->opcode](adap, pgl->va);
 	return 0;
 }
 
+static void chcr_detach_device(struct uld_ctx *u_ctx)
+{
+	struct chcr_dev *dev = &u_ctx->dev;
+
+	if (dev->state == CHCR_DETACH) {
+		pr_debug("Detached Event received for already detach device\n");
+		return;
+	}
+	dev->state = CHCR_DETACH;
+	if (atomic_read(&dev->inflight) != 0) {
+		schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+		wait_for_completion(&dev->detach_comp);
+	}
+
+	// Move u_ctx to inactive_dev list
+	chcr_dev_move(u_ctx);
+}
+
 static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 {
 	struct uld_ctx *u_ctx = handle;
@@ -194,22 +245,17 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 
 	switch (state) {
 	case CXGB4_STATE_UP:
-		if (!u_ctx->dev) {
-			ret = chcr_dev_add(u_ctx);
-			if (ret != 0)
-				return ret;
+		if (u_ctx->dev.state != CHCR_INIT) {
+			// ALready Initialised.
+			return 0;
 		}
-		if (atomic_read(&dev_count) == 1)
-			ret = start_crypto();
+		chcr_dev_add(u_ctx);
+		ret = start_crypto();
 		break;
 
 	case CXGB4_STATE_DETACH:
-		if (u_ctx->dev) {
-			mutex_lock(&dev_mutex);
-			chcr_dev_remove(u_ctx);
-			mutex_unlock(&dev_mutex);
-		}
-		if (!atomic_read(&dev_count))
+		chcr_detach_device(u_ctx);
+		if (!atomic_read(&drv_data.dev_count))
 			stop_crypto();
 		break;
 
@@ -223,8 +269,12 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 
 static int __init chcr_crypto_init(void)
 {
-	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
-		pr_err("ULD register fail: No chcr crypto support in cxgb4");
+	INIT_LIST_HEAD(&drv_data.act_dev);
+	INIT_LIST_HEAD(&drv_data.inact_dev);
+	atomic_set(&drv_data.dev_count, 0);
+	mutex_init(&drv_data.drv_mutex);
+	drv_data.last_dev = NULL;
+	cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);
 
 	return 0;
 }
@@ -232,19 +282,25 @@ static int __init chcr_crypto_init(void)
 static void __exit chcr_crypto_exit(void)
 {
 	struct uld_ctx *u_ctx, *tmp;
+	struct adapter *adap;
 
-	if (atomic_read(&dev_count))
-		stop_crypto();
-
+	stop_crypto();
+	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
 	/* Remove all devices from list */
-	mutex_lock(&dev_mutex);
-	list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
-		if (u_ctx->dev)
-			chcr_dev_remove(u_ctx);
+	mutex_lock(&drv_data.drv_mutex);
+	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+		adap = padap(&u_ctx->dev);
+		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+		list_del(&u_ctx->entry);
 		kfree(u_ctx);
 	}
-	mutex_unlock(&dev_mutex);
-	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
+	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+		adap = padap(&u_ctx->dev);
+		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+		list_del(&u_ctx->entry);
+		kfree(u_ctx);
+	}
+	mutex_unlock(&drv_data.drv_mutex);
 }
 
 module_init(chcr_crypto_init);
@@ -253,4 +309,3 @@ module_exit(chcr_crypto_exit);
 MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Chelsio Communications");
-MODULE_VERSION(DRV_VERSION);
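For readers following the new detach path above: chcr_detach_device() marks the device CHCR_DETACH and, if requests are still in flight, arms a delayed work item (detach_work_fn) that re-checks the inflight counter with a bounded retry budget and signals a completion once the device has drained (or WARNs and gives up after WQ_RETRY attempts), while the detach path blocks in wait_for_completion(). The sketch below is a minimal userspace C model of that drain-and-wait pattern using an atomic counter and a pthread condition variable in place of the kernel's delayed workqueue and struct completion; the names (inflight, WQ_RETRY, WQ_DETACH_TM) mirror the diff, but none of this is driver code.

/*
 * Illustrative userspace model of the quiesce pattern used by
 * chcr_detach_device()/detach_work_fn().  A polling loop with a retry
 * budget stands in for the re-armed delayed work; a condition variable
 * stands in for the completion.  (Unlike the driver, which WARNs and
 * stops waiting once the budget is spent, this model keeps waiting.)
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define WQ_RETRY     50       /* retry budget, as in the driver */
#define WQ_DETACH_TM 50000    /* poll interval, microseconds (illustrative) */

static atomic_int inflight;   /* requests submitted but not yet completed */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drained = PTHREAD_COND_INITIALIZER;

/* Completing a request drops the count and wakes the detach waiter. */
static void complete_request(void)
{
	if (atomic_fetch_sub(&inflight, 1) == 1) {
		pthread_mutex_lock(&lock);
		pthread_cond_broadcast(&drained);
		pthread_mutex_unlock(&lock);
	}
}

static void *worker(void *arg)
{
	(void)arg;
	usleep(100000);          /* pretend the hardware takes a while */
	complete_request();
	return NULL;
}

/* Detach: stop issuing new work, then wait for outstanding requests. */
static void detach_device(void)
{
	int retry = WQ_RETRY;

	while (atomic_load(&inflight) && --retry) {
		printf("Request inflight count %d\n", atomic_load(&inflight));
		usleep(WQ_DETACH_TM);   /* stands in for re-arming the delayed work */
	}

	pthread_mutex_lock(&lock);
	while (atomic_load(&inflight))  /* final wait, like wait_for_completion() */
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);

	printf("device quiesced, safe to move it to the inactive list\n");
}

int main(void)
{
	pthread_t threads[4];

	atomic_store(&inflight, 4);
	for (int i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, worker, NULL);

	detach_device();

	for (int i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);
	return 0;
}

The design point the commit makes is the same as in this model: a PCI shutdown/detach event must not free per-device state while responses are still outstanding, so the device is only moved from the active to the inactive list after the inflight count reaches zero (or the retry budget is exhausted).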
