diff options
Diffstat (limited to 'drivers/net/ethernet/netronome/nfp/flower/metadata.c')
| -rw-r--r-- | drivers/net/ethernet/netronome/nfp/flower/metadata.c | 27 |
1 file changed, 23 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index f448c5682594..dde60c4572fa 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -339,7 +339,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie, goto err_free_ctx_entry; } - /* Do net allocate a mask-id for pre_tun_rules. These flows are used to + /* Do not allocate a mask-id for pre_tun_rules. These flows are used to * configure the pre_tun table and are never actually send to the * firmware as an add-flow message. This causes the mask-id allocation * on the firmware to get out of sync if allocated here. @@ -502,6 +502,12 @@ const struct rhashtable_params nfp_ct_map_params = { .automatic_shrinking = true, }; +const struct rhashtable_params neigh_table_params = { + .key_offset = offsetof(struct nfp_neigh_entry, neigh_cookie), + .head_offset = offsetof(struct nfp_neigh_entry, ht_node), + .key_len = sizeof(unsigned long), +}; + int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, unsigned int host_num_mems) { @@ -522,6 +528,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, if (err) goto err_free_stats_ctx_table; + mutex_init(&priv->nfp_fl_lock); + err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params); if (err) goto err_free_merge_table; @@ -530,6 +538,12 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, if (err) goto err_free_ct_zone_table; + err = rhashtable_init(&priv->neigh_table, &neigh_table_params); + if (err) + goto err_free_ct_map_table; + + INIT_LIST_HEAD(&priv->predt_list); + get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed)); /* Init ring buffer and unallocated mask_ids. 
*/ @@ -537,7 +551,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS, NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL); if (!priv->mask_ids.mask_id_free_list.buf) - goto err_free_ct_map_table; + goto err_free_neigh_table; priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1; @@ -550,8 +564,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, /* Init ring buffer and unallocated stats_ids. */ priv->stats_ids.free_list.buf = - vmalloc(array_size(NFP_FL_STATS_ELEM_RS, - priv->stats_ring_size)); + vmalloc_array(priv->stats_ring_size, + NFP_FL_STATS_ELEM_RS); if (!priv->stats_ids.free_list.buf) goto err_free_last_used; @@ -565,6 +579,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, goto err_free_ring_buf; spin_lock_init(&priv->stats_lock); + spin_lock_init(&priv->predt_lock); return 0; @@ -574,6 +589,8 @@ err_free_last_used: kfree(priv->mask_ids.last_used); err_free_mask_id: kfree(priv->mask_ids.mask_id_free_list.buf); +err_free_neigh_table: + rhashtable_destroy(&priv->neigh_table); err_free_ct_map_table: rhashtable_destroy(&priv->ct_map_table); err_free_ct_zone_table: @@ -700,6 +717,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app) rhashtable_free_and_destroy(&priv->ct_map_table, nfp_free_map_table_entry, NULL); + rhashtable_free_and_destroy(&priv->neigh_table, + nfp_check_rhashtable_empty, NULL); kvfree(priv->stats); kfree(priv->mask_ids.mask_id_free_list.buf); kfree(priv->mask_ids.last_used); |
