Diffstat (limited to 'drivers/net/ethernet/sfc/siena/rx_common.c')
-rw-r--r--  drivers/net/ethernet/sfc/siena/rx_common.c  116
1 file changed, 62 insertions(+), 54 deletions(-)
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
index 9fb442da043c..4579f43484c3 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.c
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -30,6 +30,9 @@ MODULE_PARM_DESC(rx_refill_threshold,
*/
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf);
+
/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
@@ -103,9 +106,9 @@ static void efx_recycle_rx_page(struct efx_channel *channel,
}
/* Recycle the pages that are used by buffers that have just been received. */
-void efx_recycle_rx_pages(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf,
- unsigned int n_frags)
+void efx_siena_recycle_rx_pages(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
{
struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
@@ -118,15 +121,15 @@ void efx_recycle_rx_pages(struct efx_channel *channel,
} while (--n_frags);
}
-void efx_discard_rx_packet(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf,
- unsigned int n_frags)
+void efx_siena_discard_rx_packet(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
{
struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
- efx_recycle_rx_pages(channel, rx_buf, n_frags);
+ efx_siena_recycle_rx_pages(channel, rx_buf, n_frags);
- efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
}
static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
@@ -181,12 +184,12 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
/* If this is the last buffer in a page, unmap and free it. */
if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
- efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
}
rx_buf->page = NULL;
}
-int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
+int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
unsigned int entries;
@@ -217,7 +220,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
return rc;
}
-void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
+void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue)
{
unsigned int max_fill, trigger, max_trigger;
struct efx_nic *efx = rx_queue->efx;
@@ -272,7 +275,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
efx_nic_init_rx(rx_queue);
}
-void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
+void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_rx_buffer *rx_buf;
int i;
@@ -301,7 +304,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->xdp_rxq_info_valid = false;
}
-void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
+void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
@@ -315,8 +318,8 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
/* Unmap a DMA-mapped page. This function is only called for the final RX
* buffer in a page.
*/
-void efx_unmap_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf)
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf)
{
struct page *page = rx_buf->page;
@@ -330,9 +333,9 @@ void efx_unmap_rx_buffer(struct efx_nic *efx,
}
}
-void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf,
- unsigned int num_bufs)
+void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int num_bufs)
{
do {
if (rx_buf->page) {
@@ -343,7 +346,7 @@ void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
} while (--num_bufs);
}
-void efx_rx_slow_fill(struct timer_list *t)
+void efx_siena_rx_slow_fill(struct timer_list *t)
{
struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
@@ -352,7 +355,7 @@ void efx_rx_slow_fill(struct timer_list *t)
++rx_queue->slow_fill_count;
}
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
+static void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}
@@ -425,7 +428,7 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
return 0;
}
-void efx_rx_config_page_split(struct efx_nic *efx)
+void efx_siena_rx_config_page_split(struct efx_nic *efx)
{
efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
@@ -439,7 +442,7 @@ void efx_rx_config_page_split(struct efx_nic *efx)
efx->rx_bufs_per_page);
}
-/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
+/* efx_siena_fast_push_rx_descriptors - push new RX descriptors quickly
* @rx_queue: RX descriptor queue
*
* This will aim to fill the RX descriptor queue up to
@@ -450,7 +453,8 @@ void efx_rx_config_page_split(struct efx_nic *efx)
* this means this function must run from the NAPI handler, or be called
* when NAPI is disabled.
*/
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
+void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
+ bool atomic)
{
struct efx_nic *efx = rx_queue->efx;
unsigned int fill_level, batch_size;
@@ -517,7 +521,7 @@ efx_siena_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_queue *rx_queue;
rx_queue = efx_channel_get_rx_queue(channel);
- efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
return;
}
@@ -556,7 +560,7 @@ efx_siena_rx_packet_gro(struct efx_channel *channel,
/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
* (a) this is an infrequent control-plane operation and (b) n is small (max 64)
*/
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
+struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx)
{
struct list_head *head = &efx->rss_context.list;
struct efx_rss_context *ctx, *new;
@@ -589,7 +593,8 @@ struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
return new;
}
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
+struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
+ u32 id)
{
struct list_head *head = &efx->rss_context.list;
struct efx_rss_context *ctx;
@@ -602,14 +607,14 @@ struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
return NULL;
}
-void efx_free_rss_context_entry(struct efx_rss_context *ctx)
+void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx)
{
list_del(&ctx->list);
kfree(ctx);
}
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
- struct efx_rss_context *ctx)
+void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
+ struct efx_rss_context *ctx)
{
size_t i;
@@ -619,7 +624,7 @@ void efx_set_default_rx_indir_table(struct efx_nic *efx,
}
/**
- * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
+ * efx_siena_filter_is_mc_recipient - test whether spec is a multicast recipient
* @spec: Specification to test
*
* Return: %true if the specification is a non-drop RX filter that
@@ -627,7 +632,7 @@ void efx_set_default_rx_indir_table(struct efx_nic *efx,
* IPv4 or IPv6 address value in the respective multicast address
* range. Otherwise %false.
*/
-bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
+bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
@@ -652,8 +657,8 @@ bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
return false;
}
-bool efx_filter_spec_equal(const struct efx_filter_spec *left,
- const struct efx_filter_spec *right)
+bool efx_siena_filter_spec_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right)
{
if ((left->match_flags ^ right->match_flags) |
((left->flags ^ right->flags) &
@@ -665,7 +670,7 @@ bool efx_filter_spec_equal(const struct efx_filter_spec *left,
offsetof(struct efx_filter_spec, outer_vid)) == 0;
}
-u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
+u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec)
{
BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
return jhash2((const u32 *)&spec->outer_vid,
@@ -675,8 +680,8 @@ u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
}
#ifdef CONFIG_RFS_ACCEL
-bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
- bool *force)
+bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule,
+ unsigned int filter_idx, bool *force)
{
if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
/* ARFS is currently updating this entry, leave it */
@@ -692,7 +697,7 @@ bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
/* ARFS has moved on, so old filter is not needed. Since we did
* not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
- * not be removed by efx_rps_hash_del() subsequently.
+ * not be removed by efx_siena_rps_hash_del() subsequently.
*/
*force = true;
return true;
@@ -705,7 +710,7 @@ static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
const struct efx_filter_spec *spec)
{
- u32 hash = efx_filter_spec_hash(spec);
+ u32 hash = efx_siena_filter_spec_hash(spec);
lockdep_assert_held(&efx->rps_hash_lock);
if (!efx->rps_hash_table)
@@ -713,7 +718,7 @@ struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
}
-struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx,
const struct efx_filter_spec *spec)
{
struct efx_arfs_rule *rule;
@@ -725,15 +730,15 @@ struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
return NULL;
hlist_for_each(node, head) {
rule = container_of(node, struct efx_arfs_rule, node);
- if (efx_filter_spec_equal(spec, &rule->spec))
+ if (efx_siena_filter_spec_equal(spec, &rule->spec))
return rule;
}
return NULL;
}
-struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
- const struct efx_filter_spec *spec,
- bool *new)
+static struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ bool *new)
{
struct efx_arfs_rule *rule;
struct hlist_head *head;
@@ -744,7 +749,7 @@ struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
return NULL;
hlist_for_each(node, head) {
rule = container_of(node, struct efx_arfs_rule, node);
- if (efx_filter_spec_equal(spec, &rule->spec)) {
+ if (efx_siena_filter_spec_equal(spec, &rule->spec)) {
*new = false;
return rule;
}
@@ -758,7 +763,8 @@ struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
return rule;
}
-void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
+void efx_siena_rps_hash_del(struct efx_nic *efx,
+ const struct efx_filter_spec *spec)
{
struct efx_arfs_rule *rule;
struct hlist_head *head;
@@ -769,7 +775,7 @@ void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
return;
hlist_for_each(node, head) {
rule = container_of(node, struct efx_arfs_rule, node);
- if (efx_filter_spec_equal(spec, &rule->spec)) {
+ if (efx_siena_filter_spec_equal(spec, &rule->spec)) {
/* Someone already reused the entry. We know that if
* this check doesn't fire (i.e. filter_id == REMOVING)
* then the REMOVING mark was put there by our caller,
@@ -788,7 +794,7 @@ void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
}
#endif
-int efx_probe_filters(struct efx_nic *efx)
+int efx_siena_probe_filters(struct efx_nic *efx)
{
int rc;
@@ -835,7 +841,7 @@ out_unlock:
return rc;
}
-void efx_remove_filters(struct efx_nic *efx)
+void efx_siena_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
struct efx_channel *channel;
@@ -870,7 +876,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
rc %= efx->type->max_rx_ip_filters;
if (efx->rps_hash_table) {
spin_lock_bh(&efx->rps_hash_lock);
- rule = efx_rps_hash_find(efx, &req->spec);
+ rule = efx_siena_rps_hash_find(efx, &req->spec);
/* The rule might have already gone, if someone else's request
* for the same spec was already worked and then expired before
* we got around to our work. In that case we have nothing
@@ -930,8 +936,9 @@ static void efx_filter_rfs_work(struct work_struct *data)
/* We're overloading the NIC's filter tables, so let's do a
* chunk of extra expiry work.
*/
- __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
- 100u));
+ __efx_siena_filter_rfs_expire(channel,
+ min(channel->rfs_filter_count,
+ 100u));
}
/* Release references */
@@ -939,8 +946,8 @@ static void efx_filter_rfs_work(struct work_struct *data)
dev_put(req->net_dev);
}
-int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
- u16 rxq_index, u32 flow_id)
+int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_async_filter_insertion *req;
@@ -1041,7 +1048,8 @@ out_clear:
return rc;
}
-bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
+bool __efx_siena_filter_rfs_expire(struct efx_channel *channel,
+ unsigned int quota)
{
bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
struct efx_nic *efx = channel->efx;
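
Every hunk in this diff applies one of two mechanical changes. Helpers called from outside this file gain an efx_siena_ prefix (presumably so the forked siena driver can be built and loaded alongside the plain sfc driver without symbol clashes), while helpers used only within rx_common.c -- efx_unmap_rx_buffer, efx_schedule_slow_fill, efx_rps_hash_add -- become static, with a forward declaration added near the top of the file where a helper is called before its definition. Below is a minimal compilable sketch of both moves; the names (siena_remove_queue, unmap_buffer) are hypothetical stand-ins, not the real driver symbols.

/* rename_sketch.c - illustrative only; not sfc driver code. */
#include <stdio.h>

/* Pattern 1: the helper goes static; this forward declaration keeps
 * earlier call sites compiling (cf. the efx_unmap_rx_buffer declaration
 * added in the first hunk of the diff). */
static void unmap_buffer(int buf);

/* Pattern 2: was "void remove_queue(int buf)"; the driver-specific
 * prefix keeps the exported symbol from colliding with a sibling
 * driver's identically named function in the same kernel. */
void siena_remove_queue(int buf)
{
	unmap_buffer(buf);	/* call site precedes the definition below */
}

static void unmap_buffer(int buf)	/* was non-static (external linkage) */
{
	printf("unmapped RX buffer %d\n", buf);
}

int main(void)
{
	siena_remove_queue(42);
	return 0;
}

Beyond avoiding name clashes, making the file-local helpers static shrinks the module's exported surface and lets the compiler inline or discard them; code elsewhere in the tree sees only the efx_siena_*-prefixed entry points.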