author      Tal Gilboa <talgi@mellanox.com>        2019-01-10 17:33:17 +0200
committer   Saeed Mahameed <saeedm@mellanox.com>   2019-06-25 13:46:39 -0700
commit      4f75da3666c0c572967729a2401ac650be5581b6 (patch)
tree        d050bfc18a162cd7fc85703e473d298e2a042f8d /lib/dim/net_dim.c
parent      8960b38932bee8db0bc9c4d8c135f21df6cdd297 (diff)
linux/dim: Move implementation to .c files
Moved all logic from dim.h and net_dim.h to dim.c and net_dim.c. This is both more structurally appealing and allows exposing only the externally used functions.

Signed-off-by: Tal Gilboa <talgi@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
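As context for the exported API, here is a minimal sketch of how a driver would typically consume these functions. The my_rx_queue structure, my_rx_update_dim()/my_rx_dim_work() functions and the my_set_cq_moderation() hardware hook are hypothetical names used only for illustration; they are not part of this patch.

#include <linux/dim.h>
#include <linux/workqueue.h>

/* Illustrative sketch only, not part of this commit. */
struct my_rx_queue {
        struct dim dim;         /* per-queue DIM state, INIT_WORK'ed with my_rx_dim_work() */
        u16 event_ctr;
        u64 packets;
        u64 bytes;
};

/* Hypothetical hardware hook that programs the CQ moderation registers. */
static void my_set_cq_moderation(struct my_rx_queue *rq, u16 usec, u16 pkts);

/* Completion path: hand the current per-queue counters to the DIM engine. */
static void my_rx_update_dim(struct my_rx_queue *rq)
{
        struct dim_sample sample;

        dim_update_sample(rq->event_ctr, rq->packets, rq->bytes, &sample);
        net_dim(&rq->dim, sample);
}

/* Worker scheduled by net_dim() once a new moderation profile has been chosen. */
static void my_rx_dim_work(struct work_struct *work)
{
        struct dim *dim = container_of(work, struct dim, work);
        struct my_rx_queue *rq = container_of(dim, struct my_rx_queue, dim);
        struct dim_cq_moder moder =
                net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

        my_set_cq_moderation(rq, moder.usec, moder.pkts);
        dim->state = DIM_START_MEASURE;
}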
Diffstat (limited to 'lib/dim/net_dim.c')
-rw-r--r--    lib/dim/net_dim.c    190
1 file changed, 190 insertions, 0 deletions
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
new file mode 100644
index 000000000000..5bcc902c5388
--- /dev/null
+++ b/lib/dim/net_dim.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/dim.h>
+
+struct dim_cq_moder
+net_dim_get_rx_moderation(u8 cq_period_mode, int ix)
+{
+        struct dim_cq_moder cq_moder = rx_profile[cq_period_mode][ix];
+
+        cq_moder.cq_period_mode = cq_period_mode;
+        return cq_moder;
+}
+EXPORT_SYMBOL(net_dim_get_rx_moderation);
+
+struct dim_cq_moder
+net_dim_get_def_rx_moderation(u8 cq_period_mode)
+{
+        u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
+                        NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;
+
+        return net_dim_get_rx_moderation(cq_period_mode, profile_ix);
+}
+EXPORT_SYMBOL(net_dim_get_def_rx_moderation);
+
+struct dim_cq_moder
+net_dim_get_tx_moderation(u8 cq_period_mode, int ix)
+{
+        struct dim_cq_moder cq_moder = tx_profile[cq_period_mode][ix];
+
+        cq_moder.cq_period_mode = cq_period_mode;
+        return cq_moder;
+}
+EXPORT_SYMBOL(net_dim_get_tx_moderation);
+
+struct dim_cq_moder
+net_dim_get_def_tx_moderation(u8 cq_period_mode)
+{
+        u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
+                        NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;
+
+        return net_dim_get_tx_moderation(cq_period_mode, profile_ix);
+}
+EXPORT_SYMBOL(net_dim_get_def_tx_moderation);
+
+static int net_dim_step(struct dim *dim)
+{
+        if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
+                return DIM_TOO_TIRED;
+
+        switch (dim->tune_state) {
+        case DIM_PARKING_ON_TOP:
+        case DIM_PARKING_TIRED:
+                break;
+        case DIM_GOING_RIGHT:
+                if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
+                        return DIM_ON_EDGE;
+                dim->profile_ix++;
+                dim->steps_right++;
+                break;
+        case DIM_GOING_LEFT:
+                if (dim->profile_ix == 0)
+                        return DIM_ON_EDGE;
+                dim->profile_ix--;
+                dim->steps_left++;
+                break;
+        }
+
+        dim->tired++;
+        return DIM_STEPPED;
+}
+
+static void net_dim_exit_parking(struct dim *dim)
+{
+        dim->tune_state = dim->profile_ix ? DIM_GOING_LEFT : DIM_GOING_RIGHT;
+        net_dim_step(dim);
+}
+
+static int net_dim_stats_compare(struct dim_stats *curr,
+                                 struct dim_stats *prev)
+{
+        if (!prev->bpms)
+                return curr->bpms ? DIM_STATS_BETTER : DIM_STATS_SAME;
+
+        if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
+                return (curr->bpms > prev->bpms) ? DIM_STATS_BETTER :
+                                                   DIM_STATS_WORSE;
+
+        if (!prev->ppms)
+                return curr->ppms ? DIM_STATS_BETTER :
+                                    DIM_STATS_SAME;
+
+        if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
+                return (curr->ppms > prev->ppms) ? DIM_STATS_BETTER :
+                                                   DIM_STATS_WORSE;
+
+        if (!prev->epms)
+                return DIM_STATS_SAME;
+
+        if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
+                return (curr->epms < prev->epms) ? DIM_STATS_BETTER :
+                                                   DIM_STATS_WORSE;
+
+        return DIM_STATS_SAME;
+}
+
+static bool net_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
+{
+        int prev_state = dim->tune_state;
+        int prev_ix = dim->profile_ix;
+        int stats_res;
+        int step_res;
+
+        switch (dim->tune_state) {
+        case DIM_PARKING_ON_TOP:
+                stats_res = net_dim_stats_compare(curr_stats,
+                                                  &dim->prev_stats);
+                if (stats_res != DIM_STATS_SAME)
+                        net_dim_exit_parking(dim);
+                break;
+
+        case DIM_PARKING_TIRED:
+                dim->tired--;
+                if (!dim->tired)
+                        net_dim_exit_parking(dim);
+                break;
+
+        case DIM_GOING_RIGHT:
+        case DIM_GOING_LEFT:
+                stats_res = net_dim_stats_compare(curr_stats,
+                                                  &dim->prev_stats);
+                if (stats_res != DIM_STATS_BETTER)
+                        dim_turn(dim);
+
+                if (dim_on_top(dim)) {
+                        dim_park_on_top(dim);
+                        break;
+                }
+
+                step_res = net_dim_step(dim);
+                switch (step_res) {
+                case DIM_ON_EDGE:
+                        dim_park_on_top(dim);
+                        break;
+                case DIM_TOO_TIRED:
+                        dim_park_tired(dim);
+                        break;
+                }
+
+                break;
+        }
+
+        if (prev_state != DIM_PARKING_ON_TOP ||
+            dim->tune_state != DIM_PARKING_ON_TOP)
+                dim->prev_stats = *curr_stats;
+
+        return dim->profile_ix != prev_ix;
+}
+
+void net_dim(struct dim *dim, struct dim_sample end_sample)
+{
+        struct dim_stats curr_stats;
+        u16 nevents;
+
+        switch (dim->state) {
+        case DIM_MEASURE_IN_PROGRESS:
+                nevents = BIT_GAP(BITS_PER_TYPE(u16),
+                                  end_sample.event_ctr,
+                                  dim->start_sample.event_ctr);
+                if (nevents < DIM_NEVENTS)
+                        break;
+                dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats);
+                if (net_dim_decision(&curr_stats, dim)) {
+                        dim->state = DIM_APPLY_NEW_PROFILE;
+                        schedule_work(&dim->work);
+                        break;
+                }
+                /* fall through */
+        case DIM_START_MEASURE:
+                dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr,
+                                  end_sample.byte_ctr, &dim->start_sample);
+                dim->state = DIM_MEASURE_IN_PROGRESS;
+                break;
+        case DIM_APPLY_NEW_PROFILE:
+                break;
+        }
+}
+EXPORT_SYMBOL(net_dim);