Diffstat (limited to 'tools/testing/selftests/resctrl/mba_test.c')
-rw-r--r-- | tools/testing/selftests/resctrl/mba_test.c | 116
1 file changed, 85 insertions, 31 deletions
diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
index d3bf4368341e..c7e9adc0368f 100644
--- a/tools/testing/selftests/resctrl/mba_test.c
+++ b/tools/testing/selftests/resctrl/mba_test.c
@@ -17,14 +17,30 @@
 #define ALLOCATION_MIN		10
 #define ALLOCATION_STEP		10
 
+static int mba_init(const struct resctrl_val_param *param, int domain_id)
+{
+	int ret;
+
+	ret = initialize_read_mem_bw_imc();
+	if (ret)
+		return ret;
+
+	initialize_mem_bw_resctrl(param, domain_id);
+
+	return 0;
+}
+
 /*
  * Change schemata percentage from 100 to 10%. Write schemata to specified
  * con_mon grp, mon_grp in resctrl FS.
  * For each allocation, run 5 times in order to get average values.
  */
-static int mba_setup(struct resctrl_val_param *p)
+static int mba_setup(const struct resctrl_test *test,
+		     const struct user_params *uparams,
+		     struct resctrl_val_param *p)
 {
-	static int runs_per_allocation, allocation = 100;
+	static unsigned int allocation = ALLOCATION_MIN;
+	static int runs_per_allocation;
 	char allocation_str[64];
 	int ret;
 
@@ -35,47 +51,56 @@ static int mba_setup(struct resctrl_val_param *p)
 	if (runs_per_allocation++ != 0)
 		return 0;
 
-	if (allocation < ALLOCATION_MIN || allocation > ALLOCATION_MAX)
+	if (allocation > ALLOCATION_MAX)
 		return END_OF_TESTS;
 
 	sprintf(allocation_str, "%d", allocation);
 
-	ret = write_schemata(p->ctrlgrp, allocation_str, p->cpu_no,
-			     p->resctrl_val);
+	ret = write_schemata(p->ctrlgrp, allocation_str, uparams->cpu, test->resource);
 	if (ret < 0)
 		return ret;
 
-	allocation -= ALLOCATION_STEP;
+	allocation += ALLOCATION_STEP;
 
 	return 0;
 }
 
+static int mba_measure(const struct user_params *uparams,
+		       struct resctrl_val_param *param, pid_t bm_pid)
+{
+	return measure_read_mem_bw(uparams, param, bm_pid);
+}
+
 static bool show_mba_info(unsigned long *bw_imc, unsigned long *bw_resc)
 {
-	int allocation, runs;
+	unsigned int allocation;
 	bool ret = false;
+	int runs;
 
 	ksft_print_msg("Results are displayed in (MB)\n");
 	/* Memory bandwidth from 100% down to 10% */
 	for (allocation = 0; allocation < ALLOCATION_MAX / ALLOCATION_STEP;
 	     allocation++) {
-		unsigned long avg_bw_imc, avg_bw_resc;
 		unsigned long sum_bw_imc = 0, sum_bw_resc = 0;
+		long avg_bw_imc, avg_bw_resc;
 		int avg_diff_per;
 		float avg_diff;
 
-		/*
-		 * The first run is discarded due to inaccurate value from
-		 * phase transition.
-		 */
-		for (runs = NUM_OF_RUNS * allocation + 1;
+		for (runs = NUM_OF_RUNS * allocation;
 		     runs < NUM_OF_RUNS * allocation + NUM_OF_RUNS ; runs++) {
 			sum_bw_imc += bw_imc[runs];
 			sum_bw_resc += bw_resc[runs];
 		}
 
-		avg_bw_imc = sum_bw_imc / (NUM_OF_RUNS - 1);
-		avg_bw_resc = sum_bw_resc / (NUM_OF_RUNS - 1);
+		avg_bw_imc = sum_bw_imc / NUM_OF_RUNS;
+		avg_bw_resc = sum_bw_resc / NUM_OF_RUNS;
+		if (avg_bw_imc < THROTTLE_THRESHOLD || avg_bw_resc < THROTTLE_THRESHOLD) {
+			ksft_print_msg("Bandwidth below threshold (%d MiB). Dropping results from MBA schemata %u.\n",
+				       THROTTLE_THRESHOLD,
+				       ALLOCATION_MIN + ALLOCATION_STEP * allocation);
+			continue;
+		}
+
 		avg_diff = (float)labs(avg_bw_resc - avg_bw_imc) / avg_bw_imc;
 		avg_diff_per = (int)(avg_diff * 100);
 
@@ -83,7 +108,7 @@ static bool show_mba_info(unsigned long *bw_imc, unsigned long *bw_resc)
 			       avg_diff_per > MAX_DIFF_PERCENT ?
 			       "Fail:" : "Pass:",
 			       MAX_DIFF_PERCENT,
-			       ALLOCATION_MAX - ALLOCATION_STEP * allocation);
+			       ALLOCATION_MIN + ALLOCATION_STEP * allocation);
 
 		ksft_print_msg("avg_diff_per: %d%%\n", avg_diff_per);
 		ksft_print_msg("avg_bw_imc: %lu\n", avg_bw_imc);
@@ -102,16 +127,17 @@ static bool show_mba_info(unsigned long *bw_imc, unsigned long *bw_resc)
 
 static int check_results(void)
 {
+	unsigned long bw_resc[NUM_OF_RUNS * ALLOCATION_MAX / ALLOCATION_STEP];
+	unsigned long bw_imc[NUM_OF_RUNS * ALLOCATION_MAX / ALLOCATION_STEP];
 	char *token_array[8], output[] = RESULT_FILE_NAME, temp[512];
-	unsigned long bw_imc[1024], bw_resc[1024];
 	int runs;
 	FILE *fp;
 
 	fp = fopen(output, "r");
 	if (!fp) {
-		perror(output);
+		ksft_perror(output);
 
-		return errno;
+		return -1;
 	}
 
 	runs = 0;
@@ -136,34 +162,62 @@ static int check_results(void)
 	return show_mba_info(bw_imc, bw_resc);
 }
 
-void mba_test_cleanup(void)
+static void mba_test_cleanup(void)
 {
 	remove(RESULT_FILE_NAME);
 }
 
-int mba_schemata_change(int cpu_no, const char * const *benchmark_cmd)
+static int mba_run_test(const struct resctrl_test *test, const struct user_params *uparams)
 {
 	struct resctrl_val_param param = {
-		.resctrl_val	= MBA_STR,
 		.ctrlgrp	= "c1",
-		.mongrp		= "m1",
-		.cpu_no		= cpu_no,
 		.filename	= RESULT_FILE_NAME,
-		.bw_report	= "reads",
-		.setup		= mba_setup
+		.init		= mba_init,
+		.setup		= mba_setup,
+		.measure	= mba_measure,
 	};
+	struct fill_buf_param fill_buf = {};
 	int ret;
 
 	remove(RESULT_FILE_NAME);
 
-	ret = resctrl_val(benchmark_cmd, &param);
+	if (uparams->fill_buf) {
+		fill_buf.buf_size = uparams->fill_buf->buf_size;
+		fill_buf.memflush = uparams->fill_buf->memflush;
+		param.fill_buf = &fill_buf;
+	} else if (!uparams->benchmark_cmd[0]) {
+		ssize_t buf_size;
+
+		buf_size = get_fill_buf_size(uparams->cpu, "L3");
+		if (buf_size < 0)
+			return buf_size;
+		fill_buf.buf_size = buf_size;
+		fill_buf.memflush = true;
+		param.fill_buf = &fill_buf;
+	}
+
+	ret = resctrl_val(test, uparams, &param);
 	if (ret)
-		goto out;
+		return ret;
 
 	ret = check_results();
-
-out:
-	mba_test_cleanup();
+	if (ret && (get_vendor() == ARCH_INTEL) && !snc_kernel_support())
+		ksft_print_msg("Kernel doesn't support Sub-NUMA Clustering but it is enabled on the system.\n");
 
 	return ret;
 }
+
+static bool mba_feature_check(const struct resctrl_test *test)
+{
+	return test_resource_feature_check(test) &&
+	       resctrl_mon_feature_exists("L3_MON", "mbm_local_bytes");
+}
+
+struct resctrl_test mba_test = {
+	.name = "MBA",
+	.resource = "MB",
+	.vendor_specific = ARCH_INTEL,
+	.feature_check = mba_feature_check,
+	.run_test = mba_run_test,
+	.cleanup = mba_test_cleanup,
+};