Diffstat (limited to 'tools/testing/selftests/resctrl/fill_buf.c')
-rw-r--r--	tools/testing/selftests/resctrl/fill_buf.c	147
1 file changed, 56 insertions, 91 deletions
diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
index 0d425f26583a..19a01a52dc1a 100644
--- a/tools/testing/selftests/resctrl/fill_buf.c
+++ b/tools/testing/selftests/resctrl/fill_buf.c
@@ -38,7 +38,7 @@ static void cl_flush(void *p)
 #endif
 }
 
-static void mem_flush(unsigned char *buf, size_t buf_size)
+void mem_flush(unsigned char *buf, size_t buf_size)
 {
 	unsigned char *cp = buf;
 	size_t i = 0;
@@ -51,60 +51,46 @@ static void mem_flush(unsigned char *buf, size_t buf_size)
 	sb();
 }
 
-static void *malloc_and_init_memory(size_t buf_size)
-{
-	void *p = NULL;
-	uint64_t *p64;
-	size_t s64;
-	int ret;
-
-	ret = posix_memalign(&p, PAGE_SIZE, buf_size);
-	if (ret < 0)
-		return NULL;
-
-	p64 = (uint64_t *)p;
-	s64 = buf_size / sizeof(uint64_t);
-
-	while (s64 > 0) {
-		*p64 = (uint64_t)rand();
-		p64 += (CL_SIZE / sizeof(uint64_t));
-		s64 -= (CL_SIZE / sizeof(uint64_t));
-	}
-
-	return p;
-}
+/*
+ * Buffer index step advance to workaround HW prefetching interfering with
+ * the measurements.
+ *
+ * Must be a prime to step through all indexes of the buffer.
+ *
+ * Some primes work better than others on some architectures (from MBA/MBM
+ * result stability point of view).
+ */
+#define FILL_IDX_MULT	23
 
 static int fill_one_span_read(unsigned char *buf, size_t buf_size)
 {
-	unsigned char *end_ptr = buf + buf_size;
-	unsigned char sum, *p;
-
-	sum = 0;
-	p = buf;
-	while (p < end_ptr) {
-		sum += *p;
-		p += (CL_SIZE / 2);
+	unsigned int size = buf_size / (CL_SIZE / 2);
+	unsigned int i, idx = 0;
+	unsigned char sum = 0;
+
+	/*
+	 * Read the buffer in an order that is unexpected by HW prefetching
+	 * optimizations to prevent them interfering with the caching pattern.
+	 *
+	 * The read order is (in terms of halves of cachelines):
+	 *	i * FILL_IDX_MULT % size
+	 * The formula is open-coded below to avoiding modulo inside the loop
+	 * as it improves MBA/MBM result stability on some architectures.
+	 */
+	for (i = 0; i < size; i++) {
+		sum += buf[idx * (CL_SIZE / 2)];
+
+		idx += FILL_IDX_MULT;
+		while (idx >= size)
+			idx -= size;
 	}
 
 	return sum;
 }
 
-static void fill_one_span_write(unsigned char *buf, size_t buf_size)
-{
-	unsigned char *end_ptr = buf + buf_size;
-	unsigned char *p;
-
-	p = buf;
-	while (p < end_ptr) {
-		*p = '1';
-		p += (CL_SIZE / 2);
-	}
-}
-
-static int fill_cache_read(unsigned char *buf, size_t buf_size, bool once)
+void fill_cache_read(unsigned char *buf, size_t buf_size, bool once)
 {
 	int ret = 0;
-	FILE *fp;
 
 	while (1) {
 		ret = fill_one_span_read(buf, buf_size);
@@ -113,67 +99,46 @@ static int fill_cache_read(unsigned char *buf, size_t buf_size, bool once)
 	}
 
 	/* Consume read result so that reading memory is not optimized out. */
-	fp = fopen("/dev/null", "w");
-	if (!fp) {
-		perror("Unable to write to /dev/null");
-		return -1;
-	}
-	fprintf(fp, "Sum: %d ", ret);
-	fclose(fp);
-
-	return 0;
+	*value_sink = ret;
 }
 
-static int fill_cache_write(unsigned char *buf, size_t buf_size, bool once)
+unsigned char *alloc_buffer(size_t buf_size, bool memflush)
 {
-	while (1) {
-		fill_one_span_write(buf, buf_size);
-		if (once)
-			break;
-	}
+	void *buf = NULL;
+	uint64_t *p64;
+	ssize_t s64;
+	int ret;
 
-	return 0;
-}
+	ret = posix_memalign(&buf, PAGE_SIZE, buf_size);
+	if (ret < 0)
+		return NULL;
 
-static int fill_cache(size_t buf_size, int memflush, int op, bool once)
-{
-	unsigned char *buf;
-	int ret;
+	/* Initialize the buffer */
+	p64 = buf;
+	s64 = buf_size / sizeof(uint64_t);
 
-	buf = malloc_and_init_memory(buf_size);
-	if (!buf)
-		return -1;
+	while (s64 > 0) {
+		*p64 = (uint64_t)rand();
+		p64 += (CL_SIZE / sizeof(uint64_t));
+		s64 -= (CL_SIZE / sizeof(uint64_t));
+	}
 
 	/* Flush the memory before using to avoid "cache hot pages" effect */
 	if (memflush)
 		mem_flush(buf, buf_size);
 
-	if (op == 0)
-		ret = fill_cache_read(buf, buf_size, once);
-	else
-		ret = fill_cache_write(buf, buf_size, once);
-
-	free(buf);
-
-	if (ret) {
-		printf("\n Error in fill cache read/write...\n");
-		return -1;
-	}
-
-
-	return 0;
+	return buf;
 }
 
-int run_fill_buf(size_t span, int memflush, int op, bool once)
+ssize_t get_fill_buf_size(int cpu_no, const char *cache_type)
 {
-	size_t cache_size = span;
+	unsigned long cache_total_size = 0;
 	int ret;
 
-	ret = fill_cache(cache_size, memflush, op, once);
-	if (ret) {
-		printf("\n Error in fill cache\n");
-		return -1;
-	}
+	ret = get_cache_size(cpu_no, cache_type, &cache_total_size);
+	if (ret)
+		return ret;
 
-	return 0;
+	return cache_total_size * 2 > MINIMUM_SPAN ?
+		cache_total_size * 2 : MINIMUM_SPAN;
 }
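
Note on the new read pattern (editor's sketch, not part of the patch): fill_one_span_read() now walks the buffer in steps of FILL_IDX_MULT half-cachelines and wraps around at the number of half-cachelines. As long as the step shares no common factor with that count, the walk touches every index exactly once per pass, just in an order a hardware prefetcher is unlikely to predict. The small standalone program below illustrates that property with a made-up length of 100 and the same open-coded wrap as the patch; the names STEP and visited[] are illustrative only.

/*
 * Editor's illustration, not code from the patch: stepping an index by a
 * constant coprime with the length (here the prime 23, like FILL_IDX_MULT
 * above) visits every index exactly once per pass.
 */
#include <stdio.h>
#include <string.h>

#define STEP	23

int main(void)
{
	unsigned int size = 100;	/* e.g. number of half-cachelines */
	unsigned char visited[100];
	unsigned int i, idx = 0;

	memset(visited, 0, sizeof(visited));

	for (i = 0; i < size; i++) {
		visited[idx]++;

		/* open-coded "idx = (idx + STEP) % size", as in the patch */
		idx += STEP;
		while (idx >= size)
			idx -= size;
	}

	for (i = 0; i < size; i++) {
		if (visited[i] != 1) {
			printf("index %u visited %u times\n", i, visited[i]);
			return 1;
		}
	}

	printf("all %u indexes visited exactly once\n", size);
	return 0;
}

The wrap uses repeated subtraction rather than the % operator; since idx grows by only STEP per iteration, a single subtraction normally suffices, which appears to be the cost the patch comment is referring to when it mentions avoiding modulo inside the loop.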