| author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-13 16:35:58 -0800 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-13 16:35:58 -0800 |
| commit | 880b9df1bf157dc28a2e65beea6183d095e0ccb0 (patch) | |
| tree | 4b02c486ad69782a0472e2253e33c322a7b960db /lib/test_xarray.c | |
| parent | 65e08c5e86311143f45c3e4389561af3107fc8f6 (diff) | |
| parent | 48483614de97c4f5219abeda630e62b2bebdce62 (diff) | |
Merge tag 'xarray-4.20-rc7' of git://git.infradead.org/users/willy/linux-dax
Pull XArray fixes from Matthew Wilcox:
 "Two bugfixes, each with test-suite updates, two improvements to the
  test-suite without associated bugs, and one patch adding a missing
  API"
* tag 'xarray-4.20-rc7' of git://git.infradead.org/users/willy/linux-dax:
  XArray: Fix xa_alloc when id exceeds max
  XArray tests: Check iterating over multiorder entries
  XArray tests: Handle larger indices more elegantly
  XArray: Add xa_cmpxchg_irq and xa_cmpxchg_bh
  radix tree: Don't return retry entries from lookup
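
Background on the "missing API" item above (an editorial note, not part of the pull): xa_cmpxchg_bh() and xa_cmpxchg_irq() behave like xa_cmpxchg(), but take the xa_lock with bottom halves or interrupts disabled, matching the existing xa_store_bh()/xa_store_irq() variants. A minimal sketch of a caller follows; the xarray name and the helper are hypothetical:

```c
#include <linux/xarray.h>

/* Hypothetical cache whose other users take the lock as xa_lock_irq(). */
static DEFINE_XARRAY(my_cache);

/*
 * Insert @item at @tag only if the slot is still empty, using the _irq
 * lock class so this path can coexist with interrupt-context users.
 */
static int my_cache_insert(unsigned long tag, void *item)
{
	void *old = xa_cmpxchg_irq(&my_cache, tag, NULL, item, GFP_KERNEL);

	if (xa_is_err(old))
		return xa_err(old);	/* e.g. -ENOMEM */
	return old ? -EBUSY : 0;	/* another thread claimed the slot first */
}
```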
Diffstat (limited to 'lib/test_xarray.c')
| -rw-r--r-- | lib/test_xarray.c | 155 | 
1 file changed, 112 insertions(+), 43 deletions(-)
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 0598e86af8fc..4676c0a1eeca 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -28,23 +28,28 @@ void xa_dump(const struct xarray *xa) { }
 } while (0)
 #endif
 
+static void *xa_mk_index(unsigned long index)
+{
+	return xa_mk_value(index & LONG_MAX);
+}
+
 static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
-	return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp);
+	return xa_store(xa, index, xa_mk_index(index), gfp);
 }
 
 static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
 	u32 id = 0;
 
-	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_value(index & LONG_MAX),
+	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index),
 				gfp) != 0);
 	XA_BUG_ON(xa, id != index);
 }
 
 static void xa_erase_index(struct xarray *xa, unsigned long index)
 {
-	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX));
+	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
 	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
 }
 
@@ -118,7 +123,7 @@ static noinline void check_xas_retry(struct xarray *xa)
 
 	xas_set(&xas, 0);
 	xas_for_each(&xas, entry, ULONG_MAX) {
-		xas_store(&xas, xa_mk_value(xas.xa_index));
+		xas_store(&xas, xa_mk_index(xas.xa_index));
 	}
 	xas_unlock(&xas);
 
@@ -196,7 +201,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
 		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
 		xa_set_mark(xa, index + 2, XA_MARK_1);
 		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
-		xa_store_order(xa, index, order, xa_mk_value(index),
+		xa_store_order(xa, index, order, xa_mk_index(index),
 				GFP_KERNEL);
 		for (i = base; i < next; i++) {
 			XA_STATE(xas, xa, i);
@@ -405,7 +410,7 @@ static noinline void check_xas_erase(struct xarray *xa)
 			xas_set(&xas, j);
 			do {
 				xas_lock(&xas);
-				xas_store(&xas, xa_mk_value(j));
+				xas_store(&xas, xa_mk_index(j));
 				xas_unlock(&xas);
 			} while (xas_nomem(&xas, GFP_KERNEL));
 		}
@@ -423,7 +428,7 @@ static noinline void check_xas_erase(struct xarray *xa)
 		xas_set(&xas, 0);
 		j = i;
 		xas_for_each(&xas, entry, ULONG_MAX) {
-			XA_BUG_ON(xa, entry != xa_mk_value(j));
+			XA_BUG_ON(xa, entry != xa_mk_index(j));
 			xas_store(&xas, NULL);
 			j++;
 		}
@@ -440,17 +445,17 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
 	unsigned long min = index & ~((1UL << order) - 1);
 	unsigned long max = min + (1UL << order);
 
-	xa_store_order(xa, index, order, xa_mk_value(index), GFP_KERNEL);
-	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(index));
-	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(index));
+	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
+	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
 	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
 	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 
 	xas_lock(&xas);
-	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index));
+	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
 	xas_unlock(&xas);
-	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min));
-	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min));
+	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
+	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
 	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
 	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 
@@ -471,6 +476,32 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
 	xas_unlock(&xas);
 	XA_BUG_ON(xa, !xa_empty(xa));
 }
+
+static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
+		unsigned int order)
+{
+	XA_STATE(xas, xa, 0);
+	void *entry;
+	int n = 0;
+
+	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+
+	xas_lock(&xas);
+	xas_for_each(&xas, entry, ULONG_MAX) {
+		XA_BUG_ON(xa, entry != xa_mk_index(index));
+		n++;
+	}
+	XA_BUG_ON(xa, n != 1);
+	xas_set(&xas, index + 1);
+	xas_for_each(&xas, entry, ULONG_MAX) {
+		XA_BUG_ON(xa, entry != xa_mk_index(index));
+		n++;
+	}
+	XA_BUG_ON(xa, n != 2);
+	xas_unlock(&xas);
+
+	xa_destroy(xa);
+}
 #endif
 
 static noinline void check_multi_store(struct xarray *xa)
@@ -523,15 +554,15 @@ static noinline void check_multi_store(struct xarray *xa)
 
 	for (i = 0; i < max_order; i++) {
 		for (j = 0; j < max_order; j++) {
-			xa_store_order(xa, 0, i, xa_mk_value(i), GFP_KERNEL);
-			xa_store_order(xa, 0, j, xa_mk_value(j), GFP_KERNEL);
+			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
+			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);
 
 			for (k = 0; k < max_order; k++) {
 				void *entry = xa_load(xa, (1UL << k) - 1);
 				if ((i < k) && (j < k))
 					XA_BUG_ON(xa, entry != NULL);
 				else
-					XA_BUG_ON(xa, entry != xa_mk_value(j));
+					XA_BUG_ON(xa, entry != xa_mk_index(j));
 			}
 
 			xa_erase(xa, 0);
@@ -545,6 +576,11 @@ static noinline void check_multi_store(struct xarray *xa)
 		check_multi_store_1(xa, (1UL << i) + 1, i);
 	}
 	check_multi_store_2(xa, 4095, 9);
+
+	for (i = 1; i < 20; i++) {
+		check_multi_store_3(xa, 0, i);
+		check_multi_store_3(xa, 1UL << i, i);
+	}
 #endif
 }
 
@@ -587,16 +623,25 @@ static noinline void check_xa_alloc(void)
 	xa_destroy(&xa0);
 
 	id = 0xfffffffeU;
-	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
 				GFP_KERNEL) != 0);
 	XA_BUG_ON(&xa0, id != 0xfffffffeU);
-	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
 				GFP_KERNEL) != 0);
 	XA_BUG_ON(&xa0, id != 0xffffffffU);
-	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
 				GFP_KERNEL) != -ENOSPC);
 	XA_BUG_ON(&xa0, id != 0xffffffffU);
 	xa_destroy(&xa0);
+
+	id = 10;
+	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
+				GFP_KERNEL) != -ENOSPC);
+	XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0);
+	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
+				GFP_KERNEL) != -ENOSPC);
+	xa_erase_index(&xa0, 3);
+	XA_BUG_ON(&xa0, !xa_empty(&xa0));
 }
 
 static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
@@ -610,11 +655,11 @@ retry:
 	xas_lock(&xas);
 	xas_for_each_conflict(&xas, entry) {
 		XA_BUG_ON(xa, !xa_is_value(entry));
-		XA_BUG_ON(xa, entry < xa_mk_value(start));
-		XA_BUG_ON(xa, entry > xa_mk_value(start + (1UL << order) - 1));
+		XA_BUG_ON(xa, entry < xa_mk_index(start));
+		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
 		count++;
 	}
-	xas_store(&xas, xa_mk_value(start));
+	xas_store(&xas, xa_mk_index(start));
 	xas_unlock(&xas);
 	if (xas_nomem(&xas, GFP_KERNEL)) {
 		count = 0;
@@ -622,9 +667,9 @@ retry:
 	}
 	XA_BUG_ON(xa, xas_error(&xas));
 	XA_BUG_ON(xa, count != present);
-	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_value(start));
+	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
 	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
-			xa_mk_value(start));
+			xa_mk_index(start));
 	xa_erase_index(xa, start);
 }
 
@@ -703,7 +748,7 @@ static noinline void check_multi_find_2(struct xarray *xa)
 		for (j = 0; j < index; j++) {
 			XA_STATE(xas, xa, j + index);
 			xa_store_index(xa, index - 1, GFP_KERNEL);
-			xa_store_order(xa, index, i, xa_mk_value(index),
+			xa_store_order(xa, index, i, xa_mk_index(index),
 					GFP_KERNEL);
 			rcu_read_lock();
 			xas_for_each(&xas, entry, ULONG_MAX) {
@@ -778,7 +823,7 @@ static noinline void check_find_2(struct xarray *xa)
 		j = 0;
 		index = 0;
 		xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
-			XA_BUG_ON(xa, xa_mk_value(index) != entry);
+			XA_BUG_ON(xa, xa_mk_index(index) != entry);
 			XA_BUG_ON(xa, index != j++);
 		}
 	}
@@ -786,10 +831,34 @@ static noinline void check_find_2(struct xarray *xa)
 	xa_destroy(xa);
 }
 
+static noinline void check_find_3(struct xarray *xa)
+{
+	XA_STATE(xas, xa, 0);
+	unsigned long i, j, k;
+	void *entry;
+
+	for (i = 0; i < 100; i++) {
+		for (j = 0; j < 100; j++) {
+			for (k = 0; k < 100; k++) {
+				xas_set(&xas, j);
+				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
+					;
+				if (j > k)
+					XA_BUG_ON(xa,
+						xas.xa_node != XAS_RESTART);
+			}
+		}
+		xa_store_index(xa, i, GFP_KERNEL);
+		xa_set_mark(xa, i, XA_MARK_0);
+	}
+	xa_destroy(xa);
+}
+
 static noinline void check_find(struct xarray *xa)
 {
 	check_find_1(xa);
 	check_find_2(xa);
+	check_find_3(xa);
 	check_multi_find(xa);
 	check_multi_find_2(xa);
 }
@@ -829,11 +898,11 @@ static noinline void check_find_entry(struct xarray *xa)
 			for (index = 0; index < (1UL << (order + 5));
 			     index += (1UL << order)) {
 				xa_store_order(xa, index, order,
-						xa_mk_value(index), GFP_KERNEL);
+						xa_mk_index(index), GFP_KERNEL);
 				XA_BUG_ON(xa, xa_load(xa, index) !=
-						xa_mk_value(index));
+						xa_mk_index(index));
 				XA_BUG_ON(xa, xa_find_entry(xa,
-						xa_mk_value(index)) != index);
+						xa_mk_index(index)) != index);
 			}
 			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
 			xa_destroy(xa);
@@ -844,7 +913,7 @@ static noinline void check_find_entry(struct xarray *xa)
 	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
 	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
 	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
-	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_value(LONG_MAX)) != -1);
+	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
 	xa_erase_index(xa, ULONG_MAX);
 	XA_BUG_ON(xa, !xa_empty(xa));
 }
@@ -864,7 +933,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx)
 			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
 		XA_BUG_ON(xa, xas.xa_index != i);
 		if (i == 0 || i == idx)
-			XA_BUG_ON(xa, entry != xa_mk_value(i));
+			XA_BUG_ON(xa, entry != xa_mk_index(i));
 		else
 			XA_BUG_ON(xa, entry != NULL);
 	}
@@ -878,7 +947,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx)
 			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
 		XA_BUG_ON(xa, xas.xa_index != i);
 		if (i == 0 || i == idx)
-			XA_BUG_ON(xa, entry != xa_mk_value(i));
+			XA_BUG_ON(xa, entry != xa_mk_index(i));
 		else
 			XA_BUG_ON(xa, entry != NULL);
 	} while (i > 0);
@@ -909,7 +978,7 @@ static noinline void check_move(struct xarray *xa)
 	do {
 		void *entry = xas_prev(&xas);
 		i--;
-		XA_BUG_ON(xa, entry != xa_mk_value(i));
+		XA_BUG_ON(xa, entry != xa_mk_index(i));
 		XA_BUG_ON(xa, i != xas.xa_index);
 	} while (i != 0);
 
@@ -918,7 +987,7 @@ static noinline void check_move(struct xarray *xa)
 
 	do {
 		void *entry = xas_next(&xas);
-		XA_BUG_ON(xa, entry != xa_mk_value(i));
+		XA_BUG_ON(xa, entry != xa_mk_index(i));
 		XA_BUG_ON(xa, i != xas.xa_index);
 		i++;
 	} while (i < (1 << 16));
@@ -934,7 +1003,7 @@ static noinline void check_move(struct xarray *xa)
 		void *entry = xas_prev(&xas);
 		i--;
 		if ((i < (1 << 8)) || (i >= (1 << 15)))
-			XA_BUG_ON(xa, entry != xa_mk_value(i));
+			XA_BUG_ON(xa, entry != xa_mk_index(i));
 		else
 			XA_BUG_ON(xa, entry != NULL);
 		XA_BUG_ON(xa, i != xas.xa_index);
@@ -946,7 +1015,7 @@ static noinline void check_move(struct xarray *xa)
 	do {
 		void *entry = xas_next(&xas);
 		if ((i < (1 << 8)) || (i >= (1 << 15)))
-			XA_BUG_ON(xa, entry != xa_mk_value(i));
+			XA_BUG_ON(xa, entry != xa_mk_index(i));
 		else
 			XA_BUG_ON(xa, entry != NULL);
 		XA_BUG_ON(xa, i != xas.xa_index);
@@ -976,7 +1045,7 @@ static noinline void xa_store_many_order(struct xarray *xa,
 		if (xas_error(&xas))
 			goto unlock;
 		for (i = 0; i < (1U << order); i++) {
-			XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(index + i)));
+			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
 			xas_next(&xas);
 		}
 unlock:
@@ -1031,9 +1100,9 @@ static noinline void check_create_range_4(struct xarray *xa,
 		if (xas_error(&xas))
 			goto unlock;
 		for (i = 0; i < (1UL << order); i++) {
-			void *old = xas_store(&xas, xa_mk_value(base + i));
+			void *old = xas_store(&xas, xa_mk_index(base + i));
 			if (xas.xa_index == index)
-				XA_BUG_ON(xa, old != xa_mk_value(base + i));
+				XA_BUG_ON(xa, old != xa_mk_index(base + i));
 			else
 				XA_BUG_ON(xa, old != NULL);
 			xas_next(&xas);
@@ -1085,10 +1154,10 @@ static noinline void __check_store_range(struct xarray *xa, unsigned long first,
 		unsigned long last)
 {
 #ifdef CONFIG_XARRAY_MULTI
-	xa_store_range(xa, first, last, xa_mk_value(first), GFP_KERNEL);
+	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);
 
-	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_value(first));
-	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_value(first));
+	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
+	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
 	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
 	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);
 
@@ -1195,7 +1264,7 @@ static noinline void check_account(struct xarray *xa)
 		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
 		rcu_read_unlock();
 
-		xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order),
+		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
 				GFP_KERNEL);
 		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);
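
Most of the churn in this diff is the conversion from open-coded xa_mk_value(index & LONG_MAX) to the new xa_mk_index() helper defined at the top of the file. A value entry can only encode an integer up to LONG_MAX (the low bit tags the pointer as a value), so tests that exercise indices near ULONG_MAX have to fold the index into that range before storing it. A standalone illustration of that invariant (an editorial sketch, not part of the patch):

```c
#include <linux/xarray.h>

/*
 * xa_mk_value() encodes numbers only up to LONG_MAX, so the tests fold
 * large indices with "& LONG_MAX", which is all xa_mk_index() does.
 */
static void xa_mk_index_demo(void)
{
	unsigned long index = ULONG_MAX;	/* too large for a raw value entry */
	void *entry = xa_mk_value(index & LONG_MAX);

	BUG_ON(!xa_is_value(entry));
	BUG_ON(xa_to_value(entry) != (index & LONG_MAX));
}
```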
