Diffstat (limited to 'lib/idr.c')
-rw-r--r-- lib/idr.c | 123
1 file changed, 60 insertions(+), 63 deletions(-)
diff --git a/lib/idr.c b/lib/idr.c
index 73f4d53c02f3..cca4b9302a71 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -106,8 +106,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
if (layer_idr)
return get_from_free_list(layer_idr);
- /* try to allocate directly from kmem_cache */
- new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+ /*
+ * Try to allocate directly from kmem_cache. We want to try this
+ * before the preload buffer; otherwise, non-preloading idr_alloc()
+ * users will end up taking advantage of the preloading ones. As the
+ * following is allowed to fail for preloaded cases, suppress the
+ * warning this time.
+ */
+ new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
if (new)
return new;
@@ -115,18 +121,24 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
* Try to fetch one from the per-cpu preload buffer if in process
* context. See idr_preload() for details.
*/
- if (in_interrupt())
- return NULL;
-
- preempt_disable();
- new = __this_cpu_read(idr_preload_head);
- if (new) {
- __this_cpu_write(idr_preload_head, new->ary[0]);
- __this_cpu_dec(idr_preload_cnt);
- new->ary[0] = NULL;
+ if (!in_interrupt()) {
+ preempt_disable();
+ new = __this_cpu_read(idr_preload_head);
+ if (new) {
+ __this_cpu_write(idr_preload_head, new->ary[0]);
+ __this_cpu_dec(idr_preload_cnt);
+ new->ary[0] = NULL;
+ }
+ preempt_enable();
+ if (new)
+ return new;
}
- preempt_enable();
- return new;
+
+ /*
+ * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so
+ * that the memory allocation failure warning is printed as intended.
+ */
+ return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
}
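For reference, the per-cpu buffer consulted above is stocked by idr_preload(), which disables preemption so the buffer cannot migrate between CPUs. A minimal sketch of the caller pattern this fallback chain serves (my_idr, my_lock and ptr are hypothetical names):

	idr_preload(GFP_KERNEL);	/* fill the per-cpu buffer; disables preemption */
	spin_lock(&my_lock);
	/* a GFP_NOWAIT allocation may fail and fall back to the preloaded layers */
	id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&my_lock);
	idr_preload_end();		/* re-enable preemption */
	if (id < 0)
		return id;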
static void idr_layer_rcu_free(struct rcu_head *head)
@@ -184,20 +196,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
}
}
-/**
- * idr_pre_get - reserve resources for idr allocation
- * @idp: idr handle
- * @gfp_mask: memory allocation flags
- *
- * This function should be called prior to calling the idr_get_new* functions.
- * It preallocates enough memory to satisfy the worst possible allocation. The
- * caller should pass in GFP_KERNEL if possible. This of course requires that
- * no spinning locks be held.
- *
- * If the system is REALLY out of memory this function returns %0,
- * otherwise %1.
- */
-int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
while (idp->id_free_cnt < MAX_IDR_FREE) {
struct idr_layer *new;
@@ -208,13 +207,12 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
}
return 1;
}
-EXPORT_SYMBOL(idr_pre_get);
+EXPORT_SYMBOL(__idr_pre_get);
/**
* sub_alloc - try to allocate an id without growing the tree depth
* @idp: idr handle
* @starting_id: id to start search at
- * @id: pointer to the allocated handle
* @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
* @gfp_mask: allocation mask for idr_layer_alloc()
* @layer_idr: optional idr passed to idr_layer_alloc()
@@ -376,25 +374,7 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
idr_mark_full(pa, id);
}
-/**
- * idr_get_new_above - allocate new idr entry above or equal to a start id
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @starting_id: id to start search at
- * @id: pointer to the allocated handle
- *
- * This is the allocate id function. It should be called with any
- * required locks.
- *
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
- *
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
- *
- * @id returns a value in the range @starting_id ... %0x7fffffff
- */
-int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
+int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
struct idr_layer *pa[MAX_IDR_LEVEL + 1];
int rv;
@@ -407,7 +387,7 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
*id = rv;
return 0;
}
-EXPORT_SYMBOL(idr_get_new_above);
+EXPORT_SYMBOL(__idr_get_new_above);
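The double-underscore renames keep the legacy entry points available while steering new users toward idr_alloc(). For context, a sketch of the deprecated retry loop that idr_pre_get()/idr_get_new_above() callers traditionally used (assuming those names remain as wrappers for legacy code; my_idr, my_lock and ptr are hypothetical):

	int id, ret;

	do {
		if (!idr_pre_get(&my_idr, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(&my_lock);
		ret = idr_get_new_above(&my_idr, ptr, 0, &id);
		spin_unlock(&my_lock);
	} while (ret == -EAGAIN);
	if (ret)
		return ret;	/* -ENOSPC if the id space is exhausted */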
/**
* idr_preload - preload for idr_alloc()
@@ -515,6 +495,33 @@ int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
}
EXPORT_SYMBOL_GPL(idr_alloc);
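idr_alloc() reports failures as a negative errno rather than through an out parameter. A hedged usage sketch (my_idr and ptr are hypothetical):

	/* allocate the lowest free id in [1, 100) */
	id = idr_alloc(&my_idr, ptr, 1, 100, GFP_KERNEL);
	if (id < 0)
		return id;	/* -ENOMEM on allocation failure, -ENOSPC if the range is full */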
+/**
+ * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
+ * @idr: the (initialized) idr
+ * @ptr: pointer to be associated with the new id
+ * @start: the minimum id (inclusive)
+ * @end: the maximum id (exclusive, <= 0 for max)
+ * @gfp_mask: memory allocation flags
+ *
+ * Essentially the same as idr_alloc(), but prefers to allocate progressively
+ * higher ids if it can. If the "cur" counter wraps, then it will start again
+ * at the "start" end of the range and may allocate an id that has already
+ * been used.
+ */
+int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
+ gfp_t gfp_mask)
+{
+ int id;
+
+ id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
+ if (id == -ENOSPC)
+ id = idr_alloc(idr, ptr, start, end, gfp_mask);
+
+ if (likely(id >= 0))
+ idr->cur = id + 1;
+ return id;
+}
+EXPORT_SYMBOL(idr_alloc_cyclic);
+
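A usage sketch for the new helper (my_idr and ptr are hypothetical); end == 0 selects the maximum range, so ids grow monotonically until the space above "cur" is exhausted, after which allocation restarts from "start" and may hand out previously used ids:

	id = idr_alloc_cyclic(&my_idr, ptr, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;
	/* successive calls yield 1, 2, 3, ... rather than reusing the lowest free id */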
static void idr_remove_warning(int id)
{
printk(KERN_WARNING
@@ -569,8 +576,7 @@ void idr_remove(struct idr *idp, int id)
struct idr_layer *p;
struct idr_layer *to_free;
- /* see comment in idr_find_slowpath() */
- if (WARN_ON_ONCE(id < 0))
+ if (id < 0)
return;
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
@@ -667,15 +673,7 @@ void *idr_find_slowpath(struct idr *idp, int id)
int n;
struct idr_layer *p;
- /*
- * If @id is negative, idr_find() used to ignore the sign bit and
- * performed lookup with the rest of bits, which is weird and can
- * lead to very obscure bugs. We're now returning NULL for all
- * negative IDs but just in case somebody was depending on the sign
- * bit being ignored, let's trigger WARN_ON_ONCE() so that they can
- * be detected and fixed. WARN_ON_ONCE() can later be removed.
- */
- if (WARN_ON_ONCE(id < 0))
+ if (id < 0)
return NULL;
p = rcu_dereference_raw(idp->top);
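With the WARN_ON_ONCE() dropped, a negative id is now just an ordinary lookup miss (my_idr is hypothetical):

	/* returns NULL quietly instead of triggering a one-time warning */
	if (!idr_find(&my_idr, -1))
		pr_debug("negative ids never match\n");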
@@ -824,8 +822,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
int n;
struct idr_layer *p, *old_p;
- /* see comment in idr_find_slowpath() */
- if (WARN_ON_ONCE(id < 0))
+ if (id < 0)
return ERR_PTR(-EINVAL);
p = idp->top;
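idr_replace() still reports errors through ERR_PTR(); only the warning on negative ids goes away. A usage sketch (my_idr, new_ptr and id are hypothetical):

	old = idr_replace(&my_idr, new_ptr, id);
	if (IS_ERR(old))
		return PTR_ERR(old);	/* -EINVAL for a bad id, -ENOENT if it isn't allocated */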
@@ -918,7 +915,7 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
/* allocate idr_layers */
- if (!idr_pre_get(&ida->idr, gfp_mask))
+ if (!__idr_pre_get(&ida->idr, gfp_mask))
return 0;
/* allocate free_bitmap */