/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include <linux/rhashtable-types.h>
/*
 * The end of the chain is marked with a special nulls marker which has
 * the least significant bit set.
 */
/* Maximum chain length before rehash
 *
 * The maximum (not average) chain length grows with the size of the hash
 * table, at a rate of (log N)/(log log N).
 *
 * The value of 16 is selected so that even if the hash table grew to
 * 2^32 you would not expect the maximum chain length to exceed it
 * unless we are under attack (or extremely unlucky).
 *
 * As this limit is only to detect attacks, we don't need to set it to a
 * lower value as you'd need the chain length to vastly exceed 16 to have
 * any real effect on the system.
 */
#define RHT_ELASTICITY	16u
/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @nest: Number of bits of first-level nested table.
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @ntbl: Nested table used when out of memory.
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		nest;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
/*
 * NULLS_MARKER() expects a hash value with the low
 * bits most likely to be significant, and it discards
 * the msb.
 * We give it an address, in which the bottom 2 bits are
 * always 0, and the msb might be significant.
 * So we shift the address down one bit to align with
 * expectations and avoid losing a significant bit.
 */
#define RHT_NULLS_MARKER(ptr)	\
	((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
#define INIT_RHT_NULLS_HEAD(ptr)	\
	((ptr) = RHT_NULLS_MARKER(&(ptr)))
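/*
 * Illustration (a minimal sketch, not part of the original header): an empty
 * bucket head is initialised to its own nulls marker, so a chain walk
 * terminates on a pointer with the least significant bit set instead of NULL:
 *
 *	struct rhash_head __rcu *bucket;
 *
 *	INIT_RHT_NULLS_HEAD(bucket);
 *	// rht_is_a_nulls((struct rhash_head *)bucket) is now true, and the
 *	// marker still encodes the bucket address, which lookups compare
 *	// against to detect entries moved to another chain during a rehash.
 */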
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return hash & (tbl->size - 1);
}
static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
	const void *key, const struct rhashtable_params params,
	unsigned int hash_rnd)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else
			hash = jhash(key, key_len, hash_rnd);
	}

	return hash;
}
static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);

	return rht_bucket_index(tbl, hash);
}

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							    ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}
/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}
/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}
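/*
 * Worked example (illustrative, not from the original header): for a table
 * with size 1024, rht_grow_above_75() triggers once nelems exceeds
 * 1024 / 4 * 3 = 768, and rht_shrink_below_30() triggers once nelems drops
 * below 1024 * 3 / 10 = 307 (if automatic shrinking is enabled and
 * size > min_size). The gap between the 75% and 30% thresholds provides
 * hysteresis so the table does not oscillate between grow and shrink.
 */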
/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}
/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) >= ht->max_elems;
}
/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking, as locking the bucket in both tables
 * during resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */
void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj);

void rhashtable_walk_enter(struct rhashtable *ht,
			   struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);

static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
{
	(void)rhashtable_walk_start_check(iter);
}

void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
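/*
 * Example (a minimal sketch, not part of the original header): walking every
 * entry of a table "ht" whose objects embed a struct rhash_head named "node".
 * The object type "struct test_obj" and its "value" field are hypothetical.
 * rhashtable_walk_next() returns ERR_PTR(-EAGAIN) when a resize forces the
 * walk to restart, so that case is skipped rather than treated as failure.
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("value %d\n", obj->value);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */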
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash);
struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash);
#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })
static inline struct rhash_head __rcu *const *rht_bucket(
	const struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_head __rcu **rht_bucket_var(
	struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_head __rcu **rht_bucket_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
				     &tbl->buckets[hash];
}
/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
 * rht_for_each - iterate over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \
				    tbl, hash, member)
/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @next: the &struct rhash_head to use as next in loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	      \
	for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	      \
	     pos = next,						      \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)
/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash)			\
	for (({barrier(); }),						\
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		\
	     !rht_is_a_nulls(pos);					\
	     pos = rcu_dereference_raw(pos->next))
/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
	rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						    \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		    \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		   \
	rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
					tbl, hash, member)
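/*
 * Example (a minimal sketch, not part of the original header): scanning a
 * single bucket under RCU. "struct test_obj" with an embedded "node" member
 * is hypothetical; tbl would come from rht_dereference_rcu(ht->tbl, ht) and
 * hash from rht_key_hashfn().
 *
 *	struct test_obj *obj;
 *	struct rhash_head *pos;
 *
 *	rcu_read_lock();
 *	rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
 *		pr_info("key %d\n", obj->key);
 *	rcu_read_unlock();
 */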
/**
 * rhl_for_each_rcu - iterate over rcu hash table list
 * @pos: the &struct rhlist_head to use as a loop cursor.
 * @list: the head of the list
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_rcu(pos, list)					\
	for (pos = list; pos; pos = rcu_dereference_raw(pos->next))
/**
 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhlist_head to use as a loop cursor.
 * @list: the head of the list
 * @member: name of the &struct rhlist_head within the hashable struct.
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_entry_rcu(tpos, pos, list, member)			\
	for (pos = list; pos && rht_entry(tpos, pos, member);		\
	     pos = rcu_dereference_raw(pos->next))
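/*
 * Example (a minimal sketch, not part of the original header): visiting all
 * duplicates stored for one key in an rhltable. "struct test_obj" with an
 * embedded "struct rhlist_head list_node" member, the table "hlt" and the
 * params "test_params" are hypothetical.
 *
 *	struct rhlist_head *head, *pos;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	head = rhltable_lookup(&hlt, &key, test_params);
 *	rhl_for_each_entry_rcu(obj, pos, head, list_node)
 *		pr_info("value %d\n", obj->value);
 *	rcu_read_unlock();
 */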
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}
/* Internal function, do not use. */
static inline struct rhash_head *__rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu * const *head;
	struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	head = rht_bucket(tbl, hash);
	do {
		rht_for_each_rcu_continue(he, *head, tbl, hash) {
			if (params.obj_cmpfn ?
			    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
			    rhashtable_compare(&arg, rht_obj(ht, he)))
				continue;
			return he;
		}
		/* An object might have been moved to a different hash chain,
		 * while we walk along it - better check and retry.
		 */
	} while (he != RHT_NULLS_MARKER(head));

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;

	return NULL;
}
/**
 * rhashtable_lookup - search hash table
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params);

	return he ? rht_obj(ht, he) : NULL;
}
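/*
 * Example (a minimal sketch, not part of the original header): a fixed-key
 * table and a lookup. "struct test_obj", "test_params" and the table "ht"
 * are hypothetical; rhashtable_init() is declared in
 * <linux/rhashtable-types.h>.
 *
 *	struct test_obj {
 *		int			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.key_len	= sizeof(int),
 *		.key_offset	= offsetof(struct test_obj, key),
 *		.head_offset	= offsetof(struct test_obj, node),
 *	};
 *
 *	rhashtable_init(&ht, &test_params);
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&ht, &key, test_params);
 *	rcu_read_unlock();
 */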
/**
 * rhashtable_lookup_fast - search hash table, without RCU read lock
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Only use this function when you have other mechanisms guaranteeing
 * that the object won't go away after the RCU read lock is released.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	void *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, key, params);
	rcu_read_unlock();

	return obj;
}
/**
 * rhltable_lookup - search hash list table
 * @hlt: hash list table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. All matching entries are returned
 * in a list.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the list of entries that match the given key.
 */
static inline struct rhlist_head *rhltable_lookup(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);

	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}
/* Internal function, please use rhashtable_insert_fast() instead. This
 * function returns the existing element already in the hash table if there
 * is a clash, otherwise it returns an error via ERR_PTR().
 */
static inline void *__rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct bucket_table *tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int hash;
	int elasticity;
	void *data;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);
	spin_lock_bh(lock);

	if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
		spin_unlock_bh(lock);
		rcu_read_unlock();
		return rhashtable_insert_slow(ht, key, obj);
	}

	elasticity = RHT_ELASTICITY;
	pprev = rht_bucket_insert(ht, tbl, hash);
	data = ERR_PTR(-ENOMEM);
	if (!pprev)
		goto out;

	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *plist;
		struct rhlist_head *list;

		elasticity--;
		if (!key ||
		    (params.obj_cmpfn ?
		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		data = rht_obj(ht, head);

		if (!rhlist)
			goto out;

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		goto good;
	}

	if (elasticity <= 0)
		goto slow_path;

	data = ERR_PTR(-E2BIG);
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out;

	if (unlikely(rht_grow_above_100(ht, tbl)))
		goto slow_path;

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

good:
	data = NULL;

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return data;
}
/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}
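/*
 * Example (a minimal sketch, not part of the original header), reusing the
 * hypothetical "struct test_obj" and "test_params" shown above:
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	int err;
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *	if (err)
 *		kfree(obj);
 */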
/**
 * rhltable_insert_key - insert object into hash list table
 * @hlt: hash list table
 * @key: the pointer to the key
 * @list: pointer to hash list head inside object
 * @params: hash table parameters
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhltable_insert_key(
	struct rhltable *hlt, const void *key, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
						params, true));
}
/**
 * rhltable_insert - insert object into hash list table
 * @hlt: hash list table
 * @list: pointer to hash list head inside object
 * @params: hash table parameters
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhltable_insert(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(&hlt->ht, &list->rhead);

	key += params.key_offset;

	return rhltable_insert_key(hlt, key, list, params);
}
/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for a fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);
	void *ret;

	BUG_ON(ht->p.obj_hashfn);

	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
				       false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}
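/*
 * Example (a minimal sketch, not part of the original header): in contrast
 * to rhashtable_insert_fast(), a duplicate key is reported instead of being
 * chained, so -EEXIST is an expected outcome to handle:
 *
 *	err = rhashtable_lookup_insert_fast(&ht, &obj->node, test_params);
 *	if (err == -EEXIST)
 *		kfree(obj);	// another thread inserted this key first
 */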
/**
 * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Just like rhashtable_lookup_insert_fast(), but this function returns the
 * object if it exists, NULL if it did not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
					false);
}
/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht: hash table
 * @key: key
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	BUG_ON(!ht->p.obj_hashfn || !key);

	ret = __rhashtable_insert_fast(ht, key, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}
/**
 * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
 * @ht: hash table
 * @key: key
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Just like rhashtable_lookup_insert_key(), but this function returns the
 * object if it exists, NULL if it does not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params, false);
}
/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast_one(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params,
	bool rhlist)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(he, *pprev, tbl, hash) {
		struct rhlist_head *list;

		list = container_of(he, struct rhlist_head, rhead);

		if (he != obj) {
			struct rhlist_head __rcu **lpprev;

			pprev = &he->next;

			if (!rhlist)
				continue;

			do {
				lpprev = &list->next;
				list = rht_dereference_bucket(list->next,
							      tbl, hash);
			} while (list && obj != &list->rhead);

			if (!list)
				continue;

			list = rht_dereference_bucket(list->next, tbl, hash);
			RCU_INIT_POINTER(*lpprev, list);
			err = 0;
			break;
		}

		obj = rht_dereference_bucket(obj->next, tbl, hash);
		err = 1;

		if (rhlist) {
			list = rht_dereference_bucket(list->next, tbl, hash);
			if (list) {
				RCU_INIT_POINTER(list->rhead.next, obj);
				obj = &list->rhead;
				err = 0;
			}
		}

		rcu_assign_pointer(*pprev, obj);
		break;
	}

	spin_unlock_bh(lock);

	if (err > 0) {
		atomic_dec(&ht->nelems);
		if (unlikely(ht->p.automatic_shrinking &&
			     rht_shrink_below_30(ht, tbl)))
			schedule_work(&ht->run_work);
		err = 0;
	}

	return err;
}
/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
						   rhlist)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}
/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(ht, obj, params, false);
}
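/*
 * Example (a minimal sketch, not part of the original header), again with
 * the hypothetical "struct test_obj": removal only unlinks the object, so
 * it must not be freed until concurrent RCU readers are done with it.
 *
 *	if (rhashtable_remove_fast(&ht, &obj->node, test_params) == 0)
 *		kfree_rcu(obj, rcu);	// assumes a struct rcu_head "rcu" member
 */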
/**
 * rhltable_remove - remove object from hash list table
 * @hlt: hash list table
 * @list: pointer to hash list head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhltable_remove(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
}
/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj_old, struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	/* Minimally, the old and new objects must have same hash
	 * (which should mean identifiers are the same).
	 */
	hash = rht_head_hashfn(ht, tbl, obj_old, params);
	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
		return -EINVAL;

	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(he, *pprev, tbl, hash) {
		if (he != obj_old) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(obj_new->next, obj_old->next);
		rcu_assign_pointer(*pprev, obj_new);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}
/**
 * rhashtable_replace_fast - replace an object in hash table
 * @ht: hash table
 * @obj_old: pointer to hash head inside object being replaced
 * @obj_new: pointer to hash head inside object which is new
 * @params: hash table parameters
 *
 * Replacing an object doesn't affect the number of elements in the hash table
 * or bucket, so we don't need to worry about shrinking or expanding the
 * table.
 *
 * Returns zero on success, -ENOENT if the entry could not be found,
 * -EINVAL if hash is not the same for the old and new objects.
 */
static inline int rhashtable_replace_fast(
	struct rhashtable *ht, struct rhash_head *obj_old,
	struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
						obj_new, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}
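/*
 * Example (a minimal sketch, not part of the original header): swapping in
 * an updated copy of an entry. Both objects must hash identically, so the
 * key is copied first; concurrent readers see either the old or the new
 * object, never a gap in the chain.
 *
 *	new_obj->key = old_obj->key;
 *	err = rhashtable_replace_fast(&ht, &old_obj->node, &new_obj->node,
 *				      test_params);
 *	if (!err)
 *		kfree_rcu(old_obj, rcu);
 */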
/**
 * rhltable_walk_enter - Initialise an iterator
 * @hlt: Table to walk over
 * @iter: Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
static inline void rhltable_walk_enter(struct rhltable *hlt,
				       struct rhashtable_iter *iter)
{
	return rhashtable_walk_enter(&hlt->ht, iter);
}
/**
 * rhltable_free_and_destroy - free elements and destroy hash list table
 * @hlt: the hash list table to destroy
 * @free_fn: callback to release resources of element
 * @arg: pointer passed to free_fn
 *
 * See documentation for rhashtable_free_and_destroy.
 */
static inline void rhltable_free_and_destroy(struct rhltable *hlt,
					     void (*free_fn)(void *ptr,
							     void *arg),
					     void *arg)
{
	return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}

static inline void rhltable_destroy(struct rhltable *hlt)
{
	return rhltable_free_and_destroy(hlt, NULL, NULL);
}

#endif /* _LINUX_RHASHTABLE_H */