/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
#include <linux/bit_spinlock.h>
#include <linux/bits.h>

#include <linux/rhashtable-types.h>

/*
 * Objects in an rhashtable have an embedded struct rhash_head
 * which is linked into a hash chain of the hash table - or one
 * of two or more hash tables when the rhashtable is being resized.
 * The end of the chain is marked with a special nulls marker which has
 * the least significant bit set but otherwise stores the address of
 * the hash bucket.  This allows us to be sure we've found the end
 * of the right list.
 *
 * The value stored in the hash bucket has BIT(0) used as a lock bit.
 * This bit must be atomically set before any changes are made to
 * the chain.  To avoid dereferencing this pointer without clearing
 * the bit first, we use an opaque 'struct rhash_lock_head *' for the
 * pointer stored in the bucket.  This struct needs to be defined so
 * that rcu_dereference() works on it, but it has no content so a
 * cast is needed for it to be useful.  This ensures it isn't
 * used by mistake without first clearing the lock bit.
 */
struct rhash_lock_head {};

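/* Example (illustrative sketch, not part of this header): a hypothetical
 * user object embeds a struct rhash_head, and a rhashtable_params
 * instance tells the table where to find the linkage and the key:
 *
 *	struct test_obj {
 *		u32			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset	= offsetof(struct test_obj, node),
 *		.key_offset	= offsetof(struct test_obj, key),
 *		.key_len	= sizeof(u32),
 *		.automatic_shrinking = true,
 *	};
 */
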
/* Maximum chain length before rehash
 *
 * The maximum (not average) chain length grows with the size of the hash
 * table, at a rate of (log N)/(log log N).
 *
 * The value of 16 is selected so that even if the hash table grew to
 * 2^32 you would not expect the maximum chain length to exceed it
 * unless we are under attack (or extremely unlucky).
 *
 * As this limit is only to detect attacks, we don't need to set it to a
 * lower value as you'd need the chain length to vastly exceed 16 to have
 * any real effect on the system.
 */
#define RHT_ELASTICITY	16u

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @nest: Number of bits of first-level nested table.
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @ntbl: Nested table used when out of memory.
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		nest;
	u32			hash_rnd;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct lockdep_map	dep_map;

	struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};

/*
 * NULLS_MARKER() expects a hash value with the low
 * bits most likely to be significant, and it discards
 * the top bits.
 * We give it an address, in which the bottom bit is
 * always 0, and the msb might be significant.
 * So we shift the address down one bit to align with
 * expectations and avoid losing a significant bit.
 *
 * We never store the NULLS_MARKER in the hash table
 * itself as we need the lsb for locking.
 * Instead we store a NULL.
 */
#define	RHT_NULLS_MARKER(ptr)	\
	((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
#define INIT_RHT_NULLS_HEAD(ptr)	\
	((ptr) = NULL)

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return hash & (tbl->size - 1);
}

static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
	const void *key, const struct rhashtable_params params,
	unsigned int hash_rnd)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else
			hash = jhash(key, key_len, hash_rnd);
	}

	return hash;
}

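/* Example (illustrative): when 'params' is a compile-time constant with a
 * u32-aligned .key_len and no custom .hashfn, the branches above fold away
 * and the hash computation reduces to a direct jhash2() call.  For the
 * hypothetical test_params sketched earlier (.key_len == sizeof(u32)):
 *
 *	hash = rht_key_get_hash(ht, &key, test_params, tbl->hash_rnd);
 *
 * compiles down to effectively jhash2(&key, 1, tbl->hash_rnd).
 */
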
static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);

	return rht_bucket_index(tbl, hash);
}

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							    ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) >= ht->max_elems;
}

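/* Worked example (illustrative): for a table with tbl->size == 1024,
 * rht_grow_above_75() fires once nelems exceeds 1024 / 4 * 3 == 768,
 * rht_shrink_below_30() fires once nelems falls below 1024 * 3 / 10 == 307
 * (subject to tbl->size > ht->p.min_size), and rht_grow_above_100() fires
 * once nelems exceeds 1024 itself.
 */
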
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj);

void rhashtable_walk_enter(struct rhashtable *ht,
			   struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);

static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
{
	(void)rhashtable_walk_start_check(iter);
}

void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);

void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);

struct rhash_lock_head __rcu **rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash);
struct rhash_lock_head __rcu **__rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash);
struct rhash_lock_head __rcu **rht_bucket_nested_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

static inline struct rhash_lock_head __rcu *const *rht_bucket(
	const struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_lock_head __rcu **rht_bucket_var(
	struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_lock_head __rcu **rht_bucket_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
				     &tbl->buckets[hash];
}

/*
 * We lock a bucket by setting BIT(0) in the pointer - this is always
 * zero in real pointers.  The NULLS mark is never stored in the bucket,
 * rather we store NULL if the bucket is empty.
 * bit_spin_locks do not handle contention well, but the whole point
 * of the hashtable design is to achieve minimum per-bucket contention.
 * A nested hash table might not have a bucket pointer.  In that case
 * we cannot get a lock.  For remove and replace the bucket cannot be
 * interesting and doesn't need locking.
 * For insert we allocate the bucket if this is the last bucket_table,
 * and then take the lock.
 * Sometimes we unlock a bucket by writing a new pointer there.  In that
 * case we don't need to unlock, but we do need to reset state such as
 * local_bh.  For that we have rht_assign_unlock().  As rcu_assign_pointer()
 * provides the same release semantics that bit_spin_unlock() provides,
 * this is safe.
 * When we write to a bucket without unlocking, we use rht_assign_locked().
 */

static inline void rht_lock(struct bucket_table *tbl,
			    struct rhash_lock_head __rcu **bkt)
{
	local_bh_disable();
	bit_spin_lock(0, (unsigned long *)bkt);
	lock_map_acquire(&tbl->dep_map);
}

static inline void rht_lock_nested(struct bucket_table *tbl,
				   struct rhash_lock_head __rcu **bucket,
				   unsigned int subclass)
{
	local_bh_disable();
	bit_spin_lock(0, (unsigned long *)bucket);
	lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
}

static inline void rht_unlock(struct bucket_table *tbl,
			      struct rhash_lock_head __rcu **bkt)
{
	lock_map_release(&tbl->dep_map);
	bit_spin_unlock(0, (unsigned long *)bkt);
	local_bh_enable();
}

static inline struct rhash_head *__rht_ptr(
	struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
{
	return (struct rhash_head *)
		((unsigned long)p & ~BIT(0) ?:
		 (unsigned long)RHT_NULLS_MARKER(bkt));
}

/*
 * Where 'bkt' is a bucket and might be locked:
 *   rht_ptr_rcu() dereferences that pointer and clears the lock bit.
 *   rht_ptr() dereferences in a context where the bucket is locked.
 *   rht_ptr_exclusive() dereferences in a context where exclusive
 *            access is guaranteed, such as when destroying the table.
 */
static inline struct rhash_head *rht_ptr_rcu(
	struct rhash_lock_head __rcu *const *bkt)
{
	return __rht_ptr(rcu_dereference(*bkt), bkt);
}

static inline struct rhash_head *rht_ptr(
	struct rhash_lock_head __rcu *const *bkt,
	struct bucket_table *tbl,
	unsigned int hash)
{
	return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
}

static inline struct rhash_head *rht_ptr_exclusive(
	struct rhash_lock_head __rcu *const *bkt)
{
	return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
}

static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
				     struct rhash_head *obj)
{
	if (rht_is_a_nulls(obj))
		obj = NULL;
	rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0)));
}

static inline void rht_assign_unlock(struct bucket_table *tbl,
				     struct rhash_lock_head __rcu **bkt,
				     struct rhash_head *obj)
{
	if (rht_is_a_nulls(obj))
		obj = NULL;
	lock_map_release(&tbl->dep_map);
	rcu_assign_pointer(*bkt, (void *)obj);
	preempt_enable();
	__release(bitlock);
	local_bh_enable();
}

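/* Sketch (illustrative) of the canonical insert-side bucket update built
 * from the helpers above; this is the pattern __rhashtable_insert_fast()
 * uses below:
 *
 *	bkt = rht_bucket_insert(ht, tbl, hash);
 *	rht_lock(tbl, bkt);
 *	head = rht_ptr(bkt, tbl, hash);
 *	RCU_INIT_POINTER(obj->next, head);
 *	rht_assign_unlock(tbl, bkt, obj);
 *
 * rht_ptr() is safe here because the bucket is locked, and the final
 * rcu_assign_pointer() inside rht_assign_unlock() publishes the new head
 * and releases the lock bit in a single store.
 */
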
/**
 * rht_for_each_from - iterate over hash chain from given head
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_from(pos, head, tbl, hash) \
	for (pos = head; \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
			  tbl, hash)

/**
 * rht_for_each_entry_from - iterate over hash chain from given head
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member)	\
	for (pos = head;						\
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	\
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_from(tpos, pos,				\
				rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
				tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	      \
	for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash),		      \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	      \
	     pos = next,						      \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_from - iterate over rcu hash chain from given head
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_from(pos, head, tbl, hash)			\
	for (({barrier(); }),						\
	     pos = head;						\
	     !rht_is_a_nulls(pos);					\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash)			\
	for (({barrier(); }),					\
	     pos = rht_ptr_rcu(rht_bucket(tbl, hash));		\
	     !rht_is_a_nulls(pos);				\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_entry_rcu_from - iterate over rcu hash chain from given head
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						 \
	     pos = head;						 \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	 \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		   \
	rht_for_each_entry_rcu_from(tpos, pos,				   \
				    rht_ptr_rcu(rht_bucket(tbl, hash)),	   \
				    tbl, hash, member)

/**
 * rhl_for_each_rcu - iterate over rcu hash table list
 * @pos:	the &struct rhlist_head to use as a loop cursor.
 * @list:	the head of the list
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_rcu(pos, list)					\
	for (pos = list; pos; pos = rcu_dereference_raw(pos->next))

/**
 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhlist_head to use as a loop cursor.
 * @list:	the head of the list
 * @member:	name of the &struct rhlist_head within the hashable struct.
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_entry_rcu(tpos, pos, list, member)			\
	for (pos = list; pos && rht_entry(tpos, pos, member);		\
	     pos = rcu_dereference_raw(pos->next))

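/* Example (illustrative): iterating the list returned by rhltable_lookup(),
 * assuming a hypothetical object type that embeds a struct rhlist_head
 * named 'list_node':
 *
 *	struct rhlist_head *pos, *h;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	h = rhltable_lookup(&hlt, &key, test_params);
 *	rhl_for_each_entry_rcu(obj, pos, h, list_node)
 *		pr_info("match: %u\n", obj->key);
 *	rcu_read_unlock();
 */
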
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/* Internal function, do not use. */
static inline struct rhash_head *__rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_lock_head __rcu *const *bkt;
	struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	bkt = rht_bucket(tbl, hash);
	do {
		rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) {
			if (params.obj_cmpfn ?
			    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
			    rhashtable_compare(&arg, rht_obj(ht, he)))
				continue;
			return he;
		}
		/* An object might have been moved to a different hash chain,
		 * while we walk along it - better check and retry.
		 */
	} while (he != RHT_NULLS_MARKER(bkt));

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;

	return NULL;
}

/**
 * rhashtable_lookup - search hash table
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params);

	return he ? rht_obj(ht, he) : NULL;
}

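/* Example (illustrative, reusing the hypothetical test_obj/test_params):
 *
 *	u32 key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&ht, &key, test_params);
 *	if (obj)
 *		pr_info("found %u\n", obj->key);
 *	rcu_read_unlock();
 *
 * The object may be freed by a concurrent writer once rcu_read_unlock()
 * runs, so any use of 'obj' must stay inside the read-side section unless
 * another mechanism pins it.
 */
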
/**
 * rhashtable_lookup_fast - search hash table, without RCU read lock
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Only use this function when you have other mechanisms guaranteeing
 * that the object won't go away after the RCU read lock is released.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	void *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, key, params);
	rcu_read_unlock();

	return obj;
}

/**
 * rhltable_lookup - search hash list table
 * @hlt:	hash list table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. All matching entries are returned
 * in a list.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the list of entries that match the given key.
 */
static inline struct rhlist_head *rhltable_lookup(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);

	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}

/* Internal function, please use rhashtable_insert_fast() instead. This
 * function returns the existing element already in the hash if there is
 * a clash, otherwise it returns an error via ERR_PTR().
 */
static inline void *__rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_lock_head __rcu **bkt;
	struct rhash_head __rcu **pprev;
	struct bucket_table *tbl;
	struct rhash_head *head;
	unsigned int hash;
	int elasticity;
	void *data;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	elasticity = RHT_ELASTICITY;
	bkt = rht_bucket_insert(ht, tbl, hash);
	data = ERR_PTR(-ENOMEM);
	if (!bkt)
		goto out;
	pprev = NULL;
	rht_lock(tbl, bkt);

	if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
		rht_unlock(tbl, bkt);
		rcu_read_unlock();
		return rhashtable_insert_slow(ht, key, obj);
	}

	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *plist;
		struct rhlist_head *list;

		elasticity--;
		if (!key ||
		    (params.obj_cmpfn ?
		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		data = rht_obj(ht, head);

		if (!rhlist)
			goto out_unlock;

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		if (pprev) {
			rcu_assign_pointer(*pprev, obj);
			rht_unlock(tbl, bkt);
		} else
			rht_assign_unlock(tbl, bkt, obj);
		data = NULL;
		goto out;
	}

	if (elasticity <= 0)
		goto slow_path;

	data = ERR_PTR(-E2BIG);
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out_unlock;

	if (unlikely(rht_grow_above_100(ht, tbl)))
		goto slow_path;

	/* Inserting at head of list makes unlocking free. */
	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	atomic_inc(&ht->nelems);
	rht_assign_unlock(tbl, bkt, obj);

	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	data = NULL;
out:
	rcu_read_unlock();

	return data;

out_unlock:
	rht_unlock(tbl, bkt);
	goto out;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Will take the per bucket bitlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 70%.
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

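/* Example (illustrative, reusing the hypothetical test_obj/test_params):
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	int err;
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *	if (err)
 *		kfree(obj);
 */
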
/**
 * rhltable_insert_key - insert object into hash list table
 * @hlt:	hash list table
 * @key:	the pointer to the key
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take the per bucket bitlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 70%.
 */
static inline int rhltable_insert_key(
	struct rhltable *hlt, const void *key, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
						params, true));
}

/**
 * rhltable_insert - insert object into hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take the per bucket bitlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 70%.
 */
static inline int rhltable_insert(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(&hlt->ht, &list->rhead);

	key += params.key_offset;

	return rhltable_insert_key(hlt, key, list, params);
}

/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 70%.
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);
	void *ret;

	BUG_ON(ht->p.obj_hashfn);

	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
				       false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Just like rhashtable_lookup_insert_fast(), but this function returns the
 * object if it exists, NULL if it did not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
					false);
}

/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 70%.
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	BUG_ON(!ht->p.obj_hashfn || !key);

	ret = __rhashtable_insert_fast(ht, key, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Just like rhashtable_lookup_insert_key(), but this function returns the
 * object if it exists, NULL if it does not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params, false);
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast_one(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params,
	bool rhlist)
{
	struct rhash_lock_head __rcu **bkt;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	bkt = rht_bucket_var(tbl, hash);
	if (!bkt)
		return -ENOENT;
	pprev = NULL;
	rht_lock(tbl, bkt);

	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *list;

		list = container_of(he, struct rhlist_head, rhead);

		if (he != obj) {
			struct rhlist_head __rcu **lpprev;

			pprev = &he->next;

			if (!rhlist)
				continue;

			do {
				lpprev = &list->next;
				list = rht_dereference_bucket(list->next,
							      tbl, hash);
			} while (list && obj != &list->rhead);

			if (!list)
				continue;

			list = rht_dereference_bucket(list->next, tbl, hash);
			RCU_INIT_POINTER(*lpprev, list);
			err = 0;
			break;
		}

		obj = rht_dereference_bucket(obj->next, tbl, hash);
		err = 1;

		if (rhlist) {
			list = rht_dereference_bucket(list->next, tbl, hash);
			if (list) {
				RCU_INIT_POINTER(list->rhead.next, obj);
				obj = &list->rhead;
				err = 0;
			}
		}

		if (pprev) {
			rcu_assign_pointer(*pprev, obj);
			rht_unlock(tbl, bkt);
		} else {
			rht_assign_unlock(tbl, bkt, obj);
		}
		goto unlocked;
	}

	rht_unlock(tbl, bkt);
unlocked:
	if (err > 0) {
		atomic_dec(&ht->nelems);
		if (unlikely(ht->p.automatic_shrinking &&
			     rht_shrink_below_30(ht, tbl)))
			schedule_work(&ht->run_work);
		err = 0;
	}

	return err;
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
						   rhlist)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(ht, obj, params, false);
}

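/* Example (illustrative): removal only unlinks the object; the caller must
 * wait an RCU grace period before freeing it, since concurrent readers may
 * still hold a reference.  Assuming the hypothetical test_obj also embeds
 * a struct rcu_head named 'rcu':
 *
 *	if (rhashtable_remove_fast(&ht, &obj->node, test_params) == 0)
 *		kfree_rcu(obj, rcu);
 */
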
/**
 * rhltable_remove - remove object from hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhltable_remove(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
}

/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj_old, struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct rhash_lock_head __rcu **bkt;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	unsigned int hash;
	int err = -ENOENT;

	/* Minimally, the old and new objects must have same hash
	 * (which should mean identifiers are the same).
	 */
	hash = rht_head_hashfn(ht, tbl, obj_old, params);
	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
		return -EINVAL;

	bkt = rht_bucket_var(tbl, hash);
	if (!bkt)
		return -ENOENT;

	pprev = NULL;
	rht_lock(tbl, bkt);

	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
		if (he != obj_old) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(obj_new->next, obj_old->next);
		if (pprev) {
			rcu_assign_pointer(*pprev, obj_new);
			rht_unlock(tbl, bkt);
		} else {
			rht_assign_unlock(tbl, bkt, obj_new);
		}
		err = 0;
		goto unlocked;
	}

	rht_unlock(tbl, bkt);

unlocked:
	return err;
}

/**
 * rhashtable_replace_fast - replace an object in hash table
 * @ht:		hash table
 * @obj_old:	pointer to hash head inside object being replaced
 * @obj_new:	pointer to hash head inside object which is new
 * @params:	hash table parameters
 *
 * Replacing an object doesn't affect the number of elements in the hash table
 * or bucket, so we don't need to worry about shrinking or expanding the
 * table size.
 *
 * Returns zero on success, -ENOENT if the entry could not be found,
 * -EINVAL if hash is not the same for the old and new objects.
 */
static inline int rhashtable_replace_fast(
	struct rhashtable *ht, struct rhash_head *obj_old,
	struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
						obj_new, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/**
 * rhltable_walk_enter - Initialise an iterator
 * @hlt:	Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
static inline void rhltable_walk_enter(struct rhltable *hlt,
				       struct rhashtable_iter *iter)
{
	return rhashtable_walk_enter(&hlt->ht, iter);
}

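/* Example (illustrative): a complete walk using the iterator API declared
 * above.  rhashtable_walk_next() returns ERR_PTR(-EAGAIN) when a resize
 * interleaves with the walk, in which case iteration simply continues:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;
 *		pr_info("visit %u\n", obj->key);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */
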
/**
 * rhltable_free_and_destroy - free elements and destroy hash list table
 * @hlt:	the hash list table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * See documentation for rhashtable_free_and_destroy.
 */
static inline void rhltable_free_and_destroy(struct rhltable *hlt,
					     void (*free_fn)(void *ptr,
							     void *arg),
					     void *arg)
{
	return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}

static inline void rhltable_destroy(struct rhltable *hlt)
{
	return rhltable_free_and_destroy(hlt, NULL, NULL);
}

#endif /* _LINUX_RHASHTABLE_H */