/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/atomic.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)
struct rhash_head {
	struct rhash_head __rcu		*next;
};
/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of current walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
};
/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};
typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
			       const void *obj);

struct rhashtable;
/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @insecure_max_entries: Maximum number of entries (may be exceeded)
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @insecure_elasticity: Set to true to disable chain length checks
 * @automatic_shrinking: Enable automatic shrinking of tables
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
	size_t			nelem_hint;
	size_t			key_len;
	size_t			key_offset;
	size_t			head_offset;
	unsigned int		insecure_max_entries;
	unsigned int		max_size;
	unsigned int		min_size;
	u32			nulls_base;
	bool			insecure_elasticity;
	bool			automatic_shrinking;
	size_t			locks_mul;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
	rht_obj_cmpfn_t		obj_cmpfn;
};
/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @nelems: Number of elements in table
 * @key_len: Key length for hashfn
 * @elasticity: Maximum chain length before rehash
 * @p: Configuration parameters
 * @run_work: Deferred worker to expand/shrink table
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	atomic_t			nelems;
	unsigned int			key_len;
	unsigned int			elasticity;
	struct rhashtable_params	p;
	struct work_struct		run_work;
	struct mutex			mutex;
	spinlock_t			lock;
};
/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
	struct list_head list;
	struct bucket_table *tbl;
};
static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}
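/*
 * Worked example (illustrative sketch): with nulls_base == 0 and
 * hash == 5, rht_marker() yields NULLS_MARKER(5) == ((5 << 1) | 1) == 0xb.
 * The set low bit makes rht_is_a_nulls() return true for this value, and
 * rht_get_nulls_value() recovers 5 by shifting the marker bit back out.
 */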
static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}
static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}
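/*
 * Worked example (illustrative sketch): the low RHT_HASH_RESERVED_SPACE
 * (5) bits of the hash are skipped because they are reserved by the
 * nulls marker encoding above.  With tbl->size == 64 and
 * hash == 0x12345678, the bucket index is
 * (0x12345678 >> 5) & 63 == 0x91a2b3 & 63 == 51.
 */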
static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, tbl->hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32),
				      tbl->hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else
			hash = jhash(key, key_len, tbl->hash_rnd);
	}

	return rht_bucket_index(tbl, hash);
}
static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							    ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}
/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}
/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}
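/*
 * Worked example (illustrative sketch): with tbl->size == 1024, the table
 * grows once nelems exceeds 1024 / 4 * 3 == 768 and, with
 * automatic_shrinking set, shrinks once nelems drops below
 * 1024 * 3 / 10 == 307.  The gap between the two thresholds prevents
 * resize oscillation around a single load factor.
 */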
/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}
/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return ht->p.insecure_max_entries &&
	       atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
}
/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}
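/*
 * Striping example (illustrative sketch): with locks_mask == 127 there
 * are 128 locks, so buckets whose indices differ by a multiple of 128
 * (e.g. hash 3 and hash 131) share one lock.  Allocating fewer locks
 * than buckets keeps the lock array small while still spreading
 * contention across the table.
 */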
int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
					    const void *key,
					    struct rhash_head *obj,
					    struct bucket_table *old_tbl);

int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params);
void rhashtable_destroy(struct rhashtable *ht);
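/*
 * Minimal usage sketch (illustrative only; "struct test_obj", "ht",
 * "obj", "key" and "err" are hypothetical names, not part of this API):
 *
 *	struct test_obj {
 *		u32			id;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.key_len     = sizeof(u32),
 *		.key_offset  = offsetof(struct test_obj, id),
 *		.head_offset = offsetof(struct test_obj, node),
 *	};
 *
 *	err = rhashtable_init(&ht, &test_params);
 *	err = rhashtable_lookup_insert_fast(&ht, &obj->node, test_params);
 *	obj = rhashtable_lookup_fast(&ht, &key, test_params);
 *	err = rhashtable_remove_fast(&ht, &obj->node, test_params);
 *	rhashtable_destroy(&ht);
 *
 * Passing the same compile-time-constant params to every *_fast() call
 * below lets the compiler specialize the inlined hashing code for this
 * key layout.
 */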
/* Simplified variants of the kernel's lockdep-annotated helpers: holders
 * of ht->mutex or of a bucket lock use rcu_dereference_protected(), while
 * RCU readers use rcu_dereference().
 */
#define rht_dereference(p, ht)			rcu_dereference_protected(p, 1)
#define rht_dereference_rcu(p, ht)		rcu_dereference(p)
#define rht_dereference_bucket(p, tbl, hash)	rcu_dereference_protected(p, 1)
#define rht_dereference_bucket_rcu(p, tbl, hash) rcu_dereference(p)
#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
	for (({barrier(); }), \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rcu_dereference_raw(pos->next))

#define rht_for_each_rcu(pos, tbl, hash) \
	rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }), \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash], \
					tbl, hash, member)
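/*
 * Example (illustrative sketch): walking one bucket under RCU, given a
 * struct bucket_table *tbl and a bucket index hash, and reusing the
 * hypothetical "struct test_obj" from the usage sketch above:
 *
 *	struct test_obj *obj;
 *	struct rhash_head *pos;
 *
 *	rcu_read_lock();
 *	rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
 *		pr_info("id %u\n", obj->id);
 *	rcu_read_unlock();
 */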
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	const struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	rht_for_each_rcu(he, tbl, hash) {
		if (params.obj_cmpfn ?
		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
		    rhashtable_compare(&arg, rht_obj(ht, he)))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;
	rcu_read_unlock();

	return NULL;
}
/* Internal function, please use the *_insert_fast() wrappers instead. */
static inline int __rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct bucket_table *tbl, *new_tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int elasticity;
	unsigned int hash;
	int err;

restart:
	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, params);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	}

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(new_tbl)) {
		tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
		if (!IS_ERR_OR_NULL(tbl))
			goto slow_path;

		err = PTR_ERR(tbl);
		goto out;
	}

	err = -E2BIG;
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out;

	if (unlikely(rht_grow_above_100(ht, tbl))) {
slow_path:
		spin_unlock_bh(lock);
		err = rhashtable_insert_rehash(ht, tbl);
		rcu_read_unlock();
		if (err)
			return err;

		goto restart;
	}

	err = -EEXIST;
	elasticity = ht->elasticity;
	rht_for_each(head, tbl, hash) {
		if (key &&
		    unlikely(!(params.obj_cmpfn ?
			       params.obj_cmpfn(&arg, rht_obj(ht, head)) :
			       rhashtable_compare(&arg, rht_obj(ht, head)))))
			goto out;
		if (!--elasticity)
			goto slow_path;
	}

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return err;
}
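/*
 * Note on elasticity (illustrative sketch): ht->elasticity bounds the
 * chain length walked on insert.  If a bucket chain exceeds this bound,
 * the insert takes the slow path and forces a rehash, which defends
 * against hash-collision attacks.  Setting insecure_elasticity at init
 * time effectively disables this check.
 */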
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
					params);
}
/* Internal function, please use rhashtable_remove_fast() instead. */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	if (err)
		goto out;

	atomic_dec(&ht->nelems);
	if (unlikely(ht->p.automatic_shrinking &&
		     rht_shrink_below_30(ht, tbl)))
		schedule_work(&ht->run_work);

out:
	rcu_read_unlock();

	return err;
}
#endif /* _LINUX_RHASHTABLE_H */