Diffstat (limited to '3rdParty/Unbound/src/src/util/storage')
-rw-r--r--  3rdParty/Unbound/src/src/util/storage/dnstree.c    282
-rw-r--r--  3rdParty/Unbound/src/src/util/storage/dnstree.h    192
-rw-r--r--  3rdParty/Unbound/src/src/util/storage/lookup3.c   1011
-rw-r--r--  3rdParty/Unbound/src/src/util/storage/lookup3.h     71
-rw-r--r--  3rdParty/Unbound/src/src/util/storage/lruhash.c    544
-rw-r--r--  3rdParty/Unbound/src/src/util/storage/lruhash.h    414
-rw-r--r--  3rdParty/Unbound/src/src/util/storage/slabhash.c   219
-rw-r--r--  3rdParty/Unbound/src/src/util/storage/slabhash.h   211
8 files changed, 2944 insertions, 0 deletions
diff --git a/3rdParty/Unbound/src/src/util/storage/dnstree.c b/3rdParty/Unbound/src/src/util/storage/dnstree.c
new file mode 100644
index 0000000..003e8af
--- /dev/null
+++ b/3rdParty/Unbound/src/src/util/storage/dnstree.c
@@ -0,0 +1,282 @@
+/*
+ * util/storage/dnstree.c - support for rbtree types suitable for DNS code.
+ *
+ * Copyright (c) 2008, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains structures combining types and functions to
+ * manipulate those structures that help build DNS lookup trees.
+ */
+#include "config.h"
+#include "util/storage/dnstree.h"
+#include "util/data/dname.h"
+#include "util/net_help.h"
+
+int name_tree_compare(const void* k1, const void* k2)
+{
+ struct name_tree_node* x = (struct name_tree_node*)k1;
+ struct name_tree_node* y = (struct name_tree_node*)k2;
+ int m;
+ if(x->dclass != y->dclass) {
+ if(x->dclass < y->dclass)
+ return -1;
+ return 1;
+ }
+ return dname_lab_cmp(x->name, x->labs, y->name, y->labs, &m);
+}
+
+int addr_tree_compare(const void* k1, const void* k2)
+{
+ struct addr_tree_node* n1 = (struct addr_tree_node*)k1;
+ struct addr_tree_node* n2 = (struct addr_tree_node*)k2;
+ int r = sockaddr_cmp_addr(&n1->addr, n1->addrlen, &n2->addr,
+ n2->addrlen);
+ if(r != 0) return r;
+ if(n1->net < n2->net)
+ return -1;
+ if(n1->net > n2->net)
+ return 1;
+ return 0;
+}
+
+void name_tree_init(rbtree_t* tree)
+{
+ rbtree_init(tree, &name_tree_compare);
+}
+
+void addr_tree_init(rbtree_t* tree)
+{
+ rbtree_init(tree, &addr_tree_compare);
+}
+
+int name_tree_insert(rbtree_t* tree, struct name_tree_node* node,
+ uint8_t* name, size_t len, int labs, uint16_t dclass)
+{
+ node->node.key = node;
+ node->name = name;
+ node->len = len;
+ node->labs = labs;
+ node->dclass = dclass;
+ node->parent = NULL;
+ return rbtree_insert(tree, &node->node) != NULL;
+}
+
+int addr_tree_insert(rbtree_t* tree, struct addr_tree_node* node,
+ struct sockaddr_storage* addr, socklen_t addrlen, int net)
+{
+ node->node.key = node;
+ memcpy(&node->addr, addr, addrlen);
+ node->addrlen = addrlen;
+ node->net = net;
+ node->parent = NULL;
+ return rbtree_insert(tree, &node->node) != NULL;
+}
+
+void addr_tree_init_parents(rbtree_t* tree)
+{
+ struct addr_tree_node* node, *prev = NULL, *p;
+ int m;
+ RBTREE_FOR(node, struct addr_tree_node*, tree) {
+ node->parent = NULL;
+ if(!prev || prev->addrlen != node->addrlen) {
+ prev = node;
+ continue;
+ }
+ m = addr_in_common(&prev->addr, prev->net, &node->addr,
+ node->net, node->addrlen);
+ /* sort order like: ::/0, 1::/2, 1::/4, ... 2::/2 */
+ /* find the previous, or parent-parent-parent */
+ for(p = prev; p; p = p->parent)
+ if(p->net <= m) {
+ /* ==: since prev matched m, this is closest*/
+ /* <: prev matches more, but is not a parent,
+ * this one is a (grand)parent */
+ node->parent = p;
+ break;
+ }
+ prev = node;
+ }
+}
+
+void name_tree_init_parents(rbtree_t* tree)
+{
+ struct name_tree_node* node, *prev = NULL, *p;
+ int m;
+ RBTREE_FOR(node, struct name_tree_node*, tree) {
+ node->parent = NULL;
+ if(!prev || prev->dclass != node->dclass) {
+ prev = node;
+ continue;
+ }
+ (void)dname_lab_cmp(prev->name, prev->labs, node->name,
+ node->labs, &m); /* we know prev is smaller */
+ /* sort order like: . com. bla.com. zwb.com. net. */
+ /* find the previous, or parent-parent-parent */
+ for(p = prev; p; p = p->parent)
+ if(p->labs <= m) {
+ /* ==: since prev matched m, this is closest*/
+ /* <: prev matches more, but is not a parent,
+ * this one is a (grand)parent */
+ node->parent = p;
+ break;
+ }
+ prev = node;
+ }
+}
+
+struct name_tree_node* name_tree_find(rbtree_t* tree, uint8_t* name,
+ size_t len, int labs, uint16_t dclass)
+{
+ struct name_tree_node key;
+ key.node.key = &key;
+ key.name = name;
+ key.len = len;
+ key.labs = labs;
+ key.dclass = dclass;
+ return (struct name_tree_node*)rbtree_search(tree, &key);
+}
+
+struct name_tree_node* name_tree_lookup(rbtree_t* tree, uint8_t* name,
+ size_t len, int labs, uint16_t dclass)
+{
+ rbnode_t* res = NULL;
+ struct name_tree_node *result;
+ struct name_tree_node key;
+ key.node.key = &key;
+ key.name = name;
+ key.len = len;
+ key.labs = labs;
+ key.dclass = dclass;
+ if(rbtree_find_less_equal(tree, &key, &res)) {
+ /* exact */
+ result = (struct name_tree_node*)res;
+ } else {
+ /* smaller element (or no element) */
+ int m;
+ result = (struct name_tree_node*)res;
+ if(!result || result->dclass != dclass)
+ return NULL;
+ /* count number of labels matched */
+ (void)dname_lab_cmp(result->name, result->labs, key.name,
+ key.labs, &m);
+ while(result) { /* go up until qname is subdomain of stub */
+ if(result->labs <= m)
+ break;
+ result = result->parent;
+ }
+ }
+ return result;
+}
+
+struct addr_tree_node* addr_tree_lookup(rbtree_t* tree,
+ struct sockaddr_storage* addr, socklen_t addrlen)
+{
+ rbnode_t* res = NULL;
+ struct addr_tree_node* result;
+ struct addr_tree_node key;
+ key.node.key = &key;
+ memcpy(&key.addr, addr, addrlen);
+ key.addrlen = addrlen;
+ key.net = (addr_is_ip6(addr, addrlen)?128:32);
+ if(rbtree_find_less_equal(tree, &key, &res)) {
+ /* exact */
+ return (struct addr_tree_node*)res;
+ } else {
+ /* smaller element (or no element) */
+ int m;
+ result = (struct addr_tree_node*)res;
+ if(!result || result->addrlen != addrlen)
+ return 0;
+ /* count number of bits matched */
+ m = addr_in_common(&result->addr, result->net, addr,
+ key.net, addrlen);
+ while(result) { /* go up until addr is inside netblock */
+ if(result->net <= m)
+ break;
+ result = result->parent;
+ }
+ }
+ return result;
+}
+
+int
+name_tree_next_root(rbtree_t* tree, uint16_t* dclass)
+{
+ struct name_tree_node key;
+ rbnode_t* n;
+ struct name_tree_node* p;
+ if(*dclass == 0) {
+ /* first root item is first item in tree */
+ n = rbtree_first(tree);
+ if(n == RBTREE_NULL)
+ return 0;
+ p = (struct name_tree_node*)n;
+ if(dname_is_root(p->name)) {
+ *dclass = p->dclass;
+ return 1;
+ }
+ /* root not first item? search for higher items */
+ *dclass = p->dclass + 1;
+ return name_tree_next_root(tree, dclass);
+ }
+ /* find class n in tree, we may get a direct hit, or if we don't
+ * this is the last item of the previous class so rbtree_next() takes
+ * us to the next root (if any) */
+ key.node.key = &key;
+ key.name = (uint8_t*)"\000";
+ key.len = 1;
+ key.labs = 0;
+ key.dclass = *dclass;
+ n = NULL;
+ if(rbtree_find_less_equal(tree, &key, &n)) {
+ /* exact */
+ return 1;
+ } else {
+ /* smaller element */
+ if(!n || n == RBTREE_NULL)
+ return 0; /* nothing found */
+ n = rbtree_next(n);
+ if(n == RBTREE_NULL)
+ return 0; /* no higher */
+ p = (struct name_tree_node*)n;
+ if(dname_is_root(p->name)) {
+ *dclass = p->dclass;
+ return 1;
+ }
+ /* not a root node, return next higher item */
+ *dclass = p->dclass+1;
+ return name_tree_next_root(tree, dclass);
+ }
+}
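A minimal usage sketch of the name tree API above (the structure and function names my_zone and closest_zone_for_www_example_com are illustrative, not part of Unbound): nodes are caller-allocated with the tree node as first member, wireformat lengths and label counts are written out by hand, label counts are assumed to include the root label, and class 1 is IN in host order.

#include "config.h"
#include "util/storage/dnstree.h"

/* hypothetical client structure; the tree node must be the first member */
struct my_zone {
	struct name_tree_node node;
	int zone_id;
};

static struct name_tree_node*
closest_zone_for_www_example_com(rbtree_t* tree)
{
	static struct my_zone root, com, example;

	name_tree_init(tree);
	/* ".": 1 octet, 1 label (the root label); class 1 = IN, host order */
	(void)name_tree_insert(tree, &root.node, (uint8_t*)"", 1, 1, 1);
	/* "com.": 5 octets, 2 labels */
	(void)name_tree_insert(tree, &com.node, (uint8_t*)"\003com", 5, 2, 1);
	/* "example.com.": 13 octets, 3 labels */
	(void)name_tree_insert(tree, &example.node,
		(uint8_t*)"\007example\003com", 13, 3, 1);
	/* parent pointers must be set up after inserts, before lookups */
	name_tree_init_parents(tree);

	/* "www.example.com." (17 octets, 4 labels) has no exact entry, so
	 * the closest enclosing node, example.com., is returned */
	return name_tree_lookup(tree, (uint8_t*)"\003www\007example\003com",
		17, 4, 1);
}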
diff --git a/3rdParty/Unbound/src/src/util/storage/dnstree.h b/3rdParty/Unbound/src/src/util/storage/dnstree.h
new file mode 100644
index 0000000..3ecbd12
--- /dev/null
+++ b/3rdParty/Unbound/src/src/util/storage/dnstree.h
@@ -0,0 +1,192 @@
+/*
+ * util/storage/dnstree.h - support for rbtree types suitable for DNS code.
+ *
+ * Copyright (c) 2008, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains structures combining types and functions to
+ * manipulate those structures that help build DNS lookup trees.
+ */
+
+#ifndef UTIL_STORAGE_DNSTREE_H
+#define UTIL_STORAGE_DNSTREE_H
+#include "util/rbtree.h"
+
+/**
+ * Tree of domain names. Sorted first by class then by name.
+ * This is not sorted canonically, but fast.
+ * This can be looked up to obtain a closest encloser parent name.
+ *
+ * The tree itself is a rbtree_t.
+ * This is the element node put as first entry in the client structure.
+ */
+struct name_tree_node {
+ /** rbtree node, key is this struct : dclass and name */
+ rbnode_t node;
+ /** parent in tree */
+ struct name_tree_node* parent;
+ /** name in uncompressed wireformat */
+ uint8_t* name;
+ /** length of name */
+ size_t len;
+ /** labels in name */
+ int labs;
+ /** the class of the name (host order) */
+ uint16_t dclass;
+};
+
+/**
+ * Tree of IP addresses. Sorted first by protocol, then by bits.
+ * This can be looked up to obtain the enclosing subnet.
+ *
+ * The tree itself is a rbtree_t.
+ * This is the element node put as first entry in the client structure.
+ */
+struct addr_tree_node {
+ /** rbtree node, key is this struct : proto and subnet */
+ rbnode_t node;
+ /** parent in tree */
+ struct addr_tree_node* parent;
+ /** address */
+ struct sockaddr_storage addr;
+ /** length of addr */
+ socklen_t addrlen;
+ /** netblock size */
+ int net;
+};
+
+/**
+ * Init a name tree to be empty
+ * @param tree: to init.
+ */
+void name_tree_init(rbtree_t* tree);
+
+/**
+ * insert element into name tree.
+ * @param tree: name tree
+ * @param node: node element (at start of a structure that caller
+ * has allocated).
+ * @param name: name to insert (wireformat)
+ * the name is allocated by the caller and the pointer itself is stored (not copied).
+ * @param len: length of name
+ * @param labs: labels in name
+ * @param dclass: class of name
+ * @return false on error (duplicate element).
+ */
+int name_tree_insert(rbtree_t* tree, struct name_tree_node* node,
+ uint8_t* name, size_t len, int labs, uint16_t dclass);
+
+/**
+ * Initialize parent pointers in name tree.
+ * Should be performed after insertions are done, before lookups
+ * @param tree: name tree
+ */
+void name_tree_init_parents(rbtree_t* tree);
+
+/**
+ * Lookup exact match in name tree
+ * @param tree: name tree
+ * @param name: wireformat name
+ * @param len: length of name
+ * @param labs: labels in name
+ * @param dclass: class of name
+ * @return node or NULL if not found.
+ */
+struct name_tree_node* name_tree_find(rbtree_t* tree, uint8_t* name,
+ size_t len, int labs, uint16_t dclass);
+
+/**
+ * Lookup closest encloser in name tree.
+ * @param tree: name tree
+ * @param name: wireformat name
+ * @param len: length of name
+ * @param labs: labels in name
+ * @param dclass: class of name
+ * @return closest enclosing node (could be equal) or NULL if not found.
+ */
+struct name_tree_node* name_tree_lookup(rbtree_t* tree, uint8_t* name,
+ size_t len, int labs, uint16_t dclass);
+
+/**
+ * Find next root item in name tree.
+ * @param tree: the nametree.
+ * @param dclass: the class to look for next (or higher).
+ * @return false if no classes found, true means the class is put into dclass.
+ */
+int name_tree_next_root(rbtree_t* tree, uint16_t* dclass);
+
+/**
+ * Init addr tree to be empty.
+ * @param tree: to init.
+ */
+void addr_tree_init(rbtree_t* tree);
+
+/**
+ * insert element into addr tree.
+ * @param tree: addr tree
+ * @param node: node element (at start of a structure that caller
+ * has allocated).
+ * @param addr: to insert (copied).
+ * @param addrlen: length of addr
+ * @param net: size of subnet.
+ * @return false on error (duplicate element).
+ */
+int addr_tree_insert(rbtree_t* tree, struct addr_tree_node* node,
+ struct sockaddr_storage* addr, socklen_t addrlen, int net);
+
+/**
+ * Initialize parent pointers in addr tree.
+ * Should be performed after insertions are done, before lookups
+ * @param tree: addr tree
+ */
+void addr_tree_init_parents(rbtree_t* tree);
+
+/**
+ * Lookup closest encloser in addr tree.
+ * @param tree: addr tree
+ * @param addr: to lookup.
+ * @param addrlen: length of addr
+ * @return closest enclosing node (could be equal) or NULL if not found.
+ */
+struct addr_tree_node* addr_tree_lookup(rbtree_t* tree,
+ struct sockaddr_storage* addr, socklen_t addrlen);
+
+/** compare name tree nodes */
+int name_tree_compare(const void* k1, const void* k2);
+
+/** compare addr tree nodes */
+int addr_tree_compare(const void* k1, const void* k2);
+
+#endif /* UTIL_STORAGE_DNSTREE_H */
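The addr tree is driven the same way for netblocks; a minimal sketch, assuming the caller has filled in sockaddr_storage values for 10.0.0.0, 10.1.0.0 and the query address 10.1.2.3, all with the same address family and length (my_acl and match_netblock are illustrative names):

#include "config.h"
#include "util/storage/dnstree.h"

/* hypothetical client structure; the tree node must be the first member */
struct my_acl {
	struct addr_tree_node node;
	int allow;
};

static struct my_acl*
match_netblock(rbtree_t* tree, struct sockaddr_storage* net8,
	struct sockaddr_storage* net16, struct sockaddr_storage* query,
	socklen_t alen)
{
	static struct my_acl a8, a16;

	addr_tree_init(tree);
	(void)addr_tree_insert(tree, &a8.node, net8, alen, 8);    /* 10.0.0.0/8 */
	(void)addr_tree_insert(tree, &a16.node, net16, alen, 16); /* 10.1.0.0/16 */
	addr_tree_init_parents(tree); /* required before lookups */

	/* for 10.1.2.3 the 10.1.0.0/16 node is returned, the most specific
	 * enclosing netblock; NULL if no inserted netblock contains it */
	return (struct my_acl*)addr_tree_lookup(tree, query, alen);
}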
diff --git a/3rdParty/Unbound/src/src/util/storage/lookup3.c b/3rdParty/Unbound/src/src/util/storage/lookup3.c
new file mode 100644
index 0000000..65e0ad2
--- /dev/null
+++ b/3rdParty/Unbound/src/src/util/storage/lookup3.c
@@ -0,0 +1,1011 @@
+/*
+ January 2012(Wouter) added randomised initial value, fallout from 28c3.
+ March 2007(Wouter) adapted from lookup3.c original, add config.h include.
+ added #ifdef VALGRIND to remove 298,384,660 'unused variable k8' warnings.
+ added include of lookup3.h to check definitions match declarations.
+ removed include of stdint - config.h takes care of platform independence.
+ url http://burtleburtle.net/bob/hash/index.html.
+*/
+/*
+-------------------------------------------------------------------------------
+lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+
+These are functions for producing 32-bit hashes for hash table lookup.
+hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
+are externally useful functions. Routines to test the hash are included
+if SELF_TEST is defined. You can use this free for any purpose. It's in
+the public domain. It has no warranty.
+
+You probably want to use hashlittle(). hashlittle() and hashbig()
+hash byte arrays. hashlittle() is faster than hashbig() on
+little-endian machines. Intel and AMD are little-endian machines.
+On second thought, you probably want hashlittle2(), which is identical to
+hashlittle() except it returns two 32-bit hashes for the price of one.
+You could implement hashbig2() if you wanted but I haven't bothered here.
+
+If you want to find a hash of, say, exactly 7 integers, do
+ a = i1; b = i2; c = i3;
+ mix(a,b,c);
+ a += i4; b += i5; c += i6;
+ mix(a,b,c);
+ a += i7;
+ final(a,b,c);
+then use c as the hash value. If you have a variable length array of
+4-byte integers to hash, use hashword(). If you have a byte array (like
+a character string), use hashlittle(). If you have several byte arrays, or
+a mix of things, see the comments above hashlittle().
+
+Why is this so big? I read 12 bytes at a time into 3 4-byte integers,
+then mix those integers. This is fast (you can do a lot more thorough
+mixing with 12*3 instructions on 3 integers than you can with 3 instructions
+on 1 byte), but shoehorning those bytes into integers efficiently is messy.
+-------------------------------------------------------------------------------
+*/
+/*#define SELF_TEST 1*/
+
+#include "config.h"
+#include "util/storage/lookup3.h"
+#include <stdio.h> /* defines printf for tests */
+#include <time.h> /* defines time_t for timings in the test */
+/*#include <stdint.h> defines uint32_t etc (from config.h) */
+#include <sys/param.h> /* attempt to define endianness */
+#ifdef linux
+# include <endian.h> /* attempt to define endianness */
+#endif
+
+/* random initial value */
+static uint32_t raninit = 0xdeadbeef;
+
+void
+hash_set_raninit(uint32_t v)
+{
+ raninit = v;
+}
+
+/*
+ * My best guess at if you are big-endian or little-endian. This may
+ * need adjustment.
+ */
+#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
+ __BYTE_ORDER == __LITTLE_ENDIAN) || \
+ (defined(i386) || defined(__i386__) || defined(__i486__) || \
+ defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL))
+# define HASH_LITTLE_ENDIAN 1
+# define HASH_BIG_ENDIAN 0
+#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
+ __BYTE_ORDER == __BIG_ENDIAN) || \
+ (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))
+# define HASH_LITTLE_ENDIAN 0
+# define HASH_BIG_ENDIAN 1
+#else
+# define HASH_LITTLE_ENDIAN 0
+# define HASH_BIG_ENDIAN 0
+#endif
+
+#define hashsize(n) ((uint32_t)1<<(n))
+#define hashmask(n) (hashsize(n)-1)
+#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
+
+/*
+-------------------------------------------------------------------------------
+mix -- mix 3 32-bit values reversibly.
+
+This is reversible, so any information in (a,b,c) before mix() is
+still in (a,b,c) after mix().
+
+If four pairs of (a,b,c) inputs are run through mix(), or through
+mix() in reverse, there are at least 32 bits of the output that
+are sometimes the same for one pair and different for another pair.
+This was tested for:
+* pairs that differed by one bit, by two bits, in any combination
+ of top bits of (a,b,c), or in any combination of bottom bits of
+ (a,b,c).
+* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+ the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+ is commonly produced by subtraction) look like a single 1-bit
+ difference.
+* the base values were pseudorandom, all zero but one bit set, or
+ all zero plus a counter that starts at zero.
+
+Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
+satisfy this are
+ 4 6 8 16 19 4
+ 9 15 3 18 27 15
+ 14 9 3 7 17 3
+Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
+for "differ" defined as + with a one-bit base and a two-bit delta. I
+used http://burtleburtle.net/bob/hash/avalanche.html to choose
+the operations, constants, and arrangements of the variables.
+
+This does not achieve avalanche. There are input bits of (a,b,c)
+that fail to affect some output bits of (a,b,c), especially of a. The
+most thoroughly mixed value is c, but it doesn't really even achieve
+avalanche in c.
+
+This allows some parallelism. Read-after-writes are good at doubling
+the number of bits affected, so the goal of mixing pulls in the opposite
+direction as the goal of parallelism. I did what I could. Rotates
+seem to cost as much as shifts on every machine I could lay my hands
+on, and rotates are much kinder to the top and bottom bits, so I used
+rotates.
+-------------------------------------------------------------------------------
+*/
+#define mix(a,b,c) \
+{ \
+ a -= c; a ^= rot(c, 4); c += b; \
+ b -= a; b ^= rot(a, 6); a += c; \
+ c -= b; c ^= rot(b, 8); b += a; \
+ a -= c; a ^= rot(c,16); c += b; \
+ b -= a; b ^= rot(a,19); a += c; \
+ c -= b; c ^= rot(b, 4); b += a; \
+}
+
+/*
+-------------------------------------------------------------------------------
+final -- final mixing of 3 32-bit values (a,b,c) into c
+
+Pairs of (a,b,c) values differing in only a few bits will usually
+produce values of c that look totally different. This was tested for
+* pairs that differed by one bit, by two bits, in any combination
+ of top bits of (a,b,c), or in any combination of bottom bits of
+ (a,b,c).
+* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+ the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+ is commonly produced by subtraction) look like a single 1-bit
+ difference.
+* the base values were pseudorandom, all zero but one bit set, or
+ all zero plus a counter that starts at zero.
+
+These constants passed:
+ 14 11 25 16 4 14 24
+ 12 14 25 16 4 14 24
+and these came close:
+ 4 8 15 26 3 22 24
+ 10 8 15 26 3 22 24
+ 11 8 15 26 3 22 24
+-------------------------------------------------------------------------------
+*/
+#define final(a,b,c) \
+{ \
+ c ^= b; c -= rot(b,14); \
+ a ^= c; a -= rot(c,11); \
+ b ^= a; b -= rot(a,25); \
+ c ^= b; c -= rot(b,16); \
+ a ^= c; a -= rot(c,4); \
+ b ^= a; b -= rot(a,14); \
+ c ^= b; c -= rot(b,24); \
+}
+
+/*
+--------------------------------------------------------------------
+ This works on all machines. To be useful, it requires
+ -- that the key be an array of uint32_t's, and
+ -- that the length be the number of uint32_t's in the key
+
+ The function hashword() is identical to hashlittle() on little-endian
+ machines, and identical to hashbig() on big-endian machines,
+ except that the length has to be measured in uint32_ts rather than in
+ bytes. hashlittle() is more complicated than hashword() only because
+ hashlittle() has to dance around fitting the key bytes into registers.
+--------------------------------------------------------------------
+*/
+uint32_t hashword(
+const uint32_t *k, /* the key, an array of uint32_t values */
+size_t length, /* the length of the key, in uint32_ts */
+uint32_t initval) /* the previous hash, or an arbitrary value */
+{
+ uint32_t a,b,c;
+
+ /* Set up the internal state */
+ a = b = c = raninit + (((uint32_t)length)<<2) + initval;
+
+ /*------------------------------------------------- handle most of the key */
+ while (length > 3)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 3;
+ k += 3;
+ }
+
+ /*------------------------------------------- handle the last 3 uint32_t's */
+ switch(length) /* all the case statements fall through */
+ {
+ case 3 : c+=k[2];
+ case 2 : b+=k[1];
+ case 1 : a+=k[0];
+ final(a,b,c);
+ case 0: /* case 0: nothing left to add */
+ break;
+ }
+ /*------------------------------------------------------ report the result */
+ return c;
+}
+
+
+#ifdef SELF_TEST
+
+/*
+--------------------------------------------------------------------
+hashword2() -- same as hashword(), but take two seeds and return two
+32-bit values. pc and pb must both be nonnull, and *pc and *pb must
+both be initialized with seeds. If you pass in (*pb)==0, the output
+(*pc) will be the same as the return value from hashword().
+--------------------------------------------------------------------
+*/
+void hashword2 (
+const uint32_t *k, /* the key, an array of uint32_t values */
+size_t length, /* the length of the key, in uint32_ts */
+uint32_t *pc, /* IN: seed OUT: primary hash value */
+uint32_t *pb) /* IN: more seed OUT: secondary hash value */
+{
+ uint32_t a,b,c;
+
+ /* Set up the internal state */
+ a = b = c = raninit + ((uint32_t)(length<<2)) + *pc;
+ c += *pb;
+
+ /*------------------------------------------------- handle most of the key */
+ while (length > 3)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 3;
+ k += 3;
+ }
+
+ /*------------------------------------------- handle the last 3 uint32_t's */
+ switch(length) /* all the case statements fall through */
+ {
+ case 3 : c+=k[2];
+ case 2 : b+=k[1];
+ case 1 : a+=k[0];
+ final(a,b,c);
+ case 0: /* case 0: nothing left to add */
+ break;
+ }
+ /*------------------------------------------------------ report the result */
+ *pc=c; *pb=b;
+}
+
+#endif /* SELF_TEST */
+
+/*
+-------------------------------------------------------------------------------
+hashlittle() -- hash a variable-length key into a 32-bit value
+ k : the key (the unaligned variable-length array of bytes)
+ length : the length of the key, counting by bytes
+ initval : can be any 4-byte value
+Returns a 32-bit value. Every bit of the key affects every bit of
+the return value. Two keys differing by one or two bits will have
+totally different hash values.
+
+The best hash table sizes are powers of 2. There is no need to do
+mod a prime (mod is sooo slow!). If you need less than 32 bits,
+use a bitmask. For example, if you need only 10 bits, do
+ h = (h & hashmask(10));
+In which case, the hash table should have hashsize(10) elements.
+
+If you are hashing n strings (uint8_t **)k, do it like this:
+ for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);
+
+By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
+code any way you wish, private, educational, or commercial. It's free.
+
+Use for hash table lookup, or anything where one collision in 2^^32 is
+acceptable. Do NOT use for cryptographic purposes.
+-------------------------------------------------------------------------------
+*/
+
+uint32_t hashlittle( const void *key, size_t length, uint32_t initval)
+{
+ uint32_t a,b,c; /* internal state */
+ union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
+
+ /* Set up the internal state */
+ a = b = c = raninit + ((uint32_t)length) + initval;
+
+ u.ptr = key;
+ if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
+#ifdef VALGRIND
+ const uint8_t *k8;
+#endif
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ /*
+ * "k[2]&0xffffff" actually reads beyond the end of the string, but
+ * then masks off the part it's not allowed to read. Because the
+ * string is aligned, the masked-off tail is in the same word as the
+ * rest of the string. Every machine with memory protection I've seen
+ * does it on word boundaries, so is OK with this. But VALGRIND will
+ * still catch it and complain. The masking trick does make the hash
+ * noticeably faster for short strings (like English words).
+ */
+#ifndef VALGRIND
+
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
+ case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
+ case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
+ case 6 : b+=k[1]&0xffff; a+=k[0]; break;
+ case 5 : b+=k[1]&0xff; a+=k[0]; break;
+ case 4 : a+=k[0]; break;
+ case 3 : a+=k[0]&0xffffff; break;
+ case 2 : a+=k[0]&0xffff; break;
+ case 1 : a+=k[0]&0xff; break;
+ case 0 : return c; /* zero length strings require no mixing */
+ }
+
+#else /* make valgrind happy */
+
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=((uint32_t)k8[9])<<8; /* fall through */
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */
+ case 1 : a+=k8[0]; break;
+ case 0 : return c;
+ }
+
+#endif /* !valgrind */
+
+ } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
+ const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
+ const uint8_t *k8;
+
+ /*--------------- all but last block: aligned reads and different mixing */
+ while (length > 12)
+ {
+ a += k[0] + (((uint32_t)k[1])<<16);
+ b += k[2] + (((uint32_t)k[3])<<16);
+ c += k[4] + (((uint32_t)k[5])<<16);
+ mix(a,b,c);
+ length -= 12;
+ k += 6;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[4]+(((uint32_t)k[5])<<16);
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=k[4];
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=k[2];
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=k[0];
+ break;
+ case 1 : a+=k8[0];
+ break;
+ case 0 : return c; /* zero length requires no mixing */
+ }
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ a += ((uint32_t)k[1])<<8;
+ a += ((uint32_t)k[2])<<16;
+ a += ((uint32_t)k[3])<<24;
+ b += k[4];
+ b += ((uint32_t)k[5])<<8;
+ b += ((uint32_t)k[6])<<16;
+ b += ((uint32_t)k[7])<<24;
+ c += k[8];
+ c += ((uint32_t)k[9])<<8;
+ c += ((uint32_t)k[10])<<16;
+ c += ((uint32_t)k[11])<<24;
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=((uint32_t)k[11])<<24;
+ case 11: c+=((uint32_t)k[10])<<16;
+ case 10: c+=((uint32_t)k[9])<<8;
+ case 9 : c+=k[8];
+ case 8 : b+=((uint32_t)k[7])<<24;
+ case 7 : b+=((uint32_t)k[6])<<16;
+ case 6 : b+=((uint32_t)k[5])<<8;
+ case 5 : b+=k[4];
+ case 4 : a+=((uint32_t)k[3])<<24;
+ case 3 : a+=((uint32_t)k[2])<<16;
+ case 2 : a+=((uint32_t)k[1])<<8;
+ case 1 : a+=k[0];
+ break;
+ case 0 : return c;
+ }
+ }
+
+ final(a,b,c);
+ return c;
+}
+
+#ifdef SELF_TEST
+
+/*
+ * hashlittle2: return 2 32-bit hash values
+ *
+ * This is identical to hashlittle(), except it returns two 32-bit hash
+ * values instead of just one. This is good enough for hash table
+ * lookup with 2^^64 buckets, or if you want a second hash if you're not
+ * happy with the first, or if you want a probably-unique 64-bit ID for
+ * the key. *pc is better mixed than *pb, so use *pc first. If you want
+ * a 64-bit value do something like "*pc + (((uint64_t)*pb)<<32)".
+ */
+void hashlittle2(
+ const void *key, /* the key to hash */
+ size_t length, /* length of the key */
+ uint32_t *pc, /* IN: primary initval, OUT: primary hash */
+ uint32_t *pb) /* IN: secondary initval, OUT: secondary hash */
+{
+ uint32_t a,b,c; /* internal state */
+ union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
+
+ /* Set up the internal state */
+ a = b = c = raninit + ((uint32_t)length) + *pc;
+ c += *pb;
+
+ u.ptr = key;
+ if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
+#ifdef VALGRIND
+ const uint8_t *k8;
+#endif
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ /*
+ * "k[2]&0xffffff" actually reads beyond the end of the string, but
+ * then masks off the part it's not allowed to read. Because the
+ * string is aligned, the masked-off tail is in the same word as the
+ * rest of the string. Every machine with memory protection I've seen
+ * does it on word boundaries, so is OK with this. But VALGRIND will
+ * still catch it and complain. The masking trick does make the hash
+ * noticeably faster for short strings (like English words).
+ */
+#ifndef VALGRIND
+
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
+ case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
+ case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
+ case 6 : b+=k[1]&0xffff; a+=k[0]; break;
+ case 5 : b+=k[1]&0xff; a+=k[0]; break;
+ case 4 : a+=k[0]; break;
+ case 3 : a+=k[0]&0xffffff; break;
+ case 2 : a+=k[0]&0xffff; break;
+ case 1 : a+=k[0]&0xff; break;
+ case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */
+ }
+
+#else /* make valgrind happy */
+
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=((uint32_t)k8[9])<<8; /* fall through */
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */
+ case 1 : a+=k8[0]; break;
+ case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */
+ }
+
+#endif /* !valgrind */
+
+ } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
+ const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
+ const uint8_t *k8;
+
+ /*--------------- all but last block: aligned reads and different mixing */
+ while (length > 12)
+ {
+ a += k[0] + (((uint32_t)k[1])<<16);
+ b += k[2] + (((uint32_t)k[3])<<16);
+ c += k[4] + (((uint32_t)k[5])<<16);
+ mix(a,b,c);
+ length -= 12;
+ k += 6;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[4]+(((uint32_t)k[5])<<16);
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=k[4];
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=k[2];
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=k[0];
+ break;
+ case 1 : a+=k8[0];
+ break;
+ case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */
+ }
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ a += ((uint32_t)k[1])<<8;
+ a += ((uint32_t)k[2])<<16;
+ a += ((uint32_t)k[3])<<24;
+ b += k[4];
+ b += ((uint32_t)k[5])<<8;
+ b += ((uint32_t)k[6])<<16;
+ b += ((uint32_t)k[7])<<24;
+ c += k[8];
+ c += ((uint32_t)k[9])<<8;
+ c += ((uint32_t)k[10])<<16;
+ c += ((uint32_t)k[11])<<24;
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=((uint32_t)k[11])<<24;
+ case 11: c+=((uint32_t)k[10])<<16;
+ case 10: c+=((uint32_t)k[9])<<8;
+ case 9 : c+=k[8];
+ case 8 : b+=((uint32_t)k[7])<<24;
+ case 7 : b+=((uint32_t)k[6])<<16;
+ case 6 : b+=((uint32_t)k[5])<<8;
+ case 5 : b+=k[4];
+ case 4 : a+=((uint32_t)k[3])<<24;
+ case 3 : a+=((uint32_t)k[2])<<16;
+ case 2 : a+=((uint32_t)k[1])<<8;
+ case 1 : a+=k[0];
+ break;
+ case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */
+ }
+ }
+
+ final(a,b,c);
+ *pc=c; *pb=b;
+}
+
+#endif /* SELF_TEST */
+
+#if 0 /* currently not used */
+
+/*
+ * hashbig():
+ * This is the same as hashword() on big-endian machines. It is different
+ * from hashlittle() on all machines. hashbig() takes advantage of
+ * big-endian byte ordering.
+ */
+uint32_t hashbig( const void *key, size_t length, uint32_t initval)
+{
+ uint32_t a,b,c;
+ union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */
+
+ /* Set up the internal state */
+ a = b = c = raninit + ((uint32_t)length) + initval;
+
+ u.ptr = key;
+ if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
+#ifdef VALGRIND
+ const uint8_t *k8;
+#endif
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ /*
+ * "k[2]<<8" actually reads beyond the end of the string, but
+ * then shifts out the part it's not allowed to read. Because the
+ * string is aligned, the illegal read is in the same word as the
+ * rest of the string. Every machine with memory protection I've seen
+ * does it on word boundaries, so is OK with this. But VALGRIND will
+ * still catch it and complain. The masking trick does make the hash
+ * noticeably faster for short strings (like English words).
+ */
+#ifndef VALGRIND
+
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break;
+ case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break;
+ case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break;
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=k[1]&0xffffff00; a+=k[0]; break;
+ case 6 : b+=k[1]&0xffff0000; a+=k[0]; break;
+ case 5 : b+=k[1]&0xff000000; a+=k[0]; break;
+ case 4 : a+=k[0]; break;
+ case 3 : a+=k[0]&0xffffff00; break;
+ case 2 : a+=k[0]&0xffff0000; break;
+ case 1 : a+=k[0]&0xff000000; break;
+ case 0 : return c; /* zero length strings require no mixing */
+ }
+
+#else /* make valgrind happy */
+
+ k8 = (const uint8_t *)k;
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t)k8[10])<<8; /* fall through */
+ case 10: c+=((uint32_t)k8[9])<<16; /* fall through */
+ case 9 : c+=((uint32_t)k8[8])<<24; /* fall through */
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t)k8[6])<<8; /* fall through */
+ case 6 : b+=((uint32_t)k8[5])<<16; /* fall through */
+ case 5 : b+=((uint32_t)k8[4])<<24; /* fall through */
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t)k8[2])<<8; /* fall through */
+ case 2 : a+=((uint32_t)k8[1])<<16; /* fall through */
+ case 1 : a+=((uint32_t)k8[0])<<24; break;
+ case 0 : return c;
+ }
+
+#endif /* !VALGRIND */
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += ((uint32_t)k[0])<<24;
+ a += ((uint32_t)k[1])<<16;
+ a += ((uint32_t)k[2])<<8;
+ a += ((uint32_t)k[3]);
+ b += ((uint32_t)k[4])<<24;
+ b += ((uint32_t)k[5])<<16;
+ b += ((uint32_t)k[6])<<8;
+ b += ((uint32_t)k[7]);
+ c += ((uint32_t)k[8])<<24;
+ c += ((uint32_t)k[9])<<16;
+ c += ((uint32_t)k[10])<<8;
+ c += ((uint32_t)k[11]);
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=k[11];
+ case 11: c+=((uint32_t)k[10])<<8;
+ case 10: c+=((uint32_t)k[9])<<16;
+ case 9 : c+=((uint32_t)k[8])<<24;
+ case 8 : b+=k[7];
+ case 7 : b+=((uint32_t)k[6])<<8;
+ case 6 : b+=((uint32_t)k[5])<<16;
+ case 5 : b+=((uint32_t)k[4])<<24;
+ case 4 : a+=k[3];
+ case 3 : a+=((uint32_t)k[2])<<8;
+ case 2 : a+=((uint32_t)k[1])<<16;
+ case 1 : a+=((uint32_t)k[0])<<24;
+ break;
+ case 0 : return c;
+ }
+ }
+
+ final(a,b,c);
+ return c;
+}
+
+#endif /* 0 == currently not used */
+
+#ifdef SELF_TEST
+
+/* used for timings */
+void driver1()
+{
+ uint8_t buf[256];
+ uint32_t i;
+ uint32_t h=0;
+ time_t a,z;
+
+ time(&a);
+ for (i=0; i<256; ++i) buf[i] = 'x';
+ for (i=0; i<1; ++i)
+ {
+ h = hashlittle(&buf[0],1,h);
+ }
+ time(&z);
+ if (z-a > 0) printf("time %d %.8x\n", (int)(z-a), h);
+}
+
+/* check that every input bit changes every output bit half the time */
+#define HASHSTATE 1
+#define HASHLEN 1
+#define MAXPAIR 60
+#define MAXLEN 70
+void driver2()
+{
+ uint8_t qa[MAXLEN+1], qb[MAXLEN+2], *a = &qa[0], *b = &qb[1];
+ uint32_t c[HASHSTATE], d[HASHSTATE], i=0, j=0, k, l, m=0, z;
+ uint32_t e[HASHSTATE],f[HASHSTATE],g[HASHSTATE],h[HASHSTATE];
+ uint32_t x[HASHSTATE],y[HASHSTATE];
+ uint32_t hlen;
+
+ printf("No more than %d trials should ever be needed \n",MAXPAIR/2);
+ for (hlen=0; hlen < MAXLEN; ++hlen)
+ {
+ z=0;
+ for (i=0; i<hlen; ++i) /*----------------------- for each input byte, */
+ {
+ for (j=0; j<8; ++j) /*------------------------ for each input bit, */
+ {
+ for (m=1; m<8; ++m) /*------------ for several possible initvals, */
+ {
+ for (l=0; l<HASHSTATE; ++l)
+ e[l]=f[l]=g[l]=h[l]=x[l]=y[l]=~((uint32_t)0);
+
+ /*---- check that every output bit is affected by that input bit */
+ for (k=0; k<MAXPAIR; k+=2)
+ {
+ uint32_t finished=1;
+ /* keys have one bit different */
+ for (l=0; l<hlen+1; ++l) {a[l] = b[l] = (uint8_t)0;}
+ /* have a and b be two keys differing in only one bit */
+ a[i] ^= (k<<j);
+ a[i] ^= (k>>(8-j));
+ c[0] = hashlittle(a, hlen, m);
+ b[i] ^= ((k+1)<<j);
+ b[i] ^= ((k+1)>>(8-j));
+ d[0] = hashlittle(b, hlen, m);
+ /* check every bit is 1, 0, set, and not set at least once */
+ for (l=0; l<HASHSTATE; ++l)
+ {
+ e[l] &= (c[l]^d[l]);
+ f[l] &= ~(c[l]^d[l]);
+ g[l] &= c[l];
+ h[l] &= ~c[l];
+ x[l] &= d[l];
+ y[l] &= ~d[l];
+ if (e[l]|f[l]|g[l]|h[l]|x[l]|y[l]) finished=0;
+ }
+ if (finished) break;
+ }
+ if (k>z) z=k;
+ if (k==MAXPAIR)
+ {
+ printf("Some bit didn't change: ");
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x ",
+ e[0],f[0],g[0],h[0],x[0],y[0]);
+ printf("i %d j %d m %d len %d\n", i, j, m, hlen);
+ }
+ if (z==MAXPAIR) goto done;
+ }
+ }
+ }
+ done:
+ if (z < MAXPAIR)
+ {
+ printf("Mix success %2d bytes %2d initvals ",i,m);
+ printf("required %d trials\n", z/2);
+ }
+ }
+ printf("\n");
+}
+
+/* Check for reading beyond the end of the buffer and alignment problems */
+void driver3()
+{
+ uint8_t buf[MAXLEN+20], *b;
+ uint32_t len;
+ uint8_t q[] = "This is the time for all good men to come to the aid of their country...";
+ uint32_t h;
+ uint8_t qq[] = "xThis is the time for all good men to come to the aid of their country...";
+ uint32_t i;
+ uint8_t qqq[] = "xxThis is the time for all good men to come to the aid of their country...";
+ uint32_t j;
+ uint8_t qqqq[] = "xxxThis is the time for all good men to come to the aid of their country...";
+ uint32_t ref,x,y;
+ uint8_t *p;
+
+ printf("Endianness. These lines should all be the same (for values filled in):\n");
+ printf("%.8x %.8x %.8x\n",
+ hashword((const uint32_t *)q, (sizeof(q)-1)/4, 13),
+ hashword((const uint32_t *)q, (sizeof(q)-5)/4, 13),
+ hashword((const uint32_t *)q, (sizeof(q)-9)/4, 13));
+ p = q;
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13),
+ hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13),
+ hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13),
+ hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13),
+ hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13),
+ hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13));
+ p = &qq[1];
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13),
+ hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13),
+ hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13),
+ hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13),
+ hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13),
+ hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13));
+ p = &qqq[2];
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13),
+ hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13),
+ hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13),
+ hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13),
+ hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13),
+ hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13));
+ p = &qqqq[3];
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13),
+ hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13),
+ hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13),
+ hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13),
+ hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13),
+ hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13));
+ printf("\n");
+
+ /* check that hashlittle2 and hashlittle produce the same results */
+ i=47; j=0;
+ hashlittle2(q, sizeof(q), &i, &j);
+ if (hashlittle(q, sizeof(q), 47) != i)
+ printf("hashlittle2 and hashlittle mismatch\n");
+
+ /* check that hashword2 and hashword produce the same results */
+ len = raninit;
+ i=47, j=0;
+ hashword2(&len, 1, &i, &j);
+ if (hashword(&len, 1, 47) != i)
+ printf("hashword2 and hashword mismatch %x %x\n",
+ i, hashword(&len, 1, 47));
+
+ /* check hashlittle doesn't read before or after the ends of the string */
+ for (h=0, b=buf+1; h<8; ++h, ++b)
+ {
+ for (i=0; i<MAXLEN; ++i)
+ {
+ len = i;
+ for (j=0; j<i; ++j) *(b+j)=0;
+
+ /* these should all be equal */
+ ref = hashlittle(b, len, (uint32_t)1);
+ *(b+i)=(uint8_t)~0;
+ *(b-1)=(uint8_t)~0;
+ x = hashlittle(b, len, (uint32_t)1);
+ y = hashlittle(b, len, (uint32_t)1);
+ if ((ref != x) || (ref != y))
+ {
+ printf("alignment error: %.8x %.8x %.8x %d %d\n",ref,x,y,
+ h, i);
+ }
+ }
+ }
+}
+
+/* check for problems with nulls */
+ void driver4()
+{
+ uint8_t buf[1];
+ uint32_t h,i,state[HASHSTATE];
+
+
+ buf[0] = ~0;
+ for (i=0; i<HASHSTATE; ++i) state[i] = 1;
+ printf("These should all be different\n");
+ for (i=0, h=0; i<8; ++i)
+ {
+ h = hashlittle(buf, 0, h);
+ printf("%2ld 0-byte strings, hash is %.8x\n", i, h);
+ }
+}
+
+
+int main()
+{
+ driver1(); /* test that the key is hashed: used for timings */
+ driver2(); /* test that whole key is hashed thoroughly */
+ driver3(); /* test that nothing but the key is hashed */
+ driver4(); /* test hashing multiple buffers (all buffers are null) */
+ return 1;
+}
+
+#endif /* SELF_TEST */
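For keys that are already arrays of uint32_t, hashword() avoids the byte shuffling above; a small sketch with an illustrative helper name:

#include "config.h"
#include "util/storage/lookup3.h"

/* hash three 32-bit identifiers in one call; an array of uint32_t
 * guarantees the alignment that hashword() requires */
static uint32_t example_id_hash(uint32_t node_id, uint32_t zone_id,
	uint32_t flags)
{
	uint32_t key[3];
	key[0] = node_id;
	key[1] = zone_id;
	key[2] = flags;
	return hashword(key, 3, 0xab1eba5e); /* length counts uint32_ts, not bytes */
}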
diff --git a/3rdParty/Unbound/src/src/util/storage/lookup3.h b/3rdParty/Unbound/src/src/util/storage/lookup3.h
new file mode 100644
index 0000000..06211fd
--- /dev/null
+++ b/3rdParty/Unbound/src/src/util/storage/lookup3.h
@@ -0,0 +1,71 @@
+/*
+ * util/storage/lookup3.h - header file for hashing functions.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains header definitions for the hash functions we use.
+ * The hash functions are public domain (see lookup3.c).
+ */
+
+#ifndef UTIL_STORAGE_LOOKUP3_H
+#define UTIL_STORAGE_LOOKUP3_H
+
+/**
+ * Hash key made of 4byte chunks.
+ * @param k: the key, an array of uint32_t values
+ * @param length: the length of the key, in uint32_ts
+ * @param initval: the previous hash, or an arbitrary value
+ * @return: hash value.
+ */
+uint32_t hashword(const uint32_t *k, size_t length, uint32_t initval);
+
+/**
+ * Hash key data.
+ * @param k: the key, array of uint8_t
+ * @param length: the length of the key, in uint8_ts
+ * @param initval: the previous hash, or an arbitrary value
+ * @return: hash value.
+ */
+uint32_t hashlittle(const void *k, size_t length, uint32_t initval);
+
+/**
+ * Set the randomisation initial value, set this before threads start,
+ * and before hashing stuff (because it changes subsequent results).
+ * @param v: value
+ */
+void hash_set_raninit(uint32_t v);
+
+#endif /* UTIL_STORAGE_LOOKUP3_H */
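A minimal sketch of how these functions are typically called: the randomised initial value is set once at startup (time() is only a stand-in for a real entropy source), and a multi-part key is hashed by feeding the previous 32-bit result back in as the next initval, as the lookup3.c comments suggest; example_setup and example_query_hash are illustrative names.

#include "config.h"
#include "util/storage/lookup3.h"
#include <time.h>

/* call once before threads start and before anything is hashed */
static void example_setup(void)
{
	hash_set_raninit((uint32_t)time(NULL)); /* stand-in for real entropy */
}

/* hash a query key made of a wireformat name plus type and class */
static uint32_t example_query_hash(uint8_t* name, size_t namelen,
	uint16_t qtype, uint16_t qclass)
{
	uint16_t tc[2];
	uint32_t h;
	tc[0] = qtype;
	tc[1] = qclass;
	h = hashlittle(tc, sizeof(tc), 0x12345678); /* arbitrary initval */
	h = hashlittle(name, namelen, h);           /* chain the two buffers */
	return h;
}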
diff --git a/3rdParty/Unbound/src/src/util/storage/lruhash.c b/3rdParty/Unbound/src/src/util/storage/lruhash.c
new file mode 100644
index 0000000..c22278d
--- /dev/null
+++ b/3rdParty/Unbound/src/src/util/storage/lruhash.c
@@ -0,0 +1,544 @@
+/*
+ * util/storage/lruhash.c - hashtable, hash function, LRU keeping.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains a hashtable with LRU keeping of entries.
+ *
+ */
+
+#include "config.h"
+#include "util/storage/lruhash.h"
+#include "util/fptr_wlist.h"
+
+void
+bin_init(struct lruhash_bin* array, size_t size)
+{
+ size_t i;
+#ifdef THREADS_DISABLED
+ (void)array;
+#endif
+ for(i=0; i<size; i++) {
+ lock_quick_init(&array[i].lock);
+ lock_protect(&array[i].lock, &array[i],
+ sizeof(struct lruhash_bin));
+ }
+}
+
+struct lruhash*
+lruhash_create(size_t start_size, size_t maxmem, lruhash_sizefunc_t sizefunc,
+ lruhash_compfunc_t compfunc, lruhash_delkeyfunc_t delkeyfunc,
+ lruhash_deldatafunc_t deldatafunc, void* arg)
+{
+ struct lruhash* table = (struct lruhash*)calloc(1,
+ sizeof(struct lruhash));
+ if(!table)
+ return NULL;
+ lock_quick_init(&table->lock);
+ table->sizefunc = sizefunc;
+ table->compfunc = compfunc;
+ table->delkeyfunc = delkeyfunc;
+ table->deldatafunc = deldatafunc;
+ table->cb_arg = arg;
+ table->size = start_size;
+ table->size_mask = (int)(start_size-1);
+ table->lru_start = NULL;
+ table->lru_end = NULL;
+ table->num = 0;
+ table->space_used = 0;
+ table->space_max = maxmem;
+ table->array = calloc(table->size, sizeof(struct lruhash_bin));
+ if(!table->array) {
+ lock_quick_destroy(&table->lock);
+ free(table);
+ return NULL;
+ }
+ bin_init(table->array, table->size);
+ lock_protect(&table->lock, table, sizeof(*table));
+ lock_protect(&table->lock, table->array,
+ table->size*sizeof(struct lruhash_bin));
+ return table;
+}
+
+void
+bin_delete(struct lruhash* table, struct lruhash_bin* bin)
+{
+ struct lruhash_entry* p, *np;
+ void *d;
+ if(!bin)
+ return;
+ lock_quick_destroy(&bin->lock);
+ p = bin->overflow_list;
+ bin->overflow_list = NULL;
+ while(p) {
+ np = p->overflow_next;
+ d = p->data;
+ (*table->delkeyfunc)(p->key, table->cb_arg);
+ (*table->deldatafunc)(d, table->cb_arg);
+ p = np;
+ }
+}
+
+void
+bin_split(struct lruhash* table, struct lruhash_bin* newa,
+ int newmask)
+{
+ size_t i;
+ struct lruhash_entry *p, *np;
+ struct lruhash_bin* newbin;
+ /* move entries to new table. Notice that since hash x is mapped to
+ * bin x & mask, and the new mask uses one more bit, all entries in
+ * one bin will go into either the old bin or bin | newbit */
+#ifndef THREADS_DISABLED
+ int newbit = newmask - table->size_mask;
+#endif
+ /* so, really, this task could also be threaded, per bin. */
+ /* LRU list is not changed */
+ for(i=0; i<table->size; i++)
+ {
+ lock_quick_lock(&table->array[i].lock);
+ p = table->array[i].overflow_list;
+ /* lock both destination bins */
+ lock_quick_lock(&newa[i].lock);
+ lock_quick_lock(&newa[newbit|i].lock);
+ while(p) {
+ np = p->overflow_next;
+ /* link into correct new bin */
+ newbin = &newa[p->hash & newmask];
+ p->overflow_next = newbin->overflow_list;
+ newbin->overflow_list = p;
+ p=np;
+ }
+ lock_quick_unlock(&newa[i].lock);
+ lock_quick_unlock(&newa[newbit|i].lock);
+ lock_quick_unlock(&table->array[i].lock);
+ }
+}
+
+void
+lruhash_delete(struct lruhash* table)
+{
+ size_t i;
+ if(!table)
+ return;
+ /* delete lock on hashtable to force check its OK */
+ lock_quick_destroy(&table->lock);
+ for(i=0; i<table->size; i++)
+ bin_delete(table, &table->array[i]);
+ free(table->array);
+ free(table);
+}
+
+void
+bin_overflow_remove(struct lruhash_bin* bin, struct lruhash_entry* entry)
+{
+ struct lruhash_entry* p = bin->overflow_list;
+ struct lruhash_entry** prevp = &bin->overflow_list;
+ while(p) {
+ if(p == entry) {
+ *prevp = p->overflow_next;
+ return;
+ }
+ prevp = &p->overflow_next;
+ p = p->overflow_next;
+ }
+}
+
+void
+reclaim_space(struct lruhash* table, struct lruhash_entry** list)
+{
+ struct lruhash_entry* d;
+ struct lruhash_bin* bin;
+ log_assert(table);
+ /* does not delete MRU entry, so table will not be empty. */
+ while(table->num > 1 && table->space_used > table->space_max) {
+		/* notice that since we hold the hashtable lock, nobody
+		   can change the lru chain, so it cannot be deleted
+		   underneath us. We still need the hashbin and entry write
+		   lock to make sure we flush all users away from the entry.
+		   That is unlikely, since it is LRU; if someone had gotten
+		   a rdlock it would have been moved to the front, but we
+		   lock to be sure. */
+ d = table->lru_end;
+		/* specialised delete from the end of the doubly linked
+		   list; we know num>1, so there is a previous lru entry. */
+ log_assert(d && d->lru_prev);
+ table->lru_end = d->lru_prev;
+ d->lru_prev->lru_next = NULL;
+ /* schedule entry for deletion */
+ bin = &table->array[d->hash & table->size_mask];
+ table->num --;
+ lock_quick_lock(&bin->lock);
+ bin_overflow_remove(bin, d);
+ d->overflow_next = *list;
+ *list = d;
+ lock_rw_wrlock(&d->lock);
+ table->space_used -= table->sizefunc(d->key, d->data);
+ if(table->markdelfunc)
+ (*table->markdelfunc)(d->key);
+ lock_rw_unlock(&d->lock);
+ lock_quick_unlock(&bin->lock);
+ }
+}
+
+struct lruhash_entry*
+bin_find_entry(struct lruhash* table,
+ struct lruhash_bin* bin, hashvalue_t hash, void* key)
+{
+ struct lruhash_entry* p = bin->overflow_list;
+ while(p) {
+ if(p->hash == hash && table->compfunc(p->key, key) == 0)
+ return p;
+ p = p->overflow_next;
+ }
+ return NULL;
+}
+
+void
+table_grow(struct lruhash* table)
+{
+ struct lruhash_bin* newa;
+ int newmask;
+ size_t i;
+ if(table->size_mask == (int)(((size_t)-1)>>1)) {
+ log_err("hash array malloc: size_t too small");
+ return;
+ }
+	/* try to allocate the new array; if that fails, keep the old one */
+ newa = calloc(table->size*2, sizeof(struct lruhash_bin));
+ if(!newa) {
+ log_err("hash grow: malloc failed");
+		/* continue with the smaller array, though it is slower. */
+ return;
+ }
+ bin_init(newa, table->size*2);
+ newmask = (table->size_mask << 1) | 1;
+ bin_split(table, newa, newmask);
+ /* delete the old bins */
+ lock_unprotect(&table->lock, table->array);
+ for(i=0; i<table->size; i++) {
+ lock_quick_destroy(&table->array[i].lock);
+ }
+ free(table->array);
+
+ table->size *= 2;
+ table->size_mask = newmask;
+ table->array = newa;
+ lock_protect(&table->lock, table->array,
+ table->size*sizeof(struct lruhash_bin));
+ return;
+}
+
+void
+lru_front(struct lruhash* table, struct lruhash_entry* entry)
+{
+ entry->lru_prev = NULL;
+ entry->lru_next = table->lru_start;
+ if(!table->lru_start)
+ table->lru_end = entry;
+ else table->lru_start->lru_prev = entry;
+ table->lru_start = entry;
+}
+
+void
+lru_remove(struct lruhash* table, struct lruhash_entry* entry)
+{
+ if(entry->lru_prev)
+ entry->lru_prev->lru_next = entry->lru_next;
+ else table->lru_start = entry->lru_next;
+ if(entry->lru_next)
+ entry->lru_next->lru_prev = entry->lru_prev;
+ else table->lru_end = entry->lru_prev;
+}
+
+void
+lru_touch(struct lruhash* table, struct lruhash_entry* entry)
+{
+ log_assert(table && entry);
+ if(entry == table->lru_start)
+ return; /* nothing to do */
+ /* remove from current lru position */
+ lru_remove(table, entry);
+ /* add at front */
+ lru_front(table, entry);
+}
+
+void
+lruhash_insert(struct lruhash* table, hashvalue_t hash,
+ struct lruhash_entry* entry, void* data, void* cb_arg)
+{
+ struct lruhash_bin* bin;
+ struct lruhash_entry* found, *reclaimlist=NULL;
+ size_t need_size;
+ fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
+ fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
+ fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
+ fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
+ fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
+ need_size = table->sizefunc(entry->key, data);
+ if(cb_arg == NULL) cb_arg = table->cb_arg;
+
+ /* find bin */
+ lock_quick_lock(&table->lock);
+ bin = &table->array[hash & table->size_mask];
+ lock_quick_lock(&bin->lock);
+
+ /* see if entry exists already */
+ if(!(found=bin_find_entry(table, bin, hash, entry->key))) {
+ /* if not: add to bin */
+ entry->overflow_next = bin->overflow_list;
+ bin->overflow_list = entry;
+ lru_front(table, entry);
+ table->num++;
+ table->space_used += need_size;
+ } else {
+ /* if so: update data - needs a writelock */
+ table->space_used += need_size -
+ (*table->sizefunc)(found->key, found->data);
+ (*table->delkeyfunc)(entry->key, cb_arg);
+ lru_touch(table, found);
+ lock_rw_wrlock(&found->lock);
+ (*table->deldatafunc)(found->data, cb_arg);
+ found->data = data;
+ lock_rw_unlock(&found->lock);
+ }
+ lock_quick_unlock(&bin->lock);
+ if(table->space_used > table->space_max)
+ reclaim_space(table, &reclaimlist);
+ if(table->num >= table->size)
+ table_grow(table);
+ lock_quick_unlock(&table->lock);
+
+ /* finish reclaim if any (outside of critical region) */
+ while(reclaimlist) {
+ struct lruhash_entry* n = reclaimlist->overflow_next;
+ void* d = reclaimlist->data;
+ (*table->delkeyfunc)(reclaimlist->key, cb_arg);
+ (*table->deldatafunc)(d, cb_arg);
+ reclaimlist = n;
+ }
+}
+
+struct lruhash_entry*
+lruhash_lookup(struct lruhash* table, hashvalue_t hash, void* key, int wr)
+{
+ struct lruhash_entry* entry;
+ struct lruhash_bin* bin;
+ fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
+
+ lock_quick_lock(&table->lock);
+ bin = &table->array[hash & table->size_mask];
+ lock_quick_lock(&bin->lock);
+ if((entry=bin_find_entry(table, bin, hash, key)))
+ lru_touch(table, entry);
+ lock_quick_unlock(&table->lock);
+
+ if(entry) {
+ if(wr) { lock_rw_wrlock(&entry->lock); }
+ else { lock_rw_rdlock(&entry->lock); }
+ }
+ lock_quick_unlock(&bin->lock);
+ return entry;
+}
+
+void
+lruhash_remove(struct lruhash* table, hashvalue_t hash, void* key)
+{
+ struct lruhash_entry* entry;
+ struct lruhash_bin* bin;
+ void *d;
+ fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
+ fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
+ fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
+ fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
+ fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
+
+ lock_quick_lock(&table->lock);
+ bin = &table->array[hash & table->size_mask];
+ lock_quick_lock(&bin->lock);
+ if((entry=bin_find_entry(table, bin, hash, key))) {
+ bin_overflow_remove(bin, entry);
+ lru_remove(table, entry);
+ } else {
+ lock_quick_unlock(&table->lock);
+ lock_quick_unlock(&bin->lock);
+ return;
+ }
+ table->num--;
+ table->space_used -= (*table->sizefunc)(entry->key, entry->data);
+ lock_quick_unlock(&table->lock);
+ lock_rw_wrlock(&entry->lock);
+ if(table->markdelfunc)
+ (*table->markdelfunc)(entry->key);
+ lock_rw_unlock(&entry->lock);
+ lock_quick_unlock(&bin->lock);
+ /* finish removal */
+ d = entry->data;
+ (*table->delkeyfunc)(entry->key, table->cb_arg);
+ (*table->deldatafunc)(d, table->cb_arg);
+}
+
+/** clear bin, respecting locks; does not update space accounting or LRU */
+static void
+bin_clear(struct lruhash* table, struct lruhash_bin* bin)
+{
+ struct lruhash_entry* p, *np;
+ void *d;
+ lock_quick_lock(&bin->lock);
+ p = bin->overflow_list;
+ while(p) {
+ lock_rw_wrlock(&p->lock);
+ np = p->overflow_next;
+ d = p->data;
+ if(table->markdelfunc)
+ (*table->markdelfunc)(p->key);
+ lock_rw_unlock(&p->lock);
+ (*table->delkeyfunc)(p->key, table->cb_arg);
+ (*table->deldatafunc)(d, table->cb_arg);
+ p = np;
+ }
+ bin->overflow_list = NULL;
+ lock_quick_unlock(&bin->lock);
+}
+
+void
+lruhash_clear(struct lruhash* table)
+{
+ size_t i;
+ if(!table)
+ return;
+ fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
+ fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
+ fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
+
+ lock_quick_lock(&table->lock);
+ for(i=0; i<table->size; i++) {
+ bin_clear(table, &table->array[i]);
+ }
+ table->lru_start = NULL;
+ table->lru_end = NULL;
+ table->num = 0;
+ table->space_used = 0;
+ lock_quick_unlock(&table->lock);
+}
+
+void
+lruhash_status(struct lruhash* table, const char* id, int extended)
+{
+ lock_quick_lock(&table->lock);
+ log_info("%s: %u entries, memory %u / %u",
+ id, (unsigned)table->num, (unsigned)table->space_used,
+ (unsigned)table->space_max);
+ log_info(" itemsize %u, array %u, mask %d",
+ (unsigned)(table->num? table->space_used/table->num : 0),
+ (unsigned)table->size, table->size_mask);
+ if(extended) {
+ size_t i;
+ int min=(int)table->size*2, max=-2;
+ for(i=0; i<table->size; i++) {
+ int here = 0;
+ struct lruhash_entry *en;
+ lock_quick_lock(&table->array[i].lock);
+ en = table->array[i].overflow_list;
+ while(en) {
+ here ++;
+ en = en->overflow_next;
+ }
+ lock_quick_unlock(&table->array[i].lock);
+ if(extended >= 2)
+ log_info("bin[%d] %d", (int)i, here);
+ if(here > max) max = here;
+ if(here < min) min = here;
+ }
+ log_info(" bin min %d, avg %.2lf, max %d", min,
+ (double)table->num/(double)table->size, max);
+ }
+ lock_quick_unlock(&table->lock);
+}
+
+size_t
+lruhash_get_mem(struct lruhash* table)
+{
+ size_t s;
+ lock_quick_lock(&table->lock);
+ s = sizeof(struct lruhash) + table->space_used;
+#ifdef USE_THREAD_DEBUG
+ if(table->size != 0) {
+ size_t i;
+ for(i=0; i<table->size; i++)
+ s += sizeof(struct lruhash_bin) +
+ lock_get_mem(&table->array[i].lock);
+ }
+#else /* not USE_THREAD_DEBUG */
+ if(table->size != 0)
+ s += (table->size)*(sizeof(struct lruhash_bin) +
+ lock_get_mem(&table->array[0].lock));
+#endif
+ lock_quick_unlock(&table->lock);
+ s += lock_get_mem(&table->lock);
+ return s;
+}
+
+void
+lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_t md)
+{
+ lock_quick_lock(&table->lock);
+ table->markdelfunc = md;
+ lock_quick_unlock(&table->lock);
+}
+
+void
+lruhash_traverse(struct lruhash* h, int wr,
+ void (*func)(struct lruhash_entry*, void*), void* arg)
+{
+ size_t i;
+ struct lruhash_entry* e;
+
+ lock_quick_lock(&h->lock);
+ for(i=0; i<h->size; i++) {
+ lock_quick_lock(&h->array[i].lock);
+ for(e = h->array[i].overflow_list; e; e = e->overflow_next) {
+ if(wr) {
+ lock_rw_wrlock(&e->lock);
+ } else {
+ lock_rw_rdlock(&e->lock);
+ }
+ (*func)(e, arg);
+ lock_rw_unlock(&e->lock);
+ }
+ lock_quick_unlock(&h->array[i].lock);
+ }
+ lock_quick_unlock(&h->lock);
+}
diff --git a/3rdParty/Unbound/src/src/util/storage/lruhash.h b/3rdParty/Unbound/src/src/util/storage/lruhash.h
new file mode 100644
index 0000000..d0efe2d
--- /dev/null
+++ b/3rdParty/Unbound/src/src/util/storage/lruhash.h
@@ -0,0 +1,414 @@
+/*
+ * util/storage/lruhash.h - hashtable, hash function, LRU keeping.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains a hashtable with LRU keeping of entries.
+ *
+ * The hash table keeps a maximum memory size. Old entries are removed
+ * to make space for new entries.
+ *
+ * The locking strategy is as follows:
+ *	o since (almost) every read also implies an LRU update, the
+ *	  hashtable lock is a spinlock, not an rwlock.
+ * o the idea is to move every thread through the hash lock quickly,
+ * so that the next thread can access the lookup table.
+ * o User performs hash function.
+ *
+ * For read:
+ * o lock hashtable.
+ * o lookup hash bin.
+ * o lock hash bin.
+ *	o find entry (if failed, unlock hash, unlock bin, exit).
+ * o swizzle pointers for LRU update.
+ * o unlock hashtable.
+ * o lock entry (rwlock).
+ * o unlock hash bin.
+ * o work on entry.
+ * o unlock entry.
+ *
+ * To update an entry, gain writelock and change the entry.
+ * (the entry must keep the same hashvalue, so only a data update.)
+ * (you cannot upgrade a readlock to a writelock, because the item may
+ * be deleted; that would cause race conditions. So instead, unlock and
+ * relookup it in the hashtable.)
+ *
+ * To delete an entry:
+ * o unlock the entry if you hold the lock already.
+ * o lock hashtable.
+ * o lookup hash bin.
+ * o lock hash bin.
+ *	o find entry (if failed, unlock hash, unlock bin, exit).
+ * o remove entry from hashtable bin overflow chain.
+ * o unlock hashtable.
+ * o lock entry (writelock).
+ * o unlock hash bin.
+ * o unlock entry (nobody else should be waiting for this lock,
+ * since you removed it from hashtable, and you got writelock while
+ * holding the hashbinlock so you are the only one.)
+ * Note you are only allowed to obtain a lock while holding hashbinlock.
+ * o delete entry.
+ *
+ * The above sequence is:
+ *	o race free; it works with read, write and delete.
+ *	o but it has a queue: imagine someone needing a writelock on an item
+ *	  while there are still readlocks. The writelocker waits, but holds
+ *	  the hashbinlock. The next thread that comes in and needs the same
+ *	  hashbin will wait for that lock while holding the hashtable lock,
+ *	  thus halting the entire system on the hashtable.
+ *	  This is because of the delete protection.
+ *	  Readlocks are easier on the rwlock on entries.
+ *	  While the writer is holding the writelock, similar problems happen
+ *	  with a reader or writer needing the same item.
+ *	  The scenario requires more than three threads.
+ *	o so the queue length is 3 threads in a bad situation; the fourth is
+ *	  unable to use the hashtable.
+ *
+ * If you need to acquire locks on multiple items from the hashtable:
+ * o you MUST release all locks on items from the hashtable before
+ * doing the next lookup/insert/delete/whatever.
+ * o To acquire multiple items you should use a special routine that
+ * obtains the locks on those multiple items in one go.
+ */
+
+#ifndef UTIL_STORAGE_LRUHASH_H
+#define UTIL_STORAGE_LRUHASH_H
+#include "util/locks.h"
+struct lruhash_bin;
+struct lruhash_entry;
+
+/** default start size for hash arrays */
+#define HASH_DEFAULT_STARTARRAY 1024 /* entries in array */
+/** default max memory for hash arrays */
+#define HASH_DEFAULT_MAXMEM 4*1024*1024 /* bytes */
+
+/** the type of a hash value */
+typedef uint32_t hashvalue_t;
+
+/**
+ * Type of function that calculates the size of an entry.
+ * Result must include the size of struct lruhash_entry.
+ * Keys that are identical must also calculate to the same size.
+ * size = func(key, data).
+ */
+typedef size_t (*lruhash_sizefunc_t)(void*, void*);
+
+/** type of function that compares two keys. return 0 if equal. */
+typedef int (*lruhash_compfunc_t)(void*, void*);
+
+/** old keys are deleted.
+ * The RRset type has to revoke its ID number; markdel() is used first.
+ * This function is called: func(key, userarg) */
+typedef void (*lruhash_delkeyfunc_t)(void*, void*);
+
+/** old data is deleted. This function is called: func(data, userarg). */
+typedef void (*lruhash_deldatafunc_t)(void*, void*);
+
+/** mark a key as pending to be deleted (and not to be used by anyone).
+ * called: func(key) */
+typedef void (*lruhash_markdelfunc_t)(void*);
+
+/**
+ * Hash table that keeps LRU list of entries.
+ */
+struct lruhash {
+ /** lock for exclusive access, to the lookup array */
+ lock_quick_t lock;
+ /** the size function for entries in this table */
+ lruhash_sizefunc_t sizefunc;
+ /** the compare function for entries in this table. */
+ lruhash_compfunc_t compfunc;
+ /** how to delete keys. */
+ lruhash_delkeyfunc_t delkeyfunc;
+ /** how to delete data. */
+ lruhash_deldatafunc_t deldatafunc;
+ /** how to mark a key pending deletion */
+ lruhash_markdelfunc_t markdelfunc;
+ /** user argument for user functions */
+ void* cb_arg;
+
+ /** the size of the lookup array */
+ size_t size;
+ /** size bitmask - since size is a power of 2 */
+ int size_mask;
+ /** lookup array of bins */
+ struct lruhash_bin* array;
+
+ /** the lru list, start and end, noncyclical double linked list. */
+ struct lruhash_entry* lru_start;
+ /** lru list end item (least recently used) */
+ struct lruhash_entry* lru_end;
+
+ /** the number of entries in the hash table. */
+ size_t num;
+ /** the amount of space used, roughly the number of bytes in use. */
+ size_t space_used;
+ /** the amount of space the hash table is maximally allowed to use. */
+ size_t space_max;
+};
+
+/**
+ * A single bin with a linked list of entries in it.
+ */
+struct lruhash_bin {
+ /**
+ * Lock for exclusive access to the linked list
+ * This lock makes deletion of items safe in this overflow list.
+ */
+ lock_quick_t lock;
+ /** linked list of overflow entries */
+ struct lruhash_entry* overflow_list;
+};
+
+/**
+ * An entry into the hash table.
+ * To change overflow_next you need to hold the bin lock.
+ * To change the lru items you need to hold the hashtable lock.
+ * This structure is designed to be part of the key struct, so that the key
+ * pointer gives access to the surrounding structure. Data should be
+ * allocated on its own.
+ */
+struct lruhash_entry {
+ /**
+ * rwlock for access to the contents of the entry
+ * Note that it does _not_ cover the lru_ and overflow_ ptrs.
+ * Even with a writelock, you cannot change hash and key.
+ * You need to delete it to change hash or key.
+ */
+ lock_rw_t lock;
+ /** next entry in overflow chain. Covered by hashlock and binlock. */
+ struct lruhash_entry* overflow_next;
+ /** next entry in lru chain. covered by hashlock. */
+ struct lruhash_entry* lru_next;
+ /** prev entry in lru chain. covered by hashlock. */
+ struct lruhash_entry* lru_prev;
+ /** hash value of the key. It may not change, until entry deleted. */
+ hashvalue_t hash;
+ /** key */
+ void* key;
+ /** data */
+ void* data;
+};
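+
+/*
+ * Example (editor's sketch, not part of the upstream API): a hypothetical
+ * user key struct that embeds struct lruhash_entry, with matching callbacks.
+ * The names mykey, mydata and my_* are made up for illustration.
+ *
+ *	struct mydata { int value; };
+ *	struct mykey {
+ *		struct lruhash_entry entry;
+ *		int id;
+ *	};
+ *	static size_t my_size(void* key, void* data) {
+ *		(void)key; (void)data;
+ *		return sizeof(struct mykey) + sizeof(struct mydata);
+ *	}
+ *	static int my_comp(void* k1, void* k2) {
+ *		int a = ((struct mykey*)k1)->id;
+ *		int b = ((struct mykey*)k2)->id;
+ *		return (a == b) ? 0 : (a < b ? -1 : 1);
+ *	}
+ *	static void my_delkey(void* key, void* arg) {
+ *		(void)arg;
+ *		lock_rw_destroy(&((struct mykey*)key)->entry.lock);
+ *		free(key);
+ *	}
+ *	static void my_deldata(void* data, void* arg) {
+ *		(void)arg;
+ *		free(data);
+ *	}
+ *
+ * Note that my_size includes the embedded struct lruhash_entry via
+ * sizeof(struct mykey), as required, and my_delkey frees the entry as part
+ * of the key, as described for lruhash_create below.
+ */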
+
+/**
+ * Create new hash table.
+ * @param start_size: size of hashtable array at start, must be power of 2.
+ * @param maxmem: maximum amount of memory this table is allowed to use.
+ * @param sizefunc: calculates memory usage of entries.
+ * @param compfunc: compares entries, 0 on equality.
+ * @param delkeyfunc: deletes key.
+ * Calling both delkey and deldata will also free the struct lruhash_entry.
+ * Make it part of the key structure and delete it in delkeyfunc.
+ * @param deldatafunc: deletes data.
+ * @param arg: user argument that is passed to user function calls.
+ * @return: new hash table or NULL on malloc failure.
+ */
+struct lruhash* lruhash_create(size_t start_size, size_t maxmem,
+ lruhash_sizefunc_t sizefunc, lruhash_compfunc_t compfunc,
+ lruhash_delkeyfunc_t delkeyfunc, lruhash_deldatafunc_t deldatafunc,
+ void* arg);
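+
+/*
+ * Example (editor's sketch): creating and destroying a table with the
+ * hypothetical my_* callbacks from the example above.
+ *
+ *	struct lruhash* table = lruhash_create(HASH_DEFAULT_STARTARRAY,
+ *		HASH_DEFAULT_MAXMEM, my_size, my_comp, my_delkey,
+ *		my_deldata, NULL);
+ *	if(!table)
+ *		... handle malloc failure ...
+ *	...
+ *	lruhash_delete(table); ... also deletes remaining entries ...
+ */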
+
+/**
+ * Delete hash table. Entries are all deleted.
+ * @param table: to delete.
+ */
+void lruhash_delete(struct lruhash* table);
+
+/**
+ * Clear hash table. Entries are all deleted, while locking them before
+ * doing so. At the end the table is empty.
+ * @param table: to make empty.
+ */
+void lruhash_clear(struct lruhash* table);
+
+/**
+ * Insert a new element into the hashtable.
+ * If key is already present data pointer in that entry is updated.
+ * The space calculation function is called with the key, data.
+ * If necessary the least recently used entries are deleted to make space.
+ * If necessary the hash array is grown.
+ *
+ * @param table: hash table.
+ * @param hash: hash value. User calculates the hash.
+ * @param entry: identifies the entry.
+ *	If the key is already present, this entry->key is deleted immediately
+ *	(only the key; its data pointer is not touched). The given data is
+ *	put into the existing entry, and the old data of the existing entry
+ *	is freed.
+ * @param data: the data.
+ * @param cb_override: if not NULL, overrides the cb_arg for the delete
+ *	functions.
+ */
+void lruhash_insert(struct lruhash* table, hashvalue_t hash,
+ struct lruhash_entry* entry, void* data, void* cb_override);
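+
+/*
+ * Example (editor's sketch, hypothetical mykey/mydata from above): inserting
+ * an element. The caller computes the hash, for instance with hashlittle()
+ * from util/storage/lookup3.h (assumed here).
+ *
+ *	struct mykey* k = (struct mykey*)calloc(1, sizeof(*k));
+ *	struct mydata* d = (struct mydata*)calloc(1, sizeof(*d));
+ *	if(!k || !d)
+ *		... handle malloc failure ...
+ *	lock_rw_init(&k->entry.lock);
+ *	k->id = 10;
+ *	d->value = 42;
+ *	k->entry.hash = hashlittle(&k->id, sizeof(k->id), 0x12345);
+ *	k->entry.key = k;
+ *	k->entry.data = d;
+ *	lruhash_insert(table, k->entry.hash, &k->entry, d, NULL);
+ *
+ * After the call the table owns k and d; they are freed through the
+ * delkey/deldata callbacks when the entry is replaced, evicted or the
+ * table is deleted.
+ */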
+
+/**
+ * Lookup an entry in the hashtable.
+ * At the end of the function you hold a (read/write)lock on the entry.
+ * The LRU is updated for the entry (if found).
+ * @param table: hash table.
+ * @param hash: hash of key.
+ * @param key: what to look for, compared against entries in overflow chain.
+ * the hash value must be set, and must work with compare function.
+ * @param wr: set to true if you desire a writelock on the entry.
+ * with a writelock you can update the data part.
+ * @return: pointer to the entry or NULL. The entry is locked.
+ * The user must unlock the entry when done.
+ */
+struct lruhash_entry* lruhash_lookup(struct lruhash* table, hashvalue_t hash,
+ void* key, int wr);
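+
+/*
+ * Example (editor's sketch, continuing the hypothetical mykey example):
+ * looking up an element with a readlock. Only the fields used by the
+ * compare function need to be set in the lookup key.
+ *
+ *	struct mykey lookfor;
+ *	struct lruhash_entry* e;
+ *	lookfor.id = 10;
+ *	e = lruhash_lookup(table, hashlittle(&lookfor.id,
+ *		sizeof(lookfor.id), 0x12345), &lookfor, 0);
+ *	if(e) {
+ *		struct mydata* d = (struct mydata*)e->data;
+ *		... read d while holding the readlock ...
+ *		lock_rw_unlock(&e->lock);
+ *	}
+ */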
+
+/**
+ * Touch entry, so it becomes the most recently used in the LRU list.
+ * Caller must hold hash table lock. The entry must be inserted already.
+ * @param table: hash table.
+ * @param entry: entry to make first in LRU.
+ */
+void lru_touch(struct lruhash* table, struct lruhash_entry* entry);
+
+/**
+ * Set the markdelfunction (or NULL)
+ */
+void lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_t md);
+
+/************************* Internal functions ************************/
+/*** these are only exposed for unit tests. ***/
+
+/**
+ * Remove entry from hashtable. Does nothing if not found in hashtable.
+ * Delfunc is called for the entry.
+ * @param table: hash table.
+ * @param hash: hash of key.
+ * @param key: what to look for.
+ */
+void lruhash_remove(struct lruhash* table, hashvalue_t hash, void* key);
+
+/** init the hash bins for the table */
+void bin_init(struct lruhash_bin* array, size_t size);
+
+/** delete the hash bin and entries inside it */
+void bin_delete(struct lruhash* table, struct lruhash_bin* bin);
+
+/**
+ * Find entry in hash bin. You must have locked the bin.
+ * @param table: hash table with function pointers.
+ * @param bin: hash bin to look into.
+ * @param hash: hash value to look for.
+ * @param key: key to look for.
+ * @return: the entry or NULL if not found.
+ */
+struct lruhash_entry* bin_find_entry(struct lruhash* table,
+ struct lruhash_bin* bin, hashvalue_t hash, void* key);
+
+/**
+ * Remove entry from bin overflow chain.
+ * You must have locked the bin.
+ * @param bin: hash bin to look into.
+ * @param entry: entry ptr that needs removal.
+ */
+void bin_overflow_remove(struct lruhash_bin* bin,
+ struct lruhash_entry* entry);
+
+/**
+ * Split hash bin into two new ones. Based on increased size_mask.
+ * Caller must hold hash table lock.
+ * At the end the routine acquires all hashbin locks (in the old array).
+ * This makes it wait for other threads to finish with the bins.
+ * So the bins are ready to be deleted after this function.
+ * @param table: hash table with function pointers.
+ * @param newa: new increased array.
+ * @param newmask: new lookup mask.
+ */
+void bin_split(struct lruhash* table, struct lruhash_bin* newa,
+ int newmask);
+
+/**
+ * Try to make space available by deleting old entries.
+ * Assumes that the lock on the hashtable is being held by caller.
+ * Caller must not hold bin locks.
+ * @param table: hash table.
+ * @param list: list of entries that are to be deleted later.
+ * Entries have been removed from the hash table and writelock is held.
+ */
+void reclaim_space(struct lruhash* table, struct lruhash_entry** list);
+
+/**
+ * Grow the table lookup array. Becomes twice as large.
+ * Caller must hold the hash table lock. Must not hold any bin locks.
+ * Tries to grow; on malloc failure, nothing happens.
+ * @param table: hash table.
+ */
+void table_grow(struct lruhash* table);
+
+/**
+ * Put entry at front of lru. The entry must not be linked into the lru yet.
+ * Caller must hold hash table lock.
+ * @param table: hash table with lru head and tail.
+ * @param entry: entry to make most recently used.
+ */
+void lru_front(struct lruhash* table, struct lruhash_entry* entry);
+
+/**
+ * Remove entry from lru list.
+ * Caller must hold hash table lock.
+ * @param table: hash table with lru head and tail.
+ * @param entry: entry to remove from lru.
+ */
+void lru_remove(struct lruhash* table, struct lruhash_entry* entry);
+
+/**
+ * Output debug info to the log as to state of the hash table.
+ * @param table: hash table.
+ * @param id: string printed with table to identify the hash table.
+ * @param extended: set to true to print statistics on overflow bin lengths.
+ */
+void lruhash_status(struct lruhash* table, const char* id, int extended);
+
+/**
+ * Get memory in use now by the lruhash table.
+ * @param table: hash table. Will be locked before use. And unlocked after.
+ * @return size in bytes.
+ */
+size_t lruhash_get_mem(struct lruhash* table);
+
+/**
+ * Traverse a lruhash. Call back for every element in the table.
+ * @param h: hash table. Locked before use.
+ * @param wr: if true writelock is obtained on element, otherwise readlock.
+ * @param func: function for every element. Do not lock or unlock elements.
+ * @param arg: user argument to func.
+ */
+void lruhash_traverse(struct lruhash* h, int wr,
+ void (*func)(struct lruhash_entry*, void*), void* arg);
+
+#endif /* UTIL_STORAGE_LRUHASH_H */
diff --git a/3rdParty/Unbound/src/src/util/storage/slabhash.c b/3rdParty/Unbound/src/src/util/storage/slabhash.c
new file mode 100644
index 0000000..9c0c507
--- /dev/null
+++ b/3rdParty/Unbound/src/src/util/storage/slabhash.c
@@ -0,0 +1,219 @@
+/*
+ * util/storage/slabhash.c - hashtable consisting of several smaller tables.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * Implementation of hash table that consists of smaller hash tables.
+ * This results in a partitioned lruhash table.
+ * It cannot grow, but that gives it the ability to have multiple
+ * locks. Also this means there are multiple LRU lists.
+ */
+
+#include "config.h"
+#include "util/storage/slabhash.h"
+
+struct slabhash* slabhash_create(size_t numtables, size_t start_size,
+ size_t maxmem, lruhash_sizefunc_t sizefunc,
+ lruhash_compfunc_t compfunc, lruhash_delkeyfunc_t delkeyfunc,
+ lruhash_deldatafunc_t deldatafunc, void* arg)
+{
+ size_t i;
+ struct slabhash* sl = (struct slabhash*)calloc(1,
+ sizeof(struct slabhash));
+ if(!sl) return NULL;
+ sl->size = numtables;
+ log_assert(sl->size > 0);
+ sl->array = (struct lruhash**)calloc(sl->size, sizeof(struct lruhash*));
+ if(!sl->array) {
+ free(sl);
+ return NULL;
+ }
+ sl->mask = (uint32_t)(sl->size - 1);
+ if(sl->mask == 0) {
+ sl->shift = 0;
+ } else {
+ log_assert( (sl->size & sl->mask) == 0
+ /* size must be power of 2 */ );
+ sl->shift = 0;
+ while(!(sl->mask & 0x80000000)) {
+ sl->mask <<= 1;
+ sl->shift ++;
+ }
+ }
+ for(i=0; i<sl->size; i++) {
+ sl->array[i] = lruhash_create(start_size, maxmem / sl->size,
+ sizefunc, compfunc, delkeyfunc, deldatafunc, arg);
+ if(!sl->array[i]) {
+ slabhash_delete(sl);
+ return NULL;
+ }
+ }
+ return sl;
+}
+
+void slabhash_delete(struct slabhash* sl)
+{
+ if(!sl)
+ return;
+ if(sl->array) {
+ size_t i;
+ for(i=0; i<sl->size; i++)
+ lruhash_delete(sl->array[i]);
+ free(sl->array);
+ }
+ free(sl);
+}
+
+void slabhash_clear(struct slabhash* sl)
+{
+ size_t i;
+ if(!sl)
+ return;
+ for(i=0; i<sl->size; i++)
+ lruhash_clear(sl->array[i]);
+}
+
+/** helper routine to calculate the slabhash index */
+static unsigned int
+slab_idx(struct slabhash* sl, hashvalue_t hash)
+{
+ return ((hash & sl->mask) >> sl->shift);
+}
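+
+/*
+ * Editor's note: for example, with numtables=4, slabhash_create turns the
+ * mask 0x00000003 into 0xC0000000 and sets shift=30, so slab_idx() selects
+ * a table by the two most significant bits of the hash. Using the high bits
+ * keeps the slab choice independent of the low bits that each lruhash table
+ * uses for its own bin index (hash & size_mask).
+ */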
+
+void slabhash_insert(struct slabhash* sl, hashvalue_t hash,
+ struct lruhash_entry* entry, void* data, void* arg)
+{
+ lruhash_insert(sl->array[slab_idx(sl, hash)], hash, entry, data, arg);
+}
+
+struct lruhash_entry* slabhash_lookup(struct slabhash* sl,
+ hashvalue_t hash, void* key, int wr)
+{
+ return lruhash_lookup(sl->array[slab_idx(sl, hash)], hash, key, wr);
+}
+
+void slabhash_remove(struct slabhash* sl, hashvalue_t hash, void* key)
+{
+ lruhash_remove(sl->array[slab_idx(sl, hash)], hash, key);
+}
+
+void slabhash_status(struct slabhash* sl, const char* id, int extended)
+{
+ size_t i;
+ char num[17];
+ log_info("Slabhash %s: %u tables mask=%x shift=%d",
+ id, (unsigned)sl->size, (unsigned)sl->mask, sl->shift);
+ for(i=0; i<sl->size; i++) {
+ snprintf(num, sizeof(num), "table %u", (unsigned)i);
+ lruhash_status(sl->array[i], num, extended);
+ }
+}
+
+size_t slabhash_get_size(struct slabhash* sl)
+{
+ size_t i, total = 0;
+ for(i=0; i<sl->size; i++) {
+ lock_quick_lock(&sl->array[i]->lock);
+ total += sl->array[i]->space_max;
+ lock_quick_unlock(&sl->array[i]->lock);
+ }
+ return total;
+}
+
+size_t slabhash_get_mem(struct slabhash* sl)
+{
+ size_t i, total = sizeof(*sl);
+ total += sizeof(struct lruhash*)*sl->size;
+ for(i=0; i<sl->size; i++) {
+ total += lruhash_get_mem(sl->array[i]);
+ }
+ return total;
+}
+
+struct lruhash* slabhash_gettable(struct slabhash* sl, hashvalue_t hash)
+{
+ return sl->array[slab_idx(sl, hash)];
+}
+
+/* test code, here to avoid linking problems with fptr_wlist */
+/** delete key */
+static void delkey(struct slabhash_testkey* k) {
+ lock_rw_destroy(&k->entry.lock); free(k);}
+/** delete data */
+static void deldata(struct slabhash_testdata* d) {free(d);}
+
+size_t test_slabhash_sizefunc(void* ATTR_UNUSED(key), void* ATTR_UNUSED(data))
+{
+ return sizeof(struct slabhash_testkey) +
+ sizeof(struct slabhash_testdata);
+}
+
+int test_slabhash_compfunc(void* key1, void* key2)
+{
+ struct slabhash_testkey* k1 = (struct slabhash_testkey*)key1;
+ struct slabhash_testkey* k2 = (struct slabhash_testkey*)key2;
+ if(k1->id == k2->id)
+ return 0;
+ if(k1->id > k2->id)
+ return 1;
+ return -1;
+}
+
+void test_slabhash_delkey(void* key, void* ATTR_UNUSED(arg))
+{
+ delkey((struct slabhash_testkey*)key);
+}
+
+void test_slabhash_deldata(void* data, void* ATTR_UNUSED(arg))
+{
+ deldata((struct slabhash_testdata*)data);
+}
+
+void slabhash_setmarkdel(struct slabhash* sl, lruhash_markdelfunc_t md)
+{
+ size_t i;
+ for(i=0; i<sl->size; i++) {
+ lruhash_setmarkdel(sl->array[i], md);
+ }
+}
+
+void slabhash_traverse(struct slabhash* sh, int wr,
+ void (*func)(struct lruhash_entry*, void*), void* arg)
+{
+ size_t i;
+ for(i=0; i<sh->size; i++)
+ lruhash_traverse(sh->array[i], wr, func, arg);
+}
diff --git a/3rdParty/Unbound/src/src/util/storage/slabhash.h b/3rdParty/Unbound/src/src/util/storage/slabhash.h
new file mode 100644
index 0000000..93228fe
--- /dev/null
+++ b/3rdParty/Unbound/src/src/util/storage/slabhash.h
@@ -0,0 +1,211 @@
+/*
+ * util/storage/slabhash.h - hashtable consisting of several smaller tables.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * Hash table that consists of smaller hash tables.
+ * It cannot grow, but that gives it the ability to have multiple
+ * locks. Also this means there are multiple LRU lists.
+ */
+
+#ifndef UTIL_STORAGE_SLABHASH_H
+#define UTIL_STORAGE_SLABHASH_H
+#include "util/storage/lruhash.h"
+
+/** default number of slabs */
+#define HASH_DEFAULT_SLABS 4
+
+/**
+ * Hash table formed from several smaller ones.
+ * This results in a partitioned lruhash table, a 'slashtable'.
+ * None of the data inside the slabhash may be altered.
+ * Therefore, no locks are needed to access the structure.
+ */
+struct slabhash {
+ /** the size of the array - must be power of 2 */
+ size_t size;
+ /** size bitmask - uses high bits. */
+ uint32_t mask;
+ /** shift right this many bits to get index into array. */
+ unsigned int shift;
+ /** lookup array of hash tables */
+ struct lruhash** array;
+};
+
+/**
+ * Create new slabbed hash table.
+ * @param numtables: number of hash tables to use, other parameters used to
+ * initialize these smaller hashtables.
+ * @param start_size: size of hashtable array at start, must be power of 2.
+ * @param maxmem: maximum amount of memory this table is allowed to use.
+ * so every table gets maxmem/numtables to use for itself.
+ * @param sizefunc: calculates memory usage of entries.
+ * @param compfunc: compares entries, 0 on equality.
+ * @param delkeyfunc: deletes key.
+ * @param deldatafunc: deletes data.
+ * @param arg: user argument that is passed to user function calls.
+ * @return: new hash table or NULL on malloc failure.
+ */
+struct slabhash* slabhash_create(size_t numtables, size_t start_size,
+ size_t maxmem, lruhash_sizefunc_t sizefunc,
+ lruhash_compfunc_t compfunc, lruhash_delkeyfunc_t delkeyfunc,
+ lruhash_deldatafunc_t deldatafunc, void* arg);
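+
+/*
+ * Example (editor's sketch): creating a slabbed table with the test
+ * callbacks declared at the bottom of this header; real callers pass their
+ * own callbacks, as for lruhash_create.
+ *
+ *	struct slabhash* sh = slabhash_create(HASH_DEFAULT_SLABS,
+ *		HASH_DEFAULT_STARTARRAY, HASH_DEFAULT_MAXMEM,
+ *		test_slabhash_sizefunc, test_slabhash_compfunc,
+ *		test_slabhash_delkey, test_slabhash_deldata, NULL);
+ *	if(!sh)
+ *		... handle malloc failure ...
+ *	...
+ *	slabhash_delete(sh);
+ */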
+
+/**
+ * Delete hash table. Entries are all deleted.
+ * @param table: to delete.
+ */
+void slabhash_delete(struct slabhash* table);
+
+/**
+ * Clear hash table. Entries are all deleted.
+ * @param table: to make empty.
+ */
+void slabhash_clear(struct slabhash* table);
+
+/**
+ * Insert a new element into the hashtable, uses lruhash_insert.
+ * If key is already present data pointer in that entry is updated.
+ *
+ * @param table: hash table.
+ * @param hash: hash value. User calculates the hash.
+ * @param entry: identifies the entry.
+ *	If the key is already present, this entry->key is deleted immediately
+ *	(only the key; its data pointer is not touched). The given data is
+ *	put into the existing entry, and the old data of the existing entry
+ *	is freed.
+ * @param data: the data.
+ * @param cb_override: if not NULL, overrides the cb_arg for the delete
+ *	functions.
+ */
+void slabhash_insert(struct slabhash* table, hashvalue_t hash,
+ struct lruhash_entry* entry, void* data, void* cb_override);
+
+/**
+ * Lookup an entry in the hashtable. Uses lruhash_lookup.
+ * At the end of the function you hold a (read/write)lock on the entry.
+ * The LRU is updated for the entry (if found).
+ * @param table: hash table.
+ * @param hash: hash of key.
+ * @param key: what to look for, compared against entries in overflow chain.
+ * the hash value must be set, and must work with compare function.
+ * @param wr: set to true if you desire a writelock on the entry.
+ * with a writelock you can update the data part.
+ * @return: pointer to the entry or NULL. The entry is locked.
+ * The user must unlock the entry when done.
+ */
+struct lruhash_entry* slabhash_lookup(struct slabhash* table,
+ hashvalue_t hash, void* key, int wr);
+
+/**
+ * Remove entry from hashtable. Does nothing if not found in hashtable.
+ * Delfunc is called for the entry. Uses lruhash_remove.
+ * @param table: hash table.
+ * @param hash: hash of key.
+ * @param key: what to look for.
+ */
+void slabhash_remove(struct slabhash* table, hashvalue_t hash, void* key);
+
+/**
+ * Output debug info to the log as to state of the hash table.
+ * @param table: hash table.
+ * @param id: string printed with table to identify the hash table.
+ * @param extended: set to true to print statistics on overflow bin lengths.
+ */
+void slabhash_status(struct slabhash* table, const char* id, int extended);
+
+/**
+ * Retrieve slab hash total size.
+ * @param table: hash table.
+ * @return size configured as max.
+ */
+size_t slabhash_get_size(struct slabhash* table);
+
+/**
+ * Retrieve slab hash current memory use.
+ * @param table: hash table.
+ * @return memory in use.
+ */
+size_t slabhash_get_mem(struct slabhash* table);
+
+/**
+ * Get lruhash table for a given hash value
+ * @param table: slabbed hash table.
+ * @param hash: hash value.
+ * @return the lru hash table.
+ */
+struct lruhash* slabhash_gettable(struct slabhash* table, hashvalue_t hash);
+
+/**
+ * Set markdel function
+ * @param table: slabbed hash table.
+ * @param md: markdel function ptr.
+ */
+void slabhash_setmarkdel(struct slabhash* table, lruhash_markdelfunc_t md);
+
+/**
+ * Traverse a slabhash.
+ * @param table: slabbed hash table.
+ * @param wr: if true, writelock is obtained, otherwise readlock.
+ * @param func: function to call for every element.
+ * @param arg: user argument to function.
+ */
+void slabhash_traverse(struct slabhash* table, int wr,
+ void (*func)(struct lruhash_entry*, void*), void* arg);
+
+/* --- test representation --- */
+/** test structure contains test key */
+struct slabhash_testkey {
+ /** the key id */
+ int id;
+ /** the entry */
+ struct lruhash_entry entry;
+};
+/** test structure contains test data */
+struct slabhash_testdata {
+ /** data value */
+ int data;
+};
+
+/** test sizefunc for lruhash */
+size_t test_slabhash_sizefunc(void*, void*);
+/** test comparefunc for lruhash */
+int test_slabhash_compfunc(void*, void*);
+/** test delkey for lruhash */
+void test_slabhash_delkey(void*, void*);
+/** test deldata for lruhash */
+void test_slabhash_deldata(void*, void*);
+/* --- end test representation --- */
+
+#endif /* UTIL_STORAGE_SLABHASH_H */