Diffstat (limited to '3rdParty/Unbound/src/src/iterator')
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iter_delegpt.c     494
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iter_delegpt.h     349
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iter_donotq.c      153
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iter_donotq.h      101
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iter_fwd.c         442
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iter_fwd.h         173
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iter_hints.c       488
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iter_hints.h       142
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iter_priv.c        263
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iter_priv.h        110
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iter_resptype.c    286
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iter_resptype.h    127
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iter_scrub.c       751
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iter_scrub.h        69
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iter_utils.c       976
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iter_utils.h       312
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iterator.c        2767
-rw-r--r--  3rdParty/Unbound/src/src/iterator/iterator.h         374
18 files changed, 8377 insertions(+), 0 deletions(-)
diff --git a/3rdParty/Unbound/src/src/iterator/iter_delegpt.c b/3rdParty/Unbound/src/src/iterator/iter_delegpt.c
new file mode 100644
index 0000000..f49048d
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iter_delegpt.c
@@ -0,0 +1,494 @@
+/*
+ * iterator/iter_delegpt.c - delegation point with NS and address information.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file implements the Delegation Point. It contains a list of name servers
+ * and their addresses if known.
+ */
+#include "config.h"
+#include "iterator/iter_delegpt.h"
+#include "services/cache/dns.h"
+#include "util/regional.h"
+#include "util/data/dname.h"
+#include "util/data/packed_rrset.h"
+#include "util/data/msgreply.h"
+#include "util/net_help.h"
+
+struct delegpt*
+delegpt_create(struct regional* region)
+{
+ struct delegpt* dp=(struct delegpt*)regional_alloc(
+ region, sizeof(*dp));
+ if(!dp)
+ return NULL;
+ memset(dp, 0, sizeof(*dp));
+ return dp;
+}
+
+struct delegpt* delegpt_copy(struct delegpt* dp, struct regional* region)
+{
+ struct delegpt* copy = delegpt_create(region);
+ struct delegpt_ns* ns;
+ struct delegpt_addr* a;
+ if(!copy)
+ return NULL;
+ if(!delegpt_set_name(copy, region, dp->name))
+ return NULL;
+ copy->bogus = dp->bogus;
+ copy->has_parent_side_NS = dp->has_parent_side_NS;
+ for(ns = dp->nslist; ns; ns = ns->next) {
+ if(!delegpt_add_ns(copy, region, ns->name, (int)ns->lame))
+ return NULL;
+ copy->nslist->resolved = ns->resolved;
+ copy->nslist->got4 = ns->got4;
+ copy->nslist->got6 = ns->got6;
+ copy->nslist->done_pside4 = ns->done_pside4;
+ copy->nslist->done_pside6 = ns->done_pside6;
+ }
+ for(a = dp->target_list; a; a = a->next_target) {
+ if(!delegpt_add_addr(copy, region, &a->addr, a->addrlen,
+ a->bogus, a->lame))
+ return NULL;
+ }
+ return copy;
+}
+
+int
+delegpt_set_name(struct delegpt* dp, struct regional* region, uint8_t* name)
+{
+ dp->namelabs = dname_count_size_labels(name, &dp->namelen);
+ dp->name = regional_alloc_init(region, name, dp->namelen);
+ return dp->name != 0;
+}
+
+int
+delegpt_add_ns(struct delegpt* dp, struct regional* region, uint8_t* name,
+ int lame)
+{
+ struct delegpt_ns* ns;
+ size_t len;
+ (void)dname_count_size_labels(name, &len);
+ /* slow check for duplicates to avoid counting failures when
+ * adding the same server as a dependency twice */
+ if(delegpt_find_ns(dp, name, len))
+ return 1;
+ ns = (struct delegpt_ns*)regional_alloc(region,
+ sizeof(struct delegpt_ns));
+ if(!ns)
+ return 0;
+ ns->next = dp->nslist;
+ ns->namelen = len;
+ dp->nslist = ns;
+ ns->name = regional_alloc_init(region, name, ns->namelen);
+ ns->resolved = 0;
+ ns->got4 = 0;
+ ns->got6 = 0;
+ ns->lame = (uint8_t)lame;
+ ns->done_pside4 = 0;
+ ns->done_pside6 = 0;
+ return 1;
+}
+
+struct delegpt_ns*
+delegpt_find_ns(struct delegpt* dp, uint8_t* name, size_t namelen)
+{
+ struct delegpt_ns* p = dp->nslist;
+ while(p) {
+ if(namelen == p->namelen &&
+ query_dname_compare(name, p->name) == 0) {
+ return p;
+ }
+ p = p->next;
+ }
+ return NULL;
+}
+
+struct delegpt_addr*
+delegpt_find_addr(struct delegpt* dp, struct sockaddr_storage* addr,
+ socklen_t addrlen)
+{
+ struct delegpt_addr* p = dp->target_list;
+ while(p) {
+ if(sockaddr_cmp_addr(addr, addrlen, &p->addr, p->addrlen)==0) {
+ return p;
+ }
+ p = p->next_target;
+ }
+ return NULL;
+}
+
+int
+delegpt_add_target(struct delegpt* dp, struct regional* region,
+ uint8_t* name, size_t namelen, struct sockaddr_storage* addr,
+ socklen_t addrlen, int bogus, int lame)
+{
+ struct delegpt_ns* ns = delegpt_find_ns(dp, name, namelen);
+ if(!ns) {
+ /* ignore it */
+ return 1;
+ }
+ if(!lame) {
+ if(addr_is_ip6(addr, addrlen))
+ ns->got6 = 1;
+ else ns->got4 = 1;
+ if(ns->got4 && ns->got6)
+ ns->resolved = 1;
+ }
+ return delegpt_add_addr(dp, region, addr, addrlen, bogus, lame);
+}
+
+int
+delegpt_add_addr(struct delegpt* dp, struct regional* region,
+ struct sockaddr_storage* addr, socklen_t addrlen, int bogus,
+ int lame)
+{
+ struct delegpt_addr* a;
+ /* check for duplicates */
+ if((a = delegpt_find_addr(dp, addr, addrlen))) {
+ if(bogus)
+ a->bogus = bogus;
+ if(!lame)
+ a->lame = 0;
+ return 1;
+ }
+
+ a = (struct delegpt_addr*)regional_alloc(region,
+ sizeof(struct delegpt_addr));
+ if(!a)
+ return 0;
+ a->next_target = dp->target_list;
+ dp->target_list = a;
+ a->next_result = 0;
+ a->next_usable = dp->usable_list;
+ dp->usable_list = a;
+ memcpy(&a->addr, addr, addrlen);
+ a->addrlen = addrlen;
+ a->attempts = 0;
+ a->bogus = bogus;
+ a->lame = lame;
+ return 1;
+}
+
+void
+delegpt_count_ns(struct delegpt* dp, size_t* numns, size_t* missing)
+{
+ struct delegpt_ns* ns;
+ *numns = 0;
+ *missing = 0;
+ for(ns = dp->nslist; ns; ns = ns->next) {
+ (*numns)++;
+ if(!ns->resolved)
+ (*missing)++;
+ }
+}
+
+void
+delegpt_count_addr(struct delegpt* dp, size_t* numaddr, size_t* numres,
+ size_t* numavail)
+{
+ struct delegpt_addr* a;
+ *numaddr = 0;
+ *numres = 0;
+ *numavail = 0;
+ for(a = dp->target_list; a; a = a->next_target) {
+ (*numaddr)++;
+ }
+ for(a = dp->result_list; a; a = a->next_result) {
+ (*numres)++;
+ }
+ for(a = dp->usable_list; a; a = a->next_usable) {
+ (*numavail)++;
+ }
+}
+
+void delegpt_log(enum verbosity_value v, struct delegpt* dp)
+{
+ char buf[LDNS_MAX_DOMAINLEN+1];
+ struct delegpt_ns* ns;
+ struct delegpt_addr* a;
+ size_t missing=0, numns=0, numaddr=0, numres=0, numavail=0;
+ if(verbosity < v)
+ return;
+ dname_str(dp->name, buf);
+ if(dp->nslist == NULL && dp->target_list == NULL) {
+ log_info("DelegationPoint<%s>: empty", buf);
+ return;
+ }
+ delegpt_count_ns(dp, &numns, &missing);
+ delegpt_count_addr(dp, &numaddr, &numres, &numavail);
+ log_info("DelegationPoint<%s>: %u names (%u missing), "
+ "%u addrs (%u result, %u avail)%s",
+ buf, (unsigned)numns, (unsigned)missing,
+ (unsigned)numaddr, (unsigned)numres, (unsigned)numavail,
+ (dp->has_parent_side_NS?" parentNS":" cacheNS"));
+ if(verbosity >= VERB_ALGO) {
+ for(ns = dp->nslist; ns; ns = ns->next) {
+ dname_str(ns->name, buf);
+ log_info(" %s %s%s%s%s%s%s%s", buf,
+ (ns->resolved?"*":""),
+ (ns->got4?" A":""), (ns->got6?" AAAA":""),
+ (dp->bogus?" BOGUS":""), (ns->lame?" PARENTSIDE":""),
+ (ns->done_pside4?" PSIDE_A":""),
+ (ns->done_pside6?" PSIDE_AAAA":""));
+ }
+ for(a = dp->target_list; a; a = a->next_target) {
+ const char* str = " ";
+ if(a->bogus && a->lame) str = " BOGUS ADDR_LAME ";
+ else if(a->bogus) str = " BOGUS ";
+ else if(a->lame) str = " ADDR_LAME ";
+ log_addr(VERB_ALGO, str, &a->addr, a->addrlen);
+ }
+ }
+}
+
+void
+delegpt_add_unused_targets(struct delegpt* dp)
+{
+ struct delegpt_addr* usa = dp->usable_list;
+ dp->usable_list = NULL;
+ while(usa) {
+ usa->next_result = dp->result_list;
+ dp->result_list = usa;
+ usa = usa->next_usable;
+ }
+}
+
+size_t
+delegpt_count_targets(struct delegpt* dp)
+{
+ struct delegpt_addr* a;
+ size_t n = 0;
+ for(a = dp->target_list; a; a = a->next_target)
+ n++;
+ return n;
+}
+
+size_t
+delegpt_count_missing_targets(struct delegpt* dp)
+{
+ struct delegpt_ns* ns;
+ size_t n = 0;
+ for(ns = dp->nslist; ns; ns = ns->next)
+ if(!ns->resolved)
+ n++;
+ return n;
+}
+
+/** find NS rrset in given list */
+static struct ub_packed_rrset_key*
+find_NS(struct reply_info* rep, size_t from, size_t to)
+{
+ size_t i;
+ for(i=from; i<to; i++) {
+ if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NS)
+ return rep->rrsets[i];
+ }
+ return NULL;
+}
+
+struct delegpt*
+delegpt_from_message(struct dns_msg* msg, struct regional* region)
+{
+ struct ub_packed_rrset_key* ns_rrset = NULL;
+ struct delegpt* dp;
+ size_t i;
+ /* look for NS records in the authority section... */
+ ns_rrset = find_NS(msg->rep, msg->rep->an_numrrsets,
+ msg->rep->an_numrrsets+msg->rep->ns_numrrsets);
+
+ /* In some cases (even legitimate, perfectly legal cases), the
+ * NS set for the "referral" might be in the answer section. */
+ if(!ns_rrset)
+ ns_rrset = find_NS(msg->rep, 0, msg->rep->an_numrrsets);
+
+ /* If there was no NS rrset in the authority section, then this
+ * wasn't a referral message. (It might not actually be a
+ * referral message anyway) */
+ if(!ns_rrset)
+ return NULL;
+
+ /* If we found any, then Yay! we have a delegation point. */
+ dp = delegpt_create(region);
+ if(!dp)
+ return NULL;
+ dp->has_parent_side_NS = 1; /* created from message */
+ if(!delegpt_set_name(dp, region, ns_rrset->rk.dname))
+ return NULL;
+ if(!delegpt_rrset_add_ns(dp, region, ns_rrset, 0))
+ return NULL;
+
+ /* add glue, A and AAAA in answer and additional section */
+ for(i=0; i<msg->rep->rrset_count; i++) {
+ struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
+ /* skip auth section. FIXME really needed?*/
+ if(msg->rep->an_numrrsets <= i &&
+ i < (msg->rep->an_numrrsets+msg->rep->ns_numrrsets))
+ continue;
+
+ if(ntohs(s->rk.type) == LDNS_RR_TYPE_A) {
+ if(!delegpt_add_rrset_A(dp, region, s, 0))
+ return NULL;
+ } else if(ntohs(s->rk.type) == LDNS_RR_TYPE_AAAA) {
+ if(!delegpt_add_rrset_AAAA(dp, region, s, 0))
+ return NULL;
+ }
+ }
+ return dp;
+}
+
+int
+delegpt_rrset_add_ns(struct delegpt* dp, struct regional* region,
+ struct ub_packed_rrset_key* ns_rrset, int lame)
+{
+ struct packed_rrset_data* nsdata = (struct packed_rrset_data*)
+ ns_rrset->entry.data;
+ size_t i;
+ if(nsdata->security == sec_status_bogus)
+ dp->bogus = 1;
+ for(i=0; i<nsdata->count; i++) {
+ if(nsdata->rr_len[i] < 2+1) continue; /* len + root label */
+ if(dname_valid(nsdata->rr_data[i]+2, nsdata->rr_len[i]-2) !=
+ (size_t)ldns_read_uint16(nsdata->rr_data[i]))
+ continue; /* bad format */
+ /* add rdata of NS (= wirefmt dname), skip rdatalen bytes */
+ if(!delegpt_add_ns(dp, region, nsdata->rr_data[i]+2, lame))
+ return 0;
+ }
+ return 1;
+}
+
+int
+delegpt_add_rrset_A(struct delegpt* dp, struct regional* region,
+ struct ub_packed_rrset_key* ak, int lame)
+{
+ struct packed_rrset_data* d=(struct packed_rrset_data*)ak->entry.data;
+ size_t i;
+ struct sockaddr_in sa;
+ socklen_t len = (socklen_t)sizeof(sa);
+ memset(&sa, 0, len);
+ sa.sin_family = AF_INET;
+ sa.sin_port = (in_port_t)htons(UNBOUND_DNS_PORT);
+ for(i=0; i<d->count; i++) {
+ if(d->rr_len[i] != 2 + INET_SIZE)
+ continue;
+ memmove(&sa.sin_addr, d->rr_data[i]+2, INET_SIZE);
+ if(!delegpt_add_target(dp, region, ak->rk.dname,
+ ak->rk.dname_len, (struct sockaddr_storage*)&sa,
+ len, (d->security==sec_status_bogus), lame))
+ return 0;
+ }
+ return 1;
+}
+
+int
+delegpt_add_rrset_AAAA(struct delegpt* dp, struct regional* region,
+ struct ub_packed_rrset_key* ak, int lame)
+{
+ struct packed_rrset_data* d=(struct packed_rrset_data*)ak->entry.data;
+ size_t i;
+ struct sockaddr_in6 sa;
+ socklen_t len = (socklen_t)sizeof(sa);
+ memset(&sa, 0, len);
+ sa.sin6_family = AF_INET6;
+ sa.sin6_port = (in_port_t)htons(UNBOUND_DNS_PORT);
+ for(i=0; i<d->count; i++) {
+ if(d->rr_len[i] != 2 + INET6_SIZE) /* rdatalen + len of IP6 */
+ continue;
+ memmove(&sa.sin6_addr, d->rr_data[i]+2, INET6_SIZE);
+ if(!delegpt_add_target(dp, region, ak->rk.dname,
+ ak->rk.dname_len, (struct sockaddr_storage*)&sa,
+ len, (d->security==sec_status_bogus), lame))
+ return 0;
+ }
+ return 1;
+}
+
+int
+delegpt_add_rrset(struct delegpt* dp, struct regional* region,
+ struct ub_packed_rrset_key* rrset, int lame)
+{
+ if(!rrset)
+ return 1;
+ if(ntohs(rrset->rk.type) == LDNS_RR_TYPE_NS)
+ return delegpt_rrset_add_ns(dp, region, rrset, lame);
+ else if(ntohs(rrset->rk.type) == LDNS_RR_TYPE_A)
+ return delegpt_add_rrset_A(dp, region, rrset, lame);
+ else if(ntohs(rrset->rk.type) == LDNS_RR_TYPE_AAAA)
+ return delegpt_add_rrset_AAAA(dp, region, rrset, lame);
+ log_warn("Unknown rrset type added to delegpt");
+ return 1;
+}
+
+void delegpt_add_neg_msg(struct delegpt* dp, struct msgreply_entry* msg)
+{
+ struct reply_info* rep = (struct reply_info*)msg->entry.data;
+ if(!rep) return;
+
+ /* if error or no answers */
+ if(FLAGS_GET_RCODE(rep->flags) != 0 || rep->an_numrrsets == 0) {
+ struct delegpt_ns* ns = delegpt_find_ns(dp, msg->key.qname,
+ msg->key.qname_len);
+ if(ns) {
+ if(msg->key.qtype == LDNS_RR_TYPE_A)
+ ns->got4 = 1;
+ else if(msg->key.qtype == LDNS_RR_TYPE_AAAA)
+ ns->got6 = 1;
+ if(ns->got4 && ns->got6)
+ ns->resolved = 1;
+ }
+ }
+}
+
+void delegpt_no_ipv6(struct delegpt* dp)
+{
+ struct delegpt_ns* ns;
+ for(ns = dp->nslist; ns; ns = ns->next) {
+ /* no ipv6, so only ipv4 is enough to resolve a nameserver */
+ if(ns->got4)
+ ns->resolved = 1;
+ }
+}
+
+void delegpt_no_ipv4(struct delegpt* dp)
+{
+ struct delegpt_ns* ns;
+ for(ns = dp->nslist; ns; ns = ns->next) {
+ /* no ipv4, so only ipv6 is enough to resolve a nameserver */
+ if(ns->got6)
+ ns->resolved = 1;
+ }
+}
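
A minimal usage sketch of the delegation point API above, assuming Unbound's internal headers are available; it is not part of the imported sources. The zone name, server name, and glue address are made up, and wireformat names are written as C string literals whose terminating NUL doubles as the root label.

#include "config.h"
#include "iterator/iter_delegpt.h"
#include "util/net_help.h"

static struct delegpt* example_build_dp(struct regional* region)
{
        /* "example.com." and "ns1.example.com." in uncompressed wireformat */
        uint8_t zone[] = "\007example\003com";
        uint8_t ns[]   = "\003ns1\007example\003com";
        struct sockaddr_storage addr;
        socklen_t addrlen;
        struct delegpt* dp = delegpt_create(region);
        if(!dp || !delegpt_set_name(dp, region, zone))
                return NULL;
        if(!delegpt_add_ns(dp, region, ns, 0))
                return NULL;
        /* parse a made-up glue address; the port defaults to 53 */
        if(!extstrtoaddr("192.0.2.1", &addr, &addrlen))
                return NULL;
        /* attach the address to the NS name; this marks its got4 flag */
        if(!delegpt_add_target(dp, region, ns, sizeof(ns), &addr,
                addrlen, 0, 0))
                return NULL;
        delegpt_log(VERB_ALGO, dp);
        return dp;
}
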
diff --git a/3rdParty/Unbound/src/src/iterator/iter_delegpt.h b/3rdParty/Unbound/src/src/iterator/iter_delegpt.h
new file mode 100644
index 0000000..c4ca62d
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iter_delegpt.h
@@ -0,0 +1,349 @@
+/*
+ * iterator/iter_delegpt.h - delegation point with NS and address information.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file implements the Delegation Point. It contains a list of name servers
+ * and their addresses if known.
+ */
+
+#ifndef ITERATOR_ITER_DELEGPT_H
+#define ITERATOR_ITER_DELEGPT_H
+#include "util/log.h"
+struct regional;
+struct delegpt_ns;
+struct delegpt_addr;
+struct dns_msg;
+struct ub_packed_rrset_key;
+struct msgreply_entry;
+
+/**
+ * Delegation Point.
+ * For a domain name, the NS rrset, and the A and AAAA records for those.
+ */
+struct delegpt {
+ /** the domain name of the delegation point. */
+ uint8_t* name;
+ /** length of the delegation point name */
+ size_t namelen;
+ /** number of labels in delegation point */
+ int namelabs;
+
+ /** the nameservers, names from the NS RRset rdata. */
+ struct delegpt_ns* nslist;
+ /** the target addresses for delegation */
+ struct delegpt_addr* target_list;
+ /** the list of usable targets; subset of target_list
+ * the items in this list are not part of the result list. */
+ struct delegpt_addr* usable_list;
+ /** the list of returned targets; subset of target_list */
+ struct delegpt_addr* result_list;
+
+ /** if true, the NS RRset was bogus. All info is bad. */
+ int bogus;
+ /** if true, the parent-side NS record has been applied:
+ * its names have been added and their addresses can follow later.
+ * Also true if the delegationpoint was created from a delegation
+ * message and thus contains the parent-side-info already. */
+ uint8_t has_parent_side_NS;
+};
+
+/**
+ * Nameservers for a delegation point.
+ */
+struct delegpt_ns {
+ /** next in list */
+ struct delegpt_ns* next;
+ /** name of nameserver */
+ uint8_t* name;
+ /** length of name */
+ size_t namelen;
+ /**
+ * If the name has been resolved. false if not queried for yet.
+ * true if the A, AAAA queries have been generated.
+ * marked true if those queries fail.
+ * and marked true if got4 and got6 are both true.
+ */
+ int resolved;
+ /** if the ipv4 address is in the delegpt */
+ uint8_t got4;
+ /** if the ipv6 address is in the delegpt */
+ uint8_t got6;
+ /**
+ * If the name is parent-side only and thus dispreferred.
+ * Its addresses become dispreferred as well
+ */
+ uint8_t lame;
+ /** if the parent-side ipv4 address has been looked up (last resort).
+ * Also enabled if a parent-side cache entry exists, or a parent-side
+ * negative-cache entry exists. */
+ uint8_t done_pside4;
+ /** if the parent-side ipv6 address has been looked up (last resort).
+ * Also enabled if a parent-side cache entry exists, or a parent-side
+ * negative-cache entry exists. */
+ uint8_t done_pside6;
+};
+
+/**
+ * Address of target nameserver in delegation point.
+ */
+struct delegpt_addr {
+ /** next delegation point in results */
+ struct delegpt_addr* next_result;
+ /** next delegation point in usable list */
+ struct delegpt_addr* next_usable;
+ /** next delegation point in all targets list */
+ struct delegpt_addr* next_target;
+
+ /** delegation point address */
+ struct sockaddr_storage addr;
+ /** length of addr */
+ socklen_t addrlen;
+ /** number of attempts for this addr */
+ int attempts;
+ /** rtt stored here in the selection algorithm */
+ int sel_rtt;
+ /** if true, the A or AAAA RR was bogus, so this address is bad.
+ * Also check the dp->bogus to see if everything is bogus. */
+ int bogus;
+ /** if true, this address is dispreferred: it is a lame IP address */
+ int lame;
+};
+
+/**
+ * Create new delegation point.
+ * @param regional: where to allocate it.
+ * @return new delegation point or NULL on error.
+ */
+struct delegpt* delegpt_create(struct regional* regional);
+
+/**
+ * Create a copy of a delegation point.
+ * @param dp: delegation point to copy.
+ * @param regional: where to allocate it.
+ * @return new delegation point or NULL on error.
+ */
+struct delegpt* delegpt_copy(struct delegpt* dp, struct regional* regional);
+
+/**
+ * Set name of delegation point.
+ * @param dp: delegation point.
+ * @param regional: where to allocate the name copy.
+ * @param name: name to use.
+ * @return false on error.
+ */
+int delegpt_set_name(struct delegpt* dp, struct regional* regional,
+ uint8_t* name);
+
+/**
+ * Add a name to the delegation point.
+ * @param dp: delegation point.
+ * @param regional: where to allocate the info.
+ * @param name: domain name in wire format.
+ * @param lame: name is lame, disprefer it.
+ * @return false on error.
+ */
+int delegpt_add_ns(struct delegpt* dp, struct regional* regional,
+ uint8_t* name, int lame);
+
+/**
+ * Add NS rrset; calls add_ns repeatedly.
+ * @param dp: delegation point.
+ * @param regional: where to allocate the info.
+ * @param ns_rrset: NS rrset.
+ * @param lame: rrset is lame, disprefer it.
+ * @return 0 on alloc error.
+ */
+int delegpt_rrset_add_ns(struct delegpt* dp, struct regional* regional,
+ struct ub_packed_rrset_key* ns_rrset, int lame);
+
+/**
+ * Add target address to the delegation point.
+ * @param dp: delegation point.
+ * @param regional: where to allocate the info.
+ * @param name: name for which target was found (must be in nslist).
+ * This name is marked resolved.
+ * @param namelen: length of name.
+ * @param addr: the address.
+ * @param addrlen: the length of addr.
+ * @param bogus: security status for the address, pass true if bogus.
+ * @param lame: address is lame.
+ * @return false on error.
+ */
+int delegpt_add_target(struct delegpt* dp, struct regional* regional,
+ uint8_t* name, size_t namelen, struct sockaddr_storage* addr,
+ socklen_t addrlen, int bogus, int lame);
+
+/**
+ * Add A RRset to delegpt.
+ * @param dp: delegation point.
+ * @param regional: where to allocate the info.
+ * @param rrset: RRset A to add.
+ * @param lame: rrset is lame, disprefer it.
+ * @return 0 on alloc error.
+ */
+int delegpt_add_rrset_A(struct delegpt* dp, struct regional* regional,
+ struct ub_packed_rrset_key* rrset, int lame);
+
+/**
+ * Add AAAA RRset to delegpt.
+ * @param dp: delegation point.
+ * @param regional: where to allocate the info.
+ * @param rrset: RRset AAAA to add.
+ * @param lame: rrset is lame, disprefer it.
+ * @return 0 on alloc error.
+ */
+int delegpt_add_rrset_AAAA(struct delegpt* dp, struct regional* regional,
+ struct ub_packed_rrset_key* rrset, int lame);
+
+/**
+ * Add any RRset to delegpt.
+ * Does not check for duplicates added.
+ * @param dp: delegation point.
+ * @param regional: where to allocate the info.
+ * @param rrset: RRset to add, NS, A, AAAA.
+ * @param lame: rrset is lame, disprefer it.
+ * @return 0 on alloc error.
+ */
+int delegpt_add_rrset(struct delegpt* dp, struct regional* regional,
+ struct ub_packed_rrset_key* rrset, int lame);
+
+/**
+ * Add address to the delegation point. No servername is associated or checked.
+ * @param dp: delegation point.
+ * @param regional: where to allocate the info.
+ * @param addr: the address.
+ * @param addrlen: the length of addr.
+ * @param bogus: if address is bogus.
+ * @param lame: if address is lame.
+ * @return false on error.
+ */
+int delegpt_add_addr(struct delegpt* dp, struct regional* regional,
+ struct sockaddr_storage* addr, socklen_t addrlen, int bogus, int lame);
+
+/**
+ * Find NS record in name list of delegation point.
+ * @param dp: delegation point.
+ * @param name: name of nameserver to look for, uncompressed wireformat.
+ * @param namelen: length of name.
+ * @return the ns structure or NULL if not found.
+ */
+struct delegpt_ns* delegpt_find_ns(struct delegpt* dp, uint8_t* name,
+ size_t namelen);
+
+/**
+ * Find address record in total list of delegation point.
+ * @param dp: delegation point.
+ * @param addr: address
+ * @param addrlen: length of addr
+ * @return the addr structure or NULL if not found.
+ */
+struct delegpt_addr* delegpt_find_addr(struct delegpt* dp,
+ struct sockaddr_storage* addr, socklen_t addrlen);
+
+/**
+ * Print the delegation point to the log. For debugging.
+ * @param v: verbosity value that is needed to emit to log.
+ * @param dp: delegation point.
+ */
+void delegpt_log(enum verbosity_value v, struct delegpt* dp);
+
+/** count NS and number missing for logging */
+void delegpt_count_ns(struct delegpt* dp, size_t* numns, size_t* missing);
+
+/** count addresses, and number in result and available lists, for logging */
+void delegpt_count_addr(struct delegpt* dp, size_t* numaddr, size_t* numres,
+ size_t* numavail);
+
+/**
+ * Add all usable targets to the result list.
+ * @param dp: delegation point.
+ */
+void delegpt_add_unused_targets(struct delegpt* dp);
+
+/**
+ * Count number of missing targets. These are ns names with no resolved flag.
+ * @param dp: delegation point.
+ * @return number of missing targets (or 0).
+ */
+size_t delegpt_count_missing_targets(struct delegpt* dp);
+
+/** count total number of targets in dp */
+size_t delegpt_count_targets(struct delegpt* dp);
+
+/**
+ * Create new delegation point from a dns message
+ *
+ * Note that this method does not actually test to see if the message is an
+ * actual referral. It really is just checking to see if it can construct a
+ * delegation point, so the message could be of some other type (some ANSWER
+ * messages, some CNAME messages, generally.) Note that the resulting
+ * DelegationPoint will contain targets for all "relevant" glue (i.e.,
+ * address records whose ownernames match the target of one of the NS
+ * records), so if policy dictates that some glue should be discarded beyond
+ * that, discard it before calling this method. Note that this method will
+ * find "glue" in either the ADDITIONAL section or the ANSWER section.
+ *
+ * @param msg: the dns message, referral.
+ * @param regional: where to allocate delegation point.
+ * @return new delegation point or NULL on alloc error, or if the
+ * message was not appropriate.
+ */
+struct delegpt* delegpt_from_message(struct dns_msg* msg,
+ struct regional* regional);
+
+/**
+ * Add negative message to delegation point.
+ * @param dp: delegation point.
+ * @param msg: the message added, marks off A or AAAA from an NS entry.
+ */
+void delegpt_add_neg_msg(struct delegpt* dp, struct msgreply_entry* msg);
+
+/**
+ * Register the fact that there is no ipv6 and thus AAAAs are not going
+ * to be queried for or be useful.
+ * @param dp: the delegation point. Updated to reflect no ipv6.
+ */
+void delegpt_no_ipv6(struct delegpt* dp);
+
+/**
+ * Register the fact that there is no ipv4 and thus As are not going
+ * to be queried for or be useful.
+ * @param dp: the delegation point. Updated to reflect no ipv4.
+ */
+void delegpt_no_ipv4(struct delegpt* dp);
+
+#endif /* ITERATOR_ITER_DELEGPT_H */
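
To make the three address lists in struct delegpt concrete, here is a short sketch (again assuming Unbound's internal headers; not part of the imported sources) that moves every still-unused target onto the result list and then walks it, skipping dispreferred entries.

#include "config.h"
#include "iterator/iter_delegpt.h"
#include "util/net_help.h"

static void example_walk_results(struct delegpt* dp)
{
        struct delegpt_addr* a;
        /* move all addresses still on the usable list to the result list */
        delegpt_add_unused_targets(dp);
        for(a = dp->result_list; a; a = a->next_result) {
                if(a->bogus || a->lame)
                        continue; /* skip bogus or lame addresses */
                log_addr(VERB_ALGO, "candidate server", &a->addr,
                        a->addrlen);
        }
}
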
diff --git a/3rdParty/Unbound/src/src/iterator/iter_donotq.c b/3rdParty/Unbound/src/src/iterator/iter_donotq.c
new file mode 100644
index 0000000..bd60633
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iter_donotq.c
@@ -0,0 +1,153 @@
+/*
+ * iterator/iter_donotq.c - iterative resolver donotqueryaddresses storage.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains functions to assist the iterator module.
+ * The donotqueryaddresses are stored and looked up. These addresses
+ * (like 127.0.0.1) must not be used to send queries to, and can be
+ * discarded immediately from the server selection.
+ */
+#include "config.h"
+#include "iterator/iter_donotq.h"
+#include "util/regional.h"
+#include "util/log.h"
+#include "util/config_file.h"
+#include "util/net_help.h"
+
+struct iter_donotq*
+donotq_create(void)
+{
+ struct iter_donotq* dq = (struct iter_donotq*)calloc(1,
+ sizeof(struct iter_donotq));
+ if(!dq)
+ return NULL;
+ dq->region = regional_create();
+ if(!dq->region) {
+ donotq_delete(dq);
+ return NULL;
+ }
+ return dq;
+}
+
+void
+donotq_delete(struct iter_donotq* dq)
+{
+ if(!dq)
+ return;
+ regional_destroy(dq->region);
+ free(dq);
+}
+
+/** insert new address into donotq structure */
+static int
+donotq_insert(struct iter_donotq* dq, struct sockaddr_storage* addr,
+ socklen_t addrlen, int net)
+{
+ struct addr_tree_node* node = (struct addr_tree_node*)regional_alloc(
+ dq->region, sizeof(*node));
+ if(!node)
+ return 0;
+ if(!addr_tree_insert(&dq->tree, node, addr, addrlen, net)) {
+ verbose(VERB_QUERY, "duplicate donotquery address ignored.");
+ }
+ return 1;
+}
+
+/** apply donotq string */
+static int
+donotq_str_cfg(struct iter_donotq* dq, const char* str)
+{
+ struct sockaddr_storage addr;
+ int net;
+ socklen_t addrlen;
+ verbose(VERB_ALGO, "donotq: %s", str);
+ if(!netblockstrtoaddr(str, UNBOUND_DNS_PORT, &addr, &addrlen, &net)) {
+ log_err("cannot parse donotquery netblock: %s", str);
+ return 0;
+ }
+ if(!donotq_insert(dq, &addr, addrlen, net)) {
+ log_err("out of memory");
+ return 0;
+ }
+ return 1;
+}
+
+/** read donotq config */
+static int
+read_donotq(struct iter_donotq* dq, struct config_file* cfg)
+{
+ struct config_strlist* p;
+ for(p = cfg->donotqueryaddrs; p; p = p->next) {
+ log_assert(p->str);
+ if(!donotq_str_cfg(dq, p->str))
+ return 0;
+ }
+ return 1;
+}
+
+int
+donotq_apply_cfg(struct iter_donotq* dq, struct config_file* cfg)
+{
+ regional_free_all(dq->region);
+ addr_tree_init(&dq->tree);
+ if(!read_donotq(dq, cfg))
+ return 0;
+ if(cfg->donotquery_localhost) {
+ if(!donotq_str_cfg(dq, "127.0.0.0/8"))
+ return 0;
+ if(cfg->do_ip6) {
+ if(!donotq_str_cfg(dq, "::1"))
+ return 0;
+ }
+ }
+ addr_tree_init_parents(&dq->tree);
+ return 1;
+}
+
+int
+donotq_lookup(struct iter_donotq* donotq, struct sockaddr_storage* addr,
+ socklen_t addrlen)
+{
+ return addr_tree_lookup(&donotq->tree, addr, addrlen) != NULL;
+}
+
+size_t
+donotq_get_mem(struct iter_donotq* donotq)
+{
+ if(!donotq) return 0;
+ return sizeof(*donotq) + regional_get_mem(donotq->region);
+}
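
A sketch of how the donotquery list is meant to be used, assuming Unbound's internal headers; not part of the imported sources. It builds the structure from a config and tests one candidate server address. In the module proper the structure lives in the iterator environment and is built once at config time; it is created per call here only to keep the sketch self-contained.

#include "config.h"
#include "iterator/iter_donotq.h"

/* returns true if addr may be queried under the given config */
static int example_may_query(struct config_file* cfg,
        struct sockaddr_storage* addr, socklen_t addrlen)
{
        int blocked;
        struct iter_donotq* dq = donotq_create();
        if(!dq || !donotq_apply_cfg(dq, cfg)) {
                donotq_delete(dq);
                return 0; /* on failure, treat the address as blocked */
        }
        blocked = donotq_lookup(dq, addr, addrlen);
        donotq_delete(dq);
        return !blocked;
}
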
diff --git a/3rdParty/Unbound/src/src/iterator/iter_donotq.h b/3rdParty/Unbound/src/src/iterator/iter_donotq.h
new file mode 100644
index 0000000..4c4fcb2
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iter_donotq.h
@@ -0,0 +1,101 @@
+/*
+ * iterator/iter_donotq.h - iterative resolver donotqueryaddresses storage.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains functions to assist the iterator module.
+ * Keep track of the donotquery addresses and lookup fast.
+ */
+
+#ifndef ITERATOR_ITER_DONOTQ_H
+#define ITERATOR_ITER_DONOTQ_H
+#include "util/storage/dnstree.h"
+struct iter_env;
+struct config_file;
+struct regional;
+
+/**
+ * Iterator donotqueryaddresses structure
+ */
+struct iter_donotq {
+ /** regional for allocation */
+ struct regional* region;
+ /**
+ * Tree of the address spans that are blocked.
+ * contents of type addr_tree_node. Each node is an address span
+ * that must not be used to send queries to.
+ */
+ rbtree_t tree;
+};
+
+/**
+ * Create donotqueryaddresses structure
+ * @return new structure or NULL on error.
+ */
+struct iter_donotq* donotq_create(void);
+
+/**
+ * Delete donotqueryaddresses structure.
+ * @param donotq: to delete.
+ */
+void donotq_delete(struct iter_donotq* donotq);
+
+/**
+ * Process donotqueryaddresses config.
+ * @param donotq: where to store.
+ * @param cfg: config options.
+ * @return 0 on error.
+ */
+int donotq_apply_cfg(struct iter_donotq* donotq, struct config_file* cfg);
+
+/**
+ * See if an address is blocked.
+ * @param donotq: structure for address storage.
+ * @param addr: address to check
+ * @param addrlen: length of addr.
+ * @return: true if the address must not be queried. false if unlisted.
+ */
+int donotq_lookup(struct iter_donotq* donotq, struct sockaddr_storage* addr,
+ socklen_t addrlen);
+
+/**
+ * Get memory used by donotqueryaddresses structure.
+ * @param donotq: structure for address storage.
+ * @return bytes in use.
+ */
+size_t donotq_get_mem(struct iter_donotq* donotq);
+
+#endif /* ITERATOR_ITER_DONOTQ_H */
diff --git a/3rdParty/Unbound/src/src/iterator/iter_fwd.c b/3rdParty/Unbound/src/src/iterator/iter_fwd.c
new file mode 100644
index 0000000..2df1f9c
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iter_fwd.c
@@ -0,0 +1,442 @@
+/*
+ * iterator/iter_fwd.c - iterative resolver module forward zones.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains functions to assist the iterator module.
+ * Keep track of forward zones and config settings.
+ */
+#include "config.h"
+#include <ldns/rdata.h>
+#include <ldns/dname.h>
+#include <ldns/rr.h>
+#include "iterator/iter_fwd.h"
+#include "iterator/iter_delegpt.h"
+#include "util/regional.h"
+#include "util/log.h"
+#include "util/config_file.h"
+#include "util/net_help.h"
+#include "util/data/dname.h"
+
+int
+fwd_cmp(const void* k1, const void* k2)
+{
+ int m;
+ struct iter_forward_zone* n1 = (struct iter_forward_zone*)k1;
+ struct iter_forward_zone* n2 = (struct iter_forward_zone*)k2;
+ if(n1->dclass != n2->dclass) {
+ if(n1->dclass < n2->dclass)
+ return -1;
+ return 1;
+ }
+ return dname_lab_cmp(n1->name, n1->namelabs, n2->name, n2->namelabs,
+ &m);
+}
+
+struct iter_forwards*
+forwards_create(void)
+{
+ struct iter_forwards* fwd = (struct iter_forwards*)calloc(1,
+ sizeof(struct iter_forwards));
+ if(!fwd)
+ return NULL;
+ fwd->region = regional_create();
+ if(!fwd->region) {
+ forwards_delete(fwd);
+ return NULL;
+ }
+ return fwd;
+}
+
+void
+forwards_delete(struct iter_forwards* fwd)
+{
+ if(!fwd)
+ return;
+ regional_destroy(fwd->region);
+ free(fwd->tree);
+ free(fwd);
+}
+
+/** insert info into forward structure */
+static int
+forwards_insert_data(struct iter_forwards* fwd, uint16_t c, uint8_t* nm,
+ size_t nmlen, int nmlabs, struct delegpt* dp)
+{
+ struct iter_forward_zone* node = regional_alloc(fwd->region,
+ sizeof(struct iter_forward_zone));
+ if(!node)
+ return 0;
+ node->node.key = node;
+ node->dclass = c;
+ node->name = regional_alloc_init(fwd->region, nm, nmlen);
+ if(!node->name)
+ return 0;
+ node->namelen = nmlen;
+ node->namelabs = nmlabs;
+ node->dp = dp;
+ if(!rbtree_insert(fwd->tree, &node->node)) {
+ log_err("duplicate forward zone ignored.");
+ }
+ return 1;
+}
+
+/** insert new info into forward structure given dp */
+static int
+forwards_insert(struct iter_forwards* fwd, uint16_t c, struct delegpt* dp)
+{
+ return forwards_insert_data(fwd, c, dp->name, dp->namelen,
+ dp->namelabs, dp);
+}
+
+/** initialise parent pointers in the tree */
+static void
+fwd_init_parents(struct iter_forwards* fwd)
+{
+ struct iter_forward_zone* node, *prev = NULL, *p;
+ int m;
+ RBTREE_FOR(node, struct iter_forward_zone*, fwd->tree) {
+ node->parent = NULL;
+ if(!prev || prev->dclass != node->dclass) {
+ prev = node;
+ continue;
+ }
+ (void)dname_lab_cmp(prev->name, prev->namelabs, node->name,
+ node->namelabs, &m); /* we know prev is smaller */
+ /* sort order like: . com. bla.com. zwb.com. net. */
+ /* find the previous, or parent-parent-parent */
+ for(p = prev; p; p = p->parent)
+ /* looking for name with few labels, a parent */
+ if(p->namelabs <= m) {
+ /* ==: since prev matched m, this is closest*/
+ /* <: prev matches more, but is not a parent,
+ * this one is a (grand)parent */
+ node->parent = p;
+ break;
+ }
+ prev = node;
+ }
+}
+
+/** set zone name */
+static int
+read_fwds_name(struct iter_forwards* fwd, struct config_stub* s,
+ struct delegpt* dp)
+{
+ ldns_rdf* rdf;
+ if(!s->name) {
+ log_err("forward zone without a name (use name \".\" to forward everything)");
+ return 0;
+ }
+ rdf = ldns_dname_new_frm_str(s->name);
+ if(!rdf) {
+ log_err("cannot parse forward zone name %s", s->name);
+ return 0;
+ }
+ if(!delegpt_set_name(dp, fwd->region, ldns_rdf_data(rdf))) {
+ ldns_rdf_deep_free(rdf);
+ log_err("out of memory");
+ return 0;
+ }
+ ldns_rdf_deep_free(rdf);
+ return 1;
+}
+
+/** set fwd host names */
+static int
+read_fwds_host(struct iter_forwards* fwd, struct config_stub* s,
+ struct delegpt* dp)
+{
+ struct config_strlist* p;
+ ldns_rdf* rdf;
+ for(p = s->hosts; p; p = p->next) {
+ log_assert(p->str);
+ rdf = ldns_dname_new_frm_str(p->str);
+ if(!rdf) {
+ log_err("cannot parse forward %s server name: '%s'",
+ s->name, p->str);
+ return 0;
+ }
+ if(!delegpt_add_ns(dp, fwd->region, ldns_rdf_data(rdf), 0)) {
+ ldns_rdf_deep_free(rdf);
+ log_err("out of memory");
+ return 0;
+ }
+ ldns_rdf_deep_free(rdf);
+ }
+ return 1;
+}
+
+/** set fwd server addresses */
+static int
+read_fwds_addr(struct iter_forwards* fwd, struct config_stub* s,
+ struct delegpt* dp)
+{
+ struct config_strlist* p;
+ struct sockaddr_storage addr;
+ socklen_t addrlen;
+ for(p = s->addrs; p; p = p->next) {
+ log_assert(p->str);
+ if(!extstrtoaddr(p->str, &addr, &addrlen)) {
+ log_err("cannot parse forward %s ip address: '%s'",
+ s->name, p->str);
+ return 0;
+ }
+ if(!delegpt_add_addr(dp, fwd->region, &addr, addrlen, 0, 0)) {
+ log_err("out of memory");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/** read forwards config */
+static int
+read_forwards(struct iter_forwards* fwd, struct config_file* cfg)
+{
+ struct config_stub* s;
+ for(s = cfg->forwards; s; s = s->next) {
+ struct delegpt* dp = delegpt_create(fwd->region);
+ if(!dp) {
+ log_err("out of memory");
+ return 0;
+ }
+ /* set flag that parent side NS information is included.
+ * Asking a (higher up) server on the internet is not useful */
+ dp->has_parent_side_NS = 1;
+ if(!read_fwds_name(fwd, s, dp) ||
+ !read_fwds_host(fwd, s, dp) ||
+ !read_fwds_addr(fwd, s, dp))
+ return 0;
+ if(!forwards_insert(fwd, LDNS_RR_CLASS_IN, dp))
+ return 0;
+ verbose(VERB_QUERY, "Forward zone server list:");
+ delegpt_log(VERB_QUERY, dp);
+ }
+ return 1;
+}
+
+/** see if zone needs to have a hole inserted */
+static int
+need_hole_insert(rbtree_t* tree, struct iter_forward_zone* zone)
+{
+ struct iter_forward_zone k;
+ if(rbtree_search(tree, zone))
+ return 0; /* exact match exists */
+ k = *zone;
+ k.node.key = &k;
+ /* search up the tree */
+ do {
+ dname_remove_label(&k.name, &k.namelen);
+ k.namelabs --;
+ if(rbtree_search(tree, &k))
+ return 1; /* found an upper forward zone, need hole */
+ } while(k.namelabs > 1);
+ return 0; /* no forwards above, no holes needed */
+}
+
+/** make NULL entries for stubs */
+static int
+make_stub_holes(struct iter_forwards* fwd, struct config_file* cfg)
+{
+ struct config_stub* s;
+ struct iter_forward_zone key;
+ key.node.key = &key;
+ key.dclass = LDNS_RR_CLASS_IN;
+ for(s = cfg->stubs; s; s = s->next) {
+ ldns_rdf* rdf = ldns_dname_new_frm_str(s->name);
+ if(!rdf) {
+ log_err("cannot parse stub name '%s'", s->name);
+ return 0;
+ }
+ key.name = ldns_rdf_data(rdf);
+ key.namelabs = dname_count_size_labels(key.name, &key.namelen);
+ if(!need_hole_insert(fwd->tree, &key)) {
+ ldns_rdf_deep_free(rdf);
+ continue;
+ }
+ if(!forwards_insert_data(fwd, key.dclass, key.name,
+ key.namelen, key.namelabs, NULL)) {
+ ldns_rdf_deep_free(rdf);
+ log_err("out of memory");
+ return 0;
+ }
+ ldns_rdf_deep_free(rdf);
+ }
+ return 1;
+}
+
+int
+forwards_apply_cfg(struct iter_forwards* fwd, struct config_file* cfg)
+{
+ free(fwd->tree);
+ regional_free_all(fwd->region);
+ fwd->tree = rbtree_create(fwd_cmp);
+ if(!fwd->tree)
+ return 0;
+
+ /* read forward zones */
+ if(!read_forwards(fwd, cfg))
+ return 0;
+ if(!make_stub_holes(fwd, cfg))
+ return 0;
+ fwd_init_parents(fwd);
+ return 1;
+}
+
+struct delegpt*
+forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
+{
+ /* lookup the forward zone in the tree */
+ rbnode_t* res = NULL;
+ struct iter_forward_zone *result;
+ struct iter_forward_zone key;
+ key.node.key = &key;
+ key.dclass = qclass;
+ key.name = qname;
+ key.namelabs = dname_count_size_labels(qname, &key.namelen);
+ if(rbtree_find_less_equal(fwd->tree, &key, &res)) {
+ /* exact */
+ result = (struct iter_forward_zone*)res;
+ } else {
+ /* smaller element (or no element) */
+ int m;
+ result = (struct iter_forward_zone*)res;
+ if(!result || result->dclass != qclass)
+ return NULL;
+ /* count number of labels matched */
+ (void)dname_lab_cmp(result->name, result->namelabs, key.name,
+ key.namelabs, &m);
+ while(result) { /* go up until qname is subdomain of stub */
+ if(result->namelabs <= m)
+ break;
+ result = result->parent;
+ }
+ }
+ if(result)
+ return result->dp;
+ return NULL;
+}
+
+struct delegpt*
+forwards_lookup_root(struct iter_forwards* fwd, uint16_t qclass)
+{
+ uint8_t root = 0;
+ return forwards_lookup(fwd, &root, qclass);
+}
+
+int
+forwards_next_root(struct iter_forwards* fwd, uint16_t* dclass)
+{
+ struct iter_forward_zone key;
+ rbnode_t* n;
+ struct iter_forward_zone* p;
+ if(*dclass == 0) {
+ /* first root item is first item in tree */
+ n = rbtree_first(fwd->tree);
+ if(n == RBTREE_NULL)
+ return 0;
+ p = (struct iter_forward_zone*)n;
+ if(dname_is_root(p->name)) {
+ *dclass = p->dclass;
+ return 1;
+ }
+ /* root not first item? search for higher items */
+ *dclass = p->dclass + 1;
+ return forwards_next_root(fwd, dclass);
+ }
+ /* find class n in tree, we may get a direct hit, or if we don't
+ * this is the last item of the previous class so rbtree_next() takes
+ * us to the next root (if any) */
+ key.node.key = &key;
+ key.name = (uint8_t*)"\000";
+ key.namelen = 1;
+ key.namelabs = 0;
+ key.dclass = *dclass;
+ n = NULL;
+ if(rbtree_find_less_equal(fwd->tree, &key, &n)) {
+ /* exact */
+ return 1;
+ } else {
+ /* smaller element */
+ if(!n || n == RBTREE_NULL)
+ return 0; /* nothing found */
+ n = rbtree_next(n);
+ if(n == RBTREE_NULL)
+ return 0; /* no higher */
+ p = (struct iter_forward_zone*)n;
+ if(dname_is_root(p->name)) {
+ *dclass = p->dclass;
+ return 1;
+ }
+ /* not a root node, return next higher item */
+ *dclass = p->dclass+1;
+ return forwards_next_root(fwd, dclass);
+ }
+}
+
+size_t
+forwards_get_mem(struct iter_forwards* fwd)
+{
+ if(!fwd)
+ return 0;
+ return sizeof(*fwd) + sizeof(*fwd->tree) +
+ regional_get_mem(fwd->region);
+}
+
+int
+forwards_add_zone(struct iter_forwards* fwd, uint16_t c, struct delegpt* dp)
+{
+ if(!forwards_insert(fwd, c, dp))
+ return 0;
+ fwd_init_parents(fwd);
+ return 1;
+}
+
+void
+forwards_delete_zone(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
+{
+ struct iter_forward_zone key;
+ key.node.key = &key;
+ key.dclass = c;
+ key.name = nm;
+ key.namelabs = dname_count_size_labels(nm, &key.namelen);
+ if(!rbtree_search(fwd->tree, &key))
+ return; /* nothing to do */
+ (void)rbtree_delete(fwd->tree, &key);
+ fwd_init_parents(fwd);
+}
+
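A sketch of the lookup side (assuming Unbound's internal headers; not part of the imported sources): given a query name in uncompressed wireformat, ask for the closest enclosing forward zone and fall back to normal iteration when none is configured. A NULL result also covers the stub "holes" inserted above, whose entries carry no delegation point.

#include "config.h"
#include <ldns/rr.h>
#include "iterator/iter_fwd.h"
#include "iterator/iter_delegpt.h"

static struct delegpt* example_pick_forwarder(struct iter_forwards* fwd,
        uint8_t* qname)
{
        /* NULL means: no forward zone covers qname, iterate normally */
        struct delegpt* dp = forwards_lookup(fwd, qname, LDNS_RR_CLASS_IN);
        if(dp)
                delegpt_log(VERB_QUERY, dp);
        return dp;
}
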
diff --git a/3rdParty/Unbound/src/src/iterator/iter_fwd.h b/3rdParty/Unbound/src/src/iterator/iter_fwd.h
new file mode 100644
index 0000000..8f3bc1f
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iter_fwd.h
@@ -0,0 +1,173 @@
+/*
+ * iterator/iter_fwd.h - iterative resolver module forward zones.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains functions to assist the iterator module.
+ * Keep track of forward zones, and read those from config.
+ */
+
+#ifndef ITERATOR_ITER_FWD_H
+#define ITERATOR_ITER_FWD_H
+#include "util/rbtree.h"
+struct config_file;
+struct delegpt;
+struct regional;
+
+/**
+ * Iterator forward zones structure
+ */
+struct iter_forwards {
+ /** regional where forward zone server addresses are allocated */
+ struct regional* region;
+ /**
+ * Zones are stored in this tree. Sort order is specially chosen.
+ * first sorted on qclass. Then on dname in nsec-like order, so that
+ * a lookup on class, name will return an exact match or the closest
+ * match which gives the ancestor needed.
+ * contents of type iter_forward_zone.
+ */
+ rbtree_t* tree;
+};
+
+/**
+ * Iterator forward servers for a particular zone.
+ */
+struct iter_forward_zone {
+ /** redblacktree node, key is this structure: class and name */
+ rbnode_t node;
+ /** name */
+ uint8_t* name;
+ /** length of name */
+ size_t namelen;
+ /** number of labels in name */
+ int namelabs;
+ /** delegation point with forward server information for this zone.
+ * If NULL then this forward entry is used to indicate that a
+ * stub-zone with the same name exists, and should be used. */
+ struct delegpt* dp;
+ /** pointer to parent in tree (or NULL if none) */
+ struct iter_forward_zone* parent;
+ /** class. host order. */
+ uint16_t dclass;
+};
+
+/**
+ * Create forwards
+ * @return new forwards or NULL on error.
+ */
+struct iter_forwards* forwards_create(void);
+
+/**
+ * Delete forwards.
+ * @param fwd: to delete.
+ */
+void forwards_delete(struct iter_forwards* fwd);
+
+/**
+ * Process forwards config.
+ * @param fwd: where to store.
+ * @param cfg: config options.
+ * @return 0 on error.
+ */
+int forwards_apply_cfg(struct iter_forwards* fwd, struct config_file* cfg);
+
+/**
+ * Find forward zone information
+ * For this qname/qclass find forward zone information, returns delegation
+ * point with server names and addresses, or NULL if no forwarding is needed.
+ *
+ * @param fwd: forward storage.
+ * @param qname: The qname of the query.
+ * @param qclass: The qclass of the query.
+ * @return: A delegation point if the query has to be forwarded to that list,
+ * otherwise null.
+ */
+struct delegpt* forwards_lookup(struct iter_forwards* fwd,
+ uint8_t* qname, uint16_t qclass);
+
+/**
+ * Same as forwards_lookup, but for the root only
+ * @param fwd: forward storage.
+ * @param qclass: The qclass of the query.
+ * @return: A delegation point if root forward exists, otherwise null.
+ */
+struct delegpt* forwards_lookup_root(struct iter_forwards* fwd,
+ uint16_t qclass);
+
+/**
+ * Find next root item in forwards lookup tree.
+ * @param fwd: the forward storage
+ * @param qclass: class to look at next, or higher.
+ * @return false if none found, or if true stored in qclass.
+ */
+int forwards_next_root(struct iter_forwards* fwd, uint16_t* qclass);
+
+/**
+ * Get memory in use by forward storage
+ * @param fwd: forward storage.
+ * @return bytes in use
+ */
+size_t forwards_get_mem(struct iter_forwards* fwd);
+
+/** compare two fwd entries */
+int fwd_cmp(const void* k1, const void* k2);
+
+/**
+ * Add zone to forward structure. For external use since it recalcs
+ * the tree parents.
+ * @param fwd: the forward data structure
+ * @param c: class of zone
+ * @param dp: delegation point with name and target nameservers for new
+ * forward zone. This delegation point and all its data must be
+ * malloced in the fwd->region. (then it is freed when the fwd is
+ * deleted).
+ * @return false on failure (out of memory);
+ */
+int forwards_add_zone(struct iter_forwards* fwd, uint16_t c,
+ struct delegpt* dp);
+
+/**
+ * Remove zone from forward structure. For external use since it
+ * recalcs the tree parents. Does not actually release any memory, the region
+ * is unchanged.
+ * @param fwd: the forward data structure
+ * @param c: class of zone
+ * @param nm: name of zone (in uncompressed wireformat).
+ */
+void forwards_delete_zone(struct iter_forwards* fwd, uint16_t c, uint8_t* nm);
+
+#endif /* ITERATOR_ITER_FWD_H */
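
forwards_next_root() acts as an enumerator. The following sketch (assuming Unbound's internal headers; not part of the imported sources, and the increment-and-retry pattern is inferred from the documentation above) lists every class that has a root forward configured.

#include "config.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_delegpt.h"

static void example_list_root_forwards(struct iter_forwards* fwd)
{
        uint16_t c = 0;
        while(forwards_next_root(fwd, &c)) {
                struct delegpt* dp = forwards_lookup_root(fwd, c);
                log_info("root forward configured for class %d", (int)c);
                if(dp)
                        delegpt_log(VERB_ALGO, dp);
                if(c == 0xffff)
                        break; /* highest class, avoid wrap-around */
                c++; /* continue the scan at the next class */
        }
}
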
diff --git a/3rdParty/Unbound/src/src/iterator/iter_hints.c b/3rdParty/Unbound/src/src/iterator/iter_hints.c
new file mode 100644
index 0000000..01c50b9
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iter_hints.c
@@ -0,0 +1,488 @@
+/*
+ * iterator/iter_hints.c - iterative resolver module stub and root hints.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains functions to assist the iterator module.
+ * Keep track of stub and root hints, and read those from config.
+ */
+#include "config.h"
+#include <ldns/dname.h>
+#include <ldns/rr.h>
+#include "iterator/iter_hints.h"
+#include "iterator/iter_delegpt.h"
+#include "util/regional.h"
+#include "util/log.h"
+#include "util/config_file.h"
+#include "util/net_help.h"
+#include "util/data/dname.h"
+
+struct iter_hints*
+hints_create(void)
+{
+ struct iter_hints* hints = (struct iter_hints*)calloc(1,
+ sizeof(struct iter_hints));
+ if(!hints)
+ return NULL;
+ hints->region = regional_create();
+ if(!hints->region) {
+ hints_delete(hints);
+ return NULL;
+ }
+ return hints;
+}
+
+void
+hints_delete(struct iter_hints* hints)
+{
+ if(!hints)
+ return;
+ regional_destroy(hints->region);
+ free(hints);
+}
+
+/** add hint to delegation hints */
+static int
+ah(struct delegpt* dp, struct regional* r, const char* sv, const char* ip)
+{
+ struct sockaddr_storage addr;
+ socklen_t addrlen;
+ ldns_rdf* rdf = ldns_dname_new_frm_str(sv);
+ if(!rdf) {
+ log_err("could not parse %s", sv);
+ return 0;
+ }
+ if(!delegpt_add_ns(dp, r, ldns_rdf_data(rdf), 0) ||
+ !extstrtoaddr(ip, &addr, &addrlen) ||
+ !delegpt_add_target(dp, r, ldns_rdf_data(rdf), ldns_rdf_size(rdf),
+ &addr, addrlen, 0, 0)) {
+ ldns_rdf_deep_free(rdf);
+ return 0;
+ }
+ ldns_rdf_deep_free(rdf);
+ return 1;
+}
+
+/** obtain compiletime provided root hints */
+static struct delegpt*
+compile_time_root_prime(struct regional* r, int do_ip4, int do_ip6)
+{
+ /* from:
+ ; This file is made available by InterNIC
+ ; under anonymous FTP as
+ ; file /domain/named.cache
+ ; on server FTP.INTERNIC.NET
+ ; -OR- RS.INTERNIC.NET
+ ;
+ ; related version of root zone: 2010061700
+ */
+ struct delegpt* dp = delegpt_create(r);
+ if(!dp)
+ return NULL;
+ dp->has_parent_side_NS = 1;
+ if(!delegpt_set_name(dp, r, (uint8_t*)"\000"))
+ return NULL;
+ if(do_ip4) {
+ if(!ah(dp, r, "A.ROOT-SERVERS.NET.", "198.41.0.4")) return 0;
+ if(!ah(dp, r, "B.ROOT-SERVERS.NET.", "192.228.79.201")) return 0;
+ if(!ah(dp, r, "C.ROOT-SERVERS.NET.", "192.33.4.12")) return 0;
+ if(!ah(dp, r, "D.ROOT-SERVERS.NET.", "128.8.10.90")) return 0;
+ if(!ah(dp, r, "E.ROOT-SERVERS.NET.", "192.203.230.10")) return 0;
+ if(!ah(dp, r, "F.ROOT-SERVERS.NET.", "192.5.5.241")) return 0;
+ if(!ah(dp, r, "G.ROOT-SERVERS.NET.", "192.112.36.4")) return 0;
+ if(!ah(dp, r, "H.ROOT-SERVERS.NET.", "128.63.2.53")) return 0;
+ if(!ah(dp, r, "I.ROOT-SERVERS.NET.", "192.36.148.17")) return 0;
+ if(!ah(dp, r, "J.ROOT-SERVERS.NET.", "192.58.128.30")) return 0;
+ if(!ah(dp, r, "K.ROOT-SERVERS.NET.", "193.0.14.129")) return 0;
+ if(!ah(dp, r, "L.ROOT-SERVERS.NET.", "199.7.83.42")) return 0;
+ if(!ah(dp, r, "M.ROOT-SERVERS.NET.", "202.12.27.33")) return 0;
+ }
+ if(do_ip6) {
+ if(!ah(dp, r, "A.ROOT-SERVERS.NET.", "2001:503:ba3e::2:30")) return 0;
+ if(!ah(dp, r, "D.ROOT-SERVERS.NET.", "2001:500:2d::d")) return 0;
+ if(!ah(dp, r, "F.ROOT-SERVERS.NET.", "2001:500:2f::f")) return 0;
+ if(!ah(dp, r, "H.ROOT-SERVERS.NET.", "2001:500:1::803f:235")) return 0;
+ if(!ah(dp, r, "I.ROOT-SERVERS.NET.", "2001:7fe::53")) return 0;
+ if(!ah(dp, r, "J.ROOT-SERVERS.NET.", "2001:503:c27::2:30")) return 0;
+ if(!ah(dp, r, "K.ROOT-SERVERS.NET.", "2001:7fd::1")) return 0;
+ if(!ah(dp, r, "L.ROOT-SERVERS.NET.", "2001:500:3::42")) return 0;
+ if(!ah(dp, r, "M.ROOT-SERVERS.NET.", "2001:dc3::35")) return 0;
+ }
+ return dp;
+}
+
+/** insert new hint info into hint structure */
+static int
+hints_insert(struct iter_hints* hints, uint16_t c, struct delegpt* dp,
+ int noprime)
+{
+ struct iter_hints_stub* node = regional_alloc(hints->region,
+ sizeof(struct iter_hints_stub));
+ uint8_t* nm;
+ if(!node)
+ return 0;
+ nm = regional_alloc_init(hints->region, dp->name, dp->namelen);
+ if(!nm)
+ return 0;
+ node->dp = dp;
+ node->noprime = (uint8_t)noprime;
+ if(!name_tree_insert(&hints->tree, &node->node, nm, dp->namelen,
+ dp->namelabs, c)) {
+ log_err("second hints ignored.");
+ }
+ return 1;
+}
+
+/** set stub name */
+static int
+read_stubs_name(struct iter_hints* hints, struct config_stub* s,
+ struct delegpt* dp)
+{
+ ldns_rdf* rdf;
+ if(!s->name) {
+ log_err("stub zone without a name");
+ return 0;
+ }
+ rdf = ldns_dname_new_frm_str(s->name);
+ if(!rdf) {
+ log_err("cannot parse stub zone name %s", s->name);
+ return 0;
+ }
+ if(!delegpt_set_name(dp, hints->region, ldns_rdf_data(rdf))) {
+ ldns_rdf_deep_free(rdf);
+ log_err("out of memory");
+ return 0;
+ }
+ ldns_rdf_deep_free(rdf);
+ return 1;
+}
+
+/** set stub host names */
+static int
+read_stubs_host(struct iter_hints* hints, struct config_stub* s,
+ struct delegpt* dp)
+{
+ struct config_strlist* p;
+ ldns_rdf* rdf;
+ for(p = s->hosts; p; p = p->next) {
+ log_assert(p->str);
+ rdf = ldns_dname_new_frm_str(p->str);
+ if(!rdf) {
+ log_err("cannot parse stub %s nameserver name: '%s'",
+ s->name, p->str);
+ return 0;
+ }
+ if(!delegpt_add_ns(dp, hints->region, ldns_rdf_data(rdf), 0)) {
+ ldns_rdf_deep_free(rdf);
+ log_err("out of memory");
+ return 0;
+ }
+ ldns_rdf_deep_free(rdf);
+ }
+ return 1;
+}
+
+/** set stub server addresses */
+static int
+read_stubs_addr(struct iter_hints* hints, struct config_stub* s,
+ struct delegpt* dp)
+{
+ struct config_strlist* p;
+ struct sockaddr_storage addr;
+ socklen_t addrlen;
+ for(p = s->addrs; p; p = p->next) {
+ log_assert(p->str);
+ if(!extstrtoaddr(p->str, &addr, &addrlen)) {
+ log_err("cannot parse stub %s ip address: '%s'",
+ s->name, p->str);
+ return 0;
+ }
+ if(!delegpt_add_addr(dp, hints->region, &addr, addrlen, 0, 0)) {
+ log_err("out of memory");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/** read stubs config */
+static int
+read_stubs(struct iter_hints* hints, struct config_file* cfg)
+{
+ struct config_stub* s;
+ for(s = cfg->stubs; s; s = s->next) {
+ struct delegpt* dp = delegpt_create(hints->region);
+ if(!dp) {
+ log_err("out of memory");
+ return 0;
+ }
+ dp->has_parent_side_NS = 1;
+ if(!read_stubs_name(hints, s, dp) ||
+ !read_stubs_host(hints, s, dp) ||
+ !read_stubs_addr(hints, s, dp))
+ return 0;
+ if(!hints_insert(hints, LDNS_RR_CLASS_IN, dp, !s->isprime))
+ return 0;
+ delegpt_log(VERB_QUERY, dp);
+ }
+ return 1;
+}
+
+/** read root hints from file */
+static int
+read_root_hints(struct iter_hints* hints, char* fname)
+{
+ int lineno = 0;
+ uint32_t default_ttl = 0;
+ ldns_rdf* origin = NULL;
+ ldns_rdf* prev_rr = NULL;
+ struct delegpt* dp;
+ ldns_rr* rr = NULL;
+ ldns_status status;
+ uint16_t c = LDNS_RR_CLASS_IN;
+ FILE* f = fopen(fname, "r");
+ if(!f) {
+ log_err("could not read root hints %s: %s",
+ fname, strerror(errno));
+ return 0;
+ }
+ dp = delegpt_create(hints->region);
+ if(!dp) {
+ log_err("out of memory reading root hints");
+ fclose(f);
+ return 0;
+ }
+ verbose(VERB_QUERY, "Reading root hints from %s", fname);
+ dp->has_parent_side_NS = 1;
+ while(!feof(f)) {
+ status = ldns_rr_new_frm_fp_l(&rr, f,
+ &default_ttl, &origin, &prev_rr, &lineno);
+ if(status == LDNS_STATUS_SYNTAX_EMPTY ||
+ status == LDNS_STATUS_SYNTAX_TTL ||
+ status == LDNS_STATUS_SYNTAX_ORIGIN)
+ continue;
+ if(status != LDNS_STATUS_OK) {
+ log_err("reading root hints %s %d: %s", fname,
+ lineno, ldns_get_errorstr_by_id(status));
+ goto stop_read;
+ }
+ if(ldns_rr_get_type(rr) == LDNS_RR_TYPE_NS) {
+ if(!delegpt_add_ns(dp, hints->region,
+ ldns_rdf_data(ldns_rr_rdf(rr, 0)), 0)) {
+ log_err("out of memory reading root hints");
+ goto stop_read;
+ }
+ c = ldns_rr_get_class(rr);
+ if(!dp->name) {
+ if(!delegpt_set_name(dp, hints->region,
+ ldns_rdf_data(ldns_rr_owner(rr)))){
+ log_err("out of memory.");
+ goto stop_read;
+ }
+ }
+ } else if(ldns_rr_get_type(rr) == LDNS_RR_TYPE_A) {
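+			/* build an IPv4 socket address on the default
+			 * DNS port from the A rdata */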
+ struct sockaddr_in sa;
+ socklen_t len = (socklen_t)sizeof(sa);
+ memset(&sa, 0, len);
+ sa.sin_family = AF_INET;
+ sa.sin_port = (in_port_t)htons(UNBOUND_DNS_PORT);
+ memmove(&sa.sin_addr,
+ ldns_rdf_data(ldns_rr_rdf(rr, 0)), INET_SIZE);
+ if(!delegpt_add_target(dp, hints->region,
+ ldns_rdf_data(ldns_rr_owner(rr)),
+ ldns_rdf_size(ldns_rr_owner(rr)),
+ (struct sockaddr_storage*)&sa, len,
+ 0, 0)) {
+ log_err("out of memory reading root hints");
+ goto stop_read;
+ }
+ } else if(ldns_rr_get_type(rr) == LDNS_RR_TYPE_AAAA) {
+ struct sockaddr_in6 sa;
+ socklen_t len = (socklen_t)sizeof(sa);
+ memset(&sa, 0, len);
+ sa.sin6_family = AF_INET6;
+ sa.sin6_port = (in_port_t)htons(UNBOUND_DNS_PORT);
+ memmove(&sa.sin6_addr,
+ ldns_rdf_data(ldns_rr_rdf(rr, 0)), INET6_SIZE);
+ if(!delegpt_add_target(dp, hints->region,
+ ldns_rdf_data(ldns_rr_owner(rr)),
+ ldns_rdf_size(ldns_rr_owner(rr)),
+ (struct sockaddr_storage*)&sa, len,
+ 0, 0)) {
+ log_err("out of memory reading root hints");
+ goto stop_read;
+ }
+ } else {
+ log_warn("root hints %s:%d skipping type %d",
+ fname, lineno, ldns_rr_get_type(rr));
+ }
+
+ ldns_rr_free(rr);
+ }
+
+ if (origin)
+ ldns_rdf_deep_free(origin);
+ if (prev_rr)
+ ldns_rdf_deep_free(prev_rr);
+ fclose(f);
+ if(!dp->name) {
+ log_warn("root hints %s: no NS content", fname);
+ return 1;
+ }
+ if(!hints_insert(hints, c, dp, 0)) {
+ return 0;
+ }
+ delegpt_log(VERB_QUERY, dp);
+ return 1;
+
+stop_read:
+ if (origin)
+ ldns_rdf_deep_free(origin);
+ if (prev_rr)
+ ldns_rdf_deep_free(prev_rr);
+ fclose(f);
+ return 0;
+}
+
+/** read root hints list */
+static int
+read_root_hints_list(struct iter_hints* hints, struct config_file* cfg)
+{
+ struct config_strlist* p;
+ for(p = cfg->root_hints; p; p = p->next) {
+ log_assert(p->str);
+ if(p->str && p->str[0]) {
+ char* f = p->str;
+ if(cfg->chrootdir && cfg->chrootdir[0] &&
+ strncmp(p->str, cfg->chrootdir,
+ strlen(cfg->chrootdir)) == 0)
+ f += strlen(cfg->chrootdir);
+ if(!read_root_hints(hints, f))
+ return 0;
+ }
+ }
+ return 1;
+}
+
+int
+hints_apply_cfg(struct iter_hints* hints, struct config_file* cfg)
+{
+ regional_free_all(hints->region);
+ name_tree_init(&hints->tree);
+
+ /* read root hints */
+ if(!read_root_hints_list(hints, cfg))
+ return 0;
+
+ /* read stub hints */
+ if(!read_stubs(hints, cfg))
+ return 0;
+
+ /* use fallback compiletime root hints */
+ if(!hints_lookup_root(hints, LDNS_RR_CLASS_IN)) {
+ struct delegpt* dp = compile_time_root_prime(hints->region,
+ cfg->do_ip4, cfg->do_ip6);
+ verbose(VERB_ALGO, "no config, using builtin root hints.");
+ if(!dp)
+ return 0;
+ if(!hints_insert(hints, LDNS_RR_CLASS_IN, dp, 0))
+ return 0;
+ }
+
+ name_tree_init_parents(&hints->tree);
+ return 1;
+}
+
+struct delegpt*
+hints_lookup_root(struct iter_hints* hints, uint16_t qclass)
+{
+ uint8_t rootlab = 0;
+ struct iter_hints_stub *stub;
+ stub = (struct iter_hints_stub*)name_tree_find(&hints->tree,
+ &rootlab, 1, 1, qclass);
+ if(!stub)
+ return NULL;
+ return stub->dp;
+}
+
+struct iter_hints_stub*
+hints_lookup_stub(struct iter_hints* hints, uint8_t* qname,
+ uint16_t qclass, struct delegpt* cache_dp)
+{
+ size_t len;
+ int labs;
+ struct iter_hints_stub *r;
+
+ /* first lookup the stub */
+ labs = dname_count_size_labels(qname, &len);
+ r = (struct iter_hints_stub*)name_tree_lookup(&hints->tree, qname,
+ len, labs, qclass);
+ if(!r) return NULL;
+
+ /* If there is no cache (root prime situation) */
+ if(cache_dp == NULL) {
+ if(r->dp->namelabs != 1)
+ return r; /* no cache dp, use any non-root stub */
+ return NULL;
+ }
+
+ /*
+	 * If the stub is the same as the delegation we got, and it has
+	 * noprime set, return it so this stub is used instead of the cached dp.
+ */
+ if(r->noprime && query_dname_compare(cache_dp->name, r->dp->name)==0)
+ return r; /* use this stub instead of cached dp */
+
+ /*
+ * If our cached delegation point is above the hint, we need to prime.
+ */
+ if(dname_strict_subdomain(r->dp->name, r->dp->namelabs,
+ cache_dp->name, cache_dp->namelabs))
+ return r; /* need to prime this stub */
+ return NULL;
+}
+
+int hints_next_root(struct iter_hints* hints, uint16_t* qclass)
+{
+ return name_tree_next_root(&hints->tree, qclass);
+}
+
+size_t
+hints_get_mem(struct iter_hints* hints)
+{
+ if(!hints) return 0;
+ return sizeof(*hints) + regional_get_mem(hints->region);
+}
diff --git a/3rdParty/Unbound/src/src/iterator/iter_hints.h b/3rdParty/Unbound/src/src/iterator/iter_hints.h
new file mode 100644
index 0000000..4540971
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iter_hints.h
@@ -0,0 +1,142 @@
+/*
+ * iterator/iter_hints.h - iterative resolver module stub and root hints.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains functions to assist the iterator module.
+ * Keep track of stub and root hints, and read those from config.
+ */
+
+#ifndef ITERATOR_ITER_HINTS_H
+#define ITERATOR_ITER_HINTS_H
+#include "util/storage/dnstree.h"
+struct iter_env;
+struct config_file;
+struct delegpt;
+struct regional;
+
+/**
+ * Iterator hints structure
+ */
+struct iter_hints {
+ /** regional where hints are allocated */
+ struct regional* region;
+ /**
+ * Hints are stored in this tree. Sort order is specially chosen.
+ * first sorted on qclass. Then on dname in nsec-like order, so that
+ * a lookup on class, name will return an exact match or the closest
+ * match which gives the ancestor needed.
+ * contents of type iter_hints_stub. The class IN root is in here.
+ * uses name_tree_node from dnstree.h.
+ */
+ rbtree_t tree;
+};
+
+/**
+ * Iterator hints for a particular stub.
+ */
+struct iter_hints_stub {
+ /** tree sorted by name, class */
+ struct name_tree_node node;
+ /** delegation point with hint information for this stub. */
+ struct delegpt* dp;
+ /** does the stub need to forego priming (like on other ports) */
+ uint8_t noprime;
+};
+
+/**
+ * Create hints
+ * @return new hints or NULL on error.
+ */
+struct iter_hints* hints_create(void);
+
+/**
+ * Delete hints.
+ * @param hints: to delete.
+ */
+void hints_delete(struct iter_hints* hints);
+
+/**
+ * Process hints config. Sets default values for root hints if no config.
+ * @param hints: where to store.
+ * @param cfg: config options.
+ * @return 0 on error.
+ */
+int hints_apply_cfg(struct iter_hints* hints, struct config_file* cfg);
+
+/**
+ * Find root hints for the given class.
+ * @param hints: hint storage.
+ * @param qclass: class for which root hints are requested. host order.
+ * @return: NULL if no hints, or a ptr to stored hints.
+ */
+struct delegpt* hints_lookup_root(struct iter_hints* hints, uint16_t qclass);
+
+/**
+ * Find next root hints (to cycle through all root hints).
+ * @param hints: hint storage
+ * @param qclass: class for which root hints are sought.
+ * 0 means give the first available root hints class.
+ * 	x means give class x, or a higher class if there is one.
+ * 	The found class is returned in this variable.
+ * @return true if a root hint class is found.
+ * 	false if no root hint class is found (qclass may have been changed).
+ */
+int hints_next_root(struct iter_hints* hints, uint16_t* qclass);
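+
+/*
+ * Cycling sketch (illustrative only): enumerate every class that has
+ * root hints and fetch its delegation point.
+ *
+ *	uint16_t c = 0;
+ *	while(hints_next_root(hints, &c)) {
+ *		struct delegpt* dp = hints_lookup_root(hints, c);
+ *		...
+ *		if(c == 0xffff) break;
+ *		c++;
+ *	}
+ */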
+
+/**
+ * Given a qname/qclass combination, and the delegation point from the cache
+ * for this qname/qclass, determine if this combination indicates that a
+ * stub hint exists and must be primed.
+ *
+ * @param hints: hint storage.
+ * @param qname: The qname that generated the delegation point.
+ * @param qclass: The qclass that generated the delegation point.
+ * @param dp: The cache generated delegation point.
+ * @return: A priming delegation point if there is a stub hint that must
+ * be primed, otherwise null.
+ */
+struct iter_hints_stub* hints_lookup_stub(struct iter_hints* hints,
+ uint8_t* qname, uint16_t qclass, struct delegpt* dp);
+
+/**
+ * Get memory in use by hints
+ * @param hints: hint storage.
+ * @return bytes in use
+ */
+size_t hints_get_mem(struct iter_hints* hints);
+
+#endif /* ITERATOR_ITER_HINTS_H */
diff --git a/3rdParty/Unbound/src/src/iterator/iter_priv.c b/3rdParty/Unbound/src/src/iterator/iter_priv.c
new file mode 100644
index 0000000..db7dbe5
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iter_priv.c
@@ -0,0 +1,263 @@
+/*
+ * iterator/iter_priv.c - iterative resolver private address and domain store
+ *
+ * Copyright (c) 2008, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains functions to assist the iterator module.
+ * Keep track of the private addresses and lookup fast.
+ */
+
+#include "config.h"
+#include <ldns/dname.h>
+#include "iterator/iter_priv.h"
+#include "util/regional.h"
+#include "util/log.h"
+#include "util/config_file.h"
+#include "util/data/dname.h"
+#include "util/data/msgparse.h"
+#include "util/net_help.h"
+#include "util/storage/dnstree.h"
+
+struct iter_priv* priv_create(void)
+{
+ struct iter_priv* priv = (struct iter_priv*)calloc(1, sizeof(*priv));
+ if(!priv)
+ return NULL;
+ priv->region = regional_create();
+ if(!priv->region) {
+ priv_delete(priv);
+ return NULL;
+ }
+ addr_tree_init(&priv->a);
+ name_tree_init(&priv->n);
+ return priv;
+}
+
+void priv_delete(struct iter_priv* priv)
+{
+ if(!priv) return;
+ regional_destroy(priv->region);
+ free(priv);
+}
+
+/** Read private-addr declarations from config */
+static int read_addrs(struct iter_priv* priv, struct config_file* cfg)
+{
+ /* parse addresses, report errors, insert into tree */
+ struct config_strlist* p;
+ struct addr_tree_node* n;
+ struct sockaddr_storage addr;
+ int net;
+ socklen_t addrlen;
+
+ for(p = cfg->private_address; p; p = p->next) {
+ log_assert(p->str);
+ if(!netblockstrtoaddr(p->str, UNBOUND_DNS_PORT, &addr,
+ &addrlen, &net)) {
+ log_err("cannot parse private-address: %s", p->str);
+ return 0;
+ }
+ n = (struct addr_tree_node*)regional_alloc(priv->region,
+ sizeof(*n));
+ if(!n) {
+ log_err("out of memory");
+ return 0;
+ }
+ if(!addr_tree_insert(&priv->a, n, &addr, addrlen, net)) {
+ verbose(VERB_QUERY, "ignoring duplicate "
+ "private-address: %s", p->str);
+ }
+ }
+ return 1;
+}
+
+/** Read private-domain declarations from config */
+static int read_names(struct iter_priv* priv, struct config_file* cfg)
+{
+ /* parse names, report errors, insert into tree */
+ struct config_strlist* p;
+ struct name_tree_node* n;
+ uint8_t* nm;
+ size_t nm_len;
+ int nm_labs;
+ ldns_rdf* rdf;
+
+ for(p = cfg->private_domain; p; p = p->next) {
+ log_assert(p->str);
+ rdf = ldns_dname_new_frm_str(p->str);
+ if(!rdf) {
+ log_err("cannot parse private-domain: %s", p->str);
+ return 0;
+ }
+ nm = ldns_rdf_data(rdf);
+ nm_labs = dname_count_size_labels(nm, &nm_len);
+ nm = (uint8_t*)regional_alloc_init(priv->region, nm, nm_len);
+ ldns_rdf_deep_free(rdf);
+ if(!nm) {
+ log_err("out of memory");
+ return 0;
+ }
+ n = (struct name_tree_node*)regional_alloc(priv->region,
+ sizeof(*n));
+ if(!n) {
+ log_err("out of memory");
+ return 0;
+ }
+ if(!name_tree_insert(&priv->n, n, nm, nm_len, nm_labs,
+ LDNS_RR_CLASS_IN)) {
+ verbose(VERB_QUERY, "ignoring duplicate "
+ "private-domain: %s", p->str);
+ }
+ }
+ return 1;
+}
+
+int priv_apply_cfg(struct iter_priv* priv, struct config_file* cfg)
+{
+ /* empty the current contents */
+ regional_free_all(priv->region);
+ addr_tree_init(&priv->a);
+ name_tree_init(&priv->n);
+
+ /* read new contents */
+ if(!read_addrs(priv, cfg))
+ return 0;
+ if(!read_names(priv, cfg))
+ return 0;
+
+ /* prepare for lookups */
+ addr_tree_init_parents(&priv->a);
+ name_tree_init_parents(&priv->n);
+ return 1;
+}
+
+/**
+ * See if an address is blocked.
+ * @param priv: structure for address storage.
+ * @param addr: address to check
+ * @param addrlen: length of addr.
+ * @return: true if the address must not be queried. false if unlisted.
+ */
+static int
+priv_lookup_addr(struct iter_priv* priv, struct sockaddr_storage* addr,
+ socklen_t addrlen)
+{
+ return addr_tree_lookup(&priv->a, addr, addrlen) != NULL;
+}
+
+/**
+ * See if a name is whitelisted.
+ * @param priv: structure for address storage.
+ * @param pkt: the packet (for compression ptrs).
+ * @param name: name to check.
+ * @param name_len: uncompressed length of the name to check.
+ * @param dclass: class to check.
+ * @return: true if the name is OK. false if unlisted.
+ */
+static int
+priv_lookup_name(struct iter_priv* priv, ldns_buffer* pkt,
+ uint8_t* name, size_t name_len, uint16_t dclass)
+{
+ size_t len;
+ uint8_t decomp[256];
+ int labs;
+ if(name_len >= sizeof(decomp))
+ return 0;
+ dname_pkt_copy(pkt, decomp, name);
+ labs = dname_count_size_labels(decomp, &len);
+ log_assert(name_len == len);
+ return name_tree_lookup(&priv->n, decomp, len, labs, dclass) != NULL;
+}
+
+size_t priv_get_mem(struct iter_priv* priv)
+{
+ if(!priv) return 0;
+ return sizeof(*priv) + regional_get_mem(priv->region);
+}
+
+int priv_rrset_bad(struct iter_priv* priv, ldns_buffer* pkt,
+ struct rrset_parse* rrset)
+{
+ if(priv->a.count == 0)
+ return 0; /* there are no blocked addresses */
+
+ /* see if it is a private name, that is allowed to have any */
+ if(priv_lookup_name(priv, pkt, rrset->dname, rrset->dname_len,
+ ntohs(rrset->rrset_class))) {
+ return 0;
+ } else {
+		/* so it's a public name, check the address */
+ socklen_t len;
+ struct rr_parse* rr;
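+		/* rr->ttl_data points into the packet: 4 bytes TTL,
+		 * 2 bytes rdata length, then the rdata itself */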
+ if(rrset->type == LDNS_RR_TYPE_A) {
+ struct sockaddr_storage addr;
+ struct sockaddr_in sa;
+
+ len = (socklen_t)sizeof(sa);
+ memset(&sa, 0, len);
+ sa.sin_family = AF_INET;
+ sa.sin_port = (in_port_t)htons(UNBOUND_DNS_PORT);
+ for(rr = rrset->rr_first; rr; rr = rr->next) {
+ if(ldns_read_uint16(rr->ttl_data+4)
+ != INET_SIZE)
+ continue;
+ memmove(&sa.sin_addr, rr->ttl_data+4+2,
+ INET_SIZE);
+ memmove(&addr, &sa, len);
+ if(priv_lookup_addr(priv, &addr, len))
+ return 1;
+ }
+ } else if(rrset->type == LDNS_RR_TYPE_AAAA) {
+ struct sockaddr_storage addr;
+ struct sockaddr_in6 sa;
+ len = (socklen_t)sizeof(sa);
+ memset(&sa, 0, len);
+ sa.sin6_family = AF_INET6;
+ sa.sin6_port = (in_port_t)htons(UNBOUND_DNS_PORT);
+ for(rr = rrset->rr_first; rr; rr = rr->next) {
+ if(ldns_read_uint16(rr->ttl_data+4)
+ != INET6_SIZE)
+ continue;
+ memmove(&sa.sin6_addr, rr->ttl_data+4+2,
+ INET6_SIZE);
+ memmove(&addr, &sa, len);
+ if(priv_lookup_addr(priv, &addr, len))
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
diff --git a/3rdParty/Unbound/src/src/iterator/iter_priv.h b/3rdParty/Unbound/src/src/iterator/iter_priv.h
new file mode 100644
index 0000000..f6264f8
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iter_priv.h
@@ -0,0 +1,110 @@
+/*
+ * iterator/iter_priv.h - iterative resolver private address and domain store
+ *
+ * Copyright (c) 2008, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains functions to assist the iterator module.
+ * Keep track of the private addresses and lookup fast.
+ */
+
+#ifndef ITERATOR_ITER_PRIV_H
+#define ITERATOR_ITER_PRIV_H
+#include "util/rbtree.h"
+#include <ldns/buffer.h>
+struct iter_env;
+struct config_file;
+struct regional;
+struct rrset_parse;
+
+/**
+ * Iterator priv structure
+ */
+struct iter_priv {
+ /** regional for allocation */
+ struct regional* region;
+ /**
+ * Tree of the address spans that are blocked.
+ * contents of type addr_tree_node.
+	 * No further data needed, only presence or absence.
+ */
+ rbtree_t a;
+ /**
+	 * Tree of the domains that are allowed to contain
+	 * the blocked address spans.
+	 * contents of type name_tree_node.
+	 * No further data needed, only presence or absence.
+ */
+ rbtree_t n;
+};
+
+/**
+ * Create priv structure
+ * @return new structure or NULL on error.
+ */
+struct iter_priv* priv_create(void);
+
+/**
+ * Delete priv structure.
+ * @param priv: to delete.
+ */
+void priv_delete(struct iter_priv* priv);
+
+/**
+ * Process priv config.
+ * @param priv: where to store.
+ * @param cfg: config options.
+ * @return 0 on error.
+ */
+int priv_apply_cfg(struct iter_priv* priv, struct config_file* cfg);
+
+/**
+ * See if rrset is bad.
+ * @param priv: structure for private address storage.
+ * @param pkt: packet to decompress rrset name in.
+ * @param rrset: the rrset to examine, A or AAAA.
+ * @return true if the rrset is bad and should be removed.
+ */
+int priv_rrset_bad(struct iter_priv* priv, ldns_buffer* pkt,
+ struct rrset_parse* rrset);
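+
+/*
+ * Scrubbing sketch (illustrative only): walk a parsed message and test
+ * each rrset; a true result means the A/AAAA rrset leaks a private
+ * address for a public name and should be dropped by the caller.
+ *
+ *	struct rrset_parse* rrset;
+ *	for(rrset = msg->rrset_first; rrset; rrset = rrset->rrset_all_next)
+ *		if(priv_rrset_bad(priv, pkt, rrset))
+ *			... remove rrset from msg ...
+ */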
+
+/**
+ * Get memory used by priv structure.
+ * @param priv: structure for address storage.
+ * @return bytes in use.
+ */
+size_t priv_get_mem(struct iter_priv* priv);
+
+#endif /* ITERATOR_ITER_PRIV_H */
diff --git a/3rdParty/Unbound/src/src/iterator/iter_resptype.c b/3rdParty/Unbound/src/src/iterator/iter_resptype.c
new file mode 100644
index 0000000..2cdc5fc
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iter_resptype.c
@@ -0,0 +1,286 @@
+/*
+ * iterator/iter_resptype.c - response type information and classification.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file defines the response type. DNS Responses can be classified as
+ * one of the response types.
+ */
+#include "config.h"
+#include <ldns/packet.h>
+#include "iterator/iter_resptype.h"
+#include "iterator/iter_delegpt.h"
+#include "services/cache/dns.h"
+#include "util/net_help.h"
+#include "util/data/dname.h"
+
+enum response_type
+response_type_from_cache(struct dns_msg* msg,
+ struct query_info* request)
+{
+ /* If the message is NXDOMAIN, then it is an ANSWER. */
+ if(FLAGS_GET_RCODE(msg->rep->flags) == LDNS_RCODE_NXDOMAIN)
+ return RESPONSE_TYPE_ANSWER;
+ if(request->qtype == LDNS_RR_TYPE_ANY)
+ return RESPONSE_TYPE_ANSWER;
+
+ /* First we look at the answer section. This can tell us if this is
+ * CNAME or positive ANSWER. */
+ if(msg->rep->an_numrrsets > 0) {
+ /* Now look at the answer section first. 3 states:
+ * o our answer is there directly,
+ * o our answer is there after a cname,
+ * o or there is just a cname. */
+ uint8_t* mname = request->qname;
+ size_t mname_len = request->qname_len;
+ size_t i;
+ for(i=0; i<msg->rep->an_numrrsets; i++) {
+ struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
+
+ /* If we have encountered an answer (before or
+ * after a CNAME), then we are done! Note that
+ * if qtype == CNAME then this will be noted as
+ * an ANSWER before it gets treated as a CNAME,
+ * as it should */
+ if(ntohs(s->rk.type) == request->qtype &&
+ ntohs(s->rk.rrset_class) == request->qclass &&
+ query_dname_compare(mname, s->rk.dname) == 0) {
+ return RESPONSE_TYPE_ANSWER;
+ }
+
+ /* If we have encountered a CNAME, make sure that
+ * it is relevant. */
+ if(ntohs(s->rk.type) == LDNS_RR_TYPE_CNAME &&
+ query_dname_compare(mname, s->rk.dname) == 0) {
+ get_cname_target(s, &mname, &mname_len);
+ }
+ }
+
+ /* if we encountered a CNAME (or a bunch of CNAMEs), and
+ * still got to here, then it is a CNAME response. (i.e.,
+ * the CNAME chain didn't terminate in an answer rrset.) */
+ if(mname != request->qname) {
+ return RESPONSE_TYPE_CNAME;
+ }
+ }
+
+ /* At this point, since we don't need to detect REFERRAL or LAME
+ * messages, it can only be an ANSWER. */
+ return RESPONSE_TYPE_ANSWER;
+}
+
+enum response_type
+response_type_from_server(int rdset,
+ struct dns_msg* msg, struct query_info* request, struct delegpt* dp)
+{
+ uint8_t* origzone = (uint8_t*)"\000"; /* the default */
+ struct ub_packed_rrset_key* s;
+ size_t i;
+
+ if(!msg || !request)
+ return RESPONSE_TYPE_THROWAWAY;
+
+ /* If the message is NXDOMAIN, then it answers the question. */
+ if(FLAGS_GET_RCODE(msg->rep->flags) == LDNS_RCODE_NXDOMAIN) {
+		/* make sure it's not recursive when we don't want it to be */
+ if( (msg->rep->flags&BIT_RA) &&
+ !(msg->rep->flags&BIT_AA) && !rdset)
+ return RESPONSE_TYPE_REC_LAME;
+ /* it could be a CNAME with NXDOMAIN rcode */
+ for(i=0; i<msg->rep->an_numrrsets; i++) {
+ s = msg->rep->rrsets[i];
+ if(ntohs(s->rk.type) == LDNS_RR_TYPE_CNAME &&
+ query_dname_compare(request->qname,
+ s->rk.dname) == 0) {
+ return RESPONSE_TYPE_CNAME;
+ }
+ }
+ return RESPONSE_TYPE_ANSWER;
+ }
+
+ /* Other response codes mean (so far) to throw the response away as
+ * meaningless and move on to the next nameserver. */
+ if(FLAGS_GET_RCODE(msg->rep->flags) != LDNS_RCODE_NOERROR)
+ return RESPONSE_TYPE_THROWAWAY;
+
+ /* Note: TC bit has already been handled */
+
+ if(dp) {
+ origzone = dp->name;
+ }
+
+ /* First we look at the answer section. This can tell us if this is a
+ * CNAME or ANSWER or (provisional) ANSWER. */
+ if(msg->rep->an_numrrsets > 0) {
+ uint8_t* mname = request->qname;
+ size_t mname_len = request->qname_len;
+
+ /* Now look at the answer section first. 3 states: our
+ * answer is there directly, our answer is there after
+ * a cname, or there is just a cname. */
+ for(i=0; i<msg->rep->an_numrrsets; i++) {
+ s = msg->rep->rrsets[i];
+
+			/* if the answer section has an NS rrset, qtype is
+			 * ANY or NS, the delegation is lower, and no CNAMEs
+			 * followed, this is a referral where the NS went to
+			 * the AN section */
+ if((request->qtype == LDNS_RR_TYPE_ANY ||
+ request->qtype == LDNS_RR_TYPE_NS) &&
+ ntohs(s->rk.type) == LDNS_RR_TYPE_NS &&
+ ntohs(s->rk.rrset_class) == request->qclass &&
+ dname_strict_subdomain_c(s->rk.dname,
+ origzone)) {
+ if((msg->rep->flags&BIT_AA))
+ return RESPONSE_TYPE_ANSWER;
+ return RESPONSE_TYPE_REFERRAL;
+ }
+
+ /* If we have encountered an answer (before or
+ * after a CNAME), then we are done! Note that
+ * if qtype == CNAME then this will be noted as an
+ * ANSWER before it gets treated as a CNAME, as
+ * it should. */
+ if(ntohs(s->rk.type) == request->qtype &&
+ ntohs(s->rk.rrset_class) == request->qclass &&
+ query_dname_compare(mname, s->rk.dname) == 0) {
+ if((msg->rep->flags&BIT_AA))
+ return RESPONSE_TYPE_ANSWER;
+ /* If the AA bit isn't on, and we've seen
+ * the answer, we only provisionally say
+ * 'ANSWER' -- it very well could be a
+ * REFERRAL. */
+ break;
+ }
+
+ /* If we have encountered a CNAME, make sure that
+ * it is relevant. */
+ if(ntohs(s->rk.type) == LDNS_RR_TYPE_CNAME &&
+ query_dname_compare(mname, s->rk.dname) == 0) {
+ get_cname_target(s, &mname, &mname_len);
+ }
+ }
+ /* not a referral, and qtype any, thus an answer */
+ if(request->qtype == LDNS_RR_TYPE_ANY)
+ return RESPONSE_TYPE_ANSWER;
+ /* if we encountered a CNAME (or a bunch of CNAMEs), and
+ * still got to here, then it is a CNAME response.
+ * (This is regardless of the AA bit at this point) */
+ if(mname != request->qname) {
+ return RESPONSE_TYPE_CNAME;
+ }
+ }
+
+ /* Looking at the authority section, we just look and see if
+ * there is a SOA record, that means a NOERROR/NODATA */
+ for(i = msg->rep->an_numrrsets; i < (msg->rep->an_numrrsets +
+ msg->rep->ns_numrrsets); i++) {
+ s = msg->rep->rrsets[i];
+
+ /* The normal way of detecting NOERROR/NODATA. */
+ if(ntohs(s->rk.type) == LDNS_RR_TYPE_SOA &&
+ dname_subdomain_c(request->qname, s->rk.dname)) {
+ /* we do our own recursion, thank you */
+ if( (msg->rep->flags&BIT_RA) &&
+ !(msg->rep->flags&BIT_AA) && !rdset)
+ return RESPONSE_TYPE_REC_LAME;
+ return RESPONSE_TYPE_ANSWER;
+ }
+ }
+ /* Looking at the authority section, we just look and see if
+ * there is a delegation NS set, turning it into a delegation.
+ * Otherwise, we will have to conclude ANSWER (either it is
+	 * NOERROR/NODATA, or a non-authoritative answer). */
+ for(i = msg->rep->an_numrrsets; i < (msg->rep->an_numrrsets +
+ msg->rep->ns_numrrsets); i++) {
+ s = msg->rep->rrsets[i];
+
+ /* Detect REFERRAL/LAME/ANSWER based on the relationship
+ * of the NS set to the originating zone name. */
+ if(ntohs(s->rk.type) == LDNS_RR_TYPE_NS) {
+ /* If we are getting an NS set for the zone we
+ * thought we were contacting, then it is an answer.*/
+ if(query_dname_compare(s->rk.dname, origzone) == 0) {
+ /* see if mistakenly a recursive server was
+ * deployed and is responding nonAA */
+ if( (msg->rep->flags&BIT_RA) &&
+ !(msg->rep->flags&BIT_AA) && !rdset)
+ return RESPONSE_TYPE_REC_LAME;
+ /* Or if a lame server is deployed,
+ * which gives ns==zone delegation from cache
+				 * without AA bit as well, with nodata nosoa */
+ /* real answer must be +AA and SOA RFC(2308),
+ * so this is wrong, and we SERVFAIL it if
+ * this is the only possible reply, if it
+ * is misdeployed the THROWAWAY makes us pick
+ * the next server from the selection */
+ if(msg->rep->an_numrrsets==0 &&
+ !(msg->rep->flags&BIT_AA) && !rdset)
+ return RESPONSE_TYPE_THROWAWAY;
+ return RESPONSE_TYPE_ANSWER;
+ }
+ /* If we are getting a referral upwards (or to
+ * the same zone), then the server is 'lame'. */
+ if(dname_subdomain_c(origzone, s->rk.dname)) {
+ if(rdset) /* forward or reclame not LAME */
+ return RESPONSE_TYPE_THROWAWAY;
+ return RESPONSE_TYPE_LAME;
+ }
+ /* If the NS set is below the delegation point we
+ * are on, and it is non-authoritative, then it is
+ * a referral, otherwise it is an answer. */
+ if(dname_subdomain_c(s->rk.dname, origzone)) {
+ /* NOTE: I no longer remember in what case
+ * we would like this to be an answer.
+ * NODATA should have a SOA or nothing,
+ * not an NS rrset.
+ * True, referrals should not have the AA
+ * bit set, but... */
+
+ /* if((msg->rep->flags&BIT_AA))
+ return RESPONSE_TYPE_ANSWER; */
+ return RESPONSE_TYPE_REFERRAL;
+ }
+ /* Otherwise, the NS set is irrelevant. */
+ }
+ }
+
+ /* If we've gotten this far, this is NOERROR/NODATA (which could
+ * be an entirely empty message) */
+ /* check if recursive answer; saying it has empty cache */
+ if( (msg->rep->flags&BIT_RA) && !(msg->rep->flags&BIT_AA) && !rdset)
+ return RESPONSE_TYPE_REC_LAME;
+ return RESPONSE_TYPE_ANSWER;
+}
diff --git a/3rdParty/Unbound/src/src/iterator/iter_resptype.h b/3rdParty/Unbound/src/src/iterator/iter_resptype.h
new file mode 100644
index 0000000..3bb3eed
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iter_resptype.h
@@ -0,0 +1,127 @@
+/*
+ * iterator/iter_resptype.h - response type information and classification.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file defines the response type. DNS Responses can be classified as
+ * one of the response types.
+ */
+
+#ifndef ITERATOR_ITER_RESPTYPE_H
+#define ITERATOR_ITER_RESPTYPE_H
+struct dns_msg;
+struct query_info;
+struct delegpt;
+
+/**
+ * The response type is used to interpret the response.
+ */
+enum response_type {
+ /**
+ * 'untyped' means that the type of this response hasn't been
+ * assigned.
+ */
+ RESPONSE_TYPE_UNTYPED = 0,
+
+ /**
+ * 'answer' means that the response terminates the resolution
+ * process.
+ */
+ RESPONSE_TYPE_ANSWER,
+
+ /** 'delegation' means that the response is a delegation. */
+ RESPONSE_TYPE_REFERRAL,
+
+ /**
+ * 'cname' means that the response is a cname without the final
+ * answer, and thus must be restarted.
+ */
+ RESPONSE_TYPE_CNAME,
+
+ /**
+ * 'throwaway' means that this particular response should be
+ * discarded and the next nameserver should be contacted
+ */
+ RESPONSE_TYPE_THROWAWAY,
+
+ /**
+ * 'lame' means that this particular response indicates that
+ * the nameserver knew nothing about the question.
+ */
+ RESPONSE_TYPE_LAME,
+
+ /**
+ * Recursion lame means that the nameserver is some sort of
+ * open recursor, and not authoritative for the question.
+ * It may know something, but not authoritatively.
+ */
+ RESPONSE_TYPE_REC_LAME
+};
+
+/**
+ * Classifies a response message from cache based on the current request.
+ * Note that this routine assumes that THROWAWAY or LAME responses will not
+ * occur. Also, it will not detect REFERRAL type messages, since those are
+ * (currently) automatically classified based on how they came from the
+ * cache (findDelegation() instead of lookup()).
+ *
+ * @param msg: the message from the cache.
+ * @param request: the request that generated the response.
+ * @return the response type (CNAME or ANSWER).
+ */
+enum response_type response_type_from_cache(struct dns_msg* msg,
+ struct query_info* request);
+
+/**
+ * Classifies a response message (from the wire) based on the current
+ * request.
+ *
+ * NOTE: currently this routine uses the AA bit in the response to help
+ * distinguish between some non-standard referrals and answers. It also
+ * relies somewhat on the originating zone to be accurate (for lameness
+ * detection, mostly).
+ *
+ * @param rdset: if RD bit was sent in query sent by unbound.
+ * @param msg: the message from the server.
+ * @param request: the request that generated the response.
+ * @param dp: The delegation point that was being queried
+ * 	when the response was returned.
+ * @return the response type (ANSWER, CNAME, REFERRAL, LAME, REC_LAME
+ * 	or THROWAWAY).
+ */
+enum response_type response_type_from_server(int rdset,
+ struct dns_msg* msg, struct query_info* request, struct delegpt* dp);
+
+#endif /* ITERATOR_ITER_RESPTYPE_H */
diff --git a/3rdParty/Unbound/src/src/iterator/iter_scrub.c b/3rdParty/Unbound/src/src/iterator/iter_scrub.c
new file mode 100644
index 0000000..6147c96
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iter_scrub.c
@@ -0,0 +1,751 @@
+/*
+ * iterator/iter_scrub.c - scrubbing, normalization, sanitization of DNS msgs.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file has routine(s) for cleaning up incoming DNS messages from
+ * possible useless or malicious junk in it.
+ */
+#include "config.h"
+#include "iterator/iter_scrub.h"
+#include "iterator/iterator.h"
+#include "iterator/iter_priv.h"
+#include "services/cache/rrset.h"
+#include "util/log.h"
+#include "util/net_help.h"
+#include "util/regional.h"
+#include "util/config_file.h"
+#include "util/module.h"
+#include "util/data/msgparse.h"
+#include "util/data/dname.h"
+#include "util/data/msgreply.h"
+#include "util/alloc.h"
+
+/** RRset flag used during scrubbing. The RRset is OK. */
+#define RRSET_SCRUB_OK 0x80
+
+/** remove rrset, update loop variables */
+static void
+remove_rrset(const char* str, ldns_buffer* pkt, struct msg_parse* msg,
+ struct rrset_parse* prev, struct rrset_parse** rrset)
+{
+ if(verbosity >= VERB_QUERY
+ && (*rrset)->dname_len <= LDNS_MAX_DOMAINLEN) {
+ uint8_t buf[LDNS_MAX_DOMAINLEN+1];
+ dname_pkt_copy(pkt, buf, (*rrset)->dname);
+ log_nametypeclass(VERB_QUERY, str, buf,
+ (*rrset)->type, ntohs((*rrset)->rrset_class));
+ }
+ if(prev)
+ prev->rrset_all_next = (*rrset)->rrset_all_next;
+ else msg->rrset_first = (*rrset)->rrset_all_next;
+ if(msg->rrset_last == *rrset)
+ msg->rrset_last = prev;
+ msg->rrset_count --;
+ switch((*rrset)->section) {
+ case LDNS_SECTION_ANSWER: msg->an_rrsets--; break;
+ case LDNS_SECTION_AUTHORITY: msg->ns_rrsets--; break;
+ case LDNS_SECTION_ADDITIONAL: msg->ar_rrsets--; break;
+ default: log_assert(0);
+ }
+ msgparse_bucket_remove(msg, *rrset);
+ *rrset = (*rrset)->rrset_all_next;
+}
+
+/** return true if rr type has additional names in it */
+static int
+has_additional(uint16_t t)
+{
+ switch(t) {
+ case LDNS_RR_TYPE_MB:
+ case LDNS_RR_TYPE_MD:
+ case LDNS_RR_TYPE_MF:
+ case LDNS_RR_TYPE_NS:
+ case LDNS_RR_TYPE_MX:
+ case LDNS_RR_TYPE_KX:
+ case LDNS_RR_TYPE_SRV:
+ return 1;
+ case LDNS_RR_TYPE_NAPTR:
+ /* TODO: NAPTR not supported, glue stripped off */
+ return 0;
+ }
+ return 0;
+}
+
+/** get additional name from rrset RR, return false if no name present */
+static int
+get_additional_name(struct rrset_parse* rrset, struct rr_parse* rr,
+ uint8_t** nm, size_t* nmlen, ldns_buffer* pkt)
+{
+ size_t offset = 0;
+ size_t len, oldpos;
+ switch(rrset->type) {
+ case LDNS_RR_TYPE_MB:
+ case LDNS_RR_TYPE_MD:
+ case LDNS_RR_TYPE_MF:
+ case LDNS_RR_TYPE_NS:
+ offset = 0;
+ break;
+ case LDNS_RR_TYPE_MX:
+ case LDNS_RR_TYPE_KX:
+ offset = 2;
+ break;
+ case LDNS_RR_TYPE_SRV:
+ offset = 6;
+ break;
+ case LDNS_RR_TYPE_NAPTR:
+ /* TODO: NAPTR not supported, glue stripped off */
+ return 0;
+ default:
+ return 0;
+ }
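+	/* wire RR at ttl_data: 4 bytes TTL, 2 bytes rdlength, then rdata */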
+ len = ldns_read_uint16(rr->ttl_data+sizeof(uint32_t));
+ if(len < offset+1)
+ return 0; /* rdata field too small */
+ *nm = rr->ttl_data+sizeof(uint32_t)+sizeof(uint16_t)+offset;
+ oldpos = ldns_buffer_position(pkt);
+ ldns_buffer_set_position(pkt, (size_t)(*nm - ldns_buffer_begin(pkt)));
+ *nmlen = pkt_dname_len(pkt);
+ ldns_buffer_set_position(pkt, oldpos);
+ if(*nmlen == 0)
+ return 0;
+ return 1;
+}
+
+/** Place a mark on rrsets in the additional section to show they are OK */
+static void
+mark_additional_rrset(ldns_buffer* pkt, struct msg_parse* msg,
+ struct rrset_parse* rrset)
+{
+ /* Mark A and AAAA for NS as appropriate additional section info. */
+ uint8_t* nm = NULL;
+ size_t nmlen = 0;
+ struct rr_parse* rr;
+
+ if(!has_additional(rrset->type))
+ return;
+ for(rr = rrset->rr_first; rr; rr = rr->next) {
+ if(get_additional_name(rrset, rr, &nm, &nmlen, pkt)) {
+ /* mark A */
+ hashvalue_t h = pkt_hash_rrset(pkt, nm, LDNS_RR_TYPE_A,
+ rrset->rrset_class, 0);
+ struct rrset_parse* r = msgparse_hashtable_lookup(
+ msg, pkt, h, 0, nm, nmlen,
+ LDNS_RR_TYPE_A, rrset->rrset_class);
+ if(r && r->section == LDNS_SECTION_ADDITIONAL) {
+ r->flags |= RRSET_SCRUB_OK;
+ }
+
+ /* mark AAAA */
+ h = pkt_hash_rrset(pkt, nm, LDNS_RR_TYPE_AAAA,
+ rrset->rrset_class, 0);
+ r = msgparse_hashtable_lookup(msg, pkt, h, 0, nm,
+ nmlen, LDNS_RR_TYPE_AAAA, rrset->rrset_class);
+ if(r && r->section == LDNS_SECTION_ADDITIONAL) {
+ r->flags |= RRSET_SCRUB_OK;
+ }
+ }
+ }
+}
+
+/** Get target name of a CNAME */
+static int
+parse_get_cname_target(struct rrset_parse* rrset, uint8_t** sname,
+ size_t* snamelen)
+{
+ if(rrset->rr_count != 1) {
+ struct rr_parse* sig;
+ verbose(VERB_ALGO, "Found CNAME rrset with "
+ "size > 1: %u", (unsigned)rrset->rr_count);
+ /* use the first CNAME! */
+ rrset->rr_count = 1;
+ rrset->size = rrset->rr_first->size;
+ for(sig=rrset->rrsig_first; sig; sig=sig->next)
+ rrset->size += sig->size;
+ rrset->rr_last = rrset->rr_first;
+ rrset->rr_first->next = NULL;
+ }
+ if(rrset->rr_first->size < sizeof(uint16_t)+1)
+ return 0; /* CNAME rdata too small */
+ *sname = rrset->rr_first->ttl_data + sizeof(uint32_t)
+ + sizeof(uint16_t); /* skip ttl, rdatalen */
+ *snamelen = rrset->rr_first->size - sizeof(uint16_t);
+ return 1;
+}
+
+/** Synthesize CNAME from DNAME, false if too long */
+static int
+synth_cname(uint8_t* qname, size_t qnamelen, struct rrset_parse* dname_rrset,
+ uint8_t* alias, size_t* aliaslen, ldns_buffer* pkt)
+{
+ /* we already know that sname is a strict subdomain of DNAME owner */
+ uint8_t* dtarg = NULL;
+ size_t dtarglen;
+ if(!parse_get_cname_target(dname_rrset, &dtarg, &dtarglen))
+ return 0;
+ log_assert(qnamelen > dname_rrset->dname_len);
+ /* DNAME from com. to net. with qname example.com. -> example.net. */
+ /* so: \3com\0 to \3net\0 and qname \7example\3com\0 */
+ *aliaslen = qnamelen + dtarglen - dname_rrset->dname_len;
+ if(*aliaslen > LDNS_MAX_DOMAINLEN)
+ return 0; /* should have been RCODE YXDOMAIN */
+ /* decompress dnames into buffer, we know it fits */
+ dname_pkt_copy(pkt, alias, qname);
+ dname_pkt_copy(pkt, alias+(qnamelen-dname_rrset->dname_len), dtarg);
+ return 1;
+}
+
+/** synthesize a CNAME rrset */
+static struct rrset_parse*
+synth_cname_rrset(uint8_t** sname, size_t* snamelen, uint8_t* alias,
+ size_t aliaslen, struct regional* region, struct msg_parse* msg,
+ struct rrset_parse* rrset, struct rrset_parse* prev,
+ struct rrset_parse* nx, ldns_buffer* pkt)
+{
+ struct rrset_parse* cn = (struct rrset_parse*)regional_alloc(region,
+ sizeof(struct rrset_parse));
+ if(!cn)
+ return NULL;
+ memset(cn, 0, sizeof(*cn));
+ cn->rr_first = (struct rr_parse*)regional_alloc(region,
+ sizeof(struct rr_parse));
+ if(!cn->rr_first)
+ return NULL;
+ cn->rr_last = cn->rr_first;
+ /* CNAME from sname to alias */
+ cn->dname = (uint8_t*)regional_alloc(region, *snamelen);
+ if(!cn->dname)
+ return NULL;
+ dname_pkt_copy(pkt, cn->dname, *sname);
+ cn->dname_len = *snamelen;
+ cn->type = LDNS_RR_TYPE_CNAME;
+ cn->section = rrset->section;
+ cn->rrset_class = rrset->rrset_class;
+ cn->rr_count = 1;
+ cn->size = sizeof(uint16_t) + aliaslen;
+ cn->hash=pkt_hash_rrset(pkt, cn->dname, cn->type, cn->rrset_class, 0);
+ /* allocate TTL + rdatalen + uncompressed dname */
+ memset(cn->rr_first, 0, sizeof(struct rr_parse));
+ cn->rr_first->outside_packet = 1;
+ cn->rr_first->ttl_data = (uint8_t*)regional_alloc(region,
+ sizeof(uint32_t)+sizeof(uint16_t)+aliaslen);
+ if(!cn->rr_first->ttl_data)
+ return NULL;
+ ldns_write_uint32(cn->rr_first->ttl_data, 0); /* TTL = 0 */
+ ldns_write_uint16(cn->rr_first->ttl_data+4, aliaslen);
+ memmove(cn->rr_first->ttl_data+6, alias, aliaslen);
+ cn->rr_first->size = sizeof(uint16_t)+aliaslen;
+
+ /* link it in */
+ cn->rrset_all_next = nx;
+ if(prev)
+ prev->rrset_all_next = cn;
+ else msg->rrset_first = cn;
+ if(nx == NULL)
+ msg->rrset_last = cn;
+ msg->rrset_count ++;
+ msg->an_rrsets++;
+ /* it is not inserted in the msg hashtable. */
+
+ *sname = cn->rr_first->ttl_data + sizeof(uint32_t)+sizeof(uint16_t);
+ *snamelen = aliaslen;
+ return cn;
+}
+
+/** check if DNAME applies to a name */
+static int
+pkt_strict_sub(ldns_buffer* pkt, uint8_t* sname, uint8_t* dr)
+{
+ uint8_t buf1[LDNS_MAX_DOMAINLEN+1];
+ uint8_t buf2[LDNS_MAX_DOMAINLEN+1];
+ /* decompress names */
+ dname_pkt_copy(pkt, buf1, sname);
+ dname_pkt_copy(pkt, buf2, dr);
+ return dname_strict_subdomain_c(buf1, buf2);
+}
+
+/** check subdomain with decompression */
+static int
+pkt_sub(ldns_buffer* pkt, uint8_t* comprname, uint8_t* zone)
+{
+ uint8_t buf[LDNS_MAX_DOMAINLEN+1];
+ dname_pkt_copy(pkt, buf, comprname);
+ return dname_subdomain_c(buf, zone);
+}
+
+/** check subdomain with decompression, compressed is parent */
+static int
+sub_of_pkt(ldns_buffer* pkt, uint8_t* zone, uint8_t* comprname)
+{
+ uint8_t buf[LDNS_MAX_DOMAINLEN+1];
+ dname_pkt_copy(pkt, buf, comprname);
+ return dname_subdomain_c(zone, buf);
+}
+
+/**
+ * This routine normalizes a response. This includes removing "irrelevant"
+ * records from the answer and additional sections and (re)synthesizing
+ * CNAMEs from DNAMEs, if present.
+ *
+ * @param pkt: packet.
+ * @param msg: msg to normalize.
+ * @param qinfo: original query.
+ * @param region: where to allocate synthesized CNAMEs.
+ * @return 0 on error.
+ */
+static int
+scrub_normalize(ldns_buffer* pkt, struct msg_parse* msg,
+ struct query_info* qinfo, struct regional* region)
+{
+ uint8_t* sname = qinfo->qname;
+ size_t snamelen = qinfo->qname_len;
+ struct rrset_parse* rrset, *prev, *nsset=NULL;
+
+ if(FLAGS_GET_RCODE(msg->flags) != LDNS_RCODE_NOERROR &&
+ FLAGS_GET_RCODE(msg->flags) != LDNS_RCODE_NXDOMAIN)
+ return 1;
+
+ /* For the ANSWER section, remove all "irrelevant" records and add
+	 * synthesized CNAMEs from DNAMEs.
+ * This will strip out-of-order CNAMEs as well. */
+
+ /* walk through the parse packet rrset list, keep track of previous
+ * for insert and delete ease, and examine every RRset */
+ prev = NULL;
+ rrset = msg->rrset_first;
+ while(rrset && rrset->section == LDNS_SECTION_ANSWER) {
+ if(rrset->type == LDNS_RR_TYPE_DNAME &&
+ pkt_strict_sub(pkt, sname, rrset->dname)) {
+ /* check if next rrset is correct CNAME. else,
+ * synthesize a CNAME */
+ struct rrset_parse* nx = rrset->rrset_all_next;
+ uint8_t alias[LDNS_MAX_DOMAINLEN+1];
+ size_t aliaslen = 0;
+ if(rrset->rr_count != 1) {
+ verbose(VERB_ALGO, "Found DNAME rrset with "
+ "size > 1: %u",
+ (unsigned)rrset->rr_count);
+ return 0;
+ }
+ if(!synth_cname(sname, snamelen, rrset, alias,
+ &aliaslen, pkt)) {
+ verbose(VERB_ALGO, "synthesized CNAME "
+ "too long");
+ return 0;
+ }
+ if(nx && nx->type == LDNS_RR_TYPE_CNAME &&
+ dname_pkt_compare(pkt, sname, nx->dname) == 0) {
+ /* check next cname */
+ uint8_t* t = NULL;
+ size_t tlen = 0;
+ if(!parse_get_cname_target(rrset, &t, &tlen))
+ return 0;
+ if(dname_pkt_compare(pkt, alias, t) == 0) {
+ /* it's OK and better capitalized */
+ prev = rrset;
+ rrset = nx;
+ continue;
+ }
+ /* synth ourselves */
+ }
+ /* synth a CNAME rrset */
+ prev = synth_cname_rrset(&sname, &snamelen, alias,
+ aliaslen, region, msg, rrset, rrset, nx, pkt);
+ if(!prev) {
+ log_err("out of memory synthesizing CNAME");
+ return 0;
+ }
+ /* FIXME: resolve the conflict between synthesized
+ * CNAME ttls and the cache. */
+ rrset = nx;
+ continue;
+
+ }
+
+		/* Remove records from the ANSWER section whose owner name
+		 * does not match the name we are currently following. */
+ if(dname_pkt_compare(pkt, sname, rrset->dname) != 0) {
+ remove_rrset("normalize: removing irrelevant RRset:",
+ pkt, msg, prev, &rrset);
+ continue;
+ }
+
+ /* Follow the CNAME chain. */
+ if(rrset->type == LDNS_RR_TYPE_CNAME) {
+ uint8_t* oldsname = sname;
+ if(!parse_get_cname_target(rrset, &sname, &snamelen))
+ return 0;
+ prev = rrset;
+ rrset = rrset->rrset_all_next;
+ /* in CNAME ANY response, can have data after CNAME */
+ if(qinfo->qtype == LDNS_RR_TYPE_ANY) {
+ while(rrset && rrset->section ==
+ LDNS_SECTION_ANSWER &&
+ dname_pkt_compare(pkt, oldsname,
+ rrset->dname) == 0) {
+ prev = rrset;
+ rrset = rrset->rrset_all_next;
+ }
+ }
+ continue;
+ }
+
+ /* Otherwise, make sure that the RRset matches the qtype. */
+ if(qinfo->qtype != LDNS_RR_TYPE_ANY &&
+ qinfo->qtype != rrset->type) {
+ remove_rrset("normalize: removing irrelevant RRset:",
+ pkt, msg, prev, &rrset);
+ continue;
+ }
+
+ /* Mark the additional names from relevant rrset as OK. */
+ /* only for RRsets that match the query name, other ones
+ * will be removed by sanitize, so no additional for them */
+ if(dname_pkt_compare(pkt, qinfo->qname, rrset->dname) == 0)
+ mark_additional_rrset(pkt, msg, rrset);
+
+ prev = rrset;
+ rrset = rrset->rrset_all_next;
+ }
+
+ /* Mark additional names from AUTHORITY */
+ while(rrset && rrset->section == LDNS_SECTION_AUTHORITY) {
+ if(rrset->type==LDNS_RR_TYPE_DNAME ||
+ rrset->type==LDNS_RR_TYPE_CNAME ||
+ rrset->type==LDNS_RR_TYPE_A ||
+ rrset->type==LDNS_RR_TYPE_AAAA) {
+ remove_rrset("normalize: removing irrelevant "
+ "RRset:", pkt, msg, prev, &rrset);
+ continue;
+ }
+ /* only one NS set allowed in authority section */
+ if(rrset->type==LDNS_RR_TYPE_NS) {
+ /* NS set must be pertinent to the query */
+ if(!sub_of_pkt(pkt, qinfo->qname, rrset->dname)) {
+ remove_rrset("normalize: removing irrelevant "
+ "RRset:", pkt, msg, prev, &rrset);
+ continue;
+ }
+ if(nsset == NULL) {
+ nsset = rrset;
+ } else {
+ remove_rrset("normalize: removing irrelevant "
+ "RRset:", pkt, msg, prev, &rrset);
+ continue;
+ }
+ }
+ mark_additional_rrset(pkt, msg, rrset);
+ prev = rrset;
+ rrset = rrset->rrset_all_next;
+ }
+
+ /* For each record in the additional section, remove it if it is an
+ * address record and not in the collection of additional names
+ * found in ANSWER and AUTHORITY. */
+ /* These records have not been marked OK previously */
+ while(rrset && rrset->section == LDNS_SECTION_ADDITIONAL) {
+ /* FIXME: what about other types? */
+ if(rrset->type==LDNS_RR_TYPE_A ||
+ rrset->type==LDNS_RR_TYPE_AAAA)
+ {
+ if((rrset->flags & RRSET_SCRUB_OK)) {
+ /* remove flag to clean up flags variable */
+ rrset->flags &= ~RRSET_SCRUB_OK;
+ } else {
+ remove_rrset("normalize: removing irrelevant "
+ "RRset:", pkt, msg, prev, &rrset);
+ continue;
+ }
+ }
+ if(rrset->type==LDNS_RR_TYPE_DNAME ||
+ rrset->type==LDNS_RR_TYPE_CNAME ||
+ rrset->type==LDNS_RR_TYPE_NS) {
+ remove_rrset("normalize: removing irrelevant "
+ "RRset:", pkt, msg, prev, &rrset);
+ continue;
+ }
+ prev = rrset;
+ rrset = rrset->rrset_all_next;
+ }
+
+ return 1;
+}
+
+/**
+ * Store potential poison in the cache (only if hardening disabled).
+ * The rrset is stored in the cache but removed from the message,
+ * so that it can be used for infrastructure purposes but is not
+ * returned to the client.
+ * @param pkt: packet
+ * @param msg: message parsed
+ * @param env: environment with cache
+ * @param rrset: to store.
+ */
+static void
+store_rrset(ldns_buffer* pkt, struct msg_parse* msg, struct module_env* env,
+ struct rrset_parse* rrset)
+{
+ struct ub_packed_rrset_key* k;
+ struct packed_rrset_data* d;
+ struct rrset_ref ref;
+ uint32_t now = *env->now;
+
+ k = alloc_special_obtain(env->alloc);
+ if(!k)
+ return;
+ k->entry.data = NULL;
+ if(!parse_copy_decompress_rrset(pkt, msg, rrset, NULL, k)) {
+ alloc_special_release(env->alloc, k);
+ return;
+ }
+ d = (struct packed_rrset_data*)k->entry.data;
+ packed_rrset_ttl_add(d, now);
+ ref.key = k;
+ ref.id = k->id;
+ /*ignore ret: it was in the cache, ref updated */
+ (void)rrset_cache_update(env->rrset_cache, &ref, env->alloc, now);
+}
+
+/** Check if there are SOA records in the authority section (negative) */
+static int
+soa_in_auth(struct msg_parse* msg)
+{
+ struct rrset_parse* rrset;
+ for(rrset = msg->rrset_first; rrset; rrset = rrset->rrset_all_next)
+ if(rrset->type == LDNS_RR_TYPE_SOA &&
+ rrset->section == LDNS_SECTION_AUTHORITY)
+ return 1;
+ return 0;
+}
+
+/**
+ * Check if right hand name in NSEC is within zone
+ * @param rrset: the NSEC rrset
+ * @param zonename: the zone name.
+ * @return true if BAD.
+ */
+static int sanitize_nsec_is_overreach(struct rrset_parse* rrset,
+ uint8_t* zonename)
+{
+ struct rr_parse* rr;
+ uint8_t* rhs;
+ size_t len;
+ log_assert(rrset->type == LDNS_RR_TYPE_NSEC);
+ for(rr = rrset->rr_first; rr; rr = rr->next) {
+ rhs = rr->ttl_data+4+2;
+ len = ldns_read_uint16(rr->ttl_data+4);
+ if(!dname_valid(rhs, len)) {
+ /* malformed domain name in rdata */
+ return 1;
+ }
+ if(!dname_subdomain_c(rhs, zonename)) {
+ /* overreaching */
+ return 1;
+ }
+ }
+ /* all NSEC RRs OK */
+ return 0;
+}
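+/* For example, with zonename example.org., an NSEC whose next name (the
+ * right hand side) is a.example.org. stays within the zone, but an NSEC
+ * whose next name is example.com. (or the root) reaches outside the
+ * server's bailiwick and the whole RRset is removed by the sanitizer. */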
+
+/**
+ * Given a response event, remove suspect RRsets from the response.
+ * "Suspect" rrsets are potentially poison. Note that this routine expects
+ * the response to be in a "normalized" state -- that is, all "irrelevant"
+ * RRsets have already been removed, CNAMEs are in order, etc.
+ *
+ * @param pkt: packet.
+ * @param msg: msg to normalize.
+ * @param qinfo: the question originally asked.
+ * @param zonename: name of server zone.
+ * @param env: module environment with config and cache.
+ * @param ie: iterator environment with private address data.
+ * @return 0 on error.
+ */
+static int
+scrub_sanitize(ldns_buffer* pkt, struct msg_parse* msg,
+ struct query_info* qinfo, uint8_t* zonename, struct module_env* env,
+ struct iter_env* ie)
+{
+ int del_addi = 0; /* if additional-holding rrsets are deleted, we
+ do not trust the normalized additional-A-AAAA any more */
+ struct rrset_parse* rrset, *prev;
+ prev = NULL;
+ rrset = msg->rrset_first;
+
+ /* the first DNAME is allowed to stay. It needs checking before
+ * it can be used from the cache. After normalization, an initial
+ * DNAME will have a correctly synthesized CNAME after it. */
+ if(rrset && rrset->type == LDNS_RR_TYPE_DNAME &&
+ rrset->section == LDNS_SECTION_ANSWER &&
+ pkt_strict_sub(pkt, qinfo->qname, rrset->dname) &&
+ pkt_sub(pkt, rrset->dname, zonename)) {
+ prev = rrset; /* DNAME allowed to stay in answer section */
+ rrset = rrset->rrset_all_next;
+ }
+
+ /* remove all records from the answer section that are
+ * not the same domain name as the query domain name.
+ * The answer section should contain rrsets with the same name
+ * as the question. For DNAMEs a CNAME has been synthesized.
+ * Wildcards have the query name in answer section.
+ * ANY queries get query name in answer section.
+ * Remainders of CNAME chains are cut off and resolved by iterator. */
+ while(rrset && rrset->section == LDNS_SECTION_ANSWER) {
+ if(dname_pkt_compare(pkt, qinfo->qname, rrset->dname) != 0) {
+ if(has_additional(rrset->type)) del_addi = 1;
+ remove_rrset("sanitize: removing extraneous answer "
+ "RRset:", pkt, msg, prev, &rrset);
+ continue;
+ }
+ prev = rrset;
+ rrset = rrset->rrset_all_next;
+ }
+
+ /* At this point, we brutally remove ALL rrsets that aren't
+ * children of the originating zone. The idea here is that,
+ * as far as we know, the server that we contacted is ONLY
+ * authoritative for the originating zone. It, of course, MAY
+ * be authoritative for any other zones and, of course, MAY
+ * NOT be authoritative for some subdomains of the originating
+ * zone. */
+ prev = NULL;
+ rrset = msg->rrset_first;
+ while(rrset) {
+
+ /* remove private addresses */
+ if( (rrset->type == LDNS_RR_TYPE_A ||
+ rrset->type == LDNS_RR_TYPE_AAAA) &&
+ priv_rrset_bad(ie->priv, pkt, rrset)) {
+
+ /* do not set servfail since this leads to too
+ * many drops of other people using rfc1918 space */
+ remove_rrset("sanitize: removing public name with "
+ "private address", pkt, msg, prev, &rrset);
+ continue;
+ }
+
+ /* skip DNAME records -- they will always be followed by a
+ * synthesized CNAME, which will be relevant.
+ * FIXME: should this do something differently with DNAME
+ * rrsets NOT in Section.ANSWER? */
+ /* But since DNAME records are also subdomains of the zone,
+ * the same check can be used */
+
+ if(!pkt_sub(pkt, rrset->dname, zonename)) {
+ if(msg->an_rrsets == 0 &&
+ rrset->type == LDNS_RR_TYPE_NS &&
+ rrset->section == LDNS_SECTION_AUTHORITY &&
+ FLAGS_GET_RCODE(msg->flags) ==
+ LDNS_RCODE_NOERROR && !soa_in_auth(msg) &&
+ sub_of_pkt(pkt, zonename, rrset->dname)) {
+ /* noerror, nodata and this NS rrset is above
+ * the zone. This is LAME!
+ * Leave in the NS for lame classification. */
+ /* remove everything from the additional
+ * (we don't want its glue that was approved
+ * during the normalize action) */
+ del_addi = 1;
+ } else if(!env->cfg->harden_glue) {
+ /* store in cache! Since it is relevant
+ * (from normalize) it will be picked up
+ * from the cache to be used later */
+ store_rrset(pkt, msg, env, rrset);
+ remove_rrset("sanitize: storing potential "
+ "poison RRset:", pkt, msg, prev, &rrset);
+ continue;
+ } else {
+ if(has_additional(rrset->type)) del_addi = 1;
+ remove_rrset("sanitize: removing potential "
+ "poison RRset:", pkt, msg, prev, &rrset);
+ continue;
+ }
+ }
+ if(del_addi && rrset->section == LDNS_SECTION_ADDITIONAL) {
+ remove_rrset("sanitize: removing potential "
+ "poison reference RRset:", pkt, msg, prev, &rrset);
+ continue;
+ }
+ /* check if right hand side of NSEC is within zone */
+ if(rrset->type == LDNS_RR_TYPE_NSEC &&
+ sanitize_nsec_is_overreach(rrset, zonename)) {
+ remove_rrset("sanitize: removing overreaching NSEC "
+ "RRset:", pkt, msg, prev, &rrset);
+ continue;
+ }
+ prev = rrset;
+ rrset = rrset->rrset_all_next;
+ }
+ return 1;
+}
+
+int
+scrub_message(ldns_buffer* pkt, struct msg_parse* msg,
+ struct query_info* qinfo, uint8_t* zonename, struct regional* region,
+ struct module_env* env, struct iter_env* ie)
+{
+ /* basic sanity checks */
+ log_nametypeclass(VERB_ALGO, "scrub for", zonename, LDNS_RR_TYPE_NS,
+ qinfo->qclass);
+ if(msg->qdcount > 1)
+ return 0;
+ if( !(msg->flags&BIT_QR) )
+ return 0;
+ msg->flags &= ~(BIT_AD|BIT_Z); /* force off bit AD and Z */
+
+ /* make sure that a query is echoed back when NOERROR or NXDOMAIN */
+ /* this is not required for basic operation but is a forgery
+ * resistance (security) feature */
+ if((FLAGS_GET_RCODE(msg->flags) == LDNS_RCODE_NOERROR ||
+ FLAGS_GET_RCODE(msg->flags) == LDNS_RCODE_NXDOMAIN) &&
+ msg->qdcount == 0)
+ return 0;
+
+ /* if a query is echoed back, make sure it is correct. Otherwise,
+ * this may be not a reply to our query. */
+ if(msg->qdcount == 1) {
+ if(dname_pkt_compare(pkt, msg->qname, qinfo->qname) != 0)
+ return 0;
+ if(msg->qtype != qinfo->qtype || msg->qclass != qinfo->qclass)
+ return 0;
+ }
+
+ /* normalize the response, this cleans up the additional. */
+ if(!scrub_normalize(pkt, msg, qinfo, region))
+ return 0;
+ /* delete all out-of-zone information */
+ if(!scrub_sanitize(pkt, msg, qinfo, zonename, env, ie))
+ return 0;
+ return 1;
+}
diff --git a/3rdParty/Unbound/src/src/iterator/iter_scrub.h b/3rdParty/Unbound/src/src/iterator/iter_scrub.h
new file mode 100644
index 0000000..6b7274e
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iter_scrub.h
@@ -0,0 +1,69 @@
+/*
+ * iterator/iter_scrub.h - scrubbing, normalization, sanitization of DNS msgs.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file has routine(s) for cleaning up incoming DNS messages from
+ * possible useless or malicious junk in it.
+ */
+
+#ifndef ITERATOR_ITER_SCRUB_H
+#define ITERATOR_ITER_SCRUB_H
+#include <ldns/buffer.h>
+struct msg_parse;
+struct query_info;
+struct regional;
+struct module_env;
+struct iter_env;
+
+/**
+ * Cleanup the passed dns message.
+ * @param pkt: the packet itself, for resolving name compression pointers.
+ * the packet buffer is unaltered.
+ * @param msg: the parsed packet, this structure is cleaned up.
+ * @param qinfo: the query info that was sent to the server. Checked.
+ * @param zonename: the name of the last delegation point.
+ * Used to determine out of bailiwick information.
+ * @param regional: where to allocate (new) parts of the message.
+ * @param env: module environment with config settings and cache.
+ * @param ie: iterator module environment data.
+ * @return: false if the message is total waste. true if scrubbed with success.
+ */
+int scrub_message(ldns_buffer* pkt, struct msg_parse* msg,
+ struct query_info* qinfo, uint8_t* zonename, struct regional* regional,
+ struct module_env* env, struct iter_env* ie);
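+/*
+ * Illustrative call sequence (a minimal sketch; the surrounding variable
+ * names such as prs, qinfo, dp and ie are placeholders, not taken from a
+ * verbatim caller):
+ *
+ *	struct msg_parse* prs = ...;	parsed reply from the upstream server
+ *	if(!scrub_message(pkt, prs, &qinfo, dp->name, region, env, ie))
+ *		...;			reply is waste, treat as a bad answer
+ *	...;				prs is now normalized and in-bailiwick
+ */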
+
+#endif /* ITERATOR_ITER_SCRUB_H */
diff --git a/3rdParty/Unbound/src/src/iterator/iter_utils.c b/3rdParty/Unbound/src/src/iterator/iter_utils.c
new file mode 100644
index 0000000..25fda5e
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iter_utils.c
@@ -0,0 +1,976 @@
+/*
+ * iterator/iter_utils.c - iterative resolver module utility functions.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains functions to assist the iterator module.
+ * Configuration options. Forward zones.
+ */
+#include "config.h"
+#include "iterator/iter_utils.h"
+#include "iterator/iterator.h"
+#include "iterator/iter_hints.h"
+#include "iterator/iter_fwd.h"
+#include "iterator/iter_donotq.h"
+#include "iterator/iter_delegpt.h"
+#include "iterator/iter_priv.h"
+#include "services/cache/infra.h"
+#include "services/cache/dns.h"
+#include "services/cache/rrset.h"
+#include "util/net_help.h"
+#include "util/module.h"
+#include "util/log.h"
+#include "util/config_file.h"
+#include "util/regional.h"
+#include "util/data/msgparse.h"
+#include "util/data/dname.h"
+#include "util/random.h"
+#include "util/fptr_wlist.h"
+#include "validator/val_anchor.h"
+#include "validator/val_kcache.h"
+#include "validator/val_kentry.h"
+
+/** time when nameserver glue is said to be 'recent' */
+#define SUSPICION_RECENT_EXPIRY 86400
+/** penalty to validation failed blacklisted IPs */
+#define BLACKLIST_PENALTY (USEFUL_SERVER_TOP_TIMEOUT*4)
+
+/** fillup fetch policy array */
+static void
+fetch_fill(struct iter_env* ie, const char* str)
+{
+ char* s = (char*)str, *e;
+ int i;
+ for(i=0; i<ie->max_dependency_depth+1; i++) {
+ ie->target_fetch_policy[i] = strtol(s, &e, 10);
+ if(s == e)
+ fatal_exit("cannot parse fetch policy number %s", s);
+ s = e;
+ }
+}
+
+/** Read config string that represents the target fetch policy */
+static int
+read_fetch_policy(struct iter_env* ie, const char* str)
+{
+ int count = cfg_count_numbers(str);
+ if(count < 1) {
+ log_err("Cannot parse target fetch policy: \"%s\"", str);
+ return 0;
+ }
+ ie->max_dependency_depth = count - 1;
+ ie->target_fetch_policy = (int*)calloc(
+ (size_t)ie->max_dependency_depth+1, sizeof(int));
+ if(!ie->target_fetch_policy) {
+ log_err("alloc fetch policy: out of memory");
+ return 0;
+ }
+ fetch_fill(ie, str);
+ return 1;
+}
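+/* Example: the string "3 2 1 0 0" (the documented default for
+ * target-fetch-policy) gives max_dependency_depth = 4 and a policy array
+ * of {3, 2, 1, 0, 0}: spawn at most 3 target (address) queries at
+ * dependency depth 0, 2 at depth 1, 1 at depth 2, and none deeper. */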
+
+int
+iter_apply_cfg(struct iter_env* iter_env, struct config_file* cfg)
+{
+ int i;
+ /* target fetch policy */
+ if(!read_fetch_policy(iter_env, cfg->target_fetch_policy))
+ return 0;
+ for(i=0; i<iter_env->max_dependency_depth+1; i++)
+ verbose(VERB_QUERY, "target fetch policy for level %d is %d",
+ i, iter_env->target_fetch_policy[i]);
+
+ if(!iter_env->hints)
+ iter_env->hints = hints_create();
+ if(!iter_env->hints || !hints_apply_cfg(iter_env->hints, cfg)) {
+ log_err("Could not set root or stub hints");
+ return 0;
+ }
+ if(!iter_env->donotq)
+ iter_env->donotq = donotq_create();
+ if(!iter_env->donotq || !donotq_apply_cfg(iter_env->donotq, cfg)) {
+ log_err("Could not set donotqueryaddresses");
+ return 0;
+ }
+ if(!iter_env->priv)
+ iter_env->priv = priv_create();
+ if(!iter_env->priv || !priv_apply_cfg(iter_env->priv, cfg)) {
+ log_err("Could not set private addresses");
+ return 0;
+ }
+ iter_env->supports_ipv6 = cfg->do_ip6;
+ iter_env->supports_ipv4 = cfg->do_ip4;
+ return 1;
+}
+
+/** filter out unsuitable targets
+ * @param iter_env: iterator environment with ipv6-support flag.
+ * @param env: module environment with infra cache.
+ * @param name: zone name
+ * @param namelen: length of name
+ * @param qtype: query type (host order).
+ * @param now: current time
+ * @param a: address in delegation point we are examining.
+ * @return an integer that signals the target suitability.
+ * as follows:
+ * -1: The address should be omitted from the list.
+ * Because:
+ * o The address is bogus (DNSSEC validation failure).
+ * o Listed as donotquery
+ * o is ipv6 but no ipv6 support (in operating system).
+ * o is ipv4 but no ipv4 support (in operating system).
+ * o is lame
+ * Otherwise, an rtt in milliseconds.
+ * 0 .. USEFUL_SERVER_TOP_TIMEOUT-1
+ * The roundtrip time timeout estimate, less than 2 minutes.
+ * Note that util/rtt.c has a MIN_TIMEOUT of 50 msec, thus
+ * values 0 .. 49 are not used, unless that is changed.
+ * USEFUL_SERVER_TOP_TIMEOUT
+ * Exactly this value is given for unresponsive, blacklisted servers.
+ * USEFUL_SERVER_TOP_TIMEOUT+1
+ * For non-blacklisted servers: huge timeout, but has traffic.
+ * USEFUL_SERVER_TOP_TIMEOUT*1 ..
+ * parent-side lame servers get this penalty; a dispreferred
+ * server (lame in delegpt).
+ * USEFUL_SERVER_TOP_TIMEOUT*2 ..
+ * dnsseclame servers get penalty
+ * USEFUL_SERVER_TOP_TIMEOUT*3 ..
+ * recursion lame servers get penalty
+ * UNKNOWN_SERVER_NICENESS
+ * If no information is known about the server, this is
+ * returned. 376 msec or so.
+ * +BLACKLIST_PENALTY (USEFUL_SERVER_TOP_TIMEOUT*4) is added for dnssec-failed IPs.
+ *
+ * When the final chosen value is in the dnsseclame band, dnssec lameness
+ * checking is turned off (so we do not discard the reply).
+ * When the final chosen value is in the recursionlame band, the RD bit is
+ * set on the query. Because of the numbering, recursionlame servers also
+ * have dnssec lameness checking turned off.
+ * (A worked example of these bands follows this function.)
+ */
+static int
+iter_filter_unsuitable(struct iter_env* iter_env, struct module_env* env,
+ uint8_t* name, size_t namelen, uint16_t qtype, uint32_t now,
+ struct delegpt_addr* a)
+{
+ int rtt, lame, reclame, dnsseclame;
+ if(a->bogus)
+ return -1; /* address of server is bogus */
+ if(donotq_lookup(iter_env->donotq, &a->addr, a->addrlen)) {
+ log_addr(VERB_ALGO, "skip addr on the donotquery list",
+ &a->addr, a->addrlen);
+ return -1; /* server is on the donotquery list */
+ }
+ if(!iter_env->supports_ipv6 && addr_is_ip6(&a->addr, a->addrlen)) {
+ return -1; /* there is no ip6 available */
+ }
+ if(!iter_env->supports_ipv4 && !addr_is_ip6(&a->addr, a->addrlen)) {
+ return -1; /* there is no ip4 available */
+ }
+ /* check lameness - need zone , class info */
+ if(infra_get_lame_rtt(env->infra_cache, &a->addr, a->addrlen,
+ name, namelen, qtype, &lame, &dnsseclame, &reclame,
+ &rtt, now)) {
+ log_addr(VERB_ALGO, "servselect", &a->addr, a->addrlen);
+ verbose(VERB_ALGO, " rtt=%d%s%s%s%s", rtt,
+ lame?" LAME":"",
+ dnsseclame?" DNSSEC_LAME":"",
+ reclame?" REC_LAME":"",
+ a->lame?" ADDR_LAME":"");
+ if(lame)
+ return -1; /* server is lame */
+ else if(rtt >= USEFUL_SERVER_TOP_TIMEOUT)
+ /* server is unresponsive;
+ * we used to return TOP_TIMEOUT, but that was fairly
+ * useless, because a value == TOP_TIMEOUT is dropped
+ * later anyway when it is blacklisted. Instead, remove
+ * it here, so other (non-blacklisted) choices can
+ * be tried */
+ return -1;
+ /* select remainder from worst to best */
+ else if(reclame)
+ return rtt+USEFUL_SERVER_TOP_TIMEOUT*3; /* nonpref */
+ else if(dnsseclame )
+ return rtt+USEFUL_SERVER_TOP_TIMEOUT*2; /* nonpref */
+ else if(a->lame)
+ return rtt+USEFUL_SERVER_TOP_TIMEOUT+1; /* nonpref */
+ else return rtt;
+ }
+ /* no server information present */
+ if(a->lame)
+ return USEFUL_SERVER_TOP_TIMEOUT+1+UNKNOWN_SERVER_NICENESS; /* nonpref */
+ return UNKNOWN_SERVER_NICENESS;
+}
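+/* Worked example of the selection bands above: a server with a cached rtt
+ * of 100 msec and no lameness scores 100; with dnsseclame it scores
+ * 100 + USEFUL_SERVER_TOP_TIMEOUT*2 and with recursionlame
+ * 100 + USEFUL_SERVER_TOP_TIMEOUT*3, so plain servers always sort ahead of
+ * lame ones. An address on the blacklist additionally gets
+ * BLACKLIST_PENALTY added in iter_fill_rtt below. */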
+
+/** lookup RTT information, and also store fastest rtt (if any) */
+static int
+iter_fill_rtt(struct iter_env* iter_env, struct module_env* env,
+ uint8_t* name, size_t namelen, uint16_t qtype, uint32_t now,
+ struct delegpt* dp, int* best_rtt, struct sock_list* blacklist)
+{
+ int got_it = 0;
+ struct delegpt_addr* a;
+ if(dp->bogus)
+ return 0; /* NS bogus, all bogus, nothing found */
+ for(a=dp->result_list; a; a = a->next_result) {
+ a->sel_rtt = iter_filter_unsuitable(iter_env, env,
+ name, namelen, qtype, now, a);
+ if(a->sel_rtt != -1) {
+ if(sock_list_find(blacklist, &a->addr, a->addrlen))
+ a->sel_rtt += BLACKLIST_PENALTY;
+
+ if(!got_it) {
+ *best_rtt = a->sel_rtt;
+ got_it = 1;
+ } else if(a->sel_rtt < *best_rtt) {
+ *best_rtt = a->sel_rtt;
+ }
+ }
+ }
+ return got_it;
+}
+
+/** filter the address list, putting best targets at front,
+ * returns number of best targets (or 0, no suitable targets) */
+static int
+iter_filter_order(struct iter_env* iter_env, struct module_env* env,
+ uint8_t* name, size_t namelen, uint16_t qtype, uint32_t now,
+ struct delegpt* dp, int* selected_rtt, int open_target,
+ struct sock_list* blacklist)
+{
+ int got_num = 0, low_rtt = 0, swap_to_front;
+ struct delegpt_addr* a, *n, *prev=NULL;
+
+ /* fillup sel_rtt and find best rtt in the bunch */
+ got_num = iter_fill_rtt(iter_env, env, name, namelen, qtype, now, dp,
+ &low_rtt, blacklist);
+ if(got_num == 0)
+ return 0;
+ if(low_rtt >= USEFUL_SERVER_TOP_TIMEOUT &&
+ (delegpt_count_missing_targets(dp) > 0 || open_target > 0)) {
+ verbose(VERB_ALGO, "Bad choices, trying to get more choice");
+ return 0; /* we want more choice. The best choice is a bad one.
+ return 0 to force the caller to fetch more */
+ }
+
+ got_num = 0;
+ a = dp->result_list;
+ while(a) {
+ /* skip unsuitable targets */
+ if(a->sel_rtt == -1) {
+ prev = a;
+ a = a->next_result;
+ continue;
+ }
+ /* classify the server address and determine what to do */
+ swap_to_front = 0;
+ if(a->sel_rtt >= low_rtt && a->sel_rtt - low_rtt <= RTT_BAND) {
+ got_num++;
+ swap_to_front = 1;
+ } else if(a->sel_rtt<low_rtt && low_rtt-a->sel_rtt<=RTT_BAND) {
+ got_num++;
+ swap_to_front = 1;
+ }
+ /* swap to front if necessary, or move to next result */
+ if(swap_to_front && prev) {
+ n = a->next_result;
+ prev->next_result = n;
+ a->next_result = dp->result_list;
+ dp->result_list = a;
+ a = n;
+ } else {
+ prev = a;
+ a = a->next_result;
+ }
+ }
+ *selected_rtt = low_rtt;
+ return got_num;
+}
+
+struct delegpt_addr*
+iter_server_selection(struct iter_env* iter_env,
+ struct module_env* env, struct delegpt* dp,
+ uint8_t* name, size_t namelen, uint16_t qtype, int* dnssec_lame,
+ int* chase_to_rd, int open_target, struct sock_list* blacklist)
+{
+ int sel;
+ int selrtt;
+ struct delegpt_addr* a, *prev;
+ int num = iter_filter_order(iter_env, env, name, namelen, qtype,
+ *env->now, dp, &selrtt, open_target, blacklist);
+
+ if(num == 0)
+ return NULL;
+ verbose(VERB_ALGO, "selrtt %d", selrtt);
+ if(selrtt > BLACKLIST_PENALTY) {
+ if(selrtt-BLACKLIST_PENALTY > USEFUL_SERVER_TOP_TIMEOUT*3) {
+ verbose(VERB_ALGO, "chase to "
+ "blacklisted recursion lame server");
+ *chase_to_rd = 1;
+ }
+ if(selrtt-BLACKLIST_PENALTY > USEFUL_SERVER_TOP_TIMEOUT*2) {
+ verbose(VERB_ALGO, "chase to "
+ "blacklisted dnssec lame server");
+ *dnssec_lame = 1;
+ }
+ } else {
+ if(selrtt > USEFUL_SERVER_TOP_TIMEOUT*3) {
+ verbose(VERB_ALGO, "chase to recursion lame server");
+ *chase_to_rd = 1;
+ }
+ if(selrtt > USEFUL_SERVER_TOP_TIMEOUT*2) {
+ verbose(VERB_ALGO, "chase to dnssec lame server");
+ *dnssec_lame = 1;
+ }
+ if(selrtt == USEFUL_SERVER_TOP_TIMEOUT) {
+ verbose(VERB_ALGO, "chase to blacklisted lame server");
+ return NULL;
+ }
+ }
+
+ if(num == 1) {
+ a = dp->result_list;
+ if(++a->attempts < OUTBOUND_MSG_RETRY)
+ return a;
+ dp->result_list = a->next_result;
+ return a;
+ }
+
+ /* randomly select a target from the list */
+ log_assert(num > 1);
+ /* grab a secure random number, to pick an unexpected server.
+ * it also needs to be threadsafe. */
+ sel = ub_random_max(env->rnd, num);
+ a = dp->result_list;
+ prev = NULL;
+ while(sel > 0 && a) {
+ prev = a;
+ a = a->next_result;
+ sel--;
+ }
+ if(!a) /* robustness */
+ return NULL;
+ if(++a->attempts < OUTBOUND_MSG_RETRY)
+ return a;
+ /* remove it from the delegation point result list */
+ if(prev)
+ prev->next_result = a->next_result;
+ else dp->result_list = a->next_result;
+ return a;
+}
+
+struct dns_msg*
+dns_alloc_msg(ldns_buffer* pkt, struct msg_parse* msg,
+ struct regional* region)
+{
+ struct dns_msg* m = (struct dns_msg*)regional_alloc(region,
+ sizeof(struct dns_msg));
+ if(!m)
+ return NULL;
+ memset(m, 0, sizeof(*m));
+ if(!parse_create_msg(pkt, msg, NULL, &m->qinfo, &m->rep, region)) {
+ log_err("malloc failure: allocating incoming dns_msg");
+ return NULL;
+ }
+ return m;
+}
+
+struct dns_msg*
+dns_copy_msg(struct dns_msg* from, struct regional* region)
+{
+ struct dns_msg* m = (struct dns_msg*)regional_alloc(region,
+ sizeof(struct dns_msg));
+ if(!m)
+ return NULL;
+ m->qinfo = from->qinfo;
+ if(!(m->qinfo.qname = regional_alloc_init(region, from->qinfo.qname,
+ from->qinfo.qname_len)))
+ return NULL;
+ if(!(m->rep = reply_info_copy(from->rep, NULL, region)))
+ return NULL;
+ return m;
+}
+
+int
+iter_dns_store(struct module_env* env, struct query_info* msgqinf,
+ struct reply_info* msgrep, int is_referral, uint32_t leeway,
+ struct regional* region)
+{
+ return dns_cache_store(env, msgqinf, msgrep, is_referral, leeway,
+ region);
+}
+
+int
+iter_ns_probability(struct ub_randstate* rnd, int n, int m)
+{
+ int sel;
+ if(n == m) /* 100% chance */
+ return 1;
+ /* we do not need secure random numbers here, but
+ * we do need them to be threadsafe, so we use this */
+ sel = ub_random_max(rnd, m);
+ return (sel < n);
+}
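+/* Example: iter_ns_probability(rnd, 3, 10) returns true in roughly 3 out
+ * of 10 calls, and always returns true when n == m; the iterator uses it
+ * to probabilistically pick NS names to fetch addresses for. */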
+
+/** detect dependency cycle for query and target */
+static int
+causes_cycle(struct module_qstate* qstate, uint8_t* name, size_t namelen,
+ uint16_t t, uint16_t c)
+{
+ struct query_info qinf;
+ qinf.qname = name;
+ qinf.qname_len = namelen;
+ qinf.qtype = t;
+ qinf.qclass = c;
+ fptr_ok(fptr_whitelist_modenv_detect_cycle(
+ qstate->env->detect_cycle));
+ return (*qstate->env->detect_cycle)(qstate, &qinf,
+ (uint16_t)(BIT_RD|BIT_CD), qstate->is_priming);
+}
+
+void
+iter_mark_cycle_targets(struct module_qstate* qstate, struct delegpt* dp)
+{
+ struct delegpt_ns* ns;
+ for(ns = dp->nslist; ns; ns = ns->next) {
+ if(ns->resolved)
+ continue;
+ /* see if this ns as target causes dependency cycle */
+ if(causes_cycle(qstate, ns->name, ns->namelen,
+ LDNS_RR_TYPE_AAAA, qstate->qinfo.qclass) ||
+ causes_cycle(qstate, ns->name, ns->namelen,
+ LDNS_RR_TYPE_A, qstate->qinfo.qclass)) {
+ log_nametypeclass(VERB_QUERY, "skipping target due "
+ "to dependency cycle (harden-glue: no may "
+ "fix some of the cycles)",
+ ns->name, LDNS_RR_TYPE_A,
+ qstate->qinfo.qclass);
+ ns->resolved = 1;
+ }
+ }
+}
+
+void
+iter_mark_pside_cycle_targets(struct module_qstate* qstate, struct delegpt* dp)
+{
+ struct delegpt_ns* ns;
+ for(ns = dp->nslist; ns; ns = ns->next) {
+ if(ns->done_pside4 && ns->done_pside6)
+ continue;
+ /* see if this ns as target causes dependency cycle */
+ if(causes_cycle(qstate, ns->name, ns->namelen,
+ LDNS_RR_TYPE_A, qstate->qinfo.qclass)) {
+ log_nametypeclass(VERB_QUERY, "skipping target due "
+ "to dependency cycle", ns->name,
+ LDNS_RR_TYPE_A, qstate->qinfo.qclass);
+ ns->done_pside4 = 1;
+ }
+ if(causes_cycle(qstate, ns->name, ns->namelen,
+ LDNS_RR_TYPE_AAAA, qstate->qinfo.qclass)) {
+ log_nametypeclass(VERB_QUERY, "skipping target due "
+ "to dependency cycle", ns->name,
+ LDNS_RR_TYPE_AAAA, qstate->qinfo.qclass);
+ ns->done_pside6 = 1;
+ }
+ }
+}
+
+int
+iter_dp_is_useless(struct query_info* qinfo, uint16_t qflags,
+ struct delegpt* dp)
+{
+ struct delegpt_ns* ns;
+ /* check:
+ * o RD qflag is on.
+ * o no addresses are provided.
+ * o all NS items are required glue.
+ * OR
+ * o RD qflag is on.
+ * o no addresses are provided.
+ * o the query is for one of the nameservers in dp,
+ * and that nameserver is a glue-name for this dp.
+ */
+ if(!(qflags&BIT_RD))
+ return 0;
+ /* either available or unused targets */
+ if(dp->usable_list || dp->result_list)
+ return 0;
+
+ /* see if query is for one of the nameservers, which is glue */
+ if( (qinfo->qtype == LDNS_RR_TYPE_A ||
+ qinfo->qtype == LDNS_RR_TYPE_AAAA) &&
+ dname_subdomain_c(qinfo->qname, dp->name) &&
+ delegpt_find_ns(dp, qinfo->qname, qinfo->qname_len))
+ return 1;
+
+ for(ns = dp->nslist; ns; ns = ns->next) {
+ if(ns->resolved) /* skip failed targets */
+ continue;
+ if(!dname_subdomain_c(ns->name, dp->name))
+ return 0; /* one address is not required glue */
+ }
+ return 1;
+}
+
+int
+iter_indicates_dnssec(struct module_env* env, struct delegpt* dp,
+ struct dns_msg* msg, uint16_t dclass)
+{
+ struct trust_anchor* a;
+ /* information not available, !env->anchors can be common */
+ if(!env || !env->anchors || !dp || !dp->name)
+ return 0;
+ /* a trust anchor exists with this name, RRSIGs expected */
+ if((a=anchor_find(env->anchors, dp->name, dp->namelabs, dp->namelen,
+ dclass))) {
+ lock_basic_unlock(&a->lock);
+ return 1;
+ }
+ /* see if DS rrset was given, in AUTH section */
+ if(msg && msg->rep &&
+ reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
+ LDNS_RR_TYPE_DS, dclass))
+ return 1;
+ /* look in key cache */
+ if(env->key_cache) {
+ struct key_entry_key* kk = key_cache_obtain(env->key_cache,
+ dp->name, dp->namelen, dclass, env->scratch, *env->now);
+ if(kk) {
+ if(query_dname_compare(kk->name, dp->name) == 0) {
+ if(key_entry_isgood(kk) || key_entry_isbad(kk)) {
+ regional_free_all(env->scratch);
+ return 1;
+ } else if(key_entry_isnull(kk)) {
+ regional_free_all(env->scratch);
+ return 0;
+ }
+ }
+ regional_free_all(env->scratch);
+ }
+ }
+ return 0;
+}
+
+int
+iter_msg_has_dnssec(struct dns_msg* msg)
+{
+ size_t i;
+ if(!msg || !msg->rep)
+ return 0;
+ for(i=0; i<msg->rep->an_numrrsets + msg->rep->ns_numrrsets; i++) {
+ if(((struct packed_rrset_data*)msg->rep->rrsets[i]->
+ entry.data)->rrsig_count > 0)
+ return 1;
+ }
+ /* empty message has no DNSSEC info, with DNSSEC the reply is
+ * not empty (NSEC) */
+ return 0;
+}
+
+int iter_msg_from_zone(struct dns_msg* msg, struct delegpt* dp,
+ enum response_type type, uint16_t dclass)
+{
+ if(!msg || !dp || !msg->rep || !dp->name)
+ return 0;
+ /* SOA RRset - always from reply zone */
+ if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
+ LDNS_RR_TYPE_SOA, dclass) ||
+ reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
+ LDNS_RR_TYPE_SOA, dclass))
+ return 1;
+ if(type == RESPONSE_TYPE_REFERRAL) {
+ size_t i;
+ /* if it adds a single label, i.e. we expect .com,
+ * and referral to example.com. NS ... , then origin zone
+ * is .com. For a referral to sub.example.com. NS ... then
+ * we do not know, since example.com. may be in between. */
+ for(i=0; i<msg->rep->an_numrrsets+msg->rep->ns_numrrsets;
+ i++) {
+ struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
+ if(ntohs(s->rk.type) == LDNS_RR_TYPE_NS &&
+ ntohs(s->rk.rrset_class) == dclass) {
+ int l = dname_count_labels(s->rk.dname);
+ if(l == dp->namelabs + 1 &&
+ dname_strict_subdomain(s->rk.dname,
+ l, dp->name, dp->namelabs))
+ return 1;
+ }
+ }
+ return 0;
+ }
+ log_assert(type==RESPONSE_TYPE_ANSWER || type==RESPONSE_TYPE_CNAME);
+ /* not a referral, and not lame delegation (upwards), so,
+ * any NS rrset must be from the zone itself */
+ if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
+ LDNS_RR_TYPE_NS, dclass) ||
+ reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
+ LDNS_RR_TYPE_NS, dclass))
+ return 1;
+ /* a DNSKEY set is expected at the zone apex as well */
+ /* this is for 'minimal responses' for DNSKEYs */
+ if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
+ LDNS_RR_TYPE_DNSKEY, dclass))
+ return 1;
+ return 0;
+}
+
+/**
+ * check equality of two rrsets
+ * @param k1: rrset
+ * @param k2: rrset
+ * @return true if equal
+ */
+static int
+rrset_equal(struct ub_packed_rrset_key* k1, struct ub_packed_rrset_key* k2)
+{
+ struct packed_rrset_data* d1 = (struct packed_rrset_data*)
+ k1->entry.data;
+ struct packed_rrset_data* d2 = (struct packed_rrset_data*)
+ k2->entry.data;
+ size_t i, t;
+ if(k1->rk.dname_len != k2->rk.dname_len ||
+ k1->rk.flags != k2->rk.flags ||
+ k1->rk.type != k2->rk.type ||
+ k1->rk.rrset_class != k2->rk.rrset_class ||
+ query_dname_compare(k1->rk.dname, k2->rk.dname) != 0)
+ return 0;
+ if(d1->ttl != d2->ttl ||
+ d1->count != d2->count ||
+ d1->rrsig_count != d2->rrsig_count ||
+ d1->trust != d2->trust ||
+ d1->security != d2->security)
+ return 0;
+ t = d1->count + d1->rrsig_count;
+ for(i=0; i<t; i++) {
+ if(d1->rr_len[i] != d2->rr_len[i] ||
+ d1->rr_ttl[i] != d2->rr_ttl[i] ||
+ memcmp(d1->rr_data[i], d2->rr_data[i],
+ d1->rr_len[i]) != 0)
+ return 0;
+ }
+ return 1;
+}
+
+int
+reply_equal(struct reply_info* p, struct reply_info* q, ldns_buffer* scratch)
+{
+ size_t i;
+ if(p->flags != q->flags ||
+ p->qdcount != q->qdcount ||
+ p->ttl != q->ttl ||
+ p->prefetch_ttl != q->prefetch_ttl ||
+ p->security != q->security ||
+ p->an_numrrsets != q->an_numrrsets ||
+ p->ns_numrrsets != q->ns_numrrsets ||
+ p->ar_numrrsets != q->ar_numrrsets ||
+ p->rrset_count != q->rrset_count)
+ return 0;
+ for(i=0; i<p->rrset_count; i++) {
+ if(!rrset_equal(p->rrsets[i], q->rrsets[i])) {
+ /* fallback procedure: try to sort and canonicalize */
+ ldns_rr_list* pl, *ql;
+ pl = packed_rrset_to_rr_list(p->rrsets[i], scratch);
+ ql = packed_rrset_to_rr_list(q->rrsets[i], scratch);
+ if(!pl || !ql) {
+ ldns_rr_list_deep_free(pl);
+ ldns_rr_list_deep_free(ql);
+ return 0;
+ }
+ ldns_rr_list2canonical(pl);
+ ldns_rr_list2canonical(ql);
+ ldns_rr_list_sort(pl);
+ ldns_rr_list_sort(ql);
+ if(ldns_rr_list_compare(pl, ql) != 0) {
+ ldns_rr_list_deep_free(pl);
+ ldns_rr_list_deep_free(ql);
+ return 0;
+ }
+ ldns_rr_list_deep_free(pl);
+ ldns_rr_list_deep_free(ql);
+ continue;
+ }
+ }
+ return 1;
+}
+
+void
+iter_store_parentside_rrset(struct module_env* env,
+ struct ub_packed_rrset_key* rrset)
+{
+ struct rrset_ref ref;
+ rrset = packed_rrset_copy_alloc(rrset, env->alloc, *env->now);
+ if(!rrset) {
+ log_err("malloc failure in store_parentside_rrset");
+ return;
+ }
+ rrset->rk.flags |= PACKED_RRSET_PARENT_SIDE;
+ rrset->entry.hash = rrset_key_hash(&rrset->rk);
+ ref.key = rrset;
+ ref.id = rrset->id;
+ /* ignore ret: if it was in the cache, ref updated */
+ (void)rrset_cache_update(env->rrset_cache, &ref, env->alloc, *env->now);
+}
+
+/** fetch NS record from reply, if any */
+static struct ub_packed_rrset_key*
+reply_get_NS_rrset(struct reply_info* rep)
+{
+ size_t i;
+ for(i=0; i<rep->rrset_count; i++) {
+ if(rep->rrsets[i]->rk.type == htons(LDNS_RR_TYPE_NS)) {
+ return rep->rrsets[i];
+ }
+ }
+ return NULL;
+}
+
+void
+iter_store_parentside_NS(struct module_env* env, struct reply_info* rep)
+{
+ struct ub_packed_rrset_key* rrset = reply_get_NS_rrset(rep);
+ if(rrset) {
+ log_rrset_key(VERB_ALGO, "store parent-side NS", rrset);
+ iter_store_parentside_rrset(env, rrset);
+ }
+}
+
+void iter_store_parentside_neg(struct module_env* env,
+ struct query_info* qinfo, struct reply_info* rep)
+{
+ /* TTL: NS from referral in iq->deleg_msg,
+ * or first RR from iq->response,
+ * or servfail5secs if !iq->response */
+ uint32_t ttl = NORR_TTL;
+ struct ub_packed_rrset_key* neg;
+ struct packed_rrset_data* newd;
+ if(rep) {
+ struct ub_packed_rrset_key* rrset = reply_get_NS_rrset(rep);
+ if(!rrset && rep->rrset_count != 0) rrset = rep->rrsets[0];
+ if(rrset) ttl = ub_packed_rrset_ttl(rrset);
+ }
+ /* create empty rrset to store */
+ neg = (struct ub_packed_rrset_key*)regional_alloc(env->scratch,
+ sizeof(struct ub_packed_rrset_key));
+ if(!neg) {
+ log_err("out of memory in store_parentside_neg");
+ return;
+ }
+ memset(&neg->entry, 0, sizeof(neg->entry));
+ neg->entry.key = neg;
+ neg->rk.type = htons(qinfo->qtype);
+ neg->rk.rrset_class = htons(qinfo->qclass);
+ neg->rk.flags = 0;
+ neg->rk.dname = regional_alloc_init(env->scratch, qinfo->qname,
+ qinfo->qname_len);
+ if(!neg->rk.dname) {
+ log_err("out of memory in store_parentside_neg");
+ return;
+ }
+ neg->rk.dname_len = qinfo->qname_len;
+ neg->entry.hash = rrset_key_hash(&neg->rk);
+ newd = (struct packed_rrset_data*)regional_alloc_zero(env->scratch,
+ sizeof(struct packed_rrset_data) + sizeof(size_t) +
+ sizeof(uint8_t*) + sizeof(uint32_t) + sizeof(uint16_t));
+ if(!newd) {
+ log_err("out of memory in store_parentside_neg");
+ return;
+ }
+ neg->entry.data = newd;
+ newd->ttl = ttl;
+ /* entry must have one RR, otherwise not valid in cache.
+ * put in one RR with empty rdata: those are ignored as nameserver */
+ newd->count = 1;
+ newd->rrsig_count = 0;
+ newd->trust = rrset_trust_ans_noAA;
+ newd->rr_len = (size_t*)((uint8_t*)newd +
+ sizeof(struct packed_rrset_data));
+ newd->rr_len[0] = 0 /* zero len rdata */ + sizeof(uint16_t);
+ packed_rrset_ptr_fixup(newd);
+ newd->rr_ttl[0] = newd->ttl;
+ ldns_write_uint16(newd->rr_data[0], 0 /* zero len rdata */);
+ /* store it */
+ log_rrset_key(VERB_ALGO, "store parent-side negative", neg);
+ iter_store_parentside_rrset(env, neg);
+}
+
+int
+iter_lookup_parent_NS_from_cache(struct module_env* env, struct delegpt* dp,
+ struct regional* region, struct query_info* qinfo)
+{
+ struct ub_packed_rrset_key* akey;
+ akey = rrset_cache_lookup(env->rrset_cache, dp->name,
+ dp->namelen, LDNS_RR_TYPE_NS, qinfo->qclass,
+ PACKED_RRSET_PARENT_SIDE, *env->now, 0);
+ if(akey) {
+ log_rrset_key(VERB_ALGO, "found parent-side NS in cache", akey);
+ dp->has_parent_side_NS = 1;
+ /* and mark the new names as lame */
+ if(!delegpt_rrset_add_ns(dp, region, akey, 1)) {
+ lock_rw_unlock(&akey->entry.lock);
+ return 0;
+ }
+ lock_rw_unlock(&akey->entry.lock);
+ }
+ return 1;
+}
+
+int iter_lookup_parent_glue_from_cache(struct module_env* env,
+ struct delegpt* dp, struct regional* region, struct query_info* qinfo)
+{
+ struct ub_packed_rrset_key* akey;
+ struct delegpt_ns* ns;
+ size_t num = delegpt_count_targets(dp);
+ for(ns = dp->nslist; ns; ns = ns->next) {
+ /* get cached parentside A */
+ akey = rrset_cache_lookup(env->rrset_cache, ns->name,
+ ns->namelen, LDNS_RR_TYPE_A, qinfo->qclass,
+ PACKED_RRSET_PARENT_SIDE, *env->now, 0);
+ if(akey) {
+ log_rrset_key(VERB_ALGO, "found parent-side", akey);
+ ns->done_pside4 = 1;
+ /* a negative-cache-element has no addresses it adds */
+ if(!delegpt_add_rrset_A(dp, region, akey, 1))
+ log_err("malloc failure in lookup_parent_glue");
+ lock_rw_unlock(&akey->entry.lock);
+ }
+ /* get cached parentside AAAA */
+ akey = rrset_cache_lookup(env->rrset_cache, ns->name,
+ ns->namelen, LDNS_RR_TYPE_AAAA, qinfo->qclass,
+ PACKED_RRSET_PARENT_SIDE, *env->now, 0);
+ if(akey) {
+ log_rrset_key(VERB_ALGO, "found parent-side", akey);
+ ns->done_pside6 = 1;
+ /* a negative-cache-element has no addresses it adds */
+ if(!delegpt_add_rrset_AAAA(dp, region, akey, 1))
+ log_err("malloc failure in lookup_parent_glue");
+ lock_rw_unlock(&akey->entry.lock);
+ }
+ }
+ /* see if new (but lame) addresses have become available */
+ return delegpt_count_targets(dp) != num;
+}
+
+int
+iter_get_next_root(struct iter_hints* hints, struct iter_forwards* fwd,
+ uint16_t* c)
+{
+ uint16_t c1 = *c, c2 = *c;
+ int r1 = hints_next_root(hints, &c1);
+ int r2 = forwards_next_root(fwd, &c2);
+ if(!r1 && !r2) /* got none, end of list */
+ return 0;
+ else if(!r1) /* got one, return that */
+ *c = c2;
+ else if(!r2)
+ *c = c1;
+ else if(c1 < c2) /* got both take smallest */
+ *c = c1;
+ else *c = c2;
+ return 1;
+}
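+/* A minimal usage sketch for walking all classes that have root data (the
+ * increment-and-repeat pattern is an assumption, not a verbatim caller):
+ *
+ *	uint16_t c = 0;
+ *	while(iter_get_next_root(hints, fwd, &c)) {
+ *		...;	prime or inspect the root for class c
+ *		if(c == 0xffff)
+ *			break;
+ *		c++;
+ *	}
+ */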
+
+void
+iter_scrub_ds(struct dns_msg* msg, struct ub_packed_rrset_key* ns, uint8_t* z)
+{
+ /* Only the DS record for the delegation itself is expected.
+ * We allow DS for everything between the bailiwick and the
+ * zonecut, thus DS records must be at or above the zonecut.
+ * And the DS records must be below the server authority zone.
+ * The answer section is already scrubbed. */
+ size_t i = msg->rep->an_numrrsets;
+ while(i < (msg->rep->an_numrrsets + msg->rep->ns_numrrsets)) {
+ struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
+ if(ntohs(s->rk.type) == LDNS_RR_TYPE_DS &&
+ (!ns || !dname_subdomain_c(ns->rk.dname, s->rk.dname)
+ || query_dname_compare(z, s->rk.dname) == 0)) {
+ log_nametypeclass(VERB_ALGO, "removing irrelevant DS",
+ s->rk.dname, ntohs(s->rk.type),
+ ntohs(s->rk.rrset_class));
+ memmove(msg->rep->rrsets+i, msg->rep->rrsets+i+1,
+ sizeof(struct ub_packed_rrset_key*) *
+ (msg->rep->rrset_count-i-1));
+ msg->rep->ns_numrrsets--;
+ msg->rep->rrset_count--;
+ /* stay at same i, but new record */
+ continue;
+ }
+ i++;
+ }
+}
+
+void iter_dec_attempts(struct delegpt* dp, int d)
+{
+ struct delegpt_addr* a;
+ for(a=dp->target_list; a; a = a->next_target) {
+ if(a->attempts >= OUTBOUND_MSG_RETRY) {
+ /* add back to result list */
+ a->next_result = dp->result_list;
+ dp->result_list = a;
+ }
+ if(a->attempts > d)
+ a->attempts -= d;
+ else a->attempts = 0;
+ }
+}
+
+void iter_merge_retry_counts(struct delegpt* dp, struct delegpt* old)
+{
+ struct delegpt_addr* a, *o, *prev;
+ for(a=dp->target_list; a; a = a->next_target) {
+ o = delegpt_find_addr(old, &a->addr, a->addrlen);
+ if(o) {
+ log_addr(VERB_ALGO, "copy attempt count previous dp",
+ &a->addr, a->addrlen);
+ a->attempts = o->attempts;
+ }
+ }
+ prev = NULL;
+ a = dp->usable_list;
+ while(a) {
+ if(a->attempts >= OUTBOUND_MSG_RETRY) {
+ log_addr(VERB_ALGO, "remove from usable list dp",
+ &a->addr, a->addrlen);
+ /* remove from result list */
+ if(prev)
+ prev->next_usable = a->next_usable;
+ else dp->usable_list = a->next_usable;
+ /* prev stays the same */
+ a = a->next_usable;
+ continue;
+ }
+ prev = a;
+ a = a->next_usable;
+ }
+}
diff --git a/3rdParty/Unbound/src/src/iterator/iter_utils.h b/3rdParty/Unbound/src/src/iterator/iter_utils.h
new file mode 100644
index 0000000..6d2f862
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iter_utils.h
@@ -0,0 +1,312 @@
+/*
+ * iterator/iter_utils.h - iterative resolver module utility functions.
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains functions to assist the iterator module.
+ * Configuration options. Forward zones.
+ */
+
+#ifndef ITERATOR_ITER_UTILS_H
+#define ITERATOR_ITER_UTILS_H
+#include "iterator/iter_resptype.h"
+#include <ldns/buffer.h>
+struct iter_env;
+struct iter_hints;
+struct iter_forwards;
+struct config_file;
+struct module_env;
+struct delegpt_addr;
+struct delegpt;
+struct regional;
+struct msg_parse;
+struct ub_randstate;
+struct query_info;
+struct reply_info;
+struct module_qstate;
+struct sock_list;
+struct ub_packed_rrset_key;
+
+/**
+ * Process config options and set iterator module state.
+ * Sets default values if no config is found.
+ * @param iter_env: iterator module state.
+ * @param cfg: config options.
+ * @return 0 on error.
+ */
+int iter_apply_cfg(struct iter_env* iter_env, struct config_file* cfg);
+
+/**
+ * Select a valid, nice target to send query to.
+ * Sorting and removing unsuitable targets is combined.
+ *
+ * @param iter_env: iterator module global state, with ip6 enabled and
+ * do-not-query-addresses.
+ * @param env: environment with infra cache (lameness, rtt info).
+ * @param dp: delegation point with result list.
+ * @param name: zone name (for lameness check).
+ * @param namelen: length of name.
+ * @param qtype: query type that we want to send.
+ * @param dnssec_lame: set to 1 if a known dnssec-lame server is selected;
+ * these are not preferred, but are used as a last resort.
+ * @param chase_to_rd: set to 1 if a known recursion-lame server is selected;
+ * these are not preferred, but are used as a last resort.
+ * @param open_target: number of currently outstanding target queries.
+ * If we wait for these, perhaps more server addresses become available.
+ * @param blacklist: the IP blacklist to use.
+ * @return best target or NULL if no target.
+ * if not null, that target is removed from the result list in the dp.
+ */
+struct delegpt_addr* iter_server_selection(struct iter_env* iter_env,
+ struct module_env* env, struct delegpt* dp, uint8_t* name,
+ size_t namelen, uint16_t qtype, int* dnssec_lame,
+ int* chase_to_rd, int open_target, struct sock_list* blacklist);
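+/*
+ * Illustrative call (a sketch; the variable names dp, qstate, ie,
+ * outstanding and blacklist are placeholders, not a verbatim caller):
+ *
+ *	int dnssec_lame = 0, chase_to_rd = 0;
+ *	struct delegpt_addr* target = iter_server_selection(ie, qstate->env,
+ *		dp, dp->name, dp->namelen, qtype, &dnssec_lame,
+ *		&chase_to_rd, outstanding, blacklist);
+ *	if(!target)
+ *		...;	fetch more addresses or give up
+ */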
+
+/**
+ * Allocate dns_msg from parsed msg, in regional.
+ * @param pkt: packet.
+ * @param msg: parsed message (cleaned and ready for regional allocation).
+ * @param regional: regional to use for allocation.
+ * @return newly allocated dns_msg, or NULL on memory error.
+ */
+struct dns_msg* dns_alloc_msg(ldns_buffer* pkt, struct msg_parse* msg,
+ struct regional* regional);
+
+/**
+ * Copy a dns_msg to this regional.
+ * @param from: dns message, also in regional.
+ * @param regional: regional to use for allocation.
+ * @return newly allocated dns_msg, or NULL on memory error.
+ */
+struct dns_msg* dns_copy_msg(struct dns_msg* from, struct regional* regional);
+
+/**
+ * Allocate a dns_msg with malloc/alloc structure and store in dns cache.
+ * @param env: environment, with alloc structure and dns cache.
+ * @param qinf: query info, the query for which answer is stored.
+ * @param rep: reply in dns_msg from dns_alloc_msg for example.
+ * @param is_referral: If true, then the given message to be stored is a
+ * referral. The cache implementation may use this as a hint.
+ * @param leeway: prefetch TTL leeway to expire old rrsets quicker.
+ * @param region: to copy modified (cache is better) rrs back to.
+ * @return 0 on alloc error (out of memory).
+ */
+int iter_dns_store(struct module_env* env, struct query_info* qinf,
+ struct reply_info* rep, int is_referral, uint32_t leeway,
+ struct regional* region);
+
+/**
+ * Select randomly with n/m probability.
+ * For shuffle NS records for address fetching.
+ * @param rnd: random table
+ * @param n: probability.
+ * @param m: divisor for probability.
+ * @return true with n/m probability.
+ */
+int iter_ns_probability(struct ub_randstate* rnd, int n, int m);
+
+/**
+ * Mark targets that result in a dependency cycle as done, so they
+ * will not get selected as targets.
+ * @param qstate: query state.
+ * @param dp: delegpt to mark ns in.
+ */
+void iter_mark_cycle_targets(struct module_qstate* qstate, struct delegpt* dp);
+
+/**
+ * Mark targets that result in a dependency cycle as done, so they
+ * will not get selected as targets. For the parent-side lookups.
+ * @param qstate: query state.
+ * @param dp: delegpt to mark ns in.
+ */
+void iter_mark_pside_cycle_targets(struct module_qstate* qstate,
+ struct delegpt* dp);
+
+/**
+ * See if the delegation point is useless, that is, if it immediately
+ * offers no targets for further recursion.
+ * @param qinfo: query name and type
+ * @param qflags: query flags with RD flag
+ * @param dp: delegpt to check.
+ * @return true if dp is useless.
+ */
+int iter_dp_is_useless(struct query_info* qinfo, uint16_t qflags,
+ struct delegpt* dp);
+
+/**
+ * See if delegation is expected to have DNSSEC information (RRSIGs) in
+ * its answers, or not. Inspects delegation point (name), trust anchors,
+ * and delegation message (DS RRset) to determine this.
+ * @param env: module env with trust anchors.
+ * @param dp: delegation point.
+ * @param msg: delegation message, with DS if a secure referral.
+ * @param dclass: class of query.
+ * @return 1 if dnssec is expected, 0 if not.
+ */
+int iter_indicates_dnssec(struct module_env* env, struct delegpt* dp,
+ struct dns_msg* msg, uint16_t dclass);
+
+/**
+ * See if a message contains DNSSEC.
+ * This is examined by looking for RRSIGs. With DNSSEC a valid answer,
+ * nxdomain, nodata, referral or cname reply has RRSIGs in answer or auth
+ * sections, sigs on answer data, SOA, DS, or NSEC/NSEC3 records.
+ * @param msg: message to examine.
+ * @return true if DNSSEC information was found.
+ */
+int iter_msg_has_dnssec(struct dns_msg* msg);
+
+/**
+ * See if a message is known to be from a certain zone.
+ * This looks for SOA or NS rrsets, for answers.
+ * For referrals, when one label is delegated, the zone is detected.
+ * Does not look at signatures.
+ * @param msg: the message to inspect.
+ * @param dp: delegation point with zone name to look for.
+ * @param type: type of message.
+ * @param dclass: class of query.
+ * @return true if message is certain to be from zone in dp->name.
+ * false if not sure (empty msg), or not from the zone.
+ */
+int iter_msg_from_zone(struct dns_msg* msg, struct delegpt* dp,
+ enum response_type type, uint16_t dclass);
+
+/**
+ * Check if two replies are equal
+ * For fallback procedures
+ * @param p: reply one. The reply has rrset data pointers in region.
+ * Does not check rrset-IDs
+ * @param q: reply two
+ * @param buf: scratch buffer.
+ * @return if one and two are equal.
+ */
+int reply_equal(struct reply_info* p, struct reply_info* q, ldns_buffer* buf);
+
+/**
+ * Store parent-side rrset in separate rrset cache entries for later
+ * last-resort lookups in case the child-side version of this information
+ * fails.
+ * @param env: environment with cache, time, ...
+ * @param rrset: the rrset to store (copied).
+ * Failure to store is logged, but otherwise ignored.
+ */
+void iter_store_parentside_rrset(struct module_env* env,
+ struct ub_packed_rrset_key* rrset);
+
+/**
+ * Store parent-side NS records from a referral message
+ * @param env: environment with cache, time, ...
+ * @param rep: response with NS rrset.
+ * Failure to store is logged, but otherwise ignored.
+ */
+void iter_store_parentside_NS(struct module_env* env, struct reply_info* rep);
+
+/**
+ * Store parent-side negative element, the parentside rrset does not exist,
+ * creates an rrset with empty rdata in the rrset cache with PARENTSIDE flag.
+ * @param env: environment with cache, time, ...
+ * @param qinfo: the identity of the rrset that is missing.
+ * @param rep: delegation response or answer response, to glean TTL from.
+ * (malloc) failure is logged but otherwise ignored.
+ */
+void iter_store_parentside_neg(struct module_env* env,
+ struct query_info* qinfo, struct reply_info* rep);
+
+/**
+ * Add parent NS record if that exists in the cache. This is both new
+ * information and acts like a timeout throttle on retries.
+ * @param env: query env with rrset cache and time.
+ * @param dp: delegation point to store result in. Also this dp is used to
+ * see which NS name is needed.
+ * @param region: region to alloc result in.
+ * @param qinfo: pertinent information, the qclass.
+ * @return false on malloc failure.
+ * if true, the routine worked and if such cached information
+ * existed dp->has_parent_side_NS is set true.
+ */
+int iter_lookup_parent_NS_from_cache(struct module_env* env,
+ struct delegpt* dp, struct regional* region, struct query_info* qinfo);
+
+/**
+ * Add parent-side glue if that exists in the cache. This is both new
+ * information and acts like a timeout throttle on retries to fetch them.
+ * @param env: query env with rrset cache and time.
+ * @param dp: delegation point to store result in. Also this dp is used to
+ * see which NS name is needed.
+ * @param region: region to alloc result in.
+ * @param qinfo: pertinent information, the qclass.
+ * @return: true, it worked, no malloc failures, and new addresses (lame)
+ * have been added, giving extra options as query targets.
+ */
+int iter_lookup_parent_glue_from_cache(struct module_env* env,
+ struct delegpt* dp, struct regional* region, struct query_info* qinfo);
+
+/**
+ * Lookup next root-hint or root-forward entry.
+ * @param hints: the hints.
+ * @param fwd: the forwards.
+ * @param c: the class to start searching at. 0 means find first one.
+ * @return false if no classes found, true if found and returned in c.
+ */
+int iter_get_next_root(struct iter_hints* hints, struct iter_forwards* fwd,
+ uint16_t* c);
+
+/**
+ * Remove DS records that are inappropriate before they are cached.
+ * @param msg: the response to scrub.
+ * @param ns: RRSET that is the NS record for the referral.
+ * if NULL, then all DS records are removed from the authority section.
+ * @param z: zone name that the response is from.
+ */
+void iter_scrub_ds(struct dns_msg* msg, struct ub_packed_rrset_key* ns,
+ uint8_t* z);
+
+/**
+ * Remove query attempts from all available ips. For 0x20.
+ * @param dp: delegpt.
+ * @param d: decrease.
+ */
+void iter_dec_attempts(struct delegpt* dp, int d);
+
+/**
+ * Add retry counts from older delegpt to newer delegpt.
+ * Does not waste time on timeout'd (or other failing) addresses.
+ * @param dp: new delegationpoint.
+ * @param old: old delegationpoint.
+ */
+void iter_merge_retry_counts(struct delegpt* dp, struct delegpt* old);
+
+#endif /* ITERATOR_ITER_UTILS_H */
diff --git a/3rdParty/Unbound/src/src/iterator/iterator.c b/3rdParty/Unbound/src/src/iterator/iterator.c
new file mode 100644
index 0000000..432fdc4
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iterator.c
@@ -0,0 +1,2767 @@
+/*
+ * iterator/iterator.c - iterative resolver DNS query response module
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains a module that performs recursive, iterative DNS query
+ * processing.
+ */
+
+#include "config.h"
+#include <ldns/ldns.h>
+#include "iterator/iterator.h"
+#include "iterator/iter_utils.h"
+#include "iterator/iter_hints.h"
+#include "iterator/iter_fwd.h"
+#include "iterator/iter_donotq.h"
+#include "iterator/iter_delegpt.h"
+#include "iterator/iter_resptype.h"
+#include "iterator/iter_scrub.h"
+#include "iterator/iter_priv.h"
+#include "validator/val_neg.h"
+#include "services/cache/dns.h"
+#include "services/cache/infra.h"
+#include "util/module.h"
+#include "util/netevent.h"
+#include "util/net_help.h"
+#include "util/regional.h"
+#include "util/data/dname.h"
+#include "util/data/msgencode.h"
+#include "util/fptr_wlist.h"
+#include "util/config_file.h"
+
+int
+iter_init(struct module_env* env, int id)
+{
+ struct iter_env* iter_env = (struct iter_env*)calloc(1,
+ sizeof(struct iter_env));
+ if(!iter_env) {
+ log_err("malloc failure");
+ return 0;
+ }
+ env->modinfo[id] = (void*)iter_env;
+ if(!iter_apply_cfg(iter_env, env->cfg)) {
+ log_err("iterator: could not apply configuration settings.");
+ return 0;
+ }
+ return 1;
+}
+
+void
+iter_deinit(struct module_env* env, int id)
+{
+ struct iter_env* iter_env;
+ if(!env || !env->modinfo[id])
+ return;
+ iter_env = (struct iter_env*)env->modinfo[id];
+ free(iter_env->target_fetch_policy);
+ priv_delete(iter_env->priv);
+ hints_delete(iter_env->hints);
+ donotq_delete(iter_env->donotq);
+ free(iter_env);
+ env->modinfo[id] = NULL;
+}
+
+/** new query for iterator */
+static int
+iter_new(struct module_qstate* qstate, int id)
+{
+ struct iter_qstate* iq = (struct iter_qstate*)regional_alloc(
+ qstate->region, sizeof(struct iter_qstate));
+ qstate->minfo[id] = iq;
+ if(!iq)
+ return 0;
+ memset(iq, 0, sizeof(*iq));
+ iq->state = INIT_REQUEST_STATE;
+ iq->final_state = FINISHED_STATE;
+ iq->an_prepend_list = NULL;
+ iq->an_prepend_last = NULL;
+ iq->ns_prepend_list = NULL;
+ iq->ns_prepend_last = NULL;
+ iq->dp = NULL;
+ iq->depth = 0;
+ iq->num_target_queries = 0;
+ iq->num_current_queries = 0;
+ iq->query_restart_count = 0;
+ iq->referral_count = 0;
+ iq->sent_count = 0;
+ iq->wait_priming_stub = 0;
+ iq->refetch_glue = 0;
+ iq->dnssec_expected = 0;
+ iq->dnssec_lame_query = 0;
+ iq->chase_flags = qstate->query_flags;
+ /* Start with the (current) qname. */
+ iq->qchase = qstate->qinfo;
+ outbound_list_init(&iq->outlist);
+ return 1;
+}
+
+/**
+ * Transition to the next state. This can be used to advance a currently
+ * processing event. It cannot be used to reactivate a forEvent.
+ *
+ * @param iq: iterator query state
+ * @param nextstate The state to transition to.
+ * @return true. This is so this can be called as the return value for the
+ * actual process*State() methods. (Transitioning to the next state
+ * implies further processing).
+ */
+static int
+next_state(struct iter_qstate* iq, enum iter_state nextstate)
+{
+ /* If transitioning to a "response" state, make sure that there is a
+ * response */
+ if(iter_state_is_responsestate(nextstate)) {
+ if(iq->response == NULL) {
+ log_err("transitioning to response state sans "
+ "response.");
+ }
+ }
+ iq->state = nextstate;
+ return 1;
+}
+
+/**
+ * Transition an event to its final state. Final states always either return
+ * a result up the module chain, or reactivate a dependent event. Which
+ * final state to transition to is set in the module state for the event when
+ * it was created, and depends on the original purpose of the event.
+ *
+ * The response is stored in the qstate->buf buffer.
+ *
+ * @param iq: iterator query state
+ * @return true. This is so this method can be used as the return value for
+ * the processState methods. (Transitioning to the final state
+ * implies further processing of that final state).
+ */
+static int
+final_state(struct iter_qstate* iq)
+{
+ return next_state(iq, iq->final_state);
+}
+
+/**
+ * Callback routine to handle errors in parent query states
+ * @param qstate: query state that failed.
+ * @param id: module id.
+ * @param super: super state.
+ */
+static void
+error_supers(struct module_qstate* qstate, int id, struct module_qstate* super)
+{
+ struct iter_qstate* super_iq = (struct iter_qstate*)super->minfo[id];
+
+ if(qstate->qinfo.qtype == LDNS_RR_TYPE_A ||
+ qstate->qinfo.qtype == LDNS_RR_TYPE_AAAA) {
+ /* mark address as failed. */
+ struct delegpt_ns* dpns = NULL;
+ if(super_iq->dp)
+ dpns = delegpt_find_ns(super_iq->dp,
+ qstate->qinfo.qname, qstate->qinfo.qname_len);
+ if(!dpns) {
+ /* not interested */
+ verbose(VERB_ALGO, "subq error, but not interested");
+ log_query_info(VERB_ALGO, "superq", &super->qinfo);
+ if(super_iq->dp)
+ delegpt_log(VERB_ALGO, super_iq->dp);
+ log_assert(0);
+ return;
+ } else {
+ /* see if the failure did get (parent-lame) info */
+ if(!cache_fill_missing(super->env,
+ super_iq->qchase.qclass, super->region,
+ super_iq->dp))
+ log_err("out of memory adding missing");
+ }
+ dpns->resolved = 1; /* mark as failed */
+ super_iq->num_target_queries--;
+ }
+ if(qstate->qinfo.qtype == LDNS_RR_TYPE_NS) {
+ /* prime failed to get delegation */
+ super_iq->dp = NULL;
+ }
+ /* evaluate targets again */
+ super_iq->state = QUERYTARGETS_STATE;
+ /* super becomes runnable, and will process this change */
+}
+
+/**
+ * Return an error to the client
+ * @param qstate: our query state
+ * @param id: module id
+ * @param rcode: error code (DNS errcode).
+ * @return: 0 for use by caller, to make notation easy, like:
+ * return error_response(..).
+ */
+static int
+error_response(struct module_qstate* qstate, int id, int rcode)
+{
+ verbose(VERB_QUERY, "return error response %s",
+ ldns_lookup_by_id(ldns_rcodes, rcode)?
+ ldns_lookup_by_id(ldns_rcodes, rcode)->name:"??");
+ qstate->return_rcode = rcode;
+ qstate->return_msg = NULL;
+ qstate->ext_state[id] = module_finished;
+ return 0;
+}
+
+/**
+ * Return an error to the client and cache the error code in the
+ * message cache (so per qname, qtype, qclass).
+ * @param qstate: our query state
+ * @param id: module id
+ * @param rcode: error code (DNS errcode).
+ * @return: 0 for use by caller, to make notation easy, like:
+ * return error_response(..).
+ */
+static int
+error_response_cache(struct module_qstate* qstate, int id, int rcode)
+{
+ /* store in cache */
+ struct reply_info err;
+ memset(&err, 0, sizeof(err));
+ err.flags = (uint16_t)(BIT_QR | BIT_RA);
+ FLAGS_SET_RCODE(err.flags, rcode);
+ err.qdcount = 1;
+ err.ttl = NORR_TTL;
+ err.prefetch_ttl = PREFETCH_TTL_CALC(err.ttl);
+ /* do not waste time trying to validate this servfail */
+ err.security = sec_status_indeterminate;
+ verbose(VERB_ALGO, "store error response in message cache");
+ if(!iter_dns_store(qstate->env, &qstate->qinfo, &err, 0, 0, NULL)) {
+ log_err("error_response_cache: could not store error (nomem)");
+ }
+ return error_response(qstate, id, rcode);
+}
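The synthetic error reply built above is mostly a header-flags exercise: QR and RA are set, and the rcode occupies the low four bits of the 16-bit flags word. A minimal standalone sketch of the same bit layout; the MY_* names are local stand-ins, not Unbound's BIT_QR/BIT_RA/FLAGS_SET_RCODE definitions:

#include <stdio.h>
#include <stdint.h>

/* local stand-ins for the DNS header flag bits used above */
#define MY_BIT_QR 0x8000                /* QR: this is a response */
#define MY_BIT_RA 0x0080                /* RA: recursion available */
#define MY_SET_RCODE(f, r) ((f) = (uint16_t)(((f) & 0xfff0) | (r)))

int main(void)
{
	uint16_t flags = MY_BIT_QR | MY_BIT_RA;
	MY_SET_RCODE(flags, 2);                 /* 2 = SERVFAIL */
	printf("flags word: 0x%04x\n", flags);  /* prints 0x8082 */
	return 0;
}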
+
+/** check if prepend item is duplicate item */
+static int
+prepend_is_duplicate(struct ub_packed_rrset_key** sets, size_t to,
+ struct ub_packed_rrset_key* dup)
+{
+ size_t i;
+ for(i=0; i<to; i++) {
+ if(sets[i]->rk.type == dup->rk.type &&
+ sets[i]->rk.rrset_class == dup->rk.rrset_class &&
+ sets[i]->rk.dname_len == dup->rk.dname_len &&
+ query_dname_compare(sets[i]->rk.dname, dup->rk.dname)
+ == 0)
+ return 1;
+ }
+ return 0;
+}
+
+/** prepend the prepend list in the answer and authority section of dns_msg */
+static int
+iter_prepend(struct iter_qstate* iq, struct dns_msg* msg,
+ struct regional* region)
+{
+ struct iter_prep_list* p;
+ struct ub_packed_rrset_key** sets;
+	size_t num_an = 0, num_ns = 0;
+ for(p = iq->an_prepend_list; p; p = p->next)
+ num_an++;
+ for(p = iq->ns_prepend_list; p; p = p->next)
+ num_ns++;
+ if(num_an + num_ns == 0)
+ return 1;
+ verbose(VERB_ALGO, "prepending %d rrsets", (int)num_an + (int)num_ns);
+ sets = regional_alloc(region, (num_an+num_ns+msg->rep->rrset_count) *
+ sizeof(struct ub_packed_rrset_key*));
+ if(!sets)
+ return 0;
+ /* ANSWER section */
+ num_an = 0;
+ for(p = iq->an_prepend_list; p; p = p->next) {
+ sets[num_an++] = p->rrset;
+ }
+ memcpy(sets+num_an, msg->rep->rrsets, msg->rep->an_numrrsets *
+ sizeof(struct ub_packed_rrset_key*));
+ /* AUTH section */
+ num_ns = 0;
+ for(p = iq->ns_prepend_list; p; p = p->next) {
+ if(prepend_is_duplicate(sets+msg->rep->an_numrrsets+num_an,
+ num_ns, p->rrset) || prepend_is_duplicate(
+ msg->rep->rrsets+msg->rep->an_numrrsets,
+ msg->rep->ns_numrrsets, p->rrset))
+ continue;
+ sets[msg->rep->an_numrrsets + num_an + num_ns++] = p->rrset;
+ }
+ memcpy(sets + num_an + msg->rep->an_numrrsets + num_ns,
+ msg->rep->rrsets + msg->rep->an_numrrsets,
+ (msg->rep->ns_numrrsets + msg->rep->ar_numrrsets) *
+ sizeof(struct ub_packed_rrset_key*));
+
+ /* NXDOMAIN rcode can stay if we prepended DNAME/CNAMEs, because
+ * this is what recursors should give. */
+ msg->rep->rrset_count += num_an + num_ns;
+ msg->rep->an_numrrsets += num_an;
+ msg->rep->ns_numrrsets += num_ns;
+ msg->rep->rrsets = sets;
+ return 1;
+}
+
+/**
+ * Add rrset to ANSWER prepend list
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param rrset: rrset to add.
+ * @return false on failure (malloc).
+ */
+static int
+iter_add_prepend_answer(struct module_qstate* qstate, struct iter_qstate* iq,
+ struct ub_packed_rrset_key* rrset)
+{
+ struct iter_prep_list* p = (struct iter_prep_list*)regional_alloc(
+ qstate->region, sizeof(struct iter_prep_list));
+ if(!p)
+ return 0;
+ p->rrset = rrset;
+ p->next = NULL;
+ /* add at end */
+ if(iq->an_prepend_last)
+ iq->an_prepend_last->next = p;
+ else iq->an_prepend_list = p;
+ iq->an_prepend_last = p;
+ return 1;
+}
+
+/**
+ * Add rrset to AUTHORITY prepend list
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param rrset: rrset to add.
+ * @return false on failure (malloc).
+ */
+static int
+iter_add_prepend_auth(struct module_qstate* qstate, struct iter_qstate* iq,
+ struct ub_packed_rrset_key* rrset)
+{
+ struct iter_prep_list* p = (struct iter_prep_list*)regional_alloc(
+ qstate->region, sizeof(struct iter_prep_list));
+ if(!p)
+ return 0;
+ p->rrset = rrset;
+ p->next = NULL;
+ /* add at end */
+ if(iq->ns_prepend_last)
+ iq->ns_prepend_last->next = p;
+ else iq->ns_prepend_list = p;
+ iq->ns_prepend_last = p;
+ return 1;
+}
+
+/**
+ * Given a CNAME response (defined as a response containing a CNAME or DNAME
+ * that does not answer the request), process the response, modifying the
+ * state as necessary. This follows the CNAME/DNAME chain and returns the
+ * final query name.
+ *
+ * sets the new query name, after following the CNAME/DNAME chain.
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param msg: the response.
+ * @param mname: returned target new query name.
+ * @param mname_len: length of mname.
+ * @return false on (malloc) error.
+ */
+static int
+handle_cname_response(struct module_qstate* qstate, struct iter_qstate* iq,
+ struct dns_msg* msg, uint8_t** mname, size_t* mname_len)
+{
+ size_t i;
+ /* Start with the (current) qname. */
+ *mname = iq->qchase.qname;
+ *mname_len = iq->qchase.qname_len;
+
+ /* Iterate over the ANSWER rrsets in order, looking for CNAMEs and
+ * DNAMES. */
+ for(i=0; i<msg->rep->an_numrrsets; i++) {
+ struct ub_packed_rrset_key* r = msg->rep->rrsets[i];
+ /* If there is a (relevant) DNAME, add it to the list.
+ * We always expect there to be CNAME that was generated
+ * by this DNAME following, so we don't process the DNAME
+ * directly. */
+ if(ntohs(r->rk.type) == LDNS_RR_TYPE_DNAME &&
+ dname_strict_subdomain_c(*mname, r->rk.dname)) {
+ if(!iter_add_prepend_answer(qstate, iq, r))
+ return 0;
+ continue;
+ }
+
+ if(ntohs(r->rk.type) == LDNS_RR_TYPE_CNAME &&
+ query_dname_compare(*mname, r->rk.dname) == 0) {
+ /* Add this relevant CNAME rrset to the prepend list.*/
+ if(!iter_add_prepend_answer(qstate, iq, r))
+ return 0;
+ get_cname_target(r, mname, mname_len);
+ }
+
+ /* Other rrsets in the section are ignored. */
+ }
+ /* add authority rrsets to authority prepend, for wildcarded CNAMEs */
+ for(i=msg->rep->an_numrrsets; i<msg->rep->an_numrrsets +
+ msg->rep->ns_numrrsets; i++) {
+ struct ub_packed_rrset_key* r = msg->rep->rrsets[i];
+ /* only add NSEC/NSEC3, as they may be needed for validation */
+ if(ntohs(r->rk.type) == LDNS_RR_TYPE_NSEC ||
+ ntohs(r->rk.type) == LDNS_RR_TYPE_NSEC3) {
+ if(!iter_add_prepend_auth(qstate, iq, r))
+ return 0;
+ }
+ }
+ return 1;
+}
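The chase performed above reduces to: start from the current query name and, for each CNAME whose owner equals the name chased so far, move the chase target to that CNAME's rdata; DNAMEs are only prepended to the answer, since the synthesized CNAME that goes with them is expected to follow. A minimal standalone sketch of that chase over a simplified record list; struct rec and the names below are hypothetical, not Unbound types:

#include <stdio.h>
#include <string.h>

/* simplified record: owner name and CNAME target, in text form */
struct rec { const char* owner; const char* target; };

/* follow CNAMEs in answer-section order, starting from qname */
static const char* chase_cname(const char* qname,
	const struct rec* answer, int count)
{
	const char* cur = qname;
	int i;
	for(i = 0; i < count; i++) {
		if(strcmp(answer[i].owner, cur) == 0)
			cur = answer[i].target;
	}
	return cur;
}

int main(void)
{
	struct rec ans[] = {
		{ "www.example.org.", "a.example.net." },
		{ "a.example.net.",   "b.example.net." }
	};
	printf("chase ends at %s\n",
		chase_cname("www.example.org.", ans, 2));
	return 0;
}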
+
+/**
+ * Generate a subrequest.
+ * Generate a local request event. Local events are tied to this module, and
+ * have a corresponding (first tier) event that is waiting for this event to
+ * resolve to continue.
+ *
+ * @param qname The query name for this request.
+ * @param qnamelen length of qname
+ * @param qtype The query type for this request.
+ * @param qclass The query class for this request.
+ * @param qstate The event that is generating this event.
+ * @param id: module id.
+ * @param iq: The iterator state that is generating this event.
+ * @param initial_state The initial response state (normally this
+ * is QUERY_RESP_STATE, unless it is known that the request won't
+ * need iterative processing).
+ * @param finalstate The final state for the response to this request.
+ * @param subq_ret: if newly allocated, the subquerystate, or NULL if it does
+ * not need initialisation.
+ * @param v: if true, validation is done on the subquery.
+ * @return false on error (malloc).
+ */
+static int
+generate_sub_request(uint8_t* qname, size_t qnamelen, uint16_t qtype,
+ uint16_t qclass, struct module_qstate* qstate, int id,
+ struct iter_qstate* iq, enum iter_state initial_state,
+ enum iter_state finalstate, struct module_qstate** subq_ret, int v)
+{
+ struct module_qstate* subq = NULL;
+ struct iter_qstate* subiq = NULL;
+ uint16_t qflags = 0; /* OPCODE QUERY, no flags */
+ struct query_info qinf;
+ int prime = (finalstate == PRIME_RESP_STATE)?1:0;
+ qinf.qname = qname;
+ qinf.qname_len = qnamelen;
+ qinf.qtype = qtype;
+ qinf.qclass = qclass;
+
+ /* RD should be set only when sending the query back through the INIT
+ * state. */
+ if(initial_state == INIT_REQUEST_STATE)
+ qflags |= BIT_RD;
+ /* We set the CD flag so we can send this through the "head" of
+ * the resolution chain, which might have a validator. We are
+ * uninterested in validating things not on the direct resolution
+ * path. */
+ if(!v)
+ qflags |= BIT_CD;
+
+ /* attach subquery, lookup existing or make a new one */
+ fptr_ok(fptr_whitelist_modenv_attach_sub(qstate->env->attach_sub));
+ if(!(*qstate->env->attach_sub)(qstate, &qinf, qflags, prime, &subq)) {
+ return 0;
+ }
+ *subq_ret = subq;
+ if(subq) {
+ /* initialise the new subquery */
+ subq->curmod = id;
+ subq->ext_state[id] = module_state_initial;
+ subq->minfo[id] = regional_alloc(subq->region,
+ sizeof(struct iter_qstate));
+ if(!subq->minfo[id]) {
+ log_err("init subq: out of memory");
+ fptr_ok(fptr_whitelist_modenv_kill_sub(
+ qstate->env->kill_sub));
+ (*qstate->env->kill_sub)(subq);
+ return 0;
+ }
+ subiq = (struct iter_qstate*)subq->minfo[id];
+ memset(subiq, 0, sizeof(*subiq));
+ subiq->num_target_queries = 0;
+ subiq->num_current_queries = 0;
+ subiq->depth = iq->depth+1;
+ outbound_list_init(&subiq->outlist);
+ subiq->state = initial_state;
+ subiq->final_state = finalstate;
+ subiq->qchase = subq->qinfo;
+ subiq->chase_flags = subq->query_flags;
+ subiq->refetch_glue = 0;
+ }
+ return 1;
+}
+
+/**
+ * Generate and send a root priming request.
+ * @param qstate: the qstate that triggered the need to prime.
+ * @param iq: iterator query state.
+ * @param ie: iterator global state.
+ * @param id: module id.
+ * @param qclass: the class to prime.
+ * @return 0 on failure
+ */
+static int
+prime_root(struct module_qstate* qstate, struct iter_qstate* iq,
+ struct iter_env* ie, int id, uint16_t qclass)
+{
+ struct delegpt* dp;
+ struct module_qstate* subq;
+ verbose(VERB_DETAIL, "priming . %s NS",
+ ldns_lookup_by_id(ldns_rr_classes, (int)qclass)?
+ ldns_lookup_by_id(ldns_rr_classes, (int)qclass)->name:"??");
+ dp = hints_lookup_root(ie->hints, qclass);
+ if(!dp) {
+ verbose(VERB_ALGO, "Cannot prime due to lack of hints");
+ return 0;
+ }
+ /* Priming requests start at the QUERYTARGETS state, skipping
+ * the normal INIT state logic (which would cause an infloop). */
+ if(!generate_sub_request((uint8_t*)"\000", 1, LDNS_RR_TYPE_NS,
+ qclass, qstate, id, iq, QUERYTARGETS_STATE, PRIME_RESP_STATE,
+ &subq, 0)) {
+ verbose(VERB_ALGO, "could not prime root");
+ return 0;
+ }
+ if(subq) {
+ struct iter_qstate* subiq =
+ (struct iter_qstate*)subq->minfo[id];
+ /* Set the initial delegation point to the hint.
+ * copy dp, it is now part of the root prime query.
+ * dp was part of in the fixed hints structure. */
+ subiq->dp = delegpt_copy(dp, subq->region);
+ if(!subiq->dp) {
+ log_err("out of memory priming root, copydp");
+ fptr_ok(fptr_whitelist_modenv_kill_sub(
+ qstate->env->kill_sub));
+ (*qstate->env->kill_sub)(subq);
+ return 0;
+ }
+ /* there should not be any target queries. */
+ subiq->num_target_queries = 0;
+ subiq->dnssec_expected = iter_indicates_dnssec(
+ qstate->env, subiq->dp, NULL, subq->qinfo.qclass);
+ }
+
+ /* this module stops, our submodule starts, and does the query. */
+ qstate->ext_state[id] = module_wait_subquery;
+ return 1;
+}
+
+/**
+ * Generate and process a stub priming request. This method tests for the
+ * need to prime a stub zone, so it is safe to call for every request.
+ *
+ * @param qstate: the qstate that triggered the need to prime.
+ * @param iq: iterator query state.
+ * @param ie: iterator global state.
+ * @param id: module id.
+ * @param q: request name.
+ * @return true if a priming subrequest was made, false if not. It will only
+ * issue a priming request if it detects an unprimed stub.
+ * Uses the value 2 to signal, during a stub-prime in a root-prime situation,
+ * that a noprime stub is available and resolution can continue.
+ */
+static int
+prime_stub(struct module_qstate* qstate, struct iter_qstate* iq,
+ struct iter_env* ie, int id, struct query_info* q)
+{
+ /* Lookup the stub hint. This will return null if the stub doesn't
+ * need to be re-primed. */
+ struct iter_hints_stub* stub;
+ struct delegpt* stub_dp;
+ struct module_qstate* subq;
+ uint8_t* delname = q->qname;
+ size_t delnamelen = q->qname_len;
+
+ if(q->qtype == LDNS_RR_TYPE_DS && !dname_is_root(q->qname))
+ /* remove first label, but not for root */
+ dname_remove_label(&delname, &delnamelen);
+
+ stub = hints_lookup_stub(ie->hints, delname, q->qclass, iq->dp);
+ /* The stub (if there is one) does not need priming. */
+ if(!stub)
+ return 0;
+ stub_dp = stub->dp;
+
+ /* is it a noprime stub (always use) */
+ if(stub->noprime) {
+ int r = 0;
+ if(iq->dp == NULL) r = 2;
+ /* copy the dp out of the fixed hints structure, so that
+ * it can be changed when servicing this query */
+ iq->dp = delegpt_copy(stub_dp, qstate->region);
+ if(!iq->dp) {
+ log_err("out of memory priming stub");
+ (void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ return 1; /* return 1 to make module stop, with error */
+ }
+ log_nametypeclass(VERB_DETAIL, "use stub", stub_dp->name,
+ LDNS_RR_TYPE_NS, q->qclass);
+ return r;
+ }
+
+ /* Otherwise, we need to (re)prime the stub. */
+ log_nametypeclass(VERB_DETAIL, "priming stub", stub_dp->name,
+ LDNS_RR_TYPE_NS, q->qclass);
+
+ /* Stub priming events start at the QUERYTARGETS state to avoid the
+ * redundant INIT state processing. */
+ if(!generate_sub_request(stub_dp->name, stub_dp->namelen,
+ LDNS_RR_TYPE_NS, q->qclass, qstate, id, iq,
+ QUERYTARGETS_STATE, PRIME_RESP_STATE, &subq, 0)) {
+ verbose(VERB_ALGO, "could not prime stub");
+ (void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ return 1; /* return 1 to make module stop, with error */
+ }
+ if(subq) {
+ struct iter_qstate* subiq =
+ (struct iter_qstate*)subq->minfo[id];
+
+ /* Set the initial delegation point to the hint. */
+ /* make copy to avoid use of stub dp by different qs/threads */
+ subiq->dp = delegpt_copy(stub_dp, subq->region);
+ if(!subiq->dp) {
+ log_err("out of memory priming stub, copydp");
+ fptr_ok(fptr_whitelist_modenv_kill_sub(
+ qstate->env->kill_sub));
+ (*qstate->env->kill_sub)(subq);
+ (void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ return 1; /* return 1 to make module stop, with error */
+ }
+ /* there should not be any target queries -- although there
+ * wouldn't be anyway, since stub hints never have
+ * missing targets. */
+ subiq->num_target_queries = 0;
+ subiq->wait_priming_stub = 1;
+ subiq->dnssec_expected = iter_indicates_dnssec(
+ qstate->env, subiq->dp, NULL, subq->qinfo.qclass);
+ }
+
+ /* this module stops, our submodule starts, and does the query. */
+ qstate->ext_state[id] = module_wait_subquery;
+ return 1;
+}
+
+/**
+ * Generate A and AAAA checks for glue that is in-zone for the referral
+ * we just got, to obtain authoritative information on the addresses.
+ *
+ * @param qstate: the qstate that triggered the need to prime.
+ * @param iq: iterator query state.
+ * @param id: module id.
+ */
+static void
+generate_a_aaaa_check(struct module_qstate* qstate, struct iter_qstate* iq,
+ int id)
+{
+ struct iter_env* ie = (struct iter_env*)qstate->env->modinfo[id];
+ struct module_qstate* subq;
+ size_t i;
+ struct reply_info* rep = iq->response->rep;
+ struct ub_packed_rrset_key* s;
+ log_assert(iq->dp);
+
+ if(iq->depth == ie->max_dependency_depth)
+ return;
+ /* walk through additional, and check if in-zone,
+ * only relevant A, AAAA are left after scrub anyway */
+ for(i=rep->an_numrrsets+rep->ns_numrrsets; i<rep->rrset_count; i++) {
+ s = rep->rrsets[i];
+ /* check *ALL* addresses that are transmitted in additional*/
+ /* is it an address ? */
+ if( !(ntohs(s->rk.type)==LDNS_RR_TYPE_A ||
+ ntohs(s->rk.type)==LDNS_RR_TYPE_AAAA)) {
+ continue;
+ }
+ /* is this query the same as the A/AAAA check for it */
+ if(qstate->qinfo.qtype == ntohs(s->rk.type) &&
+ qstate->qinfo.qclass == ntohs(s->rk.rrset_class) &&
+ query_dname_compare(qstate->qinfo.qname,
+ s->rk.dname)==0 &&
+ (qstate->query_flags&BIT_RD) &&
+ !(qstate->query_flags&BIT_CD))
+ continue;
+
+ /* generate subrequest for it */
+ log_nametypeclass(VERB_ALGO, "schedule addr fetch",
+ s->rk.dname, ntohs(s->rk.type),
+ ntohs(s->rk.rrset_class));
+ if(!generate_sub_request(s->rk.dname, s->rk.dname_len,
+ ntohs(s->rk.type), ntohs(s->rk.rrset_class),
+ qstate, id, iq,
+ INIT_REQUEST_STATE, FINISHED_STATE, &subq, 1)) {
+ verbose(VERB_ALGO, "could not generate addr check");
+ return;
+ }
+		/* ignore subq - no need for more init */
+ }
+}
+
+/**
+ * Generate a NS check request to obtain authoritative information
+ * on an NS rrset.
+ *
+ * @param qstate: the qstate that triggered the need to prime.
+ * @param iq: iterator query state.
+ * @param id: module id.
+ */
+static void
+generate_ns_check(struct module_qstate* qstate, struct iter_qstate* iq, int id)
+{
+ struct iter_env* ie = (struct iter_env*)qstate->env->modinfo[id];
+ struct module_qstate* subq;
+ log_assert(iq->dp);
+
+ if(iq->depth == ie->max_dependency_depth)
+ return;
+ /* is this query the same as the nscheck? */
+ if(qstate->qinfo.qtype == LDNS_RR_TYPE_NS &&
+ query_dname_compare(iq->dp->name, qstate->qinfo.qname)==0 &&
+ (qstate->query_flags&BIT_RD) && !(qstate->query_flags&BIT_CD)){
+ /* spawn off A, AAAA queries for in-zone glue to check */
+ generate_a_aaaa_check(qstate, iq, id);
+ return;
+ }
+
+ log_nametypeclass(VERB_ALGO, "schedule ns fetch",
+ iq->dp->name, LDNS_RR_TYPE_NS, iq->qchase.qclass);
+ if(!generate_sub_request(iq->dp->name, iq->dp->namelen,
+ LDNS_RR_TYPE_NS, iq->qchase.qclass, qstate, id, iq,
+ INIT_REQUEST_STATE, FINISHED_STATE, &subq, 1)) {
+ verbose(VERB_ALGO, "could not generate ns check");
+ return;
+ }
+ if(subq) {
+ struct iter_qstate* subiq =
+ (struct iter_qstate*)subq->minfo[id];
+
+ /* make copy to avoid use of stub dp by different qs/threads */
+ /* refetch glue to start higher up the tree */
+ subiq->refetch_glue = 1;
+ subiq->dp = delegpt_copy(iq->dp, subq->region);
+ if(!subiq->dp) {
+ log_err("out of memory generating ns check, copydp");
+ fptr_ok(fptr_whitelist_modenv_kill_sub(
+ qstate->env->kill_sub));
+ (*qstate->env->kill_sub)(subq);
+ return;
+ }
+ }
+}
+
+/**
+ * Generate a DNSKEY prefetch query to get the DNSKEY for the DS record we
+ * just got in a referral (where we have dnssec_expected, thus have trust
+ * anchors above it). Note that right after calling this routine the
+ * iterator detached subqueries (because of following the referral), and thus
+ * the DNSKEY query becomes detached, its return stored in the cache for
+ * later lookup by the validator. This cache lookup by the validator avoids
+ * the roundtrip incurred by the DNSKEY query. The DNSKEY query is now
+ * performed at about the same time the original query is sent to the domain,
+ * thus the two answers are likely to be returned at about the same time,
+ * saving a roundtrip from the validated lookup.
+ *
+ * @param qstate: the qstate that triggered the need to prime.
+ * @param iq: iterator query state.
+ * @param id: module id.
+ */
+static void
+generate_dnskey_prefetch(struct module_qstate* qstate,
+ struct iter_qstate* iq, int id)
+{
+ struct module_qstate* subq;
+ log_assert(iq->dp);
+
+ /* is this query the same as the prefetch? */
+ if(qstate->qinfo.qtype == LDNS_RR_TYPE_DNSKEY &&
+ query_dname_compare(iq->dp->name, qstate->qinfo.qname)==0 &&
+ (qstate->query_flags&BIT_RD) && !(qstate->query_flags&BIT_CD)){
+ return;
+ }
+
+ /* if the DNSKEY is in the cache this lookup will stop quickly */
+ log_nametypeclass(VERB_ALGO, "schedule dnskey prefetch",
+ iq->dp->name, LDNS_RR_TYPE_DNSKEY, iq->qchase.qclass);
+ if(!generate_sub_request(iq->dp->name, iq->dp->namelen,
+ LDNS_RR_TYPE_DNSKEY, iq->qchase.qclass, qstate, id, iq,
+ INIT_REQUEST_STATE, FINISHED_STATE, &subq, 0)) {
+ /* we'll be slower, but it'll work */
+ verbose(VERB_ALGO, "could not generate dnskey prefetch");
+ return;
+ }
+ if(subq) {
+ struct iter_qstate* subiq =
+ (struct iter_qstate*)subq->minfo[id];
+ /* this qstate has the right delegation for the dnskey lookup*/
+ /* make copy to avoid use of stub dp by different qs/threads */
+ subiq->dp = delegpt_copy(iq->dp, subq->region);
+ /* if !subiq->dp, it'll start from the cache, no problem */
+ }
+}
+
+/**
+ * See if the query needs forwarding.
+ *
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @return true if the request is forwarded, false if not.
+ * If it returns true but iq->dp is NULL, then a malloc failure occurred.
+ */
+static int
+forward_request(struct module_qstate* qstate, struct iter_qstate* iq)
+{
+ struct delegpt* dp;
+ uint8_t* delname = iq->qchase.qname;
+ size_t delnamelen = iq->qchase.qname_len;
+ /* strip one label off of DS query to lookup higher for it */
+ if(iq->qchase.qtype == LDNS_RR_TYPE_DS
+ && !dname_is_root(iq->qchase.qname))
+ dname_remove_label(&delname, &delnamelen);
+ dp = forwards_lookup(qstate->env->fwds, delname, iq->qchase.qclass);
+ if(!dp)
+ return 0;
+ /* send recursion desired to forward addr */
+ iq->chase_flags |= BIT_RD;
+ iq->dp = delegpt_copy(dp, qstate->region);
+ /* iq->dp checked by caller */
+ verbose(VERB_ALGO, "forwarding request");
+ return 1;
+}
+
+/**
+ * Process the initial part of the request handling. This state roughly
+ * corresponds to resolver algorithms steps 1 (find answer in cache) and 2
+ * (find the best servers to ask).
+ *
+ * Note that all requests start here, and query restarts revisit this state.
+ *
+ * This state either generates: 1) a response, from cache or error, 2) a
+ * priming event, or 3) forwards the request to the next state (init2,
+ * generally).
+ *
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param ie: iterator shared global environment.
+ * @param id: module id.
+ * @return true if the event needs more request processing immediately,
+ * false if not.
+ */
+static int
+processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
+ struct iter_env* ie, int id)
+{
+ uint8_t* delname;
+ size_t delnamelen;
+ struct dns_msg* msg;
+
+ log_query_info(VERB_DETAIL, "resolving", &qstate->qinfo);
+ /* check effort */
+
+ /* We enforce a maximum number of query restarts. This is primarily a
+ * cheap way to prevent CNAME loops. */
+ if(iq->query_restart_count > MAX_RESTART_COUNT) {
+ verbose(VERB_QUERY, "request has exceeded the maximum number"
+ " of query restarts with %d", iq->query_restart_count);
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+
+ /* We enforce a maximum recursion/dependency depth -- in general,
+ * this is unnecessary for dependency loops (although it will
+ * catch those), but it provides a sensible limit to the amount
+ * of work required to answer a given query. */
+ verbose(VERB_ALGO, "request has dependency depth of %d", iq->depth);
+ if(iq->depth > ie->max_dependency_depth) {
+ verbose(VERB_QUERY, "request has exceeded the maximum "
+ "dependency depth with depth of %d", iq->depth);
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+
+ /* If the request is qclass=ANY, setup to generate each class */
+ if(qstate->qinfo.qclass == LDNS_RR_CLASS_ANY) {
+ iq->qchase.qclass = 0;
+ return next_state(iq, COLLECT_CLASS_STATE);
+ }
+
+ /* Resolver Algorithm Step 1 -- Look for the answer in local data. */
+
+ /* This either results in a query restart (CNAME cache response), a
+ * terminating response (ANSWER), or a cache miss (null). */
+
+ if(qstate->blacklist) {
+ /* if cache, or anything else, was blacklisted then
+ * getting older results from cache is a bad idea, no cache */
+ verbose(VERB_ALGO, "cache blacklisted, going to the network");
+ msg = NULL;
+ } else {
+ msg = dns_cache_lookup(qstate->env, iq->qchase.qname,
+ iq->qchase.qname_len, iq->qchase.qtype,
+ iq->qchase.qclass, qstate->region, qstate->env->scratch);
+ if(!msg && qstate->env->neg_cache) {
+ /* lookup in negative cache; may result in
+ * NOERROR/NODATA or NXDOMAIN answers that need validation */
+ msg = val_neg_getmsg(qstate->env->neg_cache, &iq->qchase,
+ qstate->region, qstate->env->rrset_cache,
+ qstate->env->scratch_buffer,
+ *qstate->env->now, 1/*add SOA*/, NULL);
+ }
+ /* item taken from cache does not match our query name, thus
+ * security needs to be re-examined later */
+ if(msg && query_dname_compare(qstate->qinfo.qname,
+ iq->qchase.qname) != 0)
+ msg->rep->security = sec_status_unchecked;
+ }
+ if(msg) {
+ /* handle positive cache response */
+ enum response_type type = response_type_from_cache(msg,
+ &iq->qchase);
+ if(verbosity >= VERB_ALGO) {
+ log_dns_msg("msg from cache lookup", &msg->qinfo,
+ msg->rep);
+ verbose(VERB_ALGO, "msg ttl is %d, prefetch ttl %d",
+ (int)msg->rep->ttl,
+ (int)msg->rep->prefetch_ttl);
+ }
+
+ if(type == RESPONSE_TYPE_CNAME) {
+ uint8_t* sname = 0;
+ size_t slen = 0;
+ verbose(VERB_ALGO, "returning CNAME response from "
+ "cache");
+ if(!handle_cname_response(qstate, iq, msg,
+ &sname, &slen))
+ return error_response(qstate, id,
+ LDNS_RCODE_SERVFAIL);
+ iq->qchase.qname = sname;
+ iq->qchase.qname_len = slen;
+ /* This *is* a query restart, even if it is a cheap
+ * one. */
+ iq->dp = NULL;
+ iq->refetch_glue = 0;
+ iq->query_restart_count++;
+ iq->sent_count = 0;
+ sock_list_insert(&qstate->reply_origin, NULL, 0, qstate->region);
+ return next_state(iq, INIT_REQUEST_STATE);
+ }
+
+ /* if from cache, NULL, else insert 'cache IP' len=0 */
+ if(qstate->reply_origin)
+ sock_list_insert(&qstate->reply_origin, NULL, 0, qstate->region);
+ /* it is an answer, response, to final state */
+ verbose(VERB_ALGO, "returning answer from cache.");
+ iq->response = msg;
+ return final_state(iq);
+ }
+
+ /* attempt to forward the request */
+ if(forward_request(qstate, iq))
+ {
+ if(!iq->dp) {
+ log_err("alloc failure for forward dp");
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+ iq->refetch_glue = 0;
+ /* the request has been forwarded.
+ * forwarded requests need to be immediately sent to the
+ * next state, QUERYTARGETS. */
+ return next_state(iq, QUERYTARGETS_STATE);
+ }
+
+ /* Resolver Algorithm Step 2 -- find the "best" servers. */
+
+ /* first, adjust for DS queries. To avoid the grandparent problem,
+ * we just look for the closest set of server to the parent of qname.
+ * When re-fetching glue we also need to ask the parent.
+ */
+ if(iq->refetch_glue) {
+ if(!iq->dp) {
+ log_err("internal or malloc fail: no dp for refetch");
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+ delname = iq->dp->name;
+ delnamelen = iq->dp->namelen;
+ } else {
+ delname = iq->qchase.qname;
+ delnamelen = iq->qchase.qname_len;
+ }
+ if(iq->qchase.qtype == LDNS_RR_TYPE_DS || iq->refetch_glue ||
+ (iq->qchase.qtype == LDNS_RR_TYPE_NS && qstate->prefetch_leeway)) {
+ /* remove first label from delname, root goes to hints,
+ * but only to fetch glue, not for qtype=DS. */
+ /* also when prefetching an NS record, fetch it again from
+ * its parent, just as if it expired, so that you do not
+ * get stuck on an older nameserver that gives old NSrecords */
+ if(dname_is_root(delname) && (iq->refetch_glue ||
+ (iq->qchase.qtype == LDNS_RR_TYPE_NS &&
+ qstate->prefetch_leeway)))
+ delname = NULL; /* go to root priming */
+ else dname_remove_label(&delname, &delnamelen);
+ iq->refetch_glue = 0; /* if CNAME causes restart, no refetch */
+ }
+ /* delname is the name to lookup a delegation for. If NULL rootprime */
+ while(1) {
+
+ /* Lookup the delegation in the cache. If null, then the
+ * cache needs to be primed for the qclass. */
+ if(delname)
+ iq->dp = dns_cache_find_delegation(qstate->env, delname,
+ delnamelen, iq->qchase.qtype, iq->qchase.qclass,
+ qstate->region, &iq->deleg_msg, *qstate->env->now);
+ else iq->dp = NULL;
+
+ /* If the cache has returned nothing, then we have a
+ * root priming situation. */
+ if(iq->dp == NULL) {
+ /* if there is a stub, then no root prime needed */
+ int r = prime_stub(qstate, iq, ie, id, &iq->qchase);
+ if(r == 2)
+ break; /* got noprime-stub-zone, continue */
+ else if(r)
+ return 0; /* stub prime request made */
+ if(forwards_lookup_root(qstate->env->fwds,
+ iq->qchase.qclass)) {
+ /* forward zone root, no root prime needed */
+ /* fill in some dp - safety belt */
+ iq->dp = hints_lookup_root(ie->hints,
+ iq->qchase.qclass);
+ if(!iq->dp) {
+ log_err("internal error: no hints dp");
+ return error_response(qstate, id,
+ LDNS_RCODE_SERVFAIL);
+ }
+ iq->dp = delegpt_copy(iq->dp, qstate->region);
+ if(!iq->dp) {
+ log_err("out of memory in safety belt");
+ return error_response(qstate, id,
+ LDNS_RCODE_SERVFAIL);
+ }
+ return next_state(iq, INIT_REQUEST_2_STATE);
+ }
+ /* Note that the result of this will set a new
+ * DelegationPoint based on the result of priming. */
+ if(!prime_root(qstate, iq, ie, id, iq->qchase.qclass))
+ return error_response(qstate, id,
+ LDNS_RCODE_REFUSED);
+
+ /* priming creates and sends a subordinate query, with
+ * this query as the parent. So further processing for
+ * this event will stop until reactivated by the
+ * results of priming. */
+ return 0;
+ }
+
+		/* see if this dp is not useless.
+ * It is useless if:
+ * o all NS items are required glue.
+ * or the query is for NS item that is required glue.
+ * o no addresses are provided.
+ * o RD qflag is on.
+ * Instead, go up one level, and try to get even further
+ * If the root was useless, use safety belt information.
+ * Only check cache returns, because replies for servers
+ * could be useless but lead to loops (bumping into the
+ * same server reply) if useless-checked.
+ */
+ if(iter_dp_is_useless(&qstate->qinfo, qstate->query_flags,
+ iq->dp)) {
+ if(dname_is_root(iq->dp->name)) {
+ /* use safety belt */
+ verbose(VERB_QUERY, "Cache has root NS but "
+ "no addresses. Fallback to the safety belt.");
+ iq->dp = hints_lookup_root(ie->hints,
+ iq->qchase.qclass);
+ /* note deleg_msg is from previous lookup,
+ * but RD is on, so it is not used */
+ if(!iq->dp) {
+ log_err("internal error: no hints dp");
+ return error_response(qstate, id,
+ LDNS_RCODE_REFUSED);
+ }
+ iq->dp = delegpt_copy(iq->dp, qstate->region);
+ if(!iq->dp) {
+ log_err("out of memory in safety belt");
+ return error_response(qstate, id,
+ LDNS_RCODE_SERVFAIL);
+ }
+ break;
+ } else {
+ verbose(VERB_ALGO,
+ "cache delegation was useless:");
+ delegpt_log(VERB_ALGO, iq->dp);
+ /* go up */
+ delname = iq->dp->name;
+ delnamelen = iq->dp->namelen;
+ dname_remove_label(&delname, &delnamelen);
+ }
+ } else break;
+ }
+
+ verbose(VERB_ALGO, "cache delegation returns delegpt");
+ delegpt_log(VERB_ALGO, iq->dp);
+
+ /* Otherwise, set the current delegation point and move on to the
+ * next state. */
+ return next_state(iq, INIT_REQUEST_2_STATE);
+}
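The label stripping used above for DS queries and glue refetch (dname_remove_label in the real code) amounts to skipping the first label of an uncompressed wire-format name. A minimal standalone sketch, not the Unbound implementation:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* advance past the first label of an uncompressed wire-format dname;
 * the root name (a single zero byte) is left unchanged */
static void strip_first_label(uint8_t** name, size_t* len)
{
	uint8_t lablen = (*name)[0];
	if(lablen == 0)
		return; /* already at the root */
	*name += lablen + 1;
	*len -= lablen + 1;
}

int main(void)
{
	/* "www.example." in wire format: 3 'w' 'w' 'w' 7 'e'..'e' 0 */
	uint8_t wire[] = { 3,'w','w','w', 7,'e','x','a','m','p','l','e', 0 };
	uint8_t* name = wire;
	size_t len = sizeof(wire);
	strip_first_label(&name, &len);
	printf("left %u bytes, next label length %u\n",
		(unsigned)len, (unsigned)name[0]);  /* 9 and 7 */
	return 0;
}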
+
+/**
+ * Process the second part of the initial request handling. This state
+ * basically exists so that queries that generate root priming events have
+ * the same init processing as ones that do not. Request events that reach
+ * this state must have a valid currentDelegationPoint set.
+ *
+ * This part is primarily handling stub zone priming. Events that reach this
+ * state must have a current delegation point.
+ *
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param ie: iterator shared global environment.
+ * @param id: module id.
+ * @return true if the event needs more request processing immediately,
+ * false if not.
+ */
+static int
+processInitRequest2(struct module_qstate* qstate, struct iter_qstate* iq,
+ struct iter_env* ie, int id)
+{
+ log_query_info(VERB_QUERY, "resolving (init part 2): ",
+ &qstate->qinfo);
+
+ /* Check to see if we need to prime a stub zone. */
+ if(prime_stub(qstate, iq, ie, id, &iq->qchase)) {
+ /* A priming sub request was made */
+ return 0;
+ }
+
+ /* most events just get forwarded to the next state. */
+ return next_state(iq, INIT_REQUEST_3_STATE);
+}
+
+/**
+ * Process the third part of the initial request handling. This state exists
+ * as a separate state so that queries that generate stub priming events
+ * will get the tail end of the init process but not repeat the stub priming
+ * check.
+ *
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param id: module id.
+ * @return true, advancing the event to the QUERYTARGETS_STATE.
+ */
+static int
+processInitRequest3(struct module_qstate* qstate, struct iter_qstate* iq,
+ int id)
+{
+ log_query_info(VERB_QUERY, "resolving (init part 3): ",
+ &qstate->qinfo);
+ /* if the cache reply dp equals a validation anchor or msg has DS,
+ * then DNSSEC RRSIGs are expected in the reply */
+ iq->dnssec_expected = iter_indicates_dnssec(qstate->env, iq->dp,
+ iq->deleg_msg, iq->qchase.qclass);
+
+ /* If the RD flag wasn't set, then we just finish with the
+ * cached referral as the response. */
+ if(!(qstate->query_flags & BIT_RD)) {
+ iq->response = iq->deleg_msg;
+ if(verbosity >= VERB_ALGO)
+ log_dns_msg("no RD requested, using delegation msg",
+ &iq->response->qinfo, iq->response->rep);
+ if(qstate->reply_origin)
+ sock_list_insert(&qstate->reply_origin, NULL, 0, qstate->region);
+ return final_state(iq);
+ }
+ /* After this point, unset the RD flag -- this query is going to
+ * be sent to an auth. server. */
+ iq->chase_flags &= ~BIT_RD;
+
+ /* if dnssec expected, fetch key for the trust-anchor or cached-DS */
+ if(iq->dnssec_expected && qstate->env->cfg->prefetch_key &&
+ !(qstate->query_flags&BIT_CD)) {
+ generate_dnskey_prefetch(qstate, iq, id);
+ fptr_ok(fptr_whitelist_modenv_detach_subs(
+ qstate->env->detach_subs));
+ (*qstate->env->detach_subs)(qstate);
+ }
+
+ /* Jump to the next state. */
+ return next_state(iq, QUERYTARGETS_STATE);
+}
+
+/**
+ * Given a basic query, generate a parent-side "target" query.
+ * These are subordinate queries for missing delegation point target addresses,
+ * for which only the parent of the delegation provides correct IP addresses.
+ *
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param id: module id.
+ * @param name: target qname.
+ * @param namelen: target qname length.
+ * @param qtype: target qtype (either A or AAAA).
+ * @param qclass: target qclass.
+ * @return true on success, false on failure.
+ */
+static int
+generate_parentside_target_query(struct module_qstate* qstate,
+ struct iter_qstate* iq, int id, uint8_t* name, size_t namelen,
+ uint16_t qtype, uint16_t qclass)
+{
+ struct module_qstate* subq;
+ if(!generate_sub_request(name, namelen, qtype, qclass, qstate,
+ id, iq, INIT_REQUEST_STATE, FINISHED_STATE, &subq, 0))
+ return 0;
+ if(subq) {
+ struct iter_qstate* subiq =
+ (struct iter_qstate*)subq->minfo[id];
+ /* blacklist the cache - we want to fetch parent stuff */
+ sock_list_insert(&subq->blacklist, NULL, 0, subq->region);
+ subiq->query_for_pside_glue = 1;
+ if(dname_subdomain_c(name, iq->dp->name)) {
+ subiq->dp = delegpt_copy(iq->dp, subq->region);
+ subiq->dnssec_expected = iter_indicates_dnssec(
+ qstate->env, subiq->dp, NULL,
+ subq->qinfo.qclass);
+ subiq->refetch_glue = 1;
+ } else {
+ subiq->dp = dns_cache_find_delegation(qstate->env,
+ name, namelen, qtype, qclass, subq->region,
+ &subiq->deleg_msg, *qstate->env->now);
+ /* if no dp, then it's from root, refetch unneeded */
+ if(subiq->dp) {
+ subiq->dnssec_expected = iter_indicates_dnssec(
+ qstate->env, subiq->dp, NULL,
+ subq->qinfo.qclass);
+ subiq->refetch_glue = 1;
+ }
+ }
+ }
+ log_nametypeclass(VERB_QUERY, "new pside target", name, qtype, qclass);
+ return 1;
+}
+
+/**
+ * Given a basic query, generate a "target" query. These are subordinate
+ * queries for missing delegation point target addresses.
+ *
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param id: module id.
+ * @param name: target qname.
+ * @param namelen: target qname length.
+ * @param qtype: target qtype (either A or AAAA).
+ * @param qclass: target qclass.
+ * @return true on success, false on failure.
+ */
+static int
+generate_target_query(struct module_qstate* qstate, struct iter_qstate* iq,
+ int id, uint8_t* name, size_t namelen, uint16_t qtype, uint16_t qclass)
+{
+ struct module_qstate* subq;
+ if(!generate_sub_request(name, namelen, qtype, qclass, qstate,
+ id, iq, INIT_REQUEST_STATE, FINISHED_STATE, &subq, 0))
+ return 0;
+ log_nametypeclass(VERB_QUERY, "new target", name, qtype, qclass);
+ return 1;
+}
+
+/**
+ * Given an event at a certain state, generate zero or more target queries
+ * for its current delegation point.
+ *
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param ie: iterator shared global environment.
+ * @param id: module id.
+ * @param maxtargets: The maximum number of targets to query for.
+ * if it is negative, there is no maximum number of targets.
+ * @param num: returns the number of queries generated and processed,
+ * which may be zero if there were no missing targets.
+ * @return false on error.
+ */
+static int
+query_for_targets(struct module_qstate* qstate, struct iter_qstate* iq,
+ struct iter_env* ie, int id, int maxtargets, int* num)
+{
+ int query_count = 0;
+ struct delegpt_ns* ns;
+ int missing;
+ int toget = 0;
+
+ if(iq->depth == ie->max_dependency_depth)
+ return 0;
+
+ iter_mark_cycle_targets(qstate, iq->dp);
+ missing = (int)delegpt_count_missing_targets(iq->dp);
+ log_assert(maxtargets != 0); /* that would not be useful */
+
+ /* Generate target requests. Basically, any missing targets
+ * are queried for here, regardless if it is necessary to do
+ * so to continue processing. */
+ if(maxtargets < 0 || maxtargets > missing)
+ toget = missing;
+ else toget = maxtargets;
+ if(toget == 0) {
+ *num = 0;
+ return 1;
+ }
+ /* select 'toget' items from the total of 'missing' items */
+ log_assert(toget <= missing);
+
+ /* loop over missing targets */
+ for(ns = iq->dp->nslist; ns; ns = ns->next) {
+ if(ns->resolved)
+ continue;
+
+ /* randomly select this item with probability toget/missing */
+ if(!iter_ns_probability(qstate->env->rnd, toget, missing)) {
+ /* do not select this one, next; select toget number
+ * of items from a list one less in size */
+ missing --;
+ continue;
+ }
+
+ if(ie->supports_ipv6 && !ns->got6) {
+ /* Send the AAAA request. */
+ if(!generate_target_query(qstate, iq, id,
+ ns->name, ns->namelen,
+ LDNS_RR_TYPE_AAAA, iq->qchase.qclass)) {
+ *num = query_count;
+ if(query_count > 0)
+ qstate->ext_state[id] = module_wait_subquery;
+ return 0;
+ }
+ query_count++;
+ }
+ /* Send the A request. */
+ if(ie->supports_ipv4 && !ns->got4) {
+ if(!generate_target_query(qstate, iq, id,
+ ns->name, ns->namelen,
+ LDNS_RR_TYPE_A, iq->qchase.qclass)) {
+ *num = query_count;
+ if(query_count > 0)
+ qstate->ext_state[id] = module_wait_subquery;
+ return 0;
+ }
+ query_count++;
+ }
+
+ /* mark this target as in progress. */
+ ns->resolved = 1;
+ missing--;
+ toget--;
+ if(toget == 0)
+ break;
+ }
+ *num = query_count;
+ if(query_count > 0)
+ qstate->ext_state[id] = module_wait_subquery;
+
+ return 1;
+}
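The selection loop above picks 'toget' of the 'missing' unresolved NS names without bias by accepting each candidate with probability toget/missing and shrinking both counters as it scans, so exactly toget names end up selected. A minimal standalone sketch of that sampling scheme; random() stands in for Unbound's iter_ns_probability() and its RNG:

#include <stdio.h>
#include <stdlib.h>

/* pick 'toget' of 'n' items uniformly in one pass:
 * accept item i with probability toget/remaining, then shrink counters */
static void pick_uniform(int n, int toget)
{
	int remaining = n, i;
	for(i = 0; i < n && toget > 0; i++) {
		if(random() % remaining < toget) {
			printf("selected item %d\n", i);
			toget--;
		}
		remaining--;
	}
}

int main(void)
{
	srandom(42);
	pick_uniform(10, 3);  /* always prints exactly three selections */
	return 0;
}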
+
+/**
+ * Called by processQueryTargets when it would like extra targets to query
+ * but it seems to be out of options. At last resort some less appealing
+ * options are explored. If there are no more options, the result is SERVFAIL
+ *
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param ie: iterator shared global environment.
+ * @param id: module id.
+ * @return true if the event requires more request processing immediately,
+ * false if not.
+ */
+static int
+processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
+ struct iter_env* ie, int id)
+{
+ struct delegpt_ns* ns;
+ int query_count = 0;
+ verbose(VERB_ALGO, "No more query targets, attempting last resort");
+ log_assert(iq->dp);
+
+ if(!iq->dp->has_parent_side_NS) {
+ if(!iter_lookup_parent_NS_from_cache(qstate->env, iq->dp,
+ qstate->region, &qstate->qinfo)
+ || !iq->dp->has_parent_side_NS) {
+ /* if: malloc failure in lookup go up to try */
+ /* if: no parent NS in cache - go up one level */
+ verbose(VERB_ALGO, "try to grab parent NS");
+ iq->store_parent_NS = iq->dp;
+ iq->deleg_msg = NULL;
+ iq->refetch_glue = 1;
+ iq->query_restart_count++;
+ iq->sent_count = 0;
+ return next_state(iq, INIT_REQUEST_STATE);
+ }
+ }
+ /* see if that makes new names available */
+ if(!cache_fill_missing(qstate->env, iq->qchase.qclass,
+ qstate->region, iq->dp))
+ log_err("out of memory in cache_fill_missing");
+ if(iq->dp->usable_list) {
+ verbose(VERB_ALGO, "try parent-side-name, w. glue from cache");
+ return next_state(iq, QUERYTARGETS_STATE);
+ }
+ /* try to fill out parent glue from cache */
+ if(iter_lookup_parent_glue_from_cache(qstate->env, iq->dp,
+ qstate->region, &qstate->qinfo)) {
+ /* got parent stuff from cache, see if we can continue */
+ verbose(VERB_ALGO, "try parent-side glue from cache");
+ return next_state(iq, QUERYTARGETS_STATE);
+ }
+ /* query for an extra name added by the parent-NS record */
+ if(delegpt_count_missing_targets(iq->dp) > 0) {
+ int qs = 0;
+ verbose(VERB_ALGO, "try parent-side target name");
+ if(!query_for_targets(qstate, iq, ie, id, 1, &qs)) {
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+ iq->num_target_queries += qs;
+ if(qs != 0) {
+ qstate->ext_state[id] = module_wait_subquery;
+ return 0; /* and wait for them */
+ }
+ }
+ if(iq->depth == ie->max_dependency_depth) {
+ verbose(VERB_QUERY, "maxdepth and need more nameservers, fail");
+ return error_response_cache(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+ /* mark cycle targets for parent-side lookups */
+ iter_mark_pside_cycle_targets(qstate, iq->dp);
+ /* see if we can issue queries to get nameserver addresses */
+ /* this lookup is not randomized, but sequential. */
+ for(ns = iq->dp->nslist; ns; ns = ns->next) {
+ /* query for parent-side A and AAAA for nameservers */
+ if(ie->supports_ipv6 && !ns->done_pside6) {
+ /* Send the AAAA request. */
+ if(!generate_parentside_target_query(qstate, iq, id,
+ ns->name, ns->namelen,
+ LDNS_RR_TYPE_AAAA, iq->qchase.qclass))
+ return error_response(qstate, id,
+ LDNS_RCODE_SERVFAIL);
+ ns->done_pside6 = 1;
+ query_count++;
+ }
+ if(ie->supports_ipv4 && !ns->done_pside4) {
+ /* Send the A request. */
+ if(!generate_parentside_target_query(qstate, iq, id,
+ ns->name, ns->namelen,
+ LDNS_RR_TYPE_A, iq->qchase.qclass))
+ return error_response(qstate, id,
+ LDNS_RCODE_SERVFAIL);
+ ns->done_pside4 = 1;
+ query_count++;
+ }
+ if(query_count != 0) { /* suspend to await results */
+ verbose(VERB_ALGO, "try parent-side glue lookup");
+ iq->num_target_queries += query_count;
+ qstate->ext_state[id] = module_wait_subquery;
+ return 0;
+ }
+ }
+
+ /* if this was a parent-side glue query itself, then store that
+ * failure in cache. */
+ if(iq->query_for_pside_glue && !iq->pside_glue)
+ iter_store_parentside_neg(qstate->env, &qstate->qinfo,
+ iq->deleg_msg?iq->deleg_msg->rep:
+ (iq->response?iq->response->rep:NULL));
+
+ verbose(VERB_QUERY, "out of query targets -- returning SERVFAIL");
+ /* fail -- no more targets, no more hope of targets, no hope
+ * of a response. */
+ return error_response_cache(qstate, id, LDNS_RCODE_SERVFAIL);
+}
+
+/**
+ * This is the request event state where the request will be sent to one of
+ * its current query targets. This state also handles issuing target lookup
+ * queries for missing target IP addresses. Queries typically iterate on
+ * this state, both when they are just trying different targets for a given
+ * delegation point, and when they change delegation points. This state
+ * roughly corresponds to RFC 1034 algorithm steps 3 and 4.
+ *
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param ie: iterator shared global environment.
+ * @param id: module id.
+ * @return true if the event requires more request processing immediately,
+ * false if not. This state only returns true when it is generating
+ * a SERVFAIL response because the query has hit a dead end.
+ */
+static int
+processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq,
+ struct iter_env* ie, int id)
+{
+ int tf_policy;
+ struct delegpt_addr* target;
+ struct outbound_entry* outq;
+
+ /* NOTE: a request will encounter this state for each target it
+ * needs to send a query to. That is, at least one per referral,
+ * more if some targets timeout or return throwaway answers. */
+
+ log_query_info(VERB_QUERY, "processQueryTargets:", &qstate->qinfo);
+ verbose(VERB_ALGO, "processQueryTargets: targetqueries %d, "
+ "currentqueries %d sentcount %d", iq->num_target_queries,
+ iq->num_current_queries, iq->sent_count);
+
+ /* Make sure that we haven't run away */
+ /* FIXME: is this check even necessary? */
+ if(iq->referral_count > MAX_REFERRAL_COUNT) {
+ verbose(VERB_QUERY, "request has exceeded the maximum "
+			"number of referrals with %d", iq->referral_count);
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+ if(iq->sent_count > MAX_SENT_COUNT) {
+ verbose(VERB_QUERY, "request has exceeded the maximum "
+ "number of sends with %d", iq->sent_count);
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+
+ /* Make sure we have a delegation point, otherwise priming failed
+ * or another failure occurred */
+ if(!iq->dp) {
+ verbose(VERB_QUERY, "Failed to get a delegation, giving up");
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+ if(!ie->supports_ipv6)
+ delegpt_no_ipv6(iq->dp);
+ if(!ie->supports_ipv4)
+ delegpt_no_ipv4(iq->dp);
+ delegpt_log(VERB_ALGO, iq->dp);
+
+ if(iq->num_current_queries>0) {
+ /* already busy answering a query, this restart is because
+ * more delegpt addrs became available, wait for existing
+ * query. */
+ verbose(VERB_ALGO, "woke up, but wait for outstanding query");
+ qstate->ext_state[id] = module_wait_reply;
+ return 0;
+ }
+
+ tf_policy = 0;
+ /* < not <=, because although the array is large enough for <=, the
+ * generated query will immediately be discarded due to depth and
+ * that servfail is cached, which is not good as opportunism goes. */
+ if(iq->depth < ie->max_dependency_depth
+ && iq->sent_count < TARGET_FETCH_STOP) {
+ tf_policy = ie->target_fetch_policy[iq->depth];
+ }
+
+ /* if in 0x20 fallback get as many targets as possible */
+ if(iq->caps_fallback) {
+ int extra = 0;
+ size_t naddr, nres, navail;
+ if(!query_for_targets(qstate, iq, ie, id, -1, &extra)) {
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+ iq->num_target_queries += extra;
+ if(iq->num_target_queries > 0) {
+ /* wait to get all targets, we want to try em */
+ verbose(VERB_ALGO, "wait for all targets for fallback");
+ qstate->ext_state[id] = module_wait_reply;
+ return 0;
+ }
+ /* did we do enough fallback queries already? */
+ delegpt_count_addr(iq->dp, &naddr, &nres, &navail);
+ /* the current caps_server is the number of fallbacks sent.
+ * the original query is one that matched too, so we have
+ * caps_server+1 number of matching queries now */
+ if(iq->caps_server+1 >= naddr*3 ||
+ iq->caps_server+1 >= MAX_SENT_COUNT) {
+ /* we're done, process the response */
+ verbose(VERB_ALGO, "0x20 fallback had %d responses "
+ "match for %d wanted, done.",
+ (int)iq->caps_server+1, (int)naddr*3);
+ iq->caps_fallback = 0;
+ iter_dec_attempts(iq->dp, 3); /* space for fallback */
+ iq->num_current_queries++; /* RespState decrements it*/
+ iq->referral_count++; /* make sure we don't loop */
+ iq->sent_count = 0;
+ iq->state = QUERY_RESP_STATE;
+ return 1;
+ }
+ verbose(VERB_ALGO, "0x20 fallback number %d",
+ (int)iq->caps_server);
+
+ /* if there is a policy to fetch missing targets
+ * opportunistically, do it. we rely on the fact that once a
+ * query (or queries) for a missing name have been issued,
+ * they will not show up again. */
+ } else if(tf_policy != 0) {
+ int extra = 0;
+ verbose(VERB_ALGO, "attempt to get extra %d targets",
+ tf_policy);
+ (void)query_for_targets(qstate, iq, ie, id, tf_policy, &extra);
+ /* errors ignored, these targets are not strictly necessary for
+ * this result, we do not have to reply with SERVFAIL */
+ iq->num_target_queries += extra;
+ }
+
+ /* Add the current set of unused targets to our queue. */
+ delegpt_add_unused_targets(iq->dp);
+
+ /* Select the next usable target, filtering out unsuitable targets. */
+ target = iter_server_selection(ie, qstate->env, iq->dp,
+ iq->dp->name, iq->dp->namelen, iq->qchase.qtype,
+ &iq->dnssec_lame_query, &iq->chase_to_rd,
+ iq->num_target_queries, qstate->blacklist);
+
+ /* If no usable target was selected... */
+ if(!target) {
+ /* Here we distinguish between three states: generate a new
+ * target query, just wait, or quit (with a SERVFAIL).
+ * We have the following information: number of active
+ * target queries, number of active current queries,
+ * the presence of missing targets at this delegation
+ * point, and the given query target policy. */
+
+ /* Check for the wait condition. If this is true, then
+ * an action must be taken. */
+ if(iq->num_target_queries==0 && iq->num_current_queries==0) {
+ /* If there is nothing to wait for, then we need
+ * to distinguish between generating (a) new target
+ * query, or failing. */
+ if(delegpt_count_missing_targets(iq->dp) > 0) {
+ int qs = 0;
+ verbose(VERB_ALGO, "querying for next "
+ "missing target");
+ if(!query_for_targets(qstate, iq, ie, id,
+ 1, &qs)) {
+ return error_response(qstate, id,
+ LDNS_RCODE_SERVFAIL);
+ }
+ if(qs == 0 &&
+ delegpt_count_missing_targets(iq->dp) == 0){
+ /* it looked like there were missing
+ * targets, but they did not turn up.
+ * Try the bad choices again (if any),
+ * when we get back here missing==0,
+ * so this is not a loop. */
+ return 1;
+ }
+ iq->num_target_queries += qs;
+ }
+ /* Since a target query might have been made, we
+ * need to check again. */
+ if(iq->num_target_queries == 0) {
+ return processLastResort(qstate, iq, ie, id);
+ }
+ }
+
+ /* otherwise, we have no current targets, so submerge
+ * until one of the target or direct queries return. */
+ if(iq->num_target_queries>0 && iq->num_current_queries>0) {
+ verbose(VERB_ALGO, "no current targets -- waiting "
+ "for %d targets to resolve or %d outstanding"
+ " queries to respond", iq->num_target_queries,
+ iq->num_current_queries);
+ qstate->ext_state[id] = module_wait_reply;
+ } else if(iq->num_target_queries>0) {
+ verbose(VERB_ALGO, "no current targets -- waiting "
+ "for %d targets to resolve.",
+ iq->num_target_queries);
+ qstate->ext_state[id] = module_wait_subquery;
+ } else {
+ verbose(VERB_ALGO, "no current targets -- waiting "
+ "for %d outstanding queries to respond.",
+ iq->num_current_queries);
+ qstate->ext_state[id] = module_wait_reply;
+ }
+ return 0;
+ }
+
+ /* We have a valid target. */
+ if(verbosity >= VERB_QUERY) {
+ log_query_info(VERB_QUERY, "sending query:", &iq->qchase);
+ log_name_addr(VERB_QUERY, "sending to target:", iq->dp->name,
+ &target->addr, target->addrlen);
+ verbose(VERB_ALGO, "dnssec status: %s%s",
+ iq->dnssec_expected?"expected": "not expected",
+ iq->dnssec_lame_query?" but lame_query anyway": "");
+ }
+ fptr_ok(fptr_whitelist_modenv_send_query(qstate->env->send_query));
+ outq = (*qstate->env->send_query)(
+ iq->qchase.qname, iq->qchase.qname_len,
+ iq->qchase.qtype, iq->qchase.qclass,
+ iq->chase_flags | (iq->chase_to_rd?BIT_RD:0), EDNS_DO|BIT_CD,
+ iq->dnssec_expected, &target->addr, target->addrlen,
+ iq->dp->name, iq->dp->namelen, qstate);
+ if(!outq) {
+ log_addr(VERB_DETAIL, "error sending query to auth server",
+ &target->addr, target->addrlen);
+ return next_state(iq, QUERYTARGETS_STATE);
+ }
+ outbound_list_insert(&iq->outlist, outq);
+ iq->num_current_queries++;
+ iq->sent_count++;
+ qstate->ext_state[id] = module_wait_reply;
+
+ return 0;
+}
+
+/** find NS rrset in given list */
+static struct ub_packed_rrset_key*
+find_NS(struct reply_info* rep, size_t from, size_t to)
+{
+ size_t i;
+ for(i=from; i<to; i++) {
+ if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NS)
+ return rep->rrsets[i];
+ }
+ return NULL;
+}
+
+
+/**
+ * Process the query response. All queries end up at this state first. This
+ * process generally consists of analyzing the response and routing the
+ * event to the next state (either bouncing it back to a request state, or
+ * terminating the processing for this event).
+ *
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param id: module id.
+ * @return true if the event requires more immediate processing, false if
+ * not. This is generally only true when forwarding the request to
+ * the final state (i.e., on answer).
+ */
+static int
+processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
+ int id)
+{
+ int dnsseclame = 0;
+ enum response_type type;
+ iq->num_current_queries--;
+ if(iq->response == NULL) {
+ iq->chase_to_rd = 0;
+ iq->dnssec_lame_query = 0;
+ verbose(VERB_ALGO, "query response was timeout");
+ return next_state(iq, QUERYTARGETS_STATE);
+ }
+ type = response_type_from_server(
+ (int)((iq->chase_flags&BIT_RD) || iq->chase_to_rd),
+ iq->response, &iq->qchase, iq->dp);
+ iq->chase_to_rd = 0;
+ if(type == RESPONSE_TYPE_REFERRAL && (iq->chase_flags&BIT_RD)) {
+ /* When forwarding (RD bit is set), we handle referrals
+ * differently. No queries should be sent elsewhere */
+ type = RESPONSE_TYPE_ANSWER;
+ }
+ if(iq->dnssec_expected && !iq->dnssec_lame_query &&
+ !(iq->chase_flags&BIT_RD)
+ && type != RESPONSE_TYPE_LAME
+ && type != RESPONSE_TYPE_REC_LAME
+ && type != RESPONSE_TYPE_THROWAWAY
+ && type != RESPONSE_TYPE_UNTYPED) {
+ /* a possible answer, see if it is missing DNSSEC */
+		/* but not when forwarding, so we don't mark the forwarder lame */
+		/* also make sure the answer is from the zone we expected,
+		 * otherwise (parent and child zones on the same server) we
+		 * might mark the server/zone lame inappropriately */
+ if(!iter_msg_has_dnssec(iq->response) &&
+ iter_msg_from_zone(iq->response, iq->dp, type,
+ iq->qchase.qclass)) {
+ type = RESPONSE_TYPE_LAME;
+ dnsseclame = 1;
+ }
+ } else iq->dnssec_lame_query = 0;
+ /* see if referral brings us close to the target */
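+	/* e.g. while resolving www.example.com at the com servers, an NS set
+	 * for example.com is acceptable (strictly below the delegation point
+	 * and at or above the query name); an NS set for com itself, or for
+	 * an unrelated name, makes the referral a throwaway. */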
+ if(type == RESPONSE_TYPE_REFERRAL) {
+ struct ub_packed_rrset_key* ns = find_NS(
+ iq->response->rep, iq->response->rep->an_numrrsets,
+ iq->response->rep->an_numrrsets
+ + iq->response->rep->ns_numrrsets);
+ if(!ns) ns = find_NS(iq->response->rep, 0,
+ iq->response->rep->an_numrrsets);
+ if(!ns || !dname_strict_subdomain_c(ns->rk.dname, iq->dp->name)
+ || !dname_subdomain_c(iq->qchase.qname, ns->rk.dname)){
+ verbose(VERB_ALGO, "bad referral, throwaway");
+ type = RESPONSE_TYPE_THROWAWAY;
+ } else
+ iter_scrub_ds(iq->response, ns, iq->dp->name);
+ } else iter_scrub_ds(iq->response, NULL, NULL);
+
+ /* handle each of the type cases */
+ if(type == RESPONSE_TYPE_ANSWER) {
+ /* ANSWER type responses terminate the query algorithm,
+		 * so they are sent on their way. */
+ if(verbosity >= VERB_DETAIL) {
+ verbose(VERB_DETAIL, "query response was %s",
+ FLAGS_GET_RCODE(iq->response->rep->flags)
+ ==LDNS_RCODE_NXDOMAIN?"NXDOMAIN ANSWER":
+ (iq->response->rep->an_numrrsets?"ANSWER":
+ "nodata ANSWER"));
+ }
+ if(!iter_dns_store(qstate->env, &iq->response->qinfo,
+ iq->response->rep, 0, qstate->prefetch_leeway,
+ qstate->region))
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ /* close down outstanding requests to be discarded */
+ outbound_list_clear(&iq->outlist);
+ iq->num_current_queries = 0;
+ fptr_ok(fptr_whitelist_modenv_detach_subs(
+ qstate->env->detach_subs));
+ (*qstate->env->detach_subs)(qstate);
+ iq->num_target_queries = 0;
+ if(qstate->reply)
+ sock_list_insert(&qstate->reply_origin,
+ &qstate->reply->addr, qstate->reply->addrlen,
+ qstate->region);
+ return final_state(iq);
+ } else if(type == RESPONSE_TYPE_REFERRAL) {
+ /* REFERRAL type responses get a reset of the
+ * delegation point, and back to the QUERYTARGETS_STATE. */
+ verbose(VERB_DETAIL, "query response was REFERRAL");
+
+ /* if hardened, only store referral if we asked for it */
+ if(!qstate->env->cfg->harden_referral_path ||
+ ( qstate->qinfo.qtype == LDNS_RR_TYPE_NS
+ && (qstate->query_flags&BIT_RD)
+ && !(qstate->query_flags&BIT_CD)
+ /* we know that all other NS rrsets are scrubbed
+ * away, thus on referral only one is left.
+ * see if that equals the query name... */
+ && ( /* auth section, but sometimes in answer section*/
+ reply_find_rrset_section_ns(iq->response->rep,
+ iq->qchase.qname, iq->qchase.qname_len,
+ LDNS_RR_TYPE_NS, iq->qchase.qclass)
+ || reply_find_rrset_section_an(iq->response->rep,
+ iq->qchase.qname, iq->qchase.qname_len,
+ LDNS_RR_TYPE_NS, iq->qchase.qclass)
+ )
+ )) {
+ /* Store the referral under the current query */
+			/* no prefetch-leeway, since it's not the answer */
+ if(!iter_dns_store(qstate->env, &iq->response->qinfo,
+ iq->response->rep, 1, 0, NULL))
+ return error_response(qstate, id,
+ LDNS_RCODE_SERVFAIL);
+ if(iq->store_parent_NS)
+ iter_store_parentside_NS(qstate->env,
+ iq->response->rep);
+ if(qstate->env->neg_cache)
+ val_neg_addreferral(qstate->env->neg_cache,
+ iq->response->rep, iq->dp->name);
+ }
+ /* store parent-side-in-zone-glue, if directly queried for */
+ if(iq->query_for_pside_glue && !iq->pside_glue) {
+ iq->pside_glue = reply_find_rrset(iq->response->rep,
+ iq->qchase.qname, iq->qchase.qname_len,
+ iq->qchase.qtype, iq->qchase.qclass);
+ if(iq->pside_glue) {
+ log_rrset_key(VERB_ALGO, "found parent-side "
+ "glue", iq->pside_glue);
+ iter_store_parentside_rrset(qstate->env,
+ iq->pside_glue);
+ }
+ }
+
+ /* Reset the event state, setting the current delegation
+ * point to the referral. */
+ iq->deleg_msg = iq->response;
+ iq->dp = delegpt_from_message(iq->response, qstate->region);
+ if(!iq->dp)
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ if(!cache_fill_missing(qstate->env, iq->qchase.qclass,
+ qstate->region, iq->dp))
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ if(iq->store_parent_NS && query_dname_compare(iq->dp->name,
+ iq->store_parent_NS->name) == 0)
+ iter_merge_retry_counts(iq->dp, iq->store_parent_NS);
+ delegpt_log(VERB_ALGO, iq->dp);
+ /* Count this as a referral. */
+ iq->referral_count++;
+ iq->sent_count = 0;
+ /* see if the next dp is a trust anchor, or a DS was sent
+ * along, indicating dnssec is expected for next zone */
+ iq->dnssec_expected = iter_indicates_dnssec(qstate->env,
+ iq->dp, iq->response, iq->qchase.qclass);
+ /* if dnssec, validating then also fetch the key for the DS */
+ if(iq->dnssec_expected && qstate->env->cfg->prefetch_key &&
+ !(qstate->query_flags&BIT_CD))
+ generate_dnskey_prefetch(qstate, iq, id);
+
+ /* spawn off NS and addr to auth servers for the NS we just
+ * got in the referral. This gets authoritative answer
+ * (answer section trust level) rrset.
+ * right after, we detach the subs, answer goes to cache. */
+ if(qstate->env->cfg->harden_referral_path)
+ generate_ns_check(qstate, iq, id);
+
+ /* stop current outstanding queries.
+ * FIXME: should the outstanding queries be waited for and
+ * handled? Say by a subquery that inherits the outbound_entry.
+ */
+ outbound_list_clear(&iq->outlist);
+ iq->num_current_queries = 0;
+ fptr_ok(fptr_whitelist_modenv_detach_subs(
+ qstate->env->detach_subs));
+ (*qstate->env->detach_subs)(qstate);
+ iq->num_target_queries = 0;
+ verbose(VERB_ALGO, "cleared outbound list for next round");
+ return next_state(iq, QUERYTARGETS_STATE);
+ } else if(type == RESPONSE_TYPE_CNAME) {
+ uint8_t* sname = NULL;
+ size_t snamelen = 0;
+ /* CNAME type responses get a query restart (i.e., get a
+ * reset of the query state and go back to INIT_REQUEST_STATE).
+ */
+ verbose(VERB_DETAIL, "query response was CNAME");
+ if(verbosity >= VERB_ALGO)
+ log_dns_msg("cname msg", &iq->response->qinfo,
+ iq->response->rep);
+ /* Process the CNAME response. */
+ if(!handle_cname_response(qstate, iq, iq->response,
+ &sname, &snamelen))
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ /* cache the CNAME response under the current query */
+		/* NOTE: set referral=1, so that rrsets get stored but not
+		 * the partial query answer (CNAME only). */
+		/* prefetch-leeway applied because this updates answer parts */
+ if(!iter_dns_store(qstate->env, &iq->response->qinfo,
+ iq->response->rep, 1, qstate->prefetch_leeway, NULL))
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ /* set the current request's qname to the new value. */
+ iq->qchase.qname = sname;
+ iq->qchase.qname_len = snamelen;
+ /* Clear the query state, since this is a query restart. */
+ iq->deleg_msg = NULL;
+ iq->dp = NULL;
+ /* Note the query restart. */
+ iq->query_restart_count++;
+ iq->sent_count = 0;
+
+ /* stop current outstanding queries.
+ * FIXME: should the outstanding queries be waited for and
+ * handled? Say by a subquery that inherits the outbound_entry.
+ */
+ outbound_list_clear(&iq->outlist);
+ iq->num_current_queries = 0;
+ fptr_ok(fptr_whitelist_modenv_detach_subs(
+ qstate->env->detach_subs));
+ (*qstate->env->detach_subs)(qstate);
+ iq->num_target_queries = 0;
+ if(qstate->reply)
+ sock_list_insert(&qstate->reply_origin,
+ &qstate->reply->addr, qstate->reply->addrlen,
+ qstate->region);
+ verbose(VERB_ALGO, "cleared outbound list for query restart");
+ /* go to INIT_REQUEST_STATE for new qname. */
+ return next_state(iq, INIT_REQUEST_STATE);
+ } else if(type == RESPONSE_TYPE_LAME) {
+ /* Cache the LAMEness. */
+ verbose(VERB_DETAIL, "query response was %sLAME",
+ dnsseclame?"DNSSEC ":"");
+ if(!dname_subdomain_c(iq->qchase.qname, iq->dp->name)) {
+ log_err("mark lame: mismatch in qname and dpname");
+ /* throwaway this reply below */
+ } else if(qstate->reply) {
+ /* need addr for lameness cache, but we may have
+ * gotten this from cache, so test to be sure */
+ if(!infra_set_lame(qstate->env->infra_cache,
+ &qstate->reply->addr, qstate->reply->addrlen,
+ iq->dp->name, iq->dp->namelen,
+ *qstate->env->now, dnsseclame, 0,
+ iq->qchase.qtype))
+ log_err("mark host lame: out of memory");
+ } else log_err("%slame response from cache",
+ dnsseclame?"DNSSEC ":"");
+ } else if(type == RESPONSE_TYPE_REC_LAME) {
+ /* Cache the LAMEness. */
+ verbose(VERB_DETAIL, "query response REC_LAME: "
+ "recursive but not authoritative server");
+ if(!dname_subdomain_c(iq->qchase.qname, iq->dp->name)) {
+ log_err("mark rec_lame: mismatch in qname and dpname");
+ /* throwaway this reply below */
+ } else if(qstate->reply) {
+ /* need addr for lameness cache, but we may have
+ * gotten this from cache, so test to be sure */
+ verbose(VERB_DETAIL, "mark as REC_LAME");
+ if(!infra_set_lame(qstate->env->infra_cache,
+ &qstate->reply->addr, qstate->reply->addrlen,
+ iq->dp->name, iq->dp->namelen,
+ *qstate->env->now, 0, 1, iq->qchase.qtype))
+ log_err("mark host lame: out of memory");
+ }
+ } else if(type == RESPONSE_TYPE_THROWAWAY) {
+ /* LAME and THROWAWAY responses are handled the same way.
+ * In this case, the event is just sent directly back to
+ * the QUERYTARGETS_STATE without resetting anything,
+ * because, clearly, the next target must be tried. */
+ verbose(VERB_DETAIL, "query response was THROWAWAY");
+ } else {
+ log_warn("A query response came back with an unknown type: %d",
+ (int)type);
+ }
+
+ /* LAME, THROWAWAY and "unknown" all end up here.
+ * Recycle to the QUERYTARGETS state to hopefully try a
+ * different target. */
+ return next_state(iq, QUERYTARGETS_STATE);
+}
+
+/**
+ * Return priming query results to interested super querystates.
+ *
+ * Sets the delegation point and delegation message (not nonRD queries).
+ * This is a callback from walk_supers.
+ *
+ * @param qstate: priming query state that finished.
+ * @param id: module id.
+ * @param forq: the qstate for which priming has been done.
+ */
+static void
+prime_supers(struct module_qstate* qstate, int id, struct module_qstate* forq)
+{
+ struct iter_qstate* foriq = (struct iter_qstate*)forq->minfo[id];
+ struct delegpt* dp = NULL;
+
+ log_assert(qstate->is_priming || foriq->wait_priming_stub);
+ log_assert(qstate->return_rcode == LDNS_RCODE_NOERROR);
+ /* Convert our response to a delegation point */
+ dp = delegpt_from_message(qstate->return_msg, forq->region);
+ if(!dp) {
+		/* if there is no convertible delegation point, then
+ * the ANSWER type was (presumably) a negative answer. */
+ verbose(VERB_ALGO, "prime response was not a positive "
+ "ANSWER; failing");
+ foriq->dp = NULL;
+ foriq->state = QUERYTARGETS_STATE;
+ return;
+ }
+
+ log_query_info(VERB_DETAIL, "priming successful for", &qstate->qinfo);
+ delegpt_log(VERB_ALGO, dp);
+ foriq->dp = dp;
+ foriq->deleg_msg = dns_copy_msg(qstate->return_msg, forq->region);
+ if(!foriq->deleg_msg) {
+ log_err("copy prime response: out of memory");
+ foriq->dp = NULL;
+ foriq->state = QUERYTARGETS_STATE;
+ return;
+ }
+
+ /* root priming responses go to init stage 2, priming stub
+	 * responses go to stage 3. */
+ if(foriq->wait_priming_stub) {
+ foriq->state = INIT_REQUEST_3_STATE;
+ foriq->wait_priming_stub = 0;
+ } else foriq->state = INIT_REQUEST_2_STATE;
+ /* because we are finished, the parent will be reactivated */
+}
+
+/**
+ * This handles the response to a priming query. This is used to handle both
+ * root and stub priming responses. This is basically the equivalent of the
+ * QUERY_RESP_STATE, but will not handle CNAME responses and will treat
+ * REFERRALs as ANSWERS. It will also update and reactivate the originating
+ * event.
+ *
+ * @param qstate: query state.
+ * @param id: module id.
+ * @return true if the event needs more immediate processing, false if not.
+ * This state always returns false.
+ */
+static int
+processPrimeResponse(struct module_qstate* qstate, int id)
+{
+ struct iter_qstate* iq = (struct iter_qstate*)qstate->minfo[id];
+ enum response_type type;
+ iq->response->rep->flags &= ~(BIT_RD|BIT_RA); /* ignore rec-lame */
+ type = response_type_from_server(
+ (int)((iq->chase_flags&BIT_RD) || iq->chase_to_rd),
+ iq->response, &iq->qchase, iq->dp);
+ if(type == RESPONSE_TYPE_ANSWER) {
+ qstate->return_rcode = LDNS_RCODE_NOERROR;
+ qstate->return_msg = iq->response;
+ } else {
+ qstate->return_rcode = LDNS_RCODE_SERVFAIL;
+ qstate->return_msg = NULL;
+ }
+
+ /* validate the root or stub after priming (if enabled).
+ * This is the same query as the prime query, but with validation.
+ * Now that we are primed, the additional queries that validation
+ * may need can be resolved, such as DLV. */
+ if(qstate->env->cfg->harden_referral_path) {
+ struct module_qstate* subq = NULL;
+ log_nametypeclass(VERB_ALGO, "schedule prime validation",
+ qstate->qinfo.qname, qstate->qinfo.qtype,
+ qstate->qinfo.qclass);
+ if(!generate_sub_request(qstate->qinfo.qname,
+ qstate->qinfo.qname_len, qstate->qinfo.qtype,
+ qstate->qinfo.qclass, qstate, id, iq,
+ INIT_REQUEST_STATE, FINISHED_STATE, &subq, 1)) {
+ verbose(VERB_ALGO, "could not generate prime check");
+ }
+ generate_a_aaaa_check(qstate, iq, id);
+ }
+
+ /* This event is finished. */
+ qstate->ext_state[id] = module_finished;
+ return 0;
+}
+
+/**
+ * Do final processing on responses to target queries. Events reach this
+ * state after the iterative resolution algorithm terminates. This state is
+ * responsible for reactivating the original event, and housekeeping related
+ * to received target responses (caching, updating the current delegation
+ * point, etc).
+ * Callback from walk_supers for every super state that is interested in
+ * the results from this query.
+ *
+ * @param qstate: query state.
+ * @param id: module id.
+ * @param forq: super query state.
+ */
+static void
+processTargetResponse(struct module_qstate* qstate, int id,
+ struct module_qstate* forq)
+{
+ struct iter_qstate* iq = (struct iter_qstate*)qstate->minfo[id];
+ struct iter_qstate* foriq = (struct iter_qstate*)forq->minfo[id];
+ struct ub_packed_rrset_key* rrset;
+ struct delegpt_ns* dpns;
+ log_assert(qstate->return_rcode == LDNS_RCODE_NOERROR);
+
+ foriq->state = QUERYTARGETS_STATE;
+ log_query_info(VERB_ALGO, "processTargetResponse", &qstate->qinfo);
+ log_query_info(VERB_ALGO, "processTargetResponse super", &forq->qinfo);
+
+ /* check to see if parent event is still interested (in orig name). */
+ if(!foriq->dp) {
+ verbose(VERB_ALGO, "subq: parent not interested, was reset");
+ return; /* not interested anymore */
+ }
+ dpns = delegpt_find_ns(foriq->dp, qstate->qinfo.qname,
+ qstate->qinfo.qname_len);
+ if(!dpns) {
+ /* If not interested, just stop processing this event */
+ verbose(VERB_ALGO, "subq: parent not interested anymore");
+		/* could be because the parent was jostled out of the cache,
+		   and a new identical query arrived that does not want it */
+ return;
+ }
+
+ /* Tell the originating event that this target query has finished
+ * (regardless if it succeeded or not). */
+ foriq->num_target_queries--;
+
+ /* if iq->query_for_pside_glue then add the pside_glue (marked lame) */
+ if(iq->pside_glue) {
+ /* if the pside_glue is NULL, then it could not be found,
+ * the done_pside is already set when created and a cache
+ * entry created in processFinished so nothing to do here */
+ log_rrset_key(VERB_ALGO, "add parentside glue to dp",
+ iq->pside_glue);
+ if(!delegpt_add_rrset(foriq->dp, forq->region,
+ iq->pside_glue, 1))
+ log_err("out of memory adding pside glue");
+ }
+
+ /* This response is relevant to the current query, so we
+ * add (attempt to add, anyway) this target(s) and reactivate
+ * the original event.
+ * NOTE: we could only look for the AnswerRRset if the
+ * response type was ANSWER. */
+ rrset = reply_find_answer_rrset(&iq->qchase, qstate->return_msg->rep);
+ if(rrset) {
+ /* if CNAMEs have been followed - add new NS to delegpt. */
+		/* BTW, RFC 2181 says NS targets should not be CNAMEs; be robust anyway. */
+ if(!delegpt_find_ns(foriq->dp, rrset->rk.dname,
+ rrset->rk.dname_len)) {
+ /* if dpns->lame then set newcname ns lame too */
+ if(!delegpt_add_ns(foriq->dp, forq->region,
+ rrset->rk.dname, (int)dpns->lame))
+ log_err("out of memory adding cnamed-ns");
+ }
+ /* if dpns->lame then set the address(es) lame too */
+ if(!delegpt_add_rrset(foriq->dp, forq->region, rrset,
+ (int)dpns->lame))
+ log_err("out of memory adding targets");
+ verbose(VERB_ALGO, "added target response");
+ delegpt_log(VERB_ALGO, foriq->dp);
+ } else {
+ verbose(VERB_ALGO, "iterator TargetResponse failed");
+ dpns->resolved = 1; /* fail the target */
+ }
+}
+
+/**
+ * Process response for qclass=ANY queries for a particular class.
+ * Append to result or error-exit.
+ *
+ * @param qstate: query state.
+ * @param id: module id.
+ * @param forq: super query state.
+ */
+static void
+processClassResponse(struct module_qstate* qstate, int id,
+ struct module_qstate* forq)
+{
+ struct iter_qstate* foriq = (struct iter_qstate*)forq->minfo[id];
+ struct dns_msg* from = qstate->return_msg;
+ log_query_info(VERB_ALGO, "processClassResponse", &qstate->qinfo);
+ log_query_info(VERB_ALGO, "processClassResponse super", &forq->qinfo);
+ if(qstate->return_rcode != LDNS_RCODE_NOERROR) {
+ /* cause servfail for qclass ANY query */
+ foriq->response = NULL;
+ foriq->state = FINISHED_STATE;
+ return;
+ }
+ /* append result */
+ if(!foriq->response) {
+ /* allocate the response: copy RCODE, sec_state */
+ foriq->response = dns_copy_msg(from, forq->region);
+ if(!foriq->response) {
+ log_err("malloc failed for qclass ANY response");
+ foriq->state = FINISHED_STATE;
+ return;
+ }
+ foriq->response->qinfo.qclass = forq->qinfo.qclass;
+ /* qclass ANY does not receive the AA flag on replies */
+ foriq->response->rep->authoritative = 0;
+ } else {
+ struct dns_msg* to = foriq->response;
+ /* add _from_ this response _to_ existing collection */
+ /* if there are records, copy RCODE */
+ /* lower sec_state if this message is lower */
+ if(from->rep->rrset_count != 0) {
+ size_t n = from->rep->rrset_count+to->rep->rrset_count;
+ struct ub_packed_rrset_key** dest, **d;
+ /* copy appropriate rcode */
+ to->rep->flags = from->rep->flags;
+ /* copy rrsets */
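+			/* regional memory cannot be reallocated, so build a
+			 * new array large enough for both messages and fill
+			 * it section by section (AN, NS, AR) */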
+ dest = regional_alloc(forq->region, sizeof(dest[0])*n);
+ if(!dest) {
+ log_err("malloc failed in collect ANY");
+ foriq->state = FINISHED_STATE;
+ return;
+ }
+ d = dest;
+ /* copy AN */
+ memcpy(dest, to->rep->rrsets, to->rep->an_numrrsets
+ * sizeof(dest[0]));
+ dest += to->rep->an_numrrsets;
+ memcpy(dest, from->rep->rrsets, from->rep->an_numrrsets
+ * sizeof(dest[0]));
+ dest += from->rep->an_numrrsets;
+ /* copy NS */
+ memcpy(dest, to->rep->rrsets+to->rep->an_numrrsets,
+ to->rep->ns_numrrsets * sizeof(dest[0]));
+ dest += to->rep->ns_numrrsets;
+ memcpy(dest, from->rep->rrsets+from->rep->an_numrrsets,
+ from->rep->ns_numrrsets * sizeof(dest[0]));
+ dest += from->rep->ns_numrrsets;
+ /* copy AR */
+ memcpy(dest, to->rep->rrsets+to->rep->an_numrrsets+
+ to->rep->ns_numrrsets,
+ to->rep->ar_numrrsets * sizeof(dest[0]));
+ dest += to->rep->ar_numrrsets;
+ memcpy(dest, from->rep->rrsets+from->rep->an_numrrsets+
+ from->rep->ns_numrrsets,
+ from->rep->ar_numrrsets * sizeof(dest[0]));
+ /* update counts */
+ to->rep->rrsets = d;
+ to->rep->an_numrrsets += from->rep->an_numrrsets;
+ to->rep->ns_numrrsets += from->rep->ns_numrrsets;
+ to->rep->ar_numrrsets += from->rep->ar_numrrsets;
+ to->rep->rrset_count = n;
+ }
+ if(from->rep->security < to->rep->security) /* lowest sec */
+ to->rep->security = from->rep->security;
+ if(from->rep->qdcount != 0) /* insert qd if appropriate */
+ to->rep->qdcount = from->rep->qdcount;
+ if(from->rep->ttl < to->rep->ttl) /* use smallest TTL */
+ to->rep->ttl = from->rep->ttl;
+ if(from->rep->prefetch_ttl < to->rep->prefetch_ttl)
+ to->rep->prefetch_ttl = from->rep->prefetch_ttl;
+ }
+ /* are we done? */
+ foriq->num_current_queries --;
+ if(foriq->num_current_queries == 0)
+ foriq->state = FINISHED_STATE;
+}
+
+/**
+ * Collect class ANY responses and make them into one response. This
+ * state is started and it creates queries for all classes (that have
+ * root hints). The answers are then collected.
+ *
+ * @param qstate: query state.
+ * @param id: module id.
+ * @return true if the event needs more immediate processing, false if not.
+ */
+static int
+processCollectClass(struct module_qstate* qstate, int id)
+{
+ struct iter_qstate* iq = (struct iter_qstate*)qstate->minfo[id];
+ struct iter_env* ie = (struct iter_env*)qstate->env->modinfo[id];
+ struct module_qstate* subq;
+ /* If qchase.qclass == 0 then send out queries for all classes.
+	 * Otherwise, do nothing (wait for all answers to arrive and for
+	 * processClassResponse to put them together); that moves us
+	 * towards the Finished state when done. */
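+	/* The loop below relies on iter_get_next_root() to yield, in turn,
+	 * each class that has root hints or forwards configured (advancing
+	 * c), so one subquery is spawned per configured class. */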
+ if(iq->qchase.qclass == 0) {
+ uint16_t c = 0;
+ iq->qchase.qclass = LDNS_RR_CLASS_ANY;
+ while(iter_get_next_root(ie->hints, qstate->env->fwds, &c)) {
+ /* generate query for this class */
+ log_nametypeclass(VERB_ALGO, "spawn collect query",
+ qstate->qinfo.qname, qstate->qinfo.qtype, c);
+ if(!generate_sub_request(qstate->qinfo.qname,
+ qstate->qinfo.qname_len, qstate->qinfo.qtype,
+ c, qstate, id, iq, INIT_REQUEST_STATE,
+ FINISHED_STATE, &subq,
+ (int)!(qstate->query_flags&BIT_CD))) {
+ return error_response(qstate, id,
+ LDNS_RCODE_SERVFAIL);
+ }
+ /* ignore subq, no special init required */
+ iq->num_current_queries ++;
+ if(c == 0xffff)
+ break;
+ else c++;
+ }
+ /* if no roots are configured at all, return */
+ if(iq->num_current_queries == 0) {
+ verbose(VERB_ALGO, "No root hints or fwds, giving up "
+ "on qclass ANY");
+ return error_response(qstate, id, LDNS_RCODE_REFUSED);
+ }
+ /* return false, wait for queries to return */
+ }
+ /* if woke up here because of an answer, wait for more answers */
+ return 0;
+}
+
+/**
+ * This handles the final state for first-tier responses (i.e., responses to
+ * externally generated queries).
+ *
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param id: module id.
+ * @return true if the event needs more processing, false if not. Since this
+ * is the final state for an event, it always returns false.
+ */
+static int
+processFinished(struct module_qstate* qstate, struct iter_qstate* iq,
+ int id)
+{
+ log_query_info(VERB_QUERY, "finishing processing for",
+ &qstate->qinfo);
+
+ /* store negative cache element for parent side glue. */
+ if(iq->query_for_pside_glue && !iq->pside_glue)
+ iter_store_parentside_neg(qstate->env, &qstate->qinfo,
+ iq->deleg_msg?iq->deleg_msg->rep:
+ (iq->response?iq->response->rep:NULL));
+ if(!iq->response) {
+ verbose(VERB_ALGO, "No response is set, servfail");
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+
+ /* Make sure that the RA flag is set (since the presence of
+ * this module means that recursion is available) */
+ iq->response->rep->flags |= BIT_RA;
+
+ /* Clear the AA flag */
+ /* FIXME: does this action go here or in some other module? */
+ iq->response->rep->flags &= ~BIT_AA;
+
+ /* make sure QR flag is on */
+ iq->response->rep->flags |= BIT_QR;
+
+ /* we have finished processing this query */
+ qstate->ext_state[id] = module_finished;
+
+ /* TODO: we are using a private TTL, trim the response. */
+ /* if (mPrivateTTL > 0){IterUtils.setPrivateTTL(resp, mPrivateTTL); } */
+
+ /* prepend any items we have accumulated */
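+	/* typically the CNAME/DNAME chain that was followed during query
+	 * restarts, so the final answer carries the complete chain */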
+ if(iq->an_prepend_list || iq->ns_prepend_list) {
+ if(!iter_prepend(iq, iq->response, qstate->region)) {
+ log_err("prepend rrsets: out of memory");
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+ /* reset the query name back */
+ iq->response->qinfo = qstate->qinfo;
+ /* the security state depends on the combination */
+ iq->response->rep->security = sec_status_unchecked;
+ /* store message with the finished prepended items,
+ * but only if we did recursion. The nonrecursion referral
+ * from cache does not need to be stored in the msg cache. */
+ if(qstate->query_flags&BIT_RD) {
+ if(!iter_dns_store(qstate->env, &qstate->qinfo,
+ iq->response->rep, 0, qstate->prefetch_leeway,
+ qstate->region))
+ return error_response(qstate, id,
+ LDNS_RCODE_SERVFAIL);
+ }
+ }
+ qstate->return_rcode = LDNS_RCODE_NOERROR;
+ qstate->return_msg = iq->response;
+ return 0;
+}
+
+/*
+ * Return query results to interested super querystates.
+ *
+ * Dispatches to the qclass-ANY collector, error propagation, the priming
+ * handler (which sets the delegation point and delegation message) or the
+ * target-response handler, as appropriate.
+ * This is a callback from walk_supers.
+ *
+ * @param qstate: query state that finished.
+ * @param id: module id.
+ * @param super: the qstate to inform.
+ */
+void
+iter_inform_super(struct module_qstate* qstate, int id,
+ struct module_qstate* super)
+{
+ if(!qstate->is_priming && super->qinfo.qclass == LDNS_RR_CLASS_ANY)
+ processClassResponse(qstate, id, super);
+ else if(qstate->return_rcode != LDNS_RCODE_NOERROR)
+ error_supers(qstate, id, super);
+ else if(qstate->is_priming)
+ prime_supers(qstate, id, super);
+ else processTargetResponse(qstate, id, super);
+}
+
+/**
+ * Handle iterator state.
+ * Handle events. This is the real processing loop for events, responsible
+ * for moving events through the various states. If a processing method
+ * returns true, then it will be advanced to the next state. If false, then
+ * processing will stop.
+ *
+ * @param qstate: query state.
+ * @param ie: iterator shared global environment.
+ * @param iq: iterator query state.
+ * @param id: module id.
+ */
+static void
+iter_handle(struct module_qstate* qstate, struct iter_qstate* iq,
+ struct iter_env* ie, int id)
+{
+ int cont = 1;
+ while(cont) {
+ verbose(VERB_ALGO, "iter_handle processing q with state %s",
+ iter_state_to_string(iq->state));
+ switch(iq->state) {
+ case INIT_REQUEST_STATE:
+ cont = processInitRequest(qstate, iq, ie, id);
+ break;
+ case INIT_REQUEST_2_STATE:
+ cont = processInitRequest2(qstate, iq, ie, id);
+ break;
+ case INIT_REQUEST_3_STATE:
+ cont = processInitRequest3(qstate, iq, id);
+ break;
+ case QUERYTARGETS_STATE:
+ cont = processQueryTargets(qstate, iq, ie, id);
+ break;
+ case QUERY_RESP_STATE:
+ cont = processQueryResponse(qstate, iq, id);
+ break;
+ case PRIME_RESP_STATE:
+ cont = processPrimeResponse(qstate, id);
+ break;
+ case COLLECT_CLASS_STATE:
+ cont = processCollectClass(qstate, id);
+ break;
+ case FINISHED_STATE:
+ cont = processFinished(qstate, iq, id);
+ break;
+ default:
+ log_warn("iterator: invalid state: %d",
+ iq->state);
+ cont = 0;
+ break;
+ }
+ }
+}
+
+/**
+ * This is the primary entry point for processing request events. Note that
+ * this method should only be used by external modules.
+ * @param qstate: query state.
+ * @param ie: iterator shared global environment.
+ * @param iq: iterator query state.
+ * @param id: module id.
+ */
+static void
+process_request(struct module_qstate* qstate, struct iter_qstate* iq,
+ struct iter_env* ie, int id)
+{
+ /* external requests start in the INIT state, and finish using the
+ * FINISHED state. */
+ iq->state = INIT_REQUEST_STATE;
+ iq->final_state = FINISHED_STATE;
+ verbose(VERB_ALGO, "process_request: new external request event");
+ iter_handle(qstate, iq, ie, id);
+}
+
+/** process authoritative server reply */
+static void
+process_response(struct module_qstate* qstate, struct iter_qstate* iq,
+ struct iter_env* ie, int id, struct outbound_entry* outbound,
+ enum module_ev event)
+{
+ struct msg_parse* prs;
+ struct edns_data edns;
+ ldns_buffer* pkt;
+
+ verbose(VERB_ALGO, "process_response: new external response event");
+ iq->response = NULL;
+ iq->state = QUERY_RESP_STATE;
+ if(event == module_event_noreply || event == module_event_error) {
+ goto handle_it;
+ }
+ if( (event != module_event_reply && event != module_event_capsfail)
+ || !qstate->reply) {
+ log_err("Bad event combined with response");
+ outbound_list_remove(&iq->outlist, outbound);
+ (void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ return;
+ }
+
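+	/* The reply below goes through parse, EDNS removal, scrub/sanitize
+	 * and allocation of a dns_msg in the query region before the state
+	 * machine handles it. */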
+ /* parse message */
+ prs = (struct msg_parse*)regional_alloc(qstate->env->scratch,
+ sizeof(struct msg_parse));
+ if(!prs) {
+ log_err("out of memory on incoming message");
+ /* like packet got dropped */
+ goto handle_it;
+ }
+ memset(prs, 0, sizeof(*prs));
+ memset(&edns, 0, sizeof(edns));
+ pkt = qstate->reply->c->buffer;
+ ldns_buffer_set_position(pkt, 0);
+ if(parse_packet(pkt, prs, qstate->env->scratch) != LDNS_RCODE_NOERROR) {
+ verbose(VERB_ALGO, "parse error on reply packet");
+ goto handle_it;
+ }
+ /* edns is not examined, but removed from message to help cache */
+ if(parse_extract_edns(prs, &edns) != LDNS_RCODE_NOERROR)
+ goto handle_it;
+	/* remove CD-bit; we asked for it in case we handle validation ourselves */
+ prs->flags &= ~BIT_CD;
+
+ /* normalize and sanitize: easy to delete items from linked lists */
+ if(!scrub_message(pkt, prs, &iq->qchase, iq->dp->name,
+ qstate->env->scratch, qstate->env, ie))
+ goto handle_it;
+
+ /* allocate response dns_msg in region */
+ iq->response = dns_alloc_msg(pkt, prs, qstate->region);
+ if(!iq->response)
+ goto handle_it;
+ log_query_info(VERB_DETAIL, "response for", &qstate->qinfo);
+ log_name_addr(VERB_DETAIL, "reply from", iq->dp->name,
+ &qstate->reply->addr, qstate->reply->addrlen);
+ if(verbosity >= VERB_ALGO)
+ log_dns_msg("incoming scrubbed packet:", &iq->response->qinfo,
+ iq->response->rep);
+
+ if(event == module_event_capsfail) {
+ if(!iq->caps_fallback) {
+ /* start fallback */
+ iq->caps_fallback = 1;
+ iq->caps_server = 0;
+ iq->caps_reply = iq->response->rep;
+ iq->state = QUERYTARGETS_STATE;
+ iq->num_current_queries--;
+ verbose(VERB_DETAIL, "Capsforid: starting fallback");
+ goto handle_it;
+ } else {
+ /* check if reply is the same, otherwise, fail */
+ if(!reply_equal(iq->response->rep, iq->caps_reply,
+ qstate->env->scratch_buffer)) {
+ verbose(VERB_DETAIL, "Capsforid fallback: "
+ "getting different replies, failed");
+ outbound_list_remove(&iq->outlist, outbound);
+ (void)error_response(qstate, id,
+ LDNS_RCODE_SERVFAIL);
+ return;
+ }
+ /* continue the fallback procedure at next server */
+ iq->caps_server++;
+ iq->state = QUERYTARGETS_STATE;
+ iq->num_current_queries--;
+ verbose(VERB_DETAIL, "Capsforid: reply is equal. "
+ "go to next fallback");
+ goto handle_it;
+ }
+ }
+ iq->caps_fallback = 0; /* if we were in fallback, 0x20 is OK now */
+
+handle_it:
+ outbound_list_remove(&iq->outlist, outbound);
+ iter_handle(qstate, iq, ie, id);
+}
+
+void
+iter_operate(struct module_qstate* qstate, enum module_ev event, int id,
+ struct outbound_entry* outbound)
+{
+ struct iter_env* ie = (struct iter_env*)qstate->env->modinfo[id];
+ struct iter_qstate* iq = (struct iter_qstate*)qstate->minfo[id];
+ verbose(VERB_QUERY, "iterator[module %d] operate: extstate:%s event:%s",
+ id, strextstate(qstate->ext_state[id]), strmodulevent(event));
+ if(iq) log_query_info(VERB_QUERY, "iterator operate: query",
+ &qstate->qinfo);
+ if(iq && qstate->qinfo.qname != iq->qchase.qname)
+ log_query_info(VERB_QUERY, "iterator operate: chased to",
+ &iq->qchase);
+
+ /* perform iterator state machine */
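+	/* dispatch: a new/pass event without per-query state creates it and
+	 * runs the request path; a pass event with state continues the state
+	 * machine; an event with an outbound entry is a server reply; other
+	 * events are errors. */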
+ if((event == module_event_new || event == module_event_pass) &&
+ iq == NULL) {
+ if(!iter_new(qstate, id)) {
+ (void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ return;
+ }
+ iq = (struct iter_qstate*)qstate->minfo[id];
+ process_request(qstate, iq, ie, id);
+ return;
+ }
+ if(iq && event == module_event_pass) {
+ iter_handle(qstate, iq, ie, id);
+ return;
+ }
+ if(iq && outbound) {
+ process_response(qstate, iq, ie, id, outbound, event);
+ return;
+ }
+ if(event == module_event_error) {
+ verbose(VERB_ALGO, "got called with event error, giving up");
+ (void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ return;
+ }
+
+ log_err("bad event for iterator");
+ (void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+}
+
+void
+iter_clear(struct module_qstate* qstate, int id)
+{
+ struct iter_qstate* iq;
+ if(!qstate)
+ return;
+ iq = (struct iter_qstate*)qstate->minfo[id];
+ if(iq) {
+ outbound_list_clear(&iq->outlist);
+ iq->num_current_queries = 0;
+ }
+ qstate->minfo[id] = NULL;
+}
+
+size_t
+iter_get_mem(struct module_env* env, int id)
+{
+ struct iter_env* ie = (struct iter_env*)env->modinfo[id];
+ if(!ie)
+ return 0;
+ return sizeof(*ie) + sizeof(int)*((size_t)ie->max_dependency_depth+1)
+ + hints_get_mem(ie->hints) + donotq_get_mem(ie->donotq)
+ + priv_get_mem(ie->priv);
+}
+
+/**
+ * The iterator function block
+ */
+static struct module_func_block iter_block = {
+ "iterator",
+ &iter_init, &iter_deinit, &iter_operate, &iter_inform_super,
+ &iter_clear, &iter_get_mem
+};
+
+struct module_func_block*
+iter_get_funcblock(void)
+{
+ return &iter_block;
+}
+
+const char*
+iter_state_to_string(enum iter_state state)
+{
+ switch (state)
+ {
+ case INIT_REQUEST_STATE :
+ return "INIT REQUEST STATE";
+ case INIT_REQUEST_2_STATE :
+ return "INIT REQUEST STATE (stage 2)";
+ case INIT_REQUEST_3_STATE:
+ return "INIT REQUEST STATE (stage 3)";
+ case QUERYTARGETS_STATE :
+ return "QUERY TARGETS STATE";
+ case PRIME_RESP_STATE :
+ return "PRIME RESPONSE STATE";
+ case COLLECT_CLASS_STATE :
+ return "COLLECT CLASS STATE";
+ case QUERY_RESP_STATE :
+ return "QUERY RESPONSE STATE";
+ case FINISHED_STATE :
+ return "FINISHED RESPONSE STATE";
+ default :
+ return "UNKNOWN ITER STATE";
+ }
+}
+
+int
+iter_state_is_responsestate(enum iter_state s)
+{
+ switch(s) {
+ case INIT_REQUEST_STATE :
+ case INIT_REQUEST_2_STATE :
+ case INIT_REQUEST_3_STATE :
+ case QUERYTARGETS_STATE :
+ case COLLECT_CLASS_STATE :
+ return 0;
+ default:
+ break;
+ }
+ return 1;
+}
diff --git a/3rdParty/Unbound/src/src/iterator/iterator.h b/3rdParty/Unbound/src/src/iterator/iterator.h
new file mode 100644
index 0000000..0272fe1
--- /dev/null
+++ b/3rdParty/Unbound/src/src/iterator/iterator.h
@@ -0,0 +1,374 @@
+/*
+ * iterator/iterator.h - iterative resolver DNS query response module
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains a module that performs recursive iterative DNS query
+ * processing.
+ */
+
+#ifndef ITERATOR_ITERATOR_H
+#define ITERATOR_ITERATOR_H
+#include "services/outbound_list.h"
+#include "util/data/msgreply.h"
+#include "util/module.h"
+struct delegpt;
+struct iter_hints;
+struct iter_forwards;
+struct iter_donotq;
+struct iter_prep_list;
+struct iter_priv;
+
+/** max number of query restarts. Determines the max length of a CNAME chain. */
+#define MAX_RESTART_COUNT 8
+/** max number of referrals. Makes sure resolver does not run away */
+#define MAX_REFERRAL_COUNT 130
+/** max number of queries-sent-out. Makes sure a large NS set does not loop */
+#define MAX_SENT_COUNT 16
+/** at what query-sent-count to stop target fetch policy */
+#define TARGET_FETCH_STOP 3
+/** how nice is a server without further information, in msec
+ * Equals rtt initial timeout value.
+ */
+#define UNKNOWN_SERVER_NICENESS 376
+/** maximum timeout before a host is deemed unsuitable, in msec.
+ * After host_ttl this will be timed out and the host will be tried again.
+ * Equals RTT_MAX_TIMEOUT
+ */
+#define USEFUL_SERVER_TOP_TIMEOUT 120000
+/** Number of lost messages in a row that get a host blacklisted.
+ * With 16, a couple different queries have to time out and no working
+ * queries are happening */
+#define USEFUL_SERVER_MAX_LOST 16
+/** number of retries on outgoing queries */
+#define OUTBOUND_MSG_RETRY 5
+/** RTT band, within this amount from the best, servers are chosen randomly.
+ * Chosen so that the UNKNOWN_SERVER_NICENESS falls within the band of a
+ * fast server, this causes server exploration as a side benefit. msec. */
+#define RTT_BAND 400
+/** Start value for blacklisting a host, 2*USEFUL_SERVER_TOP_TIMEOUT in sec */
+#define INFRA_BACKOFF_INITIAL 240
+
+/**
+ * Global state for the iterator.
+ */
+struct iter_env {
+ /**
+ * The hints -- these aren't stored in the cache because they don't
+ * expire. The hints are always used to "prime" the cache. Note
+ * that both root hints and stub zone "hints" are stored in this
+ * data structure.
+ */
+ struct iter_hints* hints;
+
+ /** A flag to indicate whether or not we have an IPv6 route */
+ int supports_ipv6;
+
+ /** A flag to indicate whether or not we have an IPv4 route */
+ int supports_ipv4;
+
+ /** A set of inetaddrs that should never be queried. */
+ struct iter_donotq* donotq;
+
+ /** private address space and private domains */
+ struct iter_priv* priv;
+
+ /** The maximum dependency depth that this resolver will pursue. */
+ int max_dependency_depth;
+
+ /**
+ * The target fetch policy for each dependency level. This is
+ * described as a simple number (per dependency level):
+ * negative numbers (usually just -1) mean fetch-all,
+ * 0 means only fetch on demand, and
+ * positive numbers mean to fetch at most that many targets.
+ * array of max_dependency_depth+1 size.
+ */
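+	/* e.g. a target-fetch-policy such as "3 2 1 0 0" gives fetch-3 at
+	 * depth 0, fetch-2 at depth 1, fetch-1 at depth 2 and
+	 * fetch-on-demand deeper down. */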
+ int* target_fetch_policy;
+};
+
+/**
+ * State of the iterator for a query.
+ */
+enum iter_state {
+ /**
+ * Externally generated queries start at this state. Query restarts are
+ * reset to this state.
+ */
+ INIT_REQUEST_STATE = 0,
+
+ /**
+ * Root priming events reactivate here, most other events pass
+ * through this naturally as the 2nd part of the INIT_REQUEST_STATE.
+ */
+ INIT_REQUEST_2_STATE,
+
+ /**
+ * Stub priming events reactivate here, most other events pass
+ * through this naturally as the 3rd part of the INIT_REQUEST_STATE.
+ */
+ INIT_REQUEST_3_STATE,
+
+ /**
+ * Each time a delegation point changes for a given query or a
+ * query times out and/or wakes up, this state is (re)visited.
+	 * This state is responsible for iterating through a list of
+ * nameserver targets.
+ */
+ QUERYTARGETS_STATE,
+
+ /**
+ * Responses to queries start at this state. This state handles
+ * the decision tree associated with handling responses.
+ */
+ QUERY_RESP_STATE,
+
+ /** Responses to priming queries finish at this state. */
+ PRIME_RESP_STATE,
+
+	/** Collecting query class information for qclass=ANY: it spawns
+	 * off queries for every class and the responses return here. */
+ COLLECT_CLASS_STATE,
+
+	/** Responses that are to be returned upstream end at this state,
+	 * as do responses to target queries. */
+ FINISHED_STATE
+};
+
+/**
+ * Per query state for the iterator module.
+ */
+struct iter_qstate {
+ /**
+ * State of the iterator module.
+	 * This is the state that the event is in or should be sent to -- all
+ * requests should start with the INIT_REQUEST_STATE. All
+ * responses should start with QUERY_RESP_STATE. Subsequent
+ * processing of the event will change this state.
+ */
+ enum iter_state state;
+
+ /**
+ * Final state for the iterator module.
+ * This is the state that responses should be routed to once the
+ * response is final. For externally initiated queries, this
+ * will be FINISHED_STATE, locally initiated queries will have
+ * different final states.
+ */
+ enum iter_state final_state;
+
+ /**
+	 * The depth of this query, i.e. the depth of recursion: this query
+	 * resolves an address needed for another query, which in turn was
+	 * needed for another query, etc. The original client query has depth 0.
+ */
+ int depth;
+
+ /**
+ * The response
+ */
+ struct dns_msg* response;
+
+ /**
+ * This is a list of RRsets that must be prepended to the
+ * ANSWER section of a response before being sent upstream.
+ */
+ struct iter_prep_list* an_prepend_list;
+ /** Last element of the prepend list */
+ struct iter_prep_list* an_prepend_last;
+
+ /**
+ * This is the list of RRsets that must be prepended to the
+ * AUTHORITY section of the response before being sent upstream.
+ */
+ struct iter_prep_list* ns_prepend_list;
+ /** Last element of the authority prepend list */
+ struct iter_prep_list* ns_prepend_last;
+
+	/** query info used for chasing the results. Initially the same as
+	 * the state's qinfo, but after following CNAMEs this will differ;
+	 * it is the query used to elicit the results needed. */
+ struct query_info qchase;
+ /** query flags to use when chasing the answer (i.e. RD flag) */
+ uint16_t chase_flags;
+ /** true if we set RD bit because of last resort recursion lame query*/
+ int chase_to_rd;
+
+ /**
+ * This is the current delegation point for an in-progress query. This
+ * object retains state as to which delegation targets need to be
+ * (sub)queried for vs which ones have already been visited.
+ */
+ struct delegpt* dp;
+
+ /** state for 0x20 fallback when capsfail happens, 0 not a fallback */
+ int caps_fallback;
+ /** state for capsfail: current server number to try */
+ size_t caps_server;
+ /** state for capsfail: stored query for comparisons */
+ struct reply_info* caps_reply;
+
+ /** Current delegation message - returned for non-RD queries */
+ struct dns_msg* deleg_msg;
+
+ /** number of outstanding target sub queries */
+ int num_target_queries;
+
+ /** outstanding direct queries */
+ int num_current_queries;
+
+ /** the number of times this query has been restarted. */
+ int query_restart_count;
+
+	/** the number of times this query has followed a referral. */
+ int referral_count;
+
+ /** number of queries fired off */
+ int sent_count;
+
+ /**
+	 * The query must store NS records from referrals as parentside RRs.
+ * Enabled once it hits resolution problems, to throttle retries.
+ * If enabled it is the pointer to the old delegation point with
+ * the old retry counts for bad-nameserver-addresses.
+ */
+ struct delegpt* store_parent_NS;
+
+ /**
+	 * The query is for parent-side glue (A or AAAA) for a nameserver.
+ * If the item is seen as glue in a referral, and pside_glue is NULL,
+ * then it is stored in pside_glue for later.
+ * If it was never seen, at the end, then a negative caching element
+ * must be created.
+ * The (data or negative) RR cache element then throttles retries.
+ */
+ int query_for_pside_glue;
+ /** the parent-side-glue element (NULL if none, its first match) */
+ struct ub_packed_rrset_key* pside_glue;
+
+ /**
+ * expected dnssec information for this iteration step.
+ * If dnssec rrsigs are expected and not given, the server is marked
+ * lame (dnssec-lame).
+ */
+ int dnssec_expected;
+
+ /**
+ * We are expecting dnssec information, but we also know the server
+ * is DNSSEC lame. The response need not be marked dnssec-lame again.
+ */
+ int dnssec_lame_query;
+
+ /**
+	 * This is a flag that, if true, means that this event is
+ * waiting for a stub priming query.
+ */
+ int wait_priming_stub;
+
+ /**
+ * This is a flag that, if true, means that this query is
+ * for (re)fetching glue from a zone. Since the address should
+ * have been glue, query again to the servers that should have
+ * been returning it as glue.
+ * The delegation point must be set to the one that should *not*
+ * be used when creating the state. A higher one will be attempted.
+ */
+ int refetch_glue;
+
+ /** list of pending queries to authoritative servers. */
+ struct outbound_list outlist;
+};
+
+/**
+ * List of prepend items
+ */
+struct iter_prep_list {
+ /** next in list */
+ struct iter_prep_list* next;
+ /** rrset */
+ struct ub_packed_rrset_key* rrset;
+};
+
+/**
+ * Get the iterator function block.
+ * @return: function block with function pointers to iterator methods.
+ */
+struct module_func_block* iter_get_funcblock(void);
+
+/**
+ * Get iterator state as a string
+ * @param state: to convert
+ * @return constant string that is printable.
+ */
+const char* iter_state_to_string(enum iter_state state);
+
+/**
+ * See if iterator state is a response state
+ * @param s: to inspect
+ * @return true if response state.
+ */
+int iter_state_is_responsestate(enum iter_state s);
+
+/** iterator init */
+int iter_init(struct module_env* env, int id);
+
+/** iterator deinit */
+void iter_deinit(struct module_env* env, int id);
+
+/** iterator operate on a query */
+void iter_operate(struct module_qstate* qstate, enum module_ev event, int id,
+ struct outbound_entry* outbound);
+
+/**
+ * Return query results to interested super querystates.
+ *
+ * Sets the delegation point and delegation message (not nonRD queries).
+ * This is a callback from walk_supers.
+ *
+ * @param qstate: query state that finished.
+ * @param id: module id.
+ * @param super: the qstate to inform.
+ */
+void iter_inform_super(struct module_qstate* qstate, int id,
+ struct module_qstate* super);
+
+/** iterator cleanup query state */
+void iter_clear(struct module_qstate* qstate, int id);
+
+/** iterator alloc size routine */
+size_t iter_get_mem(struct module_env* env, int id);
+
+#endif /* ITERATOR_ITERATOR_H */