@@ -20,6 +20,7 @@
 
 #include <algorithm>
 #include <boost/foreach.hpp>
+#include <boost/random.hpp>
 #include <dns/rrttl.h>
 #include <dns/rdataclass.h>
 
@@ -32,6 +33,19 @@ using namespace dns;
 
 namespace nsas {
 
+ZoneEntry::ZoneEntry(boost::shared_ptr<ResolverInterface> resolver,
+    const std::string& name, const isc::dns::RRClass& class_code,
+    boost::shared_ptr<HashTable<NameserverEntry> > nameserver_table,
+    boost::shared_ptr<LruList<NameserverEntry> > nameserver_lru) :
+    expiry_(0),
+    name_(name), class_code_(class_code), resolver_(resolver),
+    nameserver_table_(nameserver_table), nameserver_lru_(nameserver_lru)
+{
+    in_process_[ANY_OK] = false;
+    in_process_[V4_ONLY] = false;
+    in_process_[V6_ONLY] = false;
+}
+
 namespace {
 // Shorter aliases for frequently used types
 typedef mutex::scoped_lock Lock; // Local lock, nameservers not locked
@@ -49,87 +63,91 @@ newNs(const std::string* name, const RRClass* class_code) {
 }
 
-// A struct, the class is unaccessible anyway and is ours
-struct ZoneEntry::ResolverCallback : public ResolverInterface::Callback {
-    ResolverCallback(shared_ptr<ZoneEntry> entry) :
-        entry_(entry)
-    { }
-    virtual void success(shared_ptr<AbstractRRset> answer) {
-        shared_ptr<Lock> lock(new Lock(entry_->mutex_));
-        RdataIteratorPtr iterator(answer->getRdataIterator());
-        iterator->first();
-        // If there are no data
-        if (iterator->isLast()) {
-            failureInternal(lock, answer->getTTL().getValue());
-            return;
-        } else {
-            // Store the current ones so we can keep them
-            map<string, NameserverPtr> old;
-            BOOST_FOREACH(const NameserverPtr& ptr, entry_->nameservers_) {
-                old[ptr->getName()] = ptr;
-            }
+class ZoneEntry::ResolverCallback : public ResolverInterface::Callback {
+    public:
+        ResolverCallback(shared_ptr<ZoneEntry> entry) :
+            entry_(entry)
+        { }
+        virtual void success(shared_ptr<AbstractRRset> answer) {
+            shared_ptr<Lock> lock(new Lock(entry_->mutex_));
+            RdataIteratorPtr iterator(answer->getRdataIterator());
+            iterator->first();
+            // If there are no data
+            if (iterator->isLast()) {
+                failureInternal(lock, answer->getTTL().getValue());
+                return;
+            } else {
+                // Store the current ones so we can keep them
+                map<string, NameserverPtr> old;
+                BOOST_FOREACH(const NameserverPtr& ptr, entry_->nameservers_) {
+                    old[ptr->getName()] = ptr;
+                }
 
-            // Now drop the old ones and insert the new ones
-            entry_->nameservers_.clear();
-            for (; !iterator->isLast(); iterator->next()) {
-                try {
-                    // Get the name from there
-                    Name ns_name(dynamic_cast<const rdata::generic::NS&>(
-                        iterator->getCurrent()).getNSName());
-                    // Try to find it in the old ones
-                    map<string, NameserverPtr>::iterator old_ns(old.find(
-                        ns_name.toText()));
-                    // It is not there, look it up in the table or create
-                    // new one
-                    if (old_ns == old.end()) {
-                        // Look it up or create it
-                        string ns_name_str(ns_name.toText());
-                        pair<bool, NameserverPtr> from_hash(
-                            entry_->nameserver_table_->getOrAdd(HashKey(
-                            ns_name_str, entry_->class_code_), bind(
-                            newNs, &ns_name_str, &entry_->class_code_)));
-                        // Touch it if it is not newly created
-                        if (!from_hash.first) {
-                            entry_->nameserver_lru_->touch(from_hash.second);
+                // Now drop the old ones and insert the new ones
+                entry_->nameservers_.clear();
+                for (; !iterator->isLast(); iterator->next()) {
+                    try {
+                        // Get the name from there
+                        Name ns_name(dynamic_cast<const rdata::generic::NS&>(
+                            iterator->getCurrent()).getNSName());
+                        // Try to find it in the old ones
+                        map<string, NameserverPtr>::iterator old_ns(old.find(
+                            ns_name.toText()));
+                        // It is not there, look it up in the table or create
+                        // new one
+                        if (old_ns == old.end()) {
+                            // Look it up or create it
+                            string ns_name_str(ns_name.toText());
+                            pair<bool, NameserverPtr> from_hash(
+                                entry_->nameserver_table_->getOrAdd(HashKey(
+                                ns_name_str, entry_->class_code_), bind(
+                                newNs, &ns_name_str, &entry_->class_code_)));
+                            // Touch it if it is not newly created
+                            if (!from_hash.first) {
+                                entry_->nameserver_lru_->touch(
+                                    from_hash.second);
+                            }
+                            // And add it at last
+                            entry_->nameservers_.push_back(from_hash.second);
+                        } else {
+                            // We have it, so just use it
+                            entry_->nameservers_.push_back(old_ns->second);
                         }
-                        // And add it at last
-                        entry_->nameservers_.push_back(from_hash.second);
-                    } else {
-                        // We have it, so just use it
-                        entry_->nameservers_.push_back(old_ns->second);
                     }
+                    // OK, we skip this one it is not NS (log?)
+                    catch (bad_cast&) { }
                 }
-                // OK, we skip this one it is not NS (log?)
-                catch (bad_cast&) { }
-            }
 
-            // It is unbelievable, but we found no nameservers there
-            if (entry_->nameservers_.empty()) {
-                failureInternal(lock, answer->getTTL().getValue());
-                return;
-            } else {
-                entry_->setState(READY);
-                entry_->expiry_ = answer->getTTL().getValue() + time(NULL);
-                entry_->process(CallbackPtr(), ADDR_REQ_MAX, NULL, lock);
-                return;
+                // It is unbelievable, but we found no nameservers there
+                if (entry_->nameservers_.empty()) {
+                    failureInternal(lock, answer->getTTL().getValue());
+                    return;
+                } else {
+                    entry_->setState(READY);
+                    entry_->expiry_ = answer->getTTL().getValue() + time(NULL);
+                    entry_->process(CallbackPtr(), ADDR_REQ_MAX,
+                        NameserverPtr(), entry_, lock);
+                    return;
+                }
             }
         }
-    }
-    virtual void failure() {
-        shared_ptr<Lock> lock(new Lock(entry_->mutex_));
-        /*
-         * FIXME: That 5 minutes is just made up and wrong.
-         * Where is the correct place to get the correct number?
-         */
-        failureInternal(lock, 300);
-    }
-    void failureInternal(shared_ptr<Lock> lock, time_t ttl) {
-        entry_->setState(UNREACHABLE);
-        entry_->expiry_ = ttl + time(NULL);
-        // Process all three callback lists and tell them KO
-        entry_->process(CallbackPtr(), ADDR_REQ_MAX, NULL, lock);
-    }
-    shared_ptr<ZoneEntry> entry_;
+        virtual void failure() {
+            shared_ptr<Lock> lock(new Lock(entry_->mutex_));
+            /*
+             * FIXME: That 5 minutes is just made up and wrong.
+             * Where is the correct place to get the correct number?
+             */
+            failureInternal(lock, 300);
+        }
+    private:
+        void failureInternal(shared_ptr<Lock> lock, time_t ttl) {
+            entry_->setState(UNREACHABLE);
+            entry_->expiry_ = ttl + time(NULL);
+            // Process all three callback lists and tell them KO
+            entry_->process(CallbackPtr(), ADDR_REQ_MAX, NameserverPtr(),
+                entry_, lock);
+        }
+        shared_ptr<ZoneEntry> entry_;
 };
 
 void
@@ -156,7 +174,7 @@ ZoneEntry::addCallback(CallbackPtr callback, AddressFamily family,
     } else {
         // Try to process it right away, store if not possible to handle
         lock.unlock();
-        process(callback, family, NULL);
+        process(callback, family, NameserverPtr(), self);
         return;
     }
 
@@ -172,6 +190,7 @@ ZoneEntry::addCallback(CallbackPtr callback, AddressFamily family,
 
 namespace {
 
+// This just moves items from one container to another
 template<class Container>
 void
 move(Container& into, Container& from) {
@@ -179,11 +198,62 @@ move(Container& into, Container& from) {
     from.clear();
 }
 
+mutex randMutex;
+
+size_t
+randIndex(size_t count) {
+    // We need to lock the global generator
+    // TODO If there's contention on the lock, we might want a generator
+    // for each thread?
+    mutex::scoped_lock lock(randMutex);
+    // A pseudo-random generator seems to be enough here and, according
+    // to the boost docs, this one is fast.
+    static rand48 generator;
+    return variate_generator<rand48&, uniform_int<size_t> >(generator,
+        uniform_int<size_t>(0, count - 1))();
+}
+
+asiolink::IOAddress
+chooseAddress(const NameserverEntry::AddressVector& addresses) {
+    // TODO Something a little bit more intelligent than just picking a
+    // random one
+    assert(!addresses.empty()); // Should not be called with empty list
+    return (addresses[randIndex(addresses.size())].getAddress());
+}
+
 }
 
+// Sets to false on exit of current scope
+class ZoneEntry::ProcessGuard {
+    public:
+        ProcessGuard(bool& guarded) :
+            guarded_(guarded)
+        { }
+        ~ ProcessGuard() {
+            guarded_ = false;
+        }
+    private:
+        bool& guarded_;
+};
+
+class ZoneEntry::NameserverCallback : public NameserverEntry::Callback {
+    public:
+        NameserverCallback(shared_ptr<ZoneEntry> entry, AddressFamily family) :
+            entry_(entry),
+            family_(family)
+        { }
+        virtual void operator()(NameserverPtr ns) {
+            entry_->process(CallbackPtr(), family_, ns, entry_);
+        }
+    private:
+        shared_ptr<ZoneEntry> entry_;
+        AddressFamily family_;
+};
+
 void
 ZoneEntry::process(CallbackPtr callback, AddressFamily family,
-    NameserverEntry*, shared_ptr<Lock> lock)
+    shared_ptr<NameserverEntry> nameserver, shared_ptr<ZoneEntry> self,
+    shared_ptr<Lock> lock)
 {
     // If we were not provided with a lock, get one
     if (!lock) {
@@ -220,8 +290,87 @@ ZoneEntry::process(CallbackPtr callback, AddressFamily family,
             return;
         }
         case READY:
-            // TODO Write
-            ;
+            if (family == ADDR_REQ_MAX) {
+                // Just process each one separately
+                process(CallbackPtr(), ANY_OK, nameserver, self, lock);
+                process(CallbackPtr(), V4_ONLY, nameserver, self, lock);
+                process(CallbackPtr(), V6_ONLY, nameserver, self, lock);
+            } else {
+                /*
+                 * Check that we are only in one process call on the stack.
+                 * It eliminates the problem when there are multiple nameserver
+                 * IP addresses in the cache, but the first one would trigger
+                 * calling all callbacks. We do not want that, we want to wait
+                 * for all cached ones to arrive. Therefore we bail out if
+                 * there's a call here in the stack already and let that sort
+                 * everything out when it returns.
+                 */
+                // Check that we are only in one process call on the stack
+                if (in_process_[family]) {
+                    return;
+                }
+                // Mark we are on the stack
+                ProcessGuard guard(in_process_[family]);
+                in_process_[family] = true;
+                // Variables to store the data to
+                NameserverEntry::AddressVector addresses;
+                NameserverVector to_ask;
+                bool pending(false);
+
+                // Pick info from the nameservers
+                BOOST_FOREACH(const NameserverPtr& ns, nameservers_) {
+                    Fetchable::State ns_state(ns->getAddresses(addresses,
+                        family, ns == nameserver));
+                    switch (ns_state) {
+                        case IN_PROGRESS:
+                            pending = true;
+                            break;
+                        case NOT_ASKED:
+                        case EXPIRED:
+                            to_ask.push_back(ns);
+                            break;
+                        case UNREACHABLE:
+                        case READY:
+                            // Not interested, but avoiding warning
+                            break;
+                    }
+                }
+
+                // We have someone to ask, so do it
+                if (!to_ask.empty()) {
+                    // We should not be locked, because this function can
+                    // be called directly from the askIP again
+                    lock->unlock();
+                    shared_ptr<NameserverCallback> ns_callback(new
+                        NameserverCallback(self, family));
+                    /*
+                     * TODO: Possible place for an optimisation. We now ask
+                     * everything we can. We should limit this to something like
+                     * 2 concurrent NS fetches (and fetch the cache first, then
+                     * fetch the remote ones). But fetching everything right
+                     * away is simpler.
+                     */
+                    BOOST_FOREACH(const NameserverPtr& ns, to_ask) {
+                        ns->askIP(resolver_, ns_callback, family, ns);
+                    }
+                    // Retry with all the data that might have arrived
+                    in_process_[family] = false;
+                    process(callback, family, nameserver, self);
+                    // And be done
+                    return;
+                // We have some addresses to answer
+                } else if (!addresses.empty()) {
+                    // Extract the callbacks
+                    vector<CallbackPtr> to_execute;
+                    to_execute.swap(callbacks_[family]);
+
+                    // Unlock, the callbacks might want to call us
+                    BOOST_FOREACH(const CallbackPtr& callback, to_execute) {
+                        callback->success(chooseAddress(addresses));
+                    }
+                }
+            }
+            return;
     }
 }