
[trac493]Merge branch 'master' into trac493

Conflicts:
	src/lib/cache/tests/Makefile.am
	src/lib/cache/tests/message_cache_unittest.cc
	src/lib/cache/tests/message_entry_unittest.cc
Ocean Wang committed 14 years ago
commit f8fb852bc6
100 changed files with 4366 additions and 2011 deletions
  1. 168 1
      ChangeLog
  2. 13 1
      Makefile.am
  3. 6 8
      README
  4. 96 115
      configure.ac
  5. 1 1
      doc/Doxyfile
  6. 44 38
      doc/guide/bind10-guide.html
  7. 28 38
      doc/guide/bind10-guide.xml
  8. 1 1
      ext/asio/asio/detail/epoll_reactor.hpp
  9. 1 1
      ext/asio/asio/detail/kqueue_reactor.hpp
  10. 1 1
      ext/asio/asio/detail/null_thread.hpp
  11. 3 1
      src/bin/auth/Makefile.am
  12. 70 36
      src/bin/auth/auth.spec.pre.in
  13. 19 0
      src/bin/auth/auth_srv.cc
  14. 14 0
      src/bin/auth/auth_srv.h
  15. 15 47
      src/bin/auth/b10-auth.8
  16. 10 48
      src/bin/auth/b10-auth.xml
  17. 2 0
      src/bin/auth/benchmarks/Makefile.am
  18. 1 1
      src/bin/auth/benchmarks/query_bench.cc
  19. 60 1
      src/bin/auth/config.cc
  20. 14 59
      src/bin/auth/main.cc
  21. 2 0
      src/bin/auth/tests/Makefile.am
  22. 18 2
      src/bin/auth/tests/auth_srv_unittest.cc
  23. 21 1
      src/bin/auth/tests/config_unittest.cc
  24. 1 1
      src/bin/auth/tests/query_unittest.cc
  25. 1 2
      src/bin/bind10/Makefile.am
  26. 4 33
      src/bin/bind10/bind10.8
  27. 127 50
      src/bin/bind10/bind10.py.in
  28. 8 35
      src/bin/bind10/bind10.xml
  29. 0 315
      src/bin/bind10/tests/bind10_test.py
  30. 463 0
      src/bin/bind10/tests/bind10_test.py.in
  31. 6 5
      src/bin/bindctl/Makefile.am
  32. 165 95
      src/bin/bindctl/bindcmd.py
  33. 20 1
      src/bin/bindctl/bindctl.xml
  34. 50 41
      src/bin/bindctl/bindctl-source.py.in
  35. 55 1
      src/bin/bindctl/cmdparse.py
  36. 57 11
      src/bin/bindctl/moduleinfo.py
  37. 2 2
      src/bin/bindctl/tests/Makefile.am
  38. 144 8
      src/bin/bindctl/tests/bindctl_test.py
  39. 88 0
      src/bin/bindctl/tests/cmdparse_test.py
  40. 0 1
      src/bin/cfgmgr/Makefile.am
  41. 11 3
      src/bin/cfgmgr/b10-cfgmgr.py.in
  42. 1 2
      src/bin/cmdctl/Makefile.am
  43. 5 2
      src/bin/host/host.cc
  44. 0 1
      src/bin/msgq/Makefile.am
  45. 1 1
      src/bin/msgq/msgq.py.in
  46. 5 1
      src/bin/resolver/Makefile.am
  47. 26 25
      src/bin/resolver/b10-resolver.8
  48. 45 13
      src/bin/resolver/b10-resolver.xml
  49. 49 5
      src/bin/resolver/main.cc
  50. 77 126
      src/bin/resolver/resolver.cc
  51. 21 0
      src/bin/resolver/resolver.h
  52. 20 20
      src/bin/resolver/resolver.spec.pre.in
  53. 4 0
      src/bin/resolver/tests/Makefile.am
  54. 24 92
      src/bin/resolver/tests/resolver_config_unittest.cc
  55. 21 0
      src/bin/resolver/tests/resolver_unittest.cc
  56. 1 2
      src/bin/stats/Makefile.am
  57. 1 2
      src/bin/usermgr/Makefile.am
  58. 1 2
      src/bin/xfrin/Makefile.am
  59. 1 2
      src/bin/xfrout/Makefile.am
  60. 36 16
      src/bin/xfrout/tests/xfrout_test.py
  61. 116 76
      src/bin/xfrout/xfrout.py.in
  62. 1 1
      src/bin/zonemgr/Makefile.am
  63. 15 0
      src/cppcheck-suppress.lst
  64. 2 2
      src/lib/Makefile.am
  65. 25 19
      src/lib/asiolink/Makefile.am
  66. 80 1
      src/lib/asiolink/README
  67. 37 0
      src/lib/asiolink/asiodef.cc
  68. 21 0
      src/lib/asiolink/asiodef.h
  69. 56 0
      src/lib/asiolink/asiodef.msg
  70. 1 19
      src/lib/asiolink/asiolink.h
  71. 61 0
      src/lib/asiolink/asiolink_utilities.h
  72. 8 5
      src/lib/asiolink/dns_server.h
  73. 13 5
      src/lib/asiolink/dns_service.cc
  74. 7 1
      src/lib/asiolink/dns_service.h
  75. 59 0
      src/lib/asiolink/dummy_io_cb.h
  76. 5 5
      src/lib/asiolink/interval_timer.cc
  77. 1 1
      src/lib/asiolink/interval_timer.h
  78. 4 1
      src/lib/asiolink/io_address.cc
  79. 3 7
      src/lib/asiolink/io_address.h
  80. 399 0
      src/lib/asiolink/io_asio_socket.h
  81. 3 1
      src/lib/asiolink/io_endpoint.cc
  82. 3 7
      src/lib/asiolink/io_endpoint.h
  83. 35 0
      src/lib/asiolink/io_error.h
  84. 355 0
      src/lib/asiolink/io_fetch.cc
  85. 179 0
      src/lib/asiolink/io_fetch.h
  86. 4 7
      src/lib/asiolink/io_message.h
  87. 5 4
      src/lib/asiolink/io_service.cc
  88. 4 7
      src/lib/asiolink/io_socket.h
  89. 54 0
      src/lib/asiolink/qid_gen.cc
  90. 85 0
      src/lib/asiolink/qid_gen.h
  91. 0 460
      src/lib/asiolink/recursive_query.cc
  92. 38 23
      src/lib/asiolink/tcp_endpoint.h
  93. 47 12
      src/lib/asiolink/tcp_server.cc
  94. 4 0
      src/lib/asiolink/tcp_server.h
  95. 380 16
      src/lib/asiolink/tcp_socket.h
  96. 20 11
      src/lib/asiolink/tests/Makefile.am
  97. 74 0
      src/lib/asiolink/tests/asiolink_utilities_unittest.cc
  98. 6 2
      src/lib/asiolink/tests/interval_timer_unittest.cc
  99. 7 1
      src/lib/asiolink/tests/ioaddress_unittest.cc
  100. 0 0
      src/lib/asiolink/tests/ioendpoint_unittest.cc

+ 168 - 1
ChangeLog

@@ -1,9 +1,176 @@
+  198.	[bug]		jinmei
+	b10-auth, src/lib/datasrc: fixed a bug where hot spot cache failed
+	to reuse cached SOA for negative responses.  Due to this bug
+	b10-auth returned SERVFAIL when it was expected to return a
+	negative response immediately after a specific SOA query for
+	the zone.
+	(Trac #626, git 721a53160c15e8218f6798309befe940b9597ba0)
+
+  197.  [bug]		zhang likun
+	Remove expired message and rrset entries when looking them up
+	in the cache; touch or remove the rrset entry in the cache
+	properly when doing a lookup or update.
+	(Trac #661, git 9efbe64fe3ff22bb5fba46de409ae058f199c8a7)
+
+  196.	[bug]		jinmei
+	b10-auth, src/lib/datasrc: the backend of the in-memory data
+	source could not handle the root name.  As a result b10-auth could
+	not work as a root server when using the in-memory data source.
+	(Trac #683, git 420ec42bd913fb83da37b26b75faae49c7957c46)
+
+  195.  [func]      stephen
+	Resolver will now re-try a query over TCP if a response to a UDP
+	query has the TC bit set.
+	(Trac #499, git 4c05048ba059b79efeab53498737abe94d37ee07)
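
As background for how such a retry decision can be made (an illustration only, not code from this commit): the TC bit is bit 0x02 of the third byte of the DNS message header (RFC 1035, section 4.1.1), so truncation can be detected from the raw response buffer before any further parsing. A minimal sketch with hypothetical names:

// Sketch: detect truncation in a raw DNS response so the caller can retry
// the same question over TCP.  isTruncated() is a hypothetical helper and
// not part of the resolver change referenced above.
#include <cstddef>
#include <stdint.h>

namespace {
const size_t DNS_HEADER_LEN = 12;        // fixed-size DNS message header

bool
isTruncated(const uint8_t* wire, const size_t length) {
    if (length < DNS_HEADER_LEN) {
        return (false);                  // not even a complete header
    }
    return ((wire[2] & 0x02) != 0);      // TC is bit 0x02 of byte 2
}
}

A caller that sees isTruncated() return true would simply reissue the same question over a TCP connection and prefer that answer.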
+
+  194.  [bug]       vorner
+	Solved a 100% CPU usage problem after switching addresses in b10-auth
+	(and possibly, but unconfirmed, in b10-resolver). It was caused by
+	repeated reads/accepts on a closed socket (the bug had been in the
+	code for a long time; recent changes merely exposed it).
+	(Trac #657, git e0863720a874d75923ea66adcfbf5b2948efb10a)
+
+  193.	[func]*		jreed
+	Listen on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses
+	for b10-auth. This returns to previous behavior prior to
+	change #184. Document the listen_on configuration in manual.
+	(Trac #649, git 65a77d8fde64d464c75917a1ab9b6b3f02640ca6)
+
+  192.	[func]*		jreed
+	Listen on standard domain port 53 for b10-auth and
+	b10-resolver.
+	(Trac #617, #618, git 137a6934a14cf0c5b5c065e910b8b364beb0973f)
+
+  191.	[func]		jinmei
+	Imported system test framework of BIND 9.  It can be run by
+	'make systest' at the top source directory.  Notes: currently it
+	doesn't work when built in a separate tree.  It also requires
+	perl, an inherited dependency from the original framework.
+	Also, mainly for the purpose of tests, a new option "--pid-file"
+	was added to BoB, with which the boss process will dump its PID
+	to the specified file.
+	(Trac #606, git 6ac000df85625f5921e8895a1aafff5e4be3ba9c)
+
+  190.	[func]		jelte
+	Resolver now sets random qids on outgoing queries using
+	the boost::mt19937 prng.
+	(Trac #583, git 5222b51a047d8f2352bc9f92fd022baf1681ed81)
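
The generator itself appears to be the new src/lib/asiolink/qid_gen.{cc,h} listed in the file summary above; the following is only a minimal sketch of the same idea (seeding a boost::mt19937 engine and drawing uniform 16-bit values), not that implementation:

// Sketch of 16-bit QID generation with boost::mt19937; illustrative only.
#include <ctime>
#include <stdint.h>

#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_int.hpp>
#include <boost/random/variate_generator.hpp>

class QidSketch {
public:
    QidSketch() :
        generator_(static_cast<uint32_t>(std::time(NULL))),
        dist_(0, 65535),
        vgen_(generator_, dist_)
    {}

    // Return a freshly generated query ID.
    uint16_t generate() { return (vgen_()); }

private:
    boost::mt19937 generator_;              // the PRNG engine
    boost::uniform_int<uint16_t> dist_;     // uniform over the 16-bit QID space
    boost::variate_generator<boost::mt19937&,
                             boost::uniform_int<uint16_t> > vgen_;
};

Seeding from the clock is shown only for brevity; a real generator would prefer a better entropy source.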
+
+  189.	[bug]		jreed
+	Do not install the log message compiler.
+	(Trac #634, git eb6441aca464980d00e3ff827cbf4195c5a7afc5)
+
+  188.  [bug]		zhang likun
+	Make the rrset trust level ranking algorithm used by
+	isc::cache::MessageEntry::getRRsetTrustLevel() follow RFC2181
+	section 5.4.1.
+	(Trac #595 git 19197b5bc9f2955bd6a8ca48a2d04472ed696e81)
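
For reference, RFC 2181 section 5.4.1 ranks data sources from most to least trustworthy roughly as sketched below; this enum is only an illustration of that ordering, not the actual type used by MessageEntry::getRRsetTrustLevel():

// Illustrative ordering only (most trustworthy first), following RFC 2181
// section 5.4.1.
enum RRsetTrustSketch {
    TRUST_PRIMARY_ZONE_NONGLUE,     // data from a primary zone file, other than glue
    TRUST_ZONE_TRANSFER_NONGLUE,    // data from a zone transfer, other than glue
    TRUST_AUTHORITATIVE_ANSWER,     // answer section of an authoritative reply
    TRUST_AUTHORITATIVE_AUTHORITY,  // authority section of an authoritative reply
    TRUST_GLUE,                     // glue from a primary zone or a zone transfer
    TRUST_NONAUTH_ANSWER,           // answer section of a non-authoritative reply
    TRUST_ADDITIONAL_OR_NONAUTH     // additional data and non-authoritative authority data
};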
+
+  187.  [bug]		zhang likun
+	Fix the assert error in class isc::cache::RRsetCache by adding the
+	check for empty pointer and test case for it.
+	(Trac #638, git 54e61304131965c4a1d88c9151f8697dcbb3ce12)
+
+  186.  [bug]		jelte
+	b10-resolver could stop with an assertion failure on certain kinds
+	of messages (there was a problem in error message creation). This
+	fixes that.
+	(Trac #607, git 25a5f4ec755bc09b54410fcdff22691283147f32)
+
+  185.  [bug]        vorner
+	Tests use a port from the private range (53210), lowering the
+	chance of a conflict with something else (e.g. a running BIND 10).
+	(Trac #523, git 301da7d26d41e64d87c0cf72727f3347aa61fb40)
+
+  184.  [func]*      vorner
+	Listening address and port configuration of b10-auth is the same as
+	for b10-resolver now. That means, it is configured through bindctl
+	at runtime, in the Auth/listen_on list, not through command line
+	arguments.
+	(Trac #575, #576, git f06ce638877acf6f8e1994962bf2dbfbab029edf)
+
+  183.  [bug]      jerry
+	src/bin/xfrout: Enable parallel sessions between xfrout server and
+	multi-Auth. The session needs to be created only the first time
+	or if an error occurs.
+	(Trac #419, git 1d60afb59e9606f312caef352ecb2fe488c4e751)
+
+  182.	[func]		jinmei
+	Support cppcheck for static code checking of C++ code.  If cppcheck
+	is available, 'make cppcheck' on the top source directory will run
+	the checker and should cleanly complete with an exit code of 0
+	(at least with cppcheck 1.47).
+	Note: the suppression list isn't included in the final
+	distributions.  It should be created by hand or retrieved from
+	the git repository.
+	(Trac #613, git b973f67520682b63ef38b1451d309be9f4f4b218)
+
+  181.  [func]      feng
+	Add a stop interface to the DNS server, so we can stop each running
+	server individually. With it, the user can reconfigure a running
+	server with a different IP address or port.
+	(Trac #388, git 6df94e2db856c1adc020f658cc77da5edc967555)
+
+  180.  [build]     jreed
+	Fix custom DESTDIR for make install. Patch from Jan Engelhardt.
+	(Trac #629, git 5ac67ede03892a5eacf42ce3ace1e4e376164c9f)
+
+bind10-devel-20110224 released on February 24, 2011
+
+  179.  [func]      vorner
+	It is possible to start and stop resolver and authoritative
+	server without restarting the whole system. Changing the
+	configuration (Boss/start_auth and Boss/start_resolver) is
+	enough.
+	(Trac #565, git 0ac0b4602fa30852b0d86cc3c0b4730deb1a58fe)
+
+  178.  [func]      jelte
+	Resolver now makes (limited) use of the cache.
+	(Trac #491, git 8b41f77f0099ddc7ca7d34d39ad8c39bb1a8363c)
+
+  177.  [func]      stephen
+	The upstream fetch code in asiolink is now protocol agnostic to
+	allow for the addition of fallback to TCP if a fetch response
+	indicates truncation.
+	(Trac #554, git 9739cbce2eaffc7e80640db58a8513295cf684de)
+
+  176.  [func]      zhang likun
+	src/lib/cache: Rename one interface: from lookupClosestRRset()
+	to lookupDeepestNS(), and remove one of its parameters.
+	(Trac #492, git ecbfb7cf929d62a018dd4cdc7a841add3d5a35ae)
+
+  175.	[bug]		jerry
+	src/bin/xfrout: Xfrout uses the case-sensitive mode to compress
+	names in an AXFR message.
+	(Trac #253, git 004e382616150f8a2362e94d3458b59bb2710182)
+
+  174.	[bug]*		jinmei
+	src/lib/dns: revised dnssectime functions so that they don't rely
+	on the time_t type (whose size varies on different systems, which
+	can lead to subtle bugs like some form of "year 2038 problem").
+	Also handled 32-bit wrap around issues more explicitly, with more
+	detailed tests.  The function API has been changed, but the effect
+	should be minimal because these functions are mostly private.
+	(Trac #61, git 09ece8cdd41c0f025e8b897b4883885d88d4ba5d)
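
One common way to make such 32-bit wrap-around handling explicit (shown here only as an illustration; this is not necessarily the approach or API of the revised dnssectime functions) is to pick the 64-bit value congruent to the 32-bit field that lies closest to the current time:

// Sketch: map a 32-bit DNSSEC timestamp to a full 64-bit seconds-since-epoch
// value by choosing the candidate nearest to the current time.
#include <stdint.h>

uint64_t
restoreTime32(const uint32_t time32, const uint64_t now) {
    const uint64_t period = 0x100000000ULL;       // 2^32
    const uint64_t base = now - (now % period);   // start of the current cycle
    uint64_t best = base + time32;
    uint64_t best_diff = (best > now) ? (best - now) : (now - best);

    // Also consider the same offset in the previous and next cycles.
    if (base >= period) {
        const uint64_t prev = base - period + time32;
        const uint64_t diff = (prev > now) ? (prev - now) : (now - prev);
        if (diff < best_diff) {
            best = prev;
            best_diff = diff;
        }
    }
    const uint64_t next = base + period + time32;
    const uint64_t next_diff = (next > now) ? (next - now) : (now - next);
    if (next_diff < best_diff) {
        best = next;
    }
    return (best);
}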
+
+  173.	[bug]		jerry
+	python/isc/notify: A notify_out test fails without network
+	connectivity; encapsulate the socket behavior using a mock
+	socket class to fix it.
+	(Trac #346, git 319debfb957641f311102739a15059f8453c54ce)
+
+  172.  [func]      jelte
+	Improved the bindctl cli in various ways, mainly concerning
+	list and map item addressing, the correct display of actual values,
+	and internal help.
+	(Trac #384, git e5fb3bc1ed5f3c0aec6eb40a16c63f3d0fc6a7b2)
+
   171.  [func]      feng, jerry, jinmei, vorner
 	b10-auth, src/lib/datasrc: in memory data source now works as a
 	complete data source for authoritative DNS servers and b10-auth
 	uses it.  It still misses major features, however, including
 	DNSSEC support and zone transfer.
-	(Last trac #552, but many more,
+	(Last trac #553, but many more,
 	git 6f031a09a248e7684723c000f3e8cc981dcdb349)
 
 
   170.	[bug]		jinmei

+ 13 - 1
Makefile.am

@@ -1,4 +1,4 @@
-SUBDIRS = doc src
+SUBDIRS = doc src tests
 USE_LCOV=@USE_LCOV@
 LCOV=@LCOV@
 GENHTML=@GENHTML@
@@ -70,6 +70,18 @@ clean-coverage: clean-cpp-coverage clean-python-coverage
 
 
 report-coverage: report-cpp-coverage report-python-coverage
 
 
+# for static C++ check using cppcheck (when available)
+cppcheck:
+	cppcheck --enable=all --suppressions src/cppcheck-suppress.lst \
+		--quiet --error-exitcode=1 \
+		--template '{file}:{line}: check_fail: {message} ({severity},{id})' \
+		src
+
+# system tests
+systest:
+	cd tests/system; \
+	sh $(abs_srcdir)/tests/system/runall.sh
+
 #### include external sources in the distributed tarball:
 EXTRA_DIST = ext/asio/README
 EXTRA_DIST += ext/asio/asio/local/stream_protocol.hpp

+ 6 - 8
README

@@ -14,12 +14,12 @@ five year plan are described here:
 	https://bind10.isc.org/wiki/Year2Milestones
 
 
 This release includes the bind10 master process, b10-msgq message
-bus, b10-auth authoritative DNS server (with SQLite3 backend),
+bus, b10-auth authoritative DNS server (with SQLite3 and in-memory
-b10-resolver forwarding DNS server, b10-cmdctl remote control daemon,
+backends), b10-resolver forwarding DNS server, b10-cmdctl remote
-b10-cfgmgr configuration manager, b10-xfrin AXFR inbound service,
+control daemon, b10-cfgmgr configuration manager, b10-xfrin AXFR
-b10-xfrout outgoing AXFR service, b10-zonemgr secondary manager,
+inbound service, b10-xfrout outgoing AXFR service, b10-zonemgr
-b10-stats statistics collection and reporting daemon, and a new
+secondary manager, b10-stats statistics collection and reporting
-libdns++ library for C++ with a python wrapper.
+daemon, and a new libdns++ library for C++ with a python wrapper.
 
 
 Documentation is included and also available via the BIND 10
 website at http://bind10.isc.org/
@@ -164,8 +164,6 @@ source tree:
 (Which will use the modules and configurations also from the source
 tree.)
 
 
-The server will listen on port 5300 for DNS requests.
-
 CONFIGURATION
 
 
 Commands can be given through the bindctl tool.

+ 96 - 115
configure.ac

@@ -2,7 +2,7 @@
 # Process this file with autoconf to produce a configure script.
 
 
 AC_PREREQ([2.59])
-AC_INIT(bind10-devel, 20110120, bind10-dev@isc.org)
+AC_INIT(bind10-devel, 20110224, bind10-dev@isc.org)
 AC_CONFIG_SRCDIR(README)
 AM_INIT_AUTOMAKE
 AC_CONFIG_HEADERS([config.h])
@@ -14,18 +14,22 @@ AC_PROG_CXX
 #
 # On FreeBSD (and probably some others), clang++ does not meet an autoconf
 # assumption in identifying libtool configuration regarding shared library:
-# the configure script will execute "$CC -shared $CFLAGS -v -o" and expect
+# the configure script will execute "$CC -shared $CFLAGS/$CXXFLAGS -v" and
-# the output contains -Lxxx or -Ryyy.  This is the case for g++, but not for
+# expect the output contains -Lxxx or -Ryyy.  This is the case for g++, but
-# clang++, and, as a result, it will cause various errors in linking programs
+# not for clang++, and, as a result, it will cause various errors in linking
-# or running them with a shared object (such as some of our python scripts).
+# programs or running them with a shared object (such as some of our python
+# scripts).
 # To work around this problem we define a temporary variable
 # "CXX_LIBTOOL_LDFLAGS".  It's expected to be defined as, e.g, "-L/usr/lib"
 # to temporarily fake the output so that it will be compatible with that of
 # g++.
 CFLAGS_SAVED=$CFLAGS
+CXXFLAGS_SAVED=$CXXFLAGS
 CFLAGS="$CFLAGS $CXX_LIBTOOL_LDFLAGS"
+CXXFLAGS="$CXXFLAGS $CXX_LIBTOOL_LDFLAGS"
 AC_PROG_LIBTOOL
 CFLAGS=$CFLAGS_SAVED
+CXXFLAGS=$CXXFLAGS_SAVED
 
 
 # Use C++ language
 AC_LANG([C++])
@@ -66,6 +70,11 @@ if test $enable_shared = no; then
 	AC_MSG_ERROR([BIND 10 requires shared libraries to be built])
 fi
 
 
+AC_ARG_ENABLE(boost-threads,
+AC_HELP_STRING([--enable-boost-threads],
+  [use boost threads. Currently this only means using its locks instead of dummy locks, in the cache and NSAS]),
+  use_boost_threads=$enableval, use_boost_threads=no)
+
 # allow configuring without setproctitle.
 AC_ARG_ENABLE(setproctitle-check,
 AC_HELP_STRING([--disable-setproctitle-check],
@@ -193,8 +202,9 @@ if test "$setproctitle_check" = "yes" ; then
         AC_MSG_RESULT(ok)
     else
         AC_MSG_RESULT(missing)
-        AC_MSG_ERROR([Missing setproctitle module. Either install it or provide --disable-setproctitle-check.
+        AC_MSG_WARN([Missing setproctitle python module.
-In that case we will continue, but naming of python processes will not work.])
+Use --disable-setproctitle-check to skip this check.
+In this case we will continue, but naming of python processes will not work.])
     fi
 fi
 
 
@@ -285,6 +295,7 @@ AC_SUBST(B10_CXXFLAGS)
 
 
 AC_SEARCH_LIBS(inet_pton, [nsl])
 AC_SEARCH_LIBS(recvfrom, [socket])
+AC_SEARCH_LIBS(nanosleep, [rt])
 
 
 # Checks for header files.
 
 
@@ -363,57 +374,6 @@ if test "$lcov" != "no"; then
 fi
 AC_SUBST(USE_LCOV)
 
 
-# Configure log4cxx header and library path
-#
-# If explicitly specified, use it.
-
-AC_ARG_WITH([log4cxx],
-  AC_HELP_STRING([--with-log4cxx=PATH],
-    [specify directory where log4cxx is installed]),
-  [
-   log4cxx_include_path="${withval}/include";
-   log4cxx_library_path="${withval}/lib"
-  ])
-
-# This is an urgent fix to avoid regression due to log4cxx on some
-# platforms.  It should be cleaned up with a better fix.
-if test "X$with_log4cxx" != "Xno"; then
-
-# If not specified, try some common paths.  These default to
-# /usr/include and /usr/lib if not found
-
-if test -z "$with_log4cxx"; then
-	log4cxxdirs="/usr/local /usr/pkg /opt /opt/local"
-	for d in $log4cxxdirs
-	do
-		if test -d $d/include/log4cxx; then
-			log4cxx_include_path=$d/include
-			log4cxx_library_path=$d/lib
-			break
-		fi
-	done
-fi
-
-CPPFLAGS_SAVES="$CPPFLAGS"
-if test "${log4cxx_include_path}" ; then
-	LOG4CXX_INCLUDES="-I${log4cxx_include_path}"
-	CPPFLAGS="$CPPFLAGS $LOG4CXX_INCLUDES"
-fi
-AC_CHECK_HEADER([log4cxx/logger.h],, AC_MSG_ERROR([Missing log4cxx header files.]))
-CPPFLAGS="$CPPFLAGS_SAVES"
-AC_SUBST(LOG4CXX_INCLUDES)
-
-LOG4CXX_LDFLAGS="-llog4cxx";
-if test "${log4cxx_library_path}"; then
-    LOG4CXX_LDFLAGS="-L${log4cxx_library_path} -llog4cxx"
-fi
-AC_SUBST(LOG4CXX_LDFLAGS)
-
-# The following two lines are part of the urgent fix, and should be cleaned
-# up with a better fix.
-fi
-AM_CONDITIONAL(USE_LOG4CXX, test "X${with_log4cxx}" != "Xno")
-
 #
 # Configure Boost header path
 #
@@ -443,62 +403,69 @@ AC_CHECK_HEADERS([boost/shared_ptr.hpp boost/foreach.hpp boost/interprocess/sync
 CPPFLAGS="$CPPFLAGS_SAVES"
 AC_SUBST(BOOST_INCLUDES)
 
 
-# Using boost::mutex can result in requiring libboost_thread with older
+
-# versions of Boost.  We'd like to avoid relying on a compiled Boost library
+if test "${use_boost_threads}" = "yes" ; then
-# whenever possible, so we need to check for it step by step.
+    AC_DEFINE([USE_BOOST_THREADS], [], [Use boost threads])
-#
+
-# NOTE: another fix of this problem is to simply require newer versions of
+    # Using boost::mutex can result in requiring libboost_thread with older
-# boost.  If we choose that solution we should simplify the following tricky
+    # versions of Boost.  We'd like to avoid relying on a compiled Boost library
-# checks accordingly and all Makefile.am's that refer to NEED_LIBBOOST_THREAD.
+    # whenever possible, so we need to check for it step by step.
-AC_MSG_CHECKING(for boost::mutex)
+    #
-CPPFLAGS_SAVES="$CPPFLAGS"
+    # NOTE: another fix of this problem is to simply require newer versions of
-LIBS_SAVES="$LIBS"
+    # boost.  If we choose that solution we should simplify the following tricky
-CPPFLAGS="$BOOST_INCLUDES $CPPFLAGS $MULTITHREADING_FLAG"
+    # checks accordingly and all Makefile.am's that refer to NEED_LIBBOOST_THREAD.
-need_libboost_thread=0
+    AC_MSG_CHECKING(for boost::mutex)
-need_sunpro_workaround=0
+    CPPFLAGS_SAVES="$CPPFLAGS"
-AC_TRY_LINK([
+    LIBS_SAVES="$LIBS"
-#include <boost/thread.hpp>
+    CPPFLAGS="$BOOST_INCLUDES $CPPFLAGS $MULTITHREADING_FLAG"
-],[
+    need_libboost_thread=0
-boost::mutex m;
+    need_sunpro_workaround=0
-],
+    AC_TRY_LINK([
-	[ AC_MSG_RESULT(yes (without libboost_thread)) ],
+    #include <boost/thread.hpp>
-
+    ],[
-    # there is one specific problem with SunStudio 5.10
+    boost::mutex m;
-    # where including boost/thread causes a compilation failure
+    ],
-    # There is a workaround in boost but it checks the version not being 5.10
+        [ AC_MSG_RESULT(yes (without libboost_thread)) ],
-    # This will probably be fixed in the future, in which case this
+        # there is one specific problem with SunStudio 5.10
-    # is only a temporary workaround
+        # where including boost/thread causes a compilation failure
-    [ AC_TRY_LINK([
+        # There is a workaround in boost but it checks the version not being 5.10
-#if defined(__SUNPRO_CC) && __SUNPRO_CC == 0x5100
+        # This will probably be fixed in the future, in which case this
-#undef __SUNPRO_CC
+        # is only a temporary workaround
-#define __SUNPRO_CC 0x5090
+        [ AC_TRY_LINK([
-#endif
+    #if defined(__SUNPRO_CC) && __SUNPRO_CC == 0x5100
-#include <boost/thread.hpp>
+    #undef __SUNPRO_CC
-],[
+    #define __SUNPRO_CC 0x5090
-boost::mutex m;
+    #endif
-],
+    #include <boost/thread.hpp>
-    [ AC_MSG_RESULT(yes (with SUNOS workaround))
+    ],[
-      need_sunpro_workaround=1 ],
+    boost::mutex m;
-    	[ LIBS=" $LIBS -lboost_thread"
+    ],
-	  AC_TRY_LINK([
+        [ AC_MSG_RESULT(yes (with SUNOS workaround))
-#include <boost/thread.hpp>
+          need_sunpro_workaround=1 ],
-],[
+            [ LIBS=" $LIBS -lboost_thread"
-boost::mutex m;
+          AC_TRY_LINK([
-],
+    #include <boost/thread.hpp>
-		  [ AC_MSG_RESULT(yes (with libboost_thread))
+    ],[
-		    need_libboost_thread=1 ],
+    boost::mutex m;
-		  [ AC_MSG_RESULT(no)
+    ],
-		    AC_MSG_ERROR([boost::mutex cannot be linked in this build environment.
+              [ AC_MSG_RESULT(yes (with libboost_thread))
-Perhaps you are using an older version of Boost that requires libboost_thread for the mutex support, which does not appear to be available.
+                need_libboost_thread=1 ],
-You may want to check the availability of the library or to upgrade Boost.])
+              [ AC_MSG_RESULT(no)
-   		  ])])])
+                AC_MSG_ERROR([boost::mutex cannot be linked in this build environment.
-CPPFLAGS="$CPPFLAGS_SAVES"
+    Perhaps you are using an older version of Boost that requires libboost_thread for the mutex support, which does not appear to be available.
-LIBS="$LIBS_SAVES"
+    You may want to check the availability of the library or to upgrade Boost.])
-AM_CONDITIONAL(NEED_LIBBOOST_THREAD, test $need_libboost_thread = 1)
+              ])])])
-if test $need_sunpro_workaround = 1; then
+    CPPFLAGS="$CPPFLAGS_SAVES"
-    AC_DEFINE([NEED_SUNPRO_WORKAROUND], [], [Need boost sunstudio workaround])
+    LIBS="$LIBS_SAVES"
+    AM_CONDITIONAL(NEED_LIBBOOST_THREAD, test $need_libboost_thread = 1)
+    if test $need_sunpro_workaround = 1; then
+        AC_DEFINE([NEED_SUNPRO_WORKAROUND], [], [Need boost sunstudio workaround])
+    fi
+else
+    AM_CONDITIONAL(NEED_LIBBOOST_THREAD, test "${use_boost_threads}" = "yes")
 fi
 
 
+
 #
 # Check availability of gtest, which will be used for unit tests.
 #
@@ -616,6 +583,12 @@ if test "X$ac_cv_have_devpoll" = "Xyes" -a "X$GXX" = "Xyes"; then
 	CPPFLAGS="$CPPFLAGS -DASIO_DISABLE_DEV_POLL=1"
 fi
 
 
+#
+# Perl is optional; it is used only by some of the system test scripts.
+#
+AC_PATH_PROGS(PERL, perl5 perl)
+AC_SUBST(PERL)
+
 AC_ARG_ENABLE(man, [AC_HELP_STRING([--enable-man],
   [regenerate man pages [default=no]])], enable_man=yes, enable_man=no)
 
 
@@ -714,6 +687,10 @@ AC_CONFIG_FILES([Makefile
                  src/lib/nsas/tests/Makefile
                  src/lib/cache/Makefile
                  src/lib/cache/tests/Makefile
+                 src/lib/server_common/Makefile
+                 src/lib/server_common/tests/Makefile
+                 tests/Makefile
+                 tests/system/Makefile
                ])
 AC_OUTPUT([doc/version.ent
            src/bin/cfgmgr/b10-cfgmgr.py
@@ -743,9 +720,10 @@ AC_OUTPUT([doc/version.ent
            src/bin/stats/tests/stats_test
            src/bin/bind10/bind10.py
            src/bin/bind10/tests/bind10_test
+           src/bin/bind10/tests/bind10_test.py
            src/bin/bind10/run_bind10.sh
            src/bin/bindctl/run_bindctl.sh
-           src/bin/bindctl/bindctl-source.py
+           src/bin/bindctl/bindctl_main.py
            src/bin/bindctl/tests/bindctl_test
            src/bin/loadzone/run_loadzone.sh
            src/bin/loadzone/tests/correct/correct_test.sh
@@ -770,6 +748,10 @@ AC_OUTPUT([doc/version.ent
            src/lib/cc/session_config.h.pre
            src/lib/cc/tests/session_unittests_config.h
            src/lib/log/tests/run_time_init_test.sh
+           tests/system/conf.sh
+           tests/system/glue/setup.sh
+           tests/system/glue/nsx1/b10-config.db
+           tests/system/bindctl/nsx1/b10-config.db.template
           ], [
            chmod +x src/bin/cmdctl/run_b10-cmdctl.sh
            chmod +x src/bin/xfrin/run_b10-xfrin.sh
@@ -794,6 +776,7 @@ AC_OUTPUT([doc/version.ent
            chmod +x src/lib/dns/gen-rdatacode.py
            chmod +x src/lib/dns/tests/testdata/gen-wiredata.py
            chmod +x src/lib/log/tests/run_time_init_test.sh
+           chmod +x tests/system/conf.sh
           ])
 AC_OUTPUT
 
 
@@ -821,8 +804,6 @@ dnl includes too
                  ${PYTHON_LDFLAGS}
                  ${PYTHON_LIB}
   Boost:         ${BOOST_INCLUDES}
-  log4cxx:       ${LOG4CXX_INCLUDES}
-                 ${LOG4CXX_LDFLAGS}
   SQLite:        $SQLITE_CFLAGS
                  $SQLITE_LIBS
 
 

+ 1 - 1
doc/Doxyfile

@@ -568,7 +568,7 @@ WARN_LOGFILE           =
 # directories like "/usr/src/myproject". Separate the files or directories
 # with spaces.
 
 
-INPUT                  = ../src/lib/cc ../src/lib/config ../src/lib/dns ../src/lib/exceptions ../src/lib/datasrc ../src/bin/auth ../src/bin/resolver ../src/lib/bench ../src/lib/log ../src/lib/asiolink/ ../src/lib/nsas ../src/lib/testutils ../src/lib/cache
+INPUT                  = ../src/lib/cc ../src/lib/config ../src/lib/dns ../src/lib/exceptions ../src/lib/datasrc ../src/bin/auth ../src/bin/resolver ../src/lib/bench ../src/lib/log ../src/lib/asiolink/ ../src/lib/nsas ../src/lib/testutils ../src/lib/cache ../src/lib/server_common/
 
 
 # This tag can be used to specify the character encoding of the source files
 # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is

File diff suppressed because it is too large
+ 44 - 38
doc/guide/bind10-guide.html


+ 28 - 38
doc/guide/bind10-guide.xml

@@ -52,7 +52,8 @@
     <note>
       <para>
         BIND 10 provides a EDNS0- and DNSSEC-capable
-        authoritative DNS server and a forwarding DNS server.
+        authoritative DNS server and a caching recursive name server
+        which also provides forwarding.
       </para>
     </note>
 
 
@@ -79,9 +80,7 @@
       </para>
 
 
       <note><para>
-	For this development prototype release, the only supported
+	The authoritative server requires SQLite 3.3.9 or newer.
-	data source backend is SQLite3. The authoritative server
-	requires SQLite 3.3.9 or newer.
 	The <command>b10-xfrin</command>, <command>b10-xfrout</command>,
 	and <command>b10-zonemgr</command> modules require the
 	libpython3 library and the Python _sqlite3.so module.
@@ -337,14 +336,6 @@ var/
         </simpara>
       </note>
 
 
-      <note>
-        <simpara>
-          The development prototype of the b10-auth server listens on
-          0.0.0.0 (all interfaces) port 5300. (This is not the standard
-          domain service port.)
-        </simpara>
-      </note>
-
       <para>
         To quickly get started with BIND 10, follow these steps.
       </para>
@@ -398,7 +389,7 @@ var/
         <listitem>
 
 
          <para>Test it; for example:
-            <screen>$ <userinput>dig @127.0.0.1 -p 5300 -c CH -t TXT authors.bind</userinput></screen>
+            <screen>$ <userinput>dig @127.0.0.1 -c CH -t TXT authors.bind</userinput></screen>
          </para>
         </listitem>
 
 
@@ -1045,11 +1036,6 @@ TODO
       process.
     </para>
 
 
-    <note><simpara>
-      This development prototype release listens on all interfaces
-      and the non-standard port 5300.
-    </simpara></note>
-
     <section>
       <title>Server Configurations</title>
 
 
@@ -1108,9 +1094,10 @@ This may be a temporary setting until then.
 
 
       <note><para>
         For the development prototype release, <command>b10-auth</command>
-        only supports the SQLite3 data source backend.
+        supports a SQLite3 data source backend and in-memory data source
+        backend.
         Upcoming versions will be able to use multiple different
-        data sources, such as MySQL, Berkeley DB, or in-memory DB.
+        data sources, such as MySQL and Berkeley DB.
       </para></note>
 
 
 
 
@@ -1309,12 +1296,6 @@ what is XfroutClient xfr_client??
 -->
     </para>
 
 
-    <note><simpara>
-      The current version only provides a forwarding DNS server.
-      It does not cache and does not iterate to find answers.
-      It simply forwards the query on to another full resolver.
-    </simpara></note>
-
     <para>
       The main <command>bind10</command> process can be configured
       to select to run either the authoritative or resolver.
@@ -1331,15 +1312,26 @@ what is XfroutClient xfr_client??
 
 
     </para>
 
 
-<!-- TODO: -->
+    <para>
-    <note><simpara>
+       The master <command>bind10</command> will stop and start
-       In the current version, the master <command>bind10</command>
+       the desired services.
-       process must be stopped and restarted to start up the resolver.
+    </para>
-    </simpara></note>
 
 
     <para>
-      Then the upstream address and port must be configured to
+      The resolver also needs to be configured to listen on an address
-      forward queries to, such as:
+      and port:
+
+      <screen>
+&gt; <userinput>config set Resolver/listen_on [{ "address": "127.0.0.1", "port": 53 }]</userinput>
+&gt; <userinput>config commit</userinput>
+</screen>
+    </para>
+
+<!-- TODO: later the above will have some defaults -->
+
+    <para>
+      To enable forwarding, the upstream address and port must be
+      configured to forward queries to, such as:
 
 
       <screen>
 &gt; <userinput>config set Resolver/forward_addresses [{ "address": "<replaceable>192.168.1.1</replaceable>", "port": 53 }]</userinput>
@@ -1351,17 +1343,15 @@ what is XfroutClient xfr_client??
     </para>
 
 
     <para>
-      The resolver also needs to be configured to listen on an address
+      Normal iterative name service can be re-enabled by clearing the
-      and port:
+      forwarding address(es); for example:
 
 
       <screen>
-&gt; <userinput>config set Resolver/listen_on [{ "address": "127.0.0.1", "port": 53 }]</userinput>
+&gt; <userinput>config set Resolver/forward_addresses []</userinput>
 &gt; <userinput>config commit</userinput>
 </screen>
     </para>
 
 
-<!-- TODO: later the above will have some defaults -->
-
 <!-- TODO: later try this
 
 
 > config set Resolver/forward_addresses[0]/address "192.168.8.8"

+ 1 - 1
ext/asio/asio/detail/epoll_reactor.hpp

@@ -207,7 +207,7 @@ public:
   // Cancel all operations associated with the given descriptor. The
   // handlers associated with the descriptor will be invoked with the
   // operation_aborted error.
-  void cancel_ops(socket_type descriptor, per_descriptor_data& descriptor_data)
+  void cancel_ops(socket_type, per_descriptor_data& descriptor_data)
   {
     mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
 
 

+ 1 - 1
ext/asio/asio/detail/kqueue_reactor.hpp

@@ -205,7 +205,7 @@ public:
   // Cancel all operations associated with the given descriptor. The
   // handlers associated with the descriptor will be invoked with the
   // operation_aborted error.
-  void cancel_ops(socket_type descriptor, per_descriptor_data& descriptor_data)
+  void cancel_ops(socket_type , per_descriptor_data& descriptor_data)
   {
     mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
 
 

+ 1 - 1
ext/asio/asio/detail/null_thread.hpp

@@ -40,7 +40,7 @@ class null_thread
 public:
   // Constructor.
   template <typename Function>
-  null_thread(Function f)
+  null_thread(Function )
   {
     asio::system_error e(
         asio::error::operation_not_supported, "thread");

+ 3 - 1
src/bin/auth/Makefile.am

@@ -50,11 +50,13 @@ b10_auth_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
 b10_auth_LDADD += $(top_builddir)/src/lib/cc/libcc.la
 b10_auth_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 b10_auth_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
+b10_auth_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
 b10_auth_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
+b10_auth_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
 b10_auth_LDADD += $(SQLITE_LIBS)
 
 
 # TODO: config.h.in is wrong because doesn't honor pkgdatadir
 # and can't use @datadir@ because doesn't expand default ${prefix}
-b10_authdir = $(DESTDIR)$(pkgdatadir)
+b10_authdir = $(pkgdatadir)
 b10_auth_DATA = auth.spec
 
 

+ 70 - 36
src/bin/auth/auth.spec.pre.in

@@ -12,51 +12,85 @@
         "item_type": "list",
         "item_type": "list",
         "item_optional": true,
         "item_optional": true,
         "item_default": [],
         "item_default": [],
-	"list_item_spec": {
+        "list_item_spec":
-          "item_name": "list_element",
+        { "item_name": "list_element",
           "item_type": "map",
           "item_type": "map",
           "item_optional": false,
           "item_optional": false,
           "item_default": {},
           "item_default": {},
-	  "map_item_spec": [
+          "map_item_spec": [
-	    { "item_name": "type",
+          { "item_name": "type",
-	      "item_type": "string",
+            "item_type": "string",
-	      "item_optional": false,
+            "item_optional": false,
-	      "item_default": ""
+            "item_default": ""
-	    },
+          },
-	    { "item_name": "class",
+          { "item_name": "class",
-	      "item_type": "string",
+            "item_type": "string",
-	      "item_optional": false,
+            "item_optional": false,
-	      "item_default": "IN"
+            "item_default": "IN"
-	    },
+          },
-	    { "item_name": "zones",
+          { "item_name": "zones",
-	      "item_type": "list",
+            "item_type": "list",
-	      "item_optional": false,
+            "item_optional": false,
-	      "item_default": [],
+            "item_default": [],
-	      "list_item_spec": {
+            "list_item_spec":
-	        "item_name": "list_element",
+            { "item_name": "list_element",
-	        "item_type": "map",
+              "item_type": "map",
-	        "item_optional": true,
+              "item_optional": true,
-	        "map_item_spec": [
+              "item_default": { "origin": "", "file": "" },
-		  { "item_name": "origin",
+              "map_item_spec": [
-		    "item_type": "string",
+              { "item_name": "origin",
-		    "item_optional": false,
+                "item_type": "string",
-		    "item_default": ""
+                "item_optional": false,
-		  },
+                "item_default": ""
-		  { "item_name": "file",
+              },
-		    "item_type": "string",
+              { "item_name": "file",
-		    "item_optional": false,
+                "item_type": "string",
-		    "item_default": ""
+                "item_optional": false,
-		  }
+                "item_default": ""
-		]
+              }]
-	      }
+            }
-	    }
+          }]
-	  ]
         }
       },
       { "item_name": "statistics-interval",
         "item_type": "integer",
         "item_type": "integer",
         "item_optional": true,
         "item_optional": true,
         "item_default": 60
         "item_default": 60
+      },
+      {
+        "item_name": "listen_on",
+        "item_type": "list",
+        "item_optional": false,
+        "item_default": [
+          {
+            "address": "::",
+            "port": 53
+          },
+          {
+            "address": "0.0.0.0",
+            "port": 53
+          }
+        ],
+        "list_item_spec": {
+          "item_name": "address",
+          "item_type": "map",
+          "item_optional": false,
+          "item_default": {},
+          "map_item_spec": [
+            {
+              "item_name": "address",
+              "item_type": "string",
+              "item_optional": false,
+              "item_default": "::1"
+            },
+            {
+              "item_name": "port",
+              "item_type": "integer",
+              "item_optional": false,
+              "item_default": 53
+            }
+          ]
+        }
       }
     ],
     "commands": [
     "commands": [

+ 19 - 0
src/bin/auth/auth_srv.cc

@@ -69,6 +69,7 @@ using namespace isc::data;
 using namespace isc::config;
 using namespace isc::xfr;
 using namespace asiolink;
+using namespace isc::server_common::portconfig;
 
 
 class AuthSrvImpl {
 private:
@@ -109,6 +110,9 @@ public:
 
 
     /// Query counters for statistics
     AuthCounters counters_;
+
+    /// Addresses we listen on
+    AddressList listen_addresses_;
 private:
     std::string db_file_;
 
 
@@ -750,3 +754,18 @@ uint64_t
 AuthSrv::getCounter(const AuthCounters::CounterType type) const {
     return (impl_->counters_.getCounter(type));
 }
+
+const AddressList&
+AuthSrv::getListenAddresses() const {
+    return (impl_->listen_addresses_);
+}
+
+void
+AuthSrv::setListenAddresses(const AddressList& addresses) {
+    installListenAddresses(addresses, impl_->listen_addresses_, *dnss_);
+}
+
+void
+AuthSrv::setDNSService(asiolink::DNSService& dnss) {
+    dnss_ = &dnss;
+}

+ 14 - 0
src/bin/auth/auth_srv.h

@@ -25,6 +25,7 @@
 #include <config/ccsession.h>
 
 
 #include <asiolink/asiolink.h>
+#include <server_common/portconfig.h>
 #include <auth/statistics.h>
 
 
 namespace isc {
@@ -353,11 +354,24 @@ public:
     /// \return the value of the counter.
     uint64_t getCounter(const AuthCounters::CounterType type) const;
 
 
+    /**
+     * \brief Set and get the addresses we listen on.
+     */
+    void setListenAddresses(const isc::server_common::portconfig::AddressList&
+                            addresses);
+    const isc::server_common::portconfig::AddressList& getListenAddresses()
+        const;
+
+    /// \brief Assign an ASIO DNS Service queue to this Auth object
+    void setDNSService(asiolink::DNSService& dnss);
+
+
 private:
     AuthSrvImpl* impl_;
     asiolink::SimpleCallback* checkin_;
     asiolink::DNSLookup* dns_lookup_;
     asiolink::DNSAnswer* dns_answer_;
+    asiolink::DNSService* dnss_;
 };
 
 
 #endif // __AUTH_SRV_H
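
To show how a caller might drive the new interface (a hypothetical fragment, assuming AddressList is a sequence of (address string, port) pairs as declared in server_common/portconfig.h; only the setters and getter from this diff are used):

// Hypothetical caller-side sketch of the setters added above; not code from
// this commit.  configureListening() is an invented name.
#include <string>
#include <utility>

#include <auth/auth_srv.h>
#include <server_common/portconfig.h>

void
configureListening(AuthSrv& server, asiolink::DNSService& dnss) {
    // The server needs its DNSService before it can (re)bind sockets.
    server.setDNSService(dnss);

    // Listen on the IPv6 and IPv4 wildcard addresses on port 53, matching
    // the new listen_on default in auth.spec.
    isc::server_common::portconfig::AddressList addresses;
    addresses.push_back(std::make_pair(std::string("::"), 53));
    addresses.push_back(std::make_pair(std::string("0.0.0.0"), 53));
    server.setListenAddresses(addresses);
}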

+ 15 - 47
src/bin/auth/b10-auth.8

@@ -2,12 +2,12 @@
 .\"     Title: b10-auth
 .\"     Title: b10-auth
 .\"    Author: [FIXME: author] [see http://docbook.sf.net/el/author]
 .\"    Author: [FIXME: author] [see http://docbook.sf.net/el/author]
 .\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
 .\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\"      Date: January 19, 2011
+.\"      Date: March 8, 2011
 .\"    Manual: BIND10
 .\"    Manual: BIND10
 .\"    Source: BIND10
 .\"    Source: BIND10
 .\"  Language: English
 .\"  Language: English
 .\"
 .\"
-.TH "B10\-AUTH" "8" "January 19, 2011" "BIND10" "BIND10"
+.TH "B10\-AUTH" "8" "March 8, 2011" "BIND10" "BIND10"
 .\" -----------------------------------------------------------------
 .\" -----------------------------------------------------------------
 .\" * set default formatting
 .\" * set default formatting
 .\" -----------------------------------------------------------------
 .\" -----------------------------------------------------------------
@@ -22,7 +22,7 @@
 b10-auth \- Authoritative DNS server
 .SH "SYNOPSIS"
 .HP \w'\fBb10\-auth\fR\ 'u
-\fBb10\-auth\fR [\fB\-4\fR] [\fB\-6\fR] [\fB\-a\ \fR\fB\fIaddress\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fInumber\fR\fR] [\fB\-u\ \fR\fB\fIusername\fR\fR] [\fB\-v\fR]
+\fBb10\-auth\fR [\fB\-n\fR] [\fB\-u\ \fR\fB\fIusername\fR\fR] [\fB\-v\fR]
 .SH "DESCRIPTION"
 .PP
 The
@@ -42,55 +42,11 @@ It receives its configurations from
 .PP
 The arguments are as follows:
 .PP
-\fB\-4\fR
-.RS 4
-Enables IPv4 only mode\&. This switch may not be used with
-\fB\-6\fR
-nor
-\fB\-a\fR\&. By default, it listens on both IPv4 and IPv6 (if capable)\&.
-.RE
-.PP
-\fB\-6\fR
-.RS 4
-Enables IPv6 only mode\&. This switch may not be used with
-\fB\-4\fR
-nor
-\fB\-a\fR\&. By default, it listens on both IPv4 and IPv6 (if capable)\&.
-.RE
-.PP
-\fB\-a \fR\fB\fIaddress\fR\fR
-.RS 4
-The IPv4 or IPv6 address to listen on\&. This switch may not be used with
-\fB\-4\fR
-nor
-\fB\-6\fR\&. The default is to listen on all addresses\&. (This is a short term workaround\&. This argument may change\&.)
-.RE
-.PP
 \fB\-n\fR
 .RS 4
 Do not cache answers in memory\&. The default is to use the cache for faster responses\&. The cache keeps the most recent 30,000 answers (positive and negative) in memory for 30 seconds (instead of querying the data source, such as SQLite3 database, each time)\&.
 .RE
 .PP
-\fB\-p \fR\fB\fInumber\fR\fR
-.RS 4
-The port number it listens on\&. The default is 5300\&.
-.if n \{\
-.sp
-.\}
-.RS 4
-.it 1 an-trap
-.nr an-no-space-flag 1
-.nr an-break-flag 1
-.br
-.ps +1
-\fBNote\fR
-.ps -1
-.br
-The Y1 prototype runs on all interfaces and on this nonstandard port\&.
-.sp .5v
-.RE
-.RE
-.PP
 \fB\-u \fR\fB\fIusername\fR\fR
 .RS 4
 The user name of the
@@ -114,6 +70,18 @@ defines the path to the SQLite3 zone file when using the sqlite datasource\&. Th
 /usr/local/var/bind10\-devel/zone\&.sqlite3\&.
 .PP
 
 
+\fIlisten_on\fR
+is a list of addresses and ports for
+\fBb10\-auth\fR
+to listen on\&. The list items are the
+\fIaddress\fR
+string and
+\fIport\fR
+number\&. By default,
+\fBb10\-auth\fR
+listens on port 53 on the IPv6 (::) and IPv4 (0\&.0\&.0\&.0) wildcard addresses\&.
+.PP
+
 \fIdatasources\fR
 configures data sources\&. The list items include:
 \fItype\fR

+ 10 - 48
src/bin/auth/b10-auth.xml

@@ -20,7 +20,7 @@
 <refentry>
 
 
   <refentryinfo>
-    <date>January 19, 2011</date>
+    <date>March 8, 2011</date>
   </refentryinfo>
 
 
   <refmeta>
@@ -44,11 +44,7 @@
   <refsynopsisdiv>
     <cmdsynopsis>
       <command>b10-auth</command>
-      <arg><option>-4</option></arg>
-      <arg><option>-6</option></arg>
-      <arg><option>-a <replaceable>address</replaceable></option></arg>
       <arg><option>-n</option></arg>
-      <arg><option>-p <replaceable>number</replaceable></option></arg>
       <arg><option>-u <replaceable>username</replaceable></option></arg>
       <arg><option>-v</option></arg>
     </cmdsynopsis>
@@ -85,39 +81,6 @@
 
 
     <variablelist>
       <varlistentry>
-        <term><option>-4</option></term>
-        <listitem><para>
-          Enables IPv4 only mode.
-          This switch may not be used with <option>-6</option> nor
-          <option>-a</option>.
-          By default, it listens on both IPv4 and IPv6 (if capable).
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry>
-        <term><option>-6</option></term>
-        <listitem><para>
-          Enables IPv6 only mode.
-          This switch may not be used with <option>-4</option> nor
-          <option>-a</option>.
-          By default, it listens on both IPv4 and IPv6 (if capable).
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry>
-        <term><option>-a <replaceable>address</replaceable></option></term>
-
-        <listitem>
-          <para>The IPv4 or IPv6 address to listen on.
-            This switch may not be used with <option>-4</option> nor
-            <option>-6</option>.
-            The default is to listen on all addresses.
-            (This is a short term workaround. This argument may change.)   
-          </para>                      
-         </listitem>
-      </varlistentry>
-
-      <varlistentry>
         <term><option>-n</option></term>
         <listitem><para>
           Do not cache answers in memory.
@@ -130,16 +93,6 @@
       </varlistentry>
 
 
       <varlistentry>
-        <term><option>-p <replaceable>number</replaceable></option></term>
-        <listitem><para>
-          The port number it listens on.
-          The default is 5300.</para>
-	  <note><simpara>This prototype runs on all interfaces
-	  and on this nonstandard port.</simpara></note>
-        </listitem>
-      </varlistentry>
-
-      <varlistentry>
         <term><option>-u <replaceable>username</replaceable></option></term>
         <listitem>
 	  <para>
@@ -179,6 +132,15 @@
     </para>
 
 
     <para>
+      <varname>listen_on</varname> is a list of addresses and ports for
+      <command>b10-auth</command> to listen on.
+      The list items are the <varname>address</varname> string
+      and <varname>port</varname> number.
+      By default, <command>b10-auth</command> listens on port 53
+      on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
+    </para>
+
+    <para>
       <varname>datasources</varname> configures data sources.
       The list items include:
       <varname>type</varname> to optionally choose the data source type

+ 2 - 0
src/bin/auth/benchmarks/Makefile.am

@@ -21,5 +21,7 @@ query_bench_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
 query_bench_LDADD += $(top_builddir)/src/lib/cc/libcc.la
 query_bench_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
 query_bench_LDADD += $(top_builddir)/src/lib/log/liblog.la
+query_bench_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
 query_bench_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
+query_bench_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
 query_bench_LDADD += $(SQLITE_LIBS)

+ 1 - 1
src/bin/auth/benchmarks/query_bench.cc

@@ -77,7 +77,7 @@ protected:
         dummy_socket(IOSocket::getDummyUDPSocket()),
         dummy_endpoint(IOEndpointPtr(IOEndpoint::create(IPPROTO_UDP,
                                                         IOAddress("192.0.2.1"),
-                                                        5300)))
+                                                        53210)))
     {}
 public:
     unsigned int run() {

+ 60 - 1
src/bin/auth/config.cc

@@ -32,11 +32,14 @@
 #include <auth/config.h>
 #include <auth/common.h>
 
 
+#include <server_common/portconfig.h>
+
 using namespace std;
 using boost::shared_ptr;
 using namespace isc::dns;
 using namespace isc::data;
 using namespace isc::datasrc;
+using namespace isc::server_common::portconfig;
 
 
 namespace {
 // Forward declaration
@@ -210,6 +213,60 @@ public:
     }
 };
 
+/**
+ * \brief Configuration parser for listen_on.
+ *
+ * It parses and sets the listening addresses of the server.
+ *
+ * It acts in an unusual way. Since actually binding (changing) the sockets
+ * is an operation that is expected to throw often, it shouldn't happen
+ * in commit. Therefore we do it in build. But if the config is then not
+ * committed, we would have it wrong. So we store the old addresses and if
+ * commit is not called before destruction of the object, we return the
+ * old addresses (which is the same kind of dangerous operation, but it is
+ * expected that if we just managed to bind some and had the old ones bound
+ * before, it should work).
+ *
+ * We might do something better in the future (like open only the ports that
+ * are extra, put them into use in commit and close the old ones), but that's
+ * left out for now.
+ */
+class ListenAddressConfig : public AuthConfigParser {
+public:
+    ListenAddressConfig(AuthSrv& server) :
+        server_(server)
+    { }
+    ~ ListenAddressConfig() {
+        if (rollbackAddresses_.get() != NULL) {
+            server_.setListenAddresses(*rollbackAddresses_);
+        }
+    }
+private:
+    typedef auto_ptr<AddressList> AddrListPtr;
+public:
+    virtual void build(ConstElementPtr config) {
+        AddressList newAddresses = parseAddresses(config, "listen_on");
+        AddrListPtr old(new AddressList(server_.getListenAddresses()));
+        server_.setListenAddresses(newAddresses);
+        /*
+         * Set the rollback addresses only after successful setting of the
+         * new addresses, so we don't try to rollback if the setup is
+         * unsuccessful (the above can easily throw).
+         */
+        rollbackAddresses_ = old;
+    }
+    virtual void commit() {
+        rollbackAddresses_.release();
+    }
+private:
+    AuthSrv& server_;
+    /**
+     * This is the old address list, if we expect to roll back. When we commit,
+     * this is set to NULL.
+     */
+    AddrListPtr rollbackAddresses_;
+};
+
 // This is a generalized version of create function that can create
 // an AuthConfigParser object for "internal" use.
 AuthConfigParser*
@@ -226,6 +283,8 @@ createAuthConfigParser(AuthSrv& server, const std::string& config_id,
         return (new StatisticsIntervalConfig(server));
     } else if (internal && config_id == "datasources/memory") {
         return (new MemoryDatasourceConfig(server));
+    } else if (config_id == "listen_on") {
+        return (new ListenAddressConfig(server));
     } else if (config_id == "_commit_throw") {
         // This is for testing purpose only and should not appear in the
         // actual configuration syntax.  While this could crash the caller
@@ -271,7 +330,7 @@ configureAuthServer(AuthSrv& server, ConstElementPtr config_set) {
             parsers.push_back(parser);
         }
     } catch (const AuthConfigError& ex) {
-        throw ex;                  // simply rethrowing it
+        throw;                  // simply rethrowing it
     } catch (const isc::Exception& ex) {
         isc_throw(AuthConfigError, "Server configuration failed: " <<
                   ex.what());
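
The build/commit/rollback behaviour described in the comment above can be sketched outside C++ as well. This is only a Python analogy; the class, method and attribute names here (ApplyInBuildParser, set_listen_addresses, abort) are invented for illustration and are not the actual AuthConfigParser interface:

    class ApplyInBuildParser:
        # Analogy of ListenAddressConfig: apply the new addresses in build(),
        # remember the old ones, and forget them once commit() is called.
        def __init__(self, server):
            self.server = server
            self.rollback = None

        def build(self, new_addresses):
            old = list(self.server.listen_addresses)
            self.server.set_listen_addresses(new_addresses)  # may raise
            # Only remember the old list after the new one was applied, so a
            # failed setup does not trigger a rollback attempt.
            self.rollback = old

        def commit(self):
            self.rollback = None

        def abort(self):
            # Mirrors the destructor in the C++ code: restore the previous
            # addresses if commit() was never reached.
            if self.rollback is not None:
                self.server.set_listen_addresses(self.rollback)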

+ 14 - 59
src/bin/auth/main.cc

@@ -42,6 +42,7 @@
 #include <auth/change_user.h>
 #include <auth/auth_srv.h>
 #include <asiolink/asiolink.h>
+#include <log/dummylog.h>
 
 using namespace std;
 using namespace isc::data;
@@ -55,9 +56,6 @@ namespace {
 
 bool verbose_mode = false;
 
-// Default port current 5300 for testing purposes
-const char* DNSPORT = "5300";
-
 /* need global var for config/command handlers.
  * todo: turn this around, and put handlers in the authserver
  * class itself? */
@@ -76,13 +74,8 @@ my_command_handler(const string& command, ConstElementPtr args) {
 
 void
 usage() {
-    cerr << "Usage:  b10-auth [-a address] [-p port] [-u user] [-4|-6] [-nv]"
+    cerr << "Usage:  b10-auth [-u user] [-nv]"
-         << endl;
-    cerr << "\t-a: specify the address to listen on (default: all) " << endl;
-    cerr << "\t-p: specify the port to listen on (default: " << DNSPORT << ")"
          << endl;
-    cerr << "\t-4: listen on all IPv4 addresses (incompatible with -a)" << endl;
-    cerr << "\t-6: listen on all IPv6 addresses (incompatible with -a)" << endl;
     cerr << "\t-n: do not cache answers in memory" << endl;
     cerr << "\t-u: change process UID to the specified user" << endl;
     cerr << "\t-v: verbose output" << endl;
@@ -93,38 +86,20 @@ usage() {
 int
 main(int argc, char* argv[]) {
     int ch;
-    const char* port = DNSPORT;
-    const char* address = NULL;
     const char* uid = NULL;
-    bool use_ipv4 = true, use_ipv6 = true, cache = true;
+    bool cache = true;
 
-    while ((ch = getopt(argc, argv, "46a:np:u:v")) != -1) {
+    while ((ch = getopt(argc, argv, ":nu:v")) != -1) {
         switch (ch) {
-        case '4':
-            // Note that -4 means "ipv4 only", we need to set "use_ipv6" here,
-            // not "use_ipv4".  We could use something like "ipv4_only", but
-            // we found the negatively named variable could confuse the code
-            // logic.
-            use_ipv6 = false;
-            break;
-        case '6':
-            // The same note as -4 applies.
-            use_ipv4 = false;
-            break;
         case 'n':
             cache = false;
             break;
-        case 'a':
-            address = optarg;
-            break;
-        case 'p':
-            port = optarg;
-            break;
         case 'u':
             uid = optarg;
             break;
         case 'v':
             verbose_mode = true;
+            isc::log::denabled = true;
             break;
         case '?':
         default:
@@ -136,18 +111,6 @@ main(int argc, char* argv[]) {
         usage();
     }
 
-    if (!use_ipv4 && !use_ipv6) {
-        cerr << "[b10-auth] Error: Cannot specify both -4 and -6 "
-             << "at the same time" << endl;
-        usage();
-    }
-
-    if ((!use_ipv4 || !use_ipv6) && address != NULL) {
-        cerr << "[b10-auth] Error: Cannot specify -4 or -6 "
-             << "at the same time as -a" << endl;
-        usage();
-    }
-
     int ret = 0;
 
     // XXX: we should eventually pass io_service here.
@@ -159,7 +122,13 @@
     ModuleCCSession* config_session = NULL;
     string xfrout_socket_path;
     if (getenv("B10_FROM_BUILD") != NULL) {
-        xfrout_socket_path = string(getenv("B10_FROM_BUILD")) + "/auth_xfrout_conn";
+        if (getenv("B10_FROM_SOURCE_LOCALSTATEDIR")) {
+            xfrout_socket_path = string("B10_FROM_SOURCE_LOCALSTATEDIR") +
+                "/auth_xfrout_conn";
+        } else {
+            xfrout_socket_path = string(getenv("B10_FROM_BUILD")) +
+                "/auth_xfrout_conn";
+        }
     } else {
         xfrout_socket_path = UNIX_SOCKET_FILE;
     }
@@ -182,21 +151,8 @@
         DNSLookup* lookup = auth_server->getDNSLookupProvider();
         DNSAnswer* answer = auth_server->getDNSAnswerProvider();
 
-        DNSService* dns_service;
+        DNSService dns_service(io_service, checkin, lookup, answer);
-        if (address != NULL) {
+        auth_server->setDNSService(dns_service);
-            // XXX: we can only specify at most one explicit address.
-            // This also means the server cannot run in the dual address
-            // family mode if explicit addresses need to be specified.
-            // We don't bother to fix this problem, however.  The -a option
-            // is a short term workaround until we support dynamic listening
-            // port allocation.
-            dns_service = new DNSService(io_service,  *port, *address,
-                                         checkin, lookup, answer);
-        } else {
-            dns_service = new DNSService(io_service, *port, use_ipv4,
-                                         use_ipv6, checkin, lookup,
-                                         answer);
-        }
         cout << "[b10-auth] DNSServices created." << endl;
         cout << "[b10-auth] DNSServices created." << endl;
 
 
         cc_session = new Session(io_service.get_io_service());
         cc_session = new Session(io_service.get_io_service());
@@ -237,7 +193,6 @@ main(int argc, char* argv[]) {
         cout << "[b10-auth] Server started." << endl;
         cout << "[b10-auth] Server started." << endl;
         io_service.run();
         io_service.run();
 
 
-        delete dns_service;
     } catch (const std::exception& ex) {
     } catch (const std::exception& ex) {
         cerr << "[b10-auth] Server failed: " << ex.what() << endl;
         cerr << "[b10-auth] Server failed: " << ex.what() << endl;
         ret = 1;
         ret = 1;

+ 2 - 0
src/bin/auth/tests/Makefile.am

@@ -45,6 +45,8 @@ run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
 run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
 run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
+run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
 endif
 
 noinst_PROGRAMS = $(TESTS)

+ 18 - 2
src/bin/auth/tests/auth_srv_unittest.cc

@@ -26,6 +26,8 @@
 #include <dns/rrttl.h>
 #include <dns/rdataclass.h>
 
+#include <server_common/portconfig.h>
+
 #include <datasrc/memory_datasrc.h>
 #include <auth/auth_srv.h>
 #include <auth/common.h>
@@ -34,6 +36,7 @@
 #include <dns/tests/unittest_util.h>
 #include <testutils/dnsmessage_test.h>
 #include <testutils/srv_test.h>
+#include <testutils/portconfig.h>
 
 using namespace std;
 using namespace isc::cc;
@@ -43,6 +46,7 @@ using namespace isc::data;
 using namespace isc::xfr;
 using namespace asiolink;
 using namespace isc::testutils;
+using namespace isc::server_common::portconfig;
 using isc::UnitTestUtil;
 
 namespace {
@@ -55,7 +59,12 @@ const char* const BADCONFIG_TESTDB =
 
 class AuthSrvTest : public SrvTestBase {
 protected:
-    AuthSrvTest() : server(true, xfrout), rrclass(RRClass::IN()) {
+    AuthSrvTest() :
+        dnss_(ios_, NULL, NULL, NULL),
+        server(true, xfrout),
+        rrclass(RRClass::IN())
+    {
+        server.setDNSService(dnss_);
         server.setXfrinSession(&notify_session);
         server.setStatisticsSession(&statistics_session);
     }
@@ -63,6 +72,8 @@ protected:
         server.processMessage(*io_message, parse_message, response_obuffer,
                               &dnsserv);
     }
+    IOService ios_;
+    DNSService dnss_;
     MockSession statistics_session;
     MockXfroutClient xfrout;
     AuthSrv server;
@@ -633,7 +644,7 @@ TEST_F(AuthSrvTest, queryCounterUnexpected) {
     // Modify the message.
     delete io_message;
     endpoint = IOEndpoint::create(IPPROTO_UDP,
-                                  IOAddress(DEFAULT_REMOTE_ADDRESS), 5300);
+                                  IOAddress(DEFAULT_REMOTE_ADDRESS), 53210);
     io_message = new IOMessage(request_renderer.getData(),
                                request_renderer.getLength(),
                                getDummyUnknownSocket(), *endpoint);
@@ -650,4 +661,9 @@ TEST_F(AuthSrvTest, stop) {
     // If/when the interval timer has finer granularity we'll probably add
     // our own tests here, so we keep this empty test case.
 }
+
+TEST_F(AuthSrvTest, listenAddresses) {
+    isc::testutils::portconfig::listenAddresses(server);
+}
+
 }

+ 21 - 1
src/bin/auth/tests/config_unittest.cc

@@ -30,6 +30,7 @@
 #include <auth/common.h>
 
 #include <testutils/mockups.h>
+#include <testutils/portconfig.h>
 
 using namespace isc::dns;
 using namespace isc::data;
@@ -39,7 +40,15 @@ using namespace asiolink;
 namespace {
 class AuthConfigTest : public ::testing::Test {
 protected:
-    AuthConfigTest() : rrclass(RRClass::IN()), server(true, xfrout) {}
+    AuthConfigTest() :
+        dnss_(ios_, NULL, NULL, NULL),
+        rrclass(RRClass::IN()),
+        server(true, xfrout)
+    {
+        server.setDNSService(dnss_);
+    }
+    IOService ios_;
+    DNSService dnss_;
     const RRClass rrclass;
     MockXfroutClient xfrout;
     AuthSrv server;
@@ -112,6 +121,17 @@ TEST_F(AuthConfigTest, exceptionFromCommit) {
                  FatalError);
 }
 
+// Test invalid address configs are rejected
+TEST_F(AuthConfigTest, invalidListenAddressConfig) {
+    // This currently passes simply because the config doesn't know listen_on
+    isc::testutils::portconfig::invalidListenAddressConfig(server);
+}
+
+// Try setting addresses through config
+TEST_F(AuthConfigTest, listenAddressConfig) {
+    isc::testutils::portconfig::listenAddressConfig(server);
+}
+
 class MemoryDatasrcConfigTest : public AuthConfigTest {
 protected:
     MemoryDatasrcConfigTest() :
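
The portconfig test helpers themselves are not part of this diff. As a very rough idea of the kind of data they would feed through the listen_on parser (the values and the missing-port example are my own guesses; only the address/port structure comes from the configuration description earlier in this commit):

    # Illustrative guesses only; the real checks live in isc::testutils::portconfig.
    valid_listen_on = [{"address": "127.0.0.1", "port": 5301}]
    invalid_listen_on = [{"address": "127.0.0.1"}]   # no "port" entry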

+ 1 - 1
src/bin/auth/tests/query_unittest.cc

@@ -201,7 +201,7 @@ MockZone::find(const Name& name, const RRType& type,
         // If not found but we have a target, fill it with all RRsets here
         if (!found_domain->second.empty() && target != NULL) {
             for (found_rrset = found_domain->second.begin();
-                 found_rrset != found_domain->second.end(); found_rrset++) {
+                 found_rrset != found_domain->second.end(); ++found_rrset) {
                 // Insert RRs under the domain name into target
                 target->addRRset(
                     boost::const_pointer_cast<RRset>(found_rrset->second));

+ 1 - 2
src/bin/bind10/Makefile.am

@@ -5,7 +5,7 @@ CLEANFILES = bind10 bind10.pyc
 
 pkglibexecdir = $(libexecdir)/@PACKAGE@
 
-bind10dir = $(DESTDIR)$(pkgdatadir)
+bind10dir = $(pkgdatadir)
 bind10_DATA = bob.spec
 EXTRA_DIST = bob.spec
 
@@ -19,7 +19,6 @@ bind10.8: bind10.xml
 
 endif
 
-# TODO: does this need $$(DESTDIR) also?
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
 bind10: bind10.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \

+ 4 - 33
src/bin/bind10/bind10.8

@@ -2,12 +2,12 @@
 .\"     Title: bind10
 .\"     Title: bind10
 .\"    Author: [see the "AUTHORS" section]
 .\"    Author: [see the "AUTHORS" section]
 .\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
 .\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\"      Date: July 29, 2010
+.\"      Date: February 22, 2011
 .\"    Manual: BIND10
 .\"    Manual: BIND10
 .\"    Source: BIND10
 .\"    Source: BIND10
 .\"  Language: English
 .\"  Language: English
 .\"
 .\"
-.TH "BIND10" "8" "July 29, 2010" "BIND10" "BIND10"
+.TH "BIND10" "8" "February 22, 2011" "BIND10" "BIND10"
 .\" -----------------------------------------------------------------
 .\" -----------------------------------------------------------------
 .\" * set default formatting
 .\" * set default formatting
 .\" -----------------------------------------------------------------
 .\" -----------------------------------------------------------------
@@ -22,7 +22,7 @@
 bind10 \- BIND 10 boss process
 .SH "SYNOPSIS"
 .HP \w'\fBbind10\fR\ 'u
-\fBbind10\fR [\fB\-a\ \fR\fB\fIaddress\fR\fR] [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fInumber\fR\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-\-address\ \fR\fB\fIaddress\fR\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-port\ \fR\fB\fInumber\fR\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-verbose\fR]
+\fBbind10\fR [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-verbose\fR]
 .SH "DESCRIPTION"
 .SH "DESCRIPTION"
 .PP
 .PP
 The
 The
@@ -32,13 +32,6 @@ daemon starts up other BIND 10 required daemons\&. It handles restarting of exit
 .PP
 .PP
 The arguments are as follows:
 The arguments are as follows:
 .PP
 .PP
-\fB\-a\fR \fIaddress\fR, \fB\-\-address\fR \fIaddress\fR
-.RS 4
-The IPv4 or IPv6 address for the
-\fBb10-auth\fR(8)
-daemon to listen on\&. The default is to listen on all addresses\&. (This is a short term workaround\&. This argument may change\&.)
-.RE
-.PP
 \fB\-m\fR \fIfile\fR, \fB\-\-msgq\-socket\-file\fR \fIfile\fR
 .RS 4
 The UNIX domain socket file for the
@@ -54,28 +47,6 @@ Disables the hot\-spot caching used by the
 daemon\&.
 .RE
 .PP
-\fB\-p\fR \fInumber\fR, \fB\-\-port\fR \fInumber\fR
-.RS 4
-The port number for the
-\fBb10-auth\fR(8)
-daemon to listen on\&. The default is 5300\&.
-.if n \{\
-.sp
-.\}
-.RS 4
-.it 1 an-trap
-.nr an-no-space-flag 1
-.nr an-break-flag 1
-.br
-.ps +1
-\fBNote\fR
-.ps -1
-.br
-The Y1 prototype release uses a non\-default port for domain service\&.
-.sp .5v
-.RE
-.RE
-.PP
 \fB\-u\fR \fIuser\fR, \fB\-\-user\fR \fIname\fR
 .RS 4
 The username for
@@ -125,5 +96,5 @@ The
 daemon was initially designed by Shane Kerr of ISC\&.
 .SH "COPYRIGHT"
 .br
-Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
+Copyright \(co 2011 Internet Systems Consortium, Inc. ("ISC")
 .br

+ 127 - 50
src/bin/bind10/bind10.py.in

@@ -72,7 +72,7 @@ isc.util.process.rename(sys.argv[0])
 # This is the version that gets displayed to the user.
 # The VERSION string consists of the module name, the module version
 # number, and the overall BIND 10 version number (set in configure.ac).
-VERSION = "bind10 20101129 (BIND 10 @PACKAGE_VERSION@)"
+VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
 
 # This is for bind10.boottime of stats module
 _BASETIME = time.gmtime()
@@ -194,8 +194,8 @@ class CChannelConnectError(Exception): pass
 class BoB:
     """Boss of BIND class."""
     
-    def __init__(self, msgq_socket_file=None, dns_port=5300, address=None,
+    def __init__(self, msgq_socket_file=None, nocache=False, verbose=False,
-                 nocache=False, verbose=False, setuid=None, username=None):
+    setuid=None, username=None):
         """
             Initialize the Boss of BIND. This is a singleton (only one can run).
         
@@ -203,29 +203,72 @@ class BoB:
             msgq process listens on.  If verbose is True, then the boss reports
             what it is doing.
         """
-        self.address = address
-        self.dns_port = dns_port
         self.cc_session = None
         self.ccs = None
         self.cfg_start_auth = True
         self.cfg_start_resolver = False
+        self.started_auth_family = False
+        self.started_resolver_family = False
         self.curproc = None
         self.dead_processes = {}
         self.msgq_socket_file = msgq_socket_file
         self.nocache = nocache
         self.processes = {}
+        self.expected_shutdowns = {}
         self.runnable = False
         self.uid = setuid
         self.username = username
         self.verbose = verbose
 
     def config_handler(self, new_config):
+        # If this is initial update, don't do anything now, leave it to startup
+        if not self.runnable:
+            return
+        # Now we declare a few functions used only internally here. Besides the
+        # benefit of not polluting the name space, they are closures, so we
+        # don't need to pass some variables
+        def start_stop(name, started, start, stop):
+            if not'start_' + name in new_config:
+                return
+            if new_config['start_' + name]:
+                if not started:
+                    if self.uid is not None:
+                        sys.stderr.write("[bind10] Starting " + name + " as " +
+                            "a user, not root. This might fail.\n")
+                    start()
+            else:
+                stop()
+        # These four functions are passed to start_stop (smells like functional
+        # programming little bit)
+        def resolver_on():
+            self.start_resolver(self.c_channel_env)
+            self.started_resolver_family = True
+        def resolver_off():
+            self.stop_resolver()
+            self.started_resolver_family = False
+        def auth_on():
+            self.start_auth(self.c_channel_env)
+            self.start_xfrout(self.c_channel_env)
+            self.start_xfrin(self.c_channel_env)
+            self.start_zonemgr(self.c_channel_env)
+            self.started_auth_family = True
+        def auth_off():
+            self.stop_zonemgr()
+            self.stop_xfrin()
+            self.stop_xfrout()
+            self.stop_auth()
+            self.started_auth_family = False
+
+        # The real code of the config handler function follows here
         if self.verbose:
             sys.stdout.write("[bind10] Handling new configuration: " +
                 str(new_config) + "\n")
+        start_stop('resolver', self.started_resolver_family, resolver_on,
+            resolver_off)
+        start_stop('auth', self.started_auth_family, auth_on, auth_off)
+
         answer = isc.config.ccsession.create_answer(0)
         return answer
-        # TODO
 
     def command_handler(self, command, args):
         if self.verbose:
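
To make the dispatch above concrete, here is a hypothetical reconfiguration value and its effect (the dict literal is mine; the component names follow the code above):

    # Assuming the auth family is currently running and the resolver is not,
    # this update would trigger auth_off() (stopping zonemgr, xfrin, xfrout
    # and auth) and resolver_on() (starting b10-resolver).
    new_config = {"start_auth": False, "start_resolver": True}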
@@ -314,8 +357,8 @@ class BoB:
             sys.stdout.write("\n")
             sys.stdout.write("\n")
 
 
     # The next few methods start the individual processes of BIND-10.  They
     # The next few methods start the individual processes of BIND-10.  They
-    # are called via start_all_process().  If any fail, an exception is raised
+    # are called via start_all_processes().  If any fail, an exception is
-    # which is caught by the caller of start_all_processes(); this kills
+    # raised which is caught by the caller of start_all_processes(); this kills
     # processes started up to that point before terminating the program.
     # processes started up to that point before terminating the program.
 
 
     def start_msgq(self, c_channel_env):
     def start_msgq(self, c_channel_env):
@@ -417,9 +460,6 @@ class BoB:
             Start the Authoritative server
         """
         authargs = ['b10-auth']
-        authargs += ['-p', str(self.dns_port)]
-        if self.address:
-            authargs += ['-a', str(self.address)]
         if self.nocache:
             authargs += ['-n']
         if self.uid:
@@ -428,8 +468,7 @@ class BoB:
             authargs += ['-v']
 
         # ... and start
-        self.start_process("b10-auth", authargs, c_channel_env,
+        self.start_process("b10-auth", authargs, c_channel_env)
-            self.dns_port, self.address)
 
     def start_resolver(self, c_channel_env):
         """
@@ -464,11 +503,12 @@ class BoB:
         # XXX: we hardcode port 8080
         self.start_simple("b10-cmdctl", c_channel_env, 8080)
 
-    def start_all_processes(self, c_channel_env):
+    def start_all_processes(self):
         """
             Starts up all the processes.  Any exception generated during the
             starting of the processes is handled by the caller.
         """
+        c_channel_env = self.c_channel_env
         self.start_msgq(c_channel_env)
         self.start_cfgmgr(c_channel_env)
         self.start_ccsession(c_channel_env)
@@ -485,6 +525,7 @@ class BoB:
         # ... and resolver (if selected):
         if self.cfg_start_resolver:
             self.start_resolver(c_channel_env)
+            self.started_resolver_family = True
 
         # Everything after the main components can run as non-root.
         # TODO: this is only temporary - once the privileged socket creator is
@@ -498,6 +539,7 @@
             self.start_xfrout(c_channel_env)
             self.start_xfrin(c_channel_env)
             self.start_zonemgr(c_channel_env)
+            self.started_auth_family = True
 
         # ... and finally start the remaining processes
         self.start_stats(c_channel_env)
@@ -528,7 +570,8 @@ class BoB:
         # Start all processes.  If any one fails to start, kill all started
         # processes and exit with an error indication.
         try:
-            self.start_all_processes(c_channel_env)
+            self.c_channel_env = c_channel_env
+            self.start_all_processes()
         except Exception as e:
             self.kill_started_processes()
             return "Unable to start " + self.curproc + ": " + str(e)
@@ -550,10 +593,35 @@ class BoB:
         self.cc_session.group_sendmsg(cmd, "Zonemgr", "Zonemgr")
         self.cc_session.group_sendmsg(cmd, "Zonemgr", "Zonemgr")
         self.cc_session.group_sendmsg(cmd, "Stats", "Stats")
         self.cc_session.group_sendmsg(cmd, "Stats", "Stats")
 
 
-    def stop_process(self, process):
+    def stop_process(self, process, recipient):
-        """Stop the given process, friendly-like."""
+        """
-        # XXX nothing yet
+        Stop the given process, friendly-like. The process is the name it has
-        pass
+        (in logs, etc), the recipient is the address on msgq.
+        """
+        if self.verbose:
+            sys.stdout.write("[bind10] Asking %s to terminate\n" % process)
+        # TODO: Some timeout to solve processes that don't want to die would
+        # help. We can even store it in the dict, it is used only as a set
+        self.expected_shutdowns[process] = 1
+        # Ask the process to die willingly
+        self.cc_session.group_sendmsg({'command': ['shutdown']}, recipient,
+            recipient)
+
+    # Series of stop_process wrappers
+    def stop_resolver(self):
+        self.stop_process('b10-resolver', 'Resolver')
+
+    def stop_auth(self):
+        self.stop_process('b10-auth', 'Auth')
+
+    def stop_xfrout(self):
+        self.stop_process('b10-xfrout', 'Xfrout')
+
+    def stop_xfrin(self):
+        self.stop_process('b10-xfrin', 'Xfrin')
+
+    def stop_zonemgr(self):
+        self.stop_process('b10-zonemgr', 'Zonemgr')
 
     def shutdown(self):
         """Stop the BoB instance."""
@@ -659,6 +727,10 @@
         still_dead = {}
         now = time.time()
         for proc_info in self.dead_processes.values():
+            if proc_info.name in self.expected_shutdowns:
+                # We don't restart, we wanted it to die
+                del self.expected_shutdowns[proc_info.name]
+                continue
             restart_time = proc_info.restart_schedule.get_restart_time(now)
             if restart_time > now:
                 if (next_restart is None) or (next_restart > restart_time):
@@ -709,32 +781,39 @@ def fatal_signal(signal_number, stack_frame):
     signal.signal(signal.SIGCHLD, signal.SIG_DFL)
     boss_of_bind.runnable = False
 
-def check_port(option, opt_str, value, parser):
-    """Function to insure that the port we are passed is actually 
-    a valid port number. Used by OptionParser() on startup."""
-    try:
-        if opt_str in ['-p', '--port']:
-            parser.values.dns_port = isc.net.parse.port_parse(value)
-        else:
-            raise OptionValueError("Unknown option " + opt_str)
-    except ValueError as e:
-        raise OptionValueError(str(e))
-
-def check_addr(option, opt_str, value, parser):
-    """Function to insure that the address we are passed is actually 
-    a valid address. Used by OptionParser() on startup."""
-    try:
-        if opt_str in ['-a', '--address']:
-            parser.values.address = isc.net.parse.addr_parse(value)
-        else:
-            raise OptionValueError("Unknown option " + opt_str)
-    except ValueError:
-        raise OptionValueError("%s requires a valid IPv4 or IPv6 address" % opt_str)
-
 def process_rename(option, opt_str, value, parser):
     """Function that renames the process if it is requested by a option."""
     isc.util.process.rename(value)
 
+def dump_pid(pid_file):
+    """
+    Dump the PID of the current process to the specified file.  If the given
+    file is None this function does nothing.  If the file already exists,
+    the existing content will be removed.  If a system error happens in
+    creating or writing to the file, the corresponding exception will be
+    propagated to the caller.
+    """
+    if pid_file is None:
+        return
+    f = open(pid_file, "w")
+    f.write('%d\n' % os.getpid())
+    f.close()
+
+def unlink_pid_file(pid_file):
+    """
+    Remove the given file, which is basically expected to be the PID file
+    created by dump_pid().  The specified file may or may not exist; if it
+    doesn't this function does nothing.  Other system level errors in removing
+    the file will be propagated as the corresponding exception.
+    """
+    if pid_file is None:
+        return
+    try:
+        os.unlink(pid_file)
+    except OSError as error:
+        if error.errno is not errno.ENOENT:
+            raise
+
 def main():
     global options
     global boss_of_bind
@@ -743,17 +822,11 @@
 
     # Parse any command-line options.
     parser = OptionParser(version=VERSION)
-    parser.add_option("-a", "--address", dest="address", type="string",
-                      action="callback", callback=check_addr, default=None,
-                      help="address the DNS server will use (default: listen on all addresses)")
     parser.add_option("-m", "--msgq-socket-file", dest="msgq_socket_file",
     parser.add_option("-m", "--msgq-socket-file", dest="msgq_socket_file",
                       type="string", default=None,
                       type="string", default=None,
                       help="UNIX domain socket file the b10-msgq daemon will use")
                       help="UNIX domain socket file the b10-msgq daemon will use")
     parser.add_option("-n", "--no-cache", action="store_true", dest="nocache",
     parser.add_option("-n", "--no-cache", action="store_true", dest="nocache",
                       default=False, help="disable hot-spot cache in authoritative DNS server")
                       default=False, help="disable hot-spot cache in authoritative DNS server")
-    parser.add_option("-p", "--port", dest="dns_port", type="int",
-                      action="callback", callback=check_port, default=5300,
-                      help="port the DNS server will use (default 5300)")
     parser.add_option("-u", "--user", dest="user", type="string", default=None,
     parser.add_option("-u", "--user", dest="user", type="string", default=None,
                       help="Change user after startup (must run as root)")
                       help="Change user after startup (must run as root)")
     parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
     parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
@@ -761,6 +834,9 @@ def main():
     parser.add_option("--pretty-name", type="string", action="callback",
     parser.add_option("--pretty-name", type="string", action="callback",
                       callback=process_rename,
                       callback=process_rename,
                       help="Set the process name (displayed in ps, top, ...)")
                       help="Set the process name (displayed in ps, top, ...)")
+    parser.add_option("--pid-file", dest="pid_file", type="string",
+                      default=None,
+                      help="file to dump the PID of the BIND 10 process")
     (options, args) = parser.parse_args()
     (options, args) = parser.parse_args()
     if args:
     if args:
         parser.print_help()
         parser.print_help()
@@ -814,14 +890,14 @@ def main():
     signal.signal(signal.SIGPIPE, signal.SIG_IGN)
 
     # Go bob!
-    boss_of_bind = BoB(options.msgq_socket_file, options.dns_port,
+    boss_of_bind = BoB(options.msgq_socket_file, options.nocache,
-                       options.address, options.nocache, options.verbose,
+                       options.verbose, setuid, username)
-                       setuid, username)
     startup_result = boss_of_bind.startup()
     if startup_result:
         sys.stderr.write("[bind10] Error on startup: %s\n" % startup_result)
         sys.exit(1)
     sys.stdout.write("[bind10] BIND 10 started\n")
+    dump_pid(options.pid_file)
 
     # send "bind10.boot_time" to b10-stats
     time.sleep(1) # wait a second
@@ -875,6 +951,7 @@ def main():
     signal.signal(signal.SIGCHLD, signal.SIG_DFL)
     boss_of_bind.shutdown()
     sys.stdout.write("[bind10] BIND 10 exiting\n");
+    unlink_pid_file(options.pid_file)
     sys.exit(0)
 
 if __name__ == "__main__":
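
A minimal usage sketch of the new PID-file helpers, following the calls added to main() above (the path is an arbitrary example and the functions are assumed to be in scope from this module):

    pid_path = "/tmp/bind10.pid"      # example path only; None disables both helpers
    dump_pid(pid_path)                # write the current PID at startup
    try:
        pass                          # ... run the boss main loop ...
    finally:
        unlink_pid_file(pid_path)     # remove the PID file on exit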

+ 8 - 35
src/bin/bind10/bind10.xml

@@ -20,7 +20,7 @@
 <refentry>
 
   <refentryinfo>
-    <date>July 29, 2010</date>
+    <date>February 22, 2011</date>
   </refentryinfo>
 
   <refmeta>
@@ -36,24 +36,20 @@
 
   <docinfo>
     <copyright>
-      <year>2010</year>
+      <year>2011</year>
       <holder>Internet Systems Consortium, Inc. ("ISC")</holder>
     </copyright>
   </docinfo>
 
   <refsynopsisdiv>
     <cmdsynopsis>
-      <command>bind10</command>    
+      <command>bind10</command>
-      <arg><option>-a <replaceable>address</replaceable></option></arg>
       <arg><option>-m <replaceable>file</replaceable></option></arg>
       <arg><option>-n</option></arg>
-      <arg><option>-p <replaceable>number</replaceable></option></arg>
       <arg><option>-u <replaceable>user</replaceable></option></arg>
       <arg><option>-v</option></arg>
-      <arg><option>--address <replaceable>address</replaceable></option></arg>
       <arg><option>--msgq-socket-file <replaceable>file</replaceable></option></arg>
       <arg><option>--no-cache</option></arg>
-      <arg><option>--port <replaceable>number</replaceable></option></arg>
       <arg><option>--user <replaceable>user</replaceable></option></arg>
       <arg><option>--pretty-name <replaceable>name</replaceable></option></arg>
       <arg><option>--verbose</option></arg>
@@ -86,19 +82,6 @@
     <variablelist>
 
       <varlistentry>
-        <term><option>-a</option> <replaceable>address</replaceable>, <option>--address</option> <replaceable>address</replaceable></term>
-
-        <listitem>
-	  <para>The IPv4 or IPv6 address for the
-	    <citerefentry><refentrytitle>b10-auth</refentrytitle><manvolnum>8</manvolnum></citerefentry>
-            daemon to listen on.
-            The default is to listen on all addresses. 
-            (This is a short term workaround. This argument may change.)
-          </para>
-         </listitem>
-      </varlistentry>
-
-      <varlistentry>
         <term><option>-m</option> <replaceable>file</replaceable>,
            <option>--msgq-socket-file</option> <replaceable>file</replaceable></term>
 
@@ -123,20 +106,6 @@
       </varlistentry>
 
       <varlistentry>
-        <term><option>-p</option> <replaceable>number</replaceable>, <option>--port</option> <replaceable>number</replaceable></term>
-
-        <listitem>
-          <para>The port number for the
-	    <citerefentry><refentrytitle>b10-auth</refentrytitle><manvolnum>8</manvolnum></citerefentry>
-            daemon to listen on.
-            The default is 5300.</para>
-<!-- TODO: -->
-	    <note><simpara>This prototype release uses a non-default
-	    port for domain service.</simpara></note>
-         </listitem>
-      </varlistentry>
-
-      <varlistentry>
         <term><option>-u</option> <replaceable>user</replaceable>, <option>--user</option> <replaceable>name</replaceable></term>
 
         <listitem>
@@ -155,7 +124,11 @@
           <para>The name this process should have in tools like
           <command>ps</command> or <command>top</command>. This
           is handy if you have multiple versions/installations
-          of <command>bind10</command>.</para>
+          of <command>bind10</command>.
+<!-- TODO: only supported with setproctitle feature
+The default is the basename of ARG 0.
+-->
+</para>
         </listitem>
       </varlistentry>
 

+ 0 - 315
src/bin/bind10/tests/bind10_test.py

@@ -1,315 +0,0 @@
-from bind10 import ProcessInfo, BoB
-
-# XXX: environment tests are currently disabled, due to the preprocessor
-#      setup that we have now complicating the environment
-
-import unittest
-import sys
-import os
-import signal
-import socket
-from isc.net.addr import IPAddr
-
-class TestProcessInfo(unittest.TestCase):
-    def setUp(self):
-        # redirect stdout to a pipe so we can check that our
-        # process spawning is doing the right thing with stdout
-        self.old_stdout = os.dup(sys.stdout.fileno())
-        self.pipes = os.pipe()
-        os.dup2(self.pipes[1], sys.stdout.fileno())
-        os.close(self.pipes[1])
-        # note that we use dup2() to restore the original stdout
-        # to the main program ASAP in each test... this prevents
-        # hangs reading from the child process (as the pipe is only
-        # open in the child), and also insures nice pretty output
-
-    def tearDown(self):
-        # clean up our stdout munging
-        os.dup2(self.old_stdout, sys.stdout.fileno())
-        os.close(self.pipes[0])
-
-    def test_init(self):
-        pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ])
-        os.dup2(self.old_stdout, sys.stdout.fileno())
-        self.assertEqual(pi.name, 'Test Process')
-        self.assertEqual(pi.args, [ '/bin/echo', 'foo' ])
-#        self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
-#                                   'PYTHON_EXEC': os.environ['PYTHON_EXEC'] })
-        self.assertEqual(pi.dev_null_stdout, False)
-        self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
-        self.assertNotEqual(pi.process, None)
-        self.assertTrue(type(pi.pid) is int)
-
-#    def test_setting_env(self):
-#        pi = ProcessInfo('Test Process', [ '/bin/true' ], env={'FOO': 'BAR'})
-#        os.dup2(self.old_stdout, sys.stdout.fileno())
-#        self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
-#                                   'PYTHON_EXEC': os.environ['PYTHON_EXEC'],
-#                                   'FOO': 'BAR' })
-
-    def test_setting_null_stdout(self):
-        pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ], 
-                         dev_null_stdout=True)
-        os.dup2(self.old_stdout, sys.stdout.fileno())
-        self.assertEqual(pi.dev_null_stdout, True)
-        self.assertEqual(os.read(self.pipes[0], 100), b"")
-
-    def test_respawn(self):
-        pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ])
-        # wait for old process to work...
-        self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
-        # respawn it
-        old_pid = pi.pid
-        pi.respawn()
-        os.dup2(self.old_stdout, sys.stdout.fileno())
-        # make sure the new one started properly
-        self.assertEqual(pi.name, 'Test Process')
-        self.assertEqual(pi.args, [ '/bin/echo', 'foo' ])
-#        self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
-#                                   'PYTHON_EXEC': os.environ['PYTHON_EXEC'] })
-        self.assertEqual(pi.dev_null_stdout, False)
-        self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
-        self.assertNotEqual(pi.process, None)
-        self.assertTrue(type(pi.pid) is int)
-        self.assertNotEqual(pi.pid, old_pid)
-
-class TestBoB(unittest.TestCase):
-    def test_init(self):
-        bob = BoB()
-        self.assertEqual(bob.verbose, False)
-        self.assertEqual(bob.msgq_socket_file, None)
-        self.assertEqual(bob.dns_port, 5300)
-        self.assertEqual(bob.address, None)
-        self.assertEqual(bob.cc_session, None)
-        self.assertEqual(bob.ccs, None)
-        self.assertEqual(bob.processes, {})
-        self.assertEqual(bob.dead_processes, {})
-        self.assertEqual(bob.runnable, False)
-        self.assertEqual(bob.uid, None)
-        self.assertEqual(bob.username, None)
-        self.assertEqual(bob.nocache, False)
-        self.assertEqual(bob.cfg_start_auth, True)
-        self.assertEqual(bob.cfg_start_resolver, False)
-
-    def test_init_alternate_socket(self):
-        bob = BoB("alt_socket_file")
-        self.assertEqual(bob.verbose, False)
-        self.assertEqual(bob.msgq_socket_file, "alt_socket_file")
-        self.assertEqual(bob.address, None)
-        self.assertEqual(bob.dns_port, 5300)
-        self.assertEqual(bob.cc_session, None)
-        self.assertEqual(bob.ccs, None)
-        self.assertEqual(bob.processes, {})
-        self.assertEqual(bob.dead_processes, {})
-        self.assertEqual(bob.runnable, False)
-        self.assertEqual(bob.uid, None)
-        self.assertEqual(bob.username, None)
-        self.assertEqual(bob.nocache, False)
-        self.assertEqual(bob.cfg_start_auth, True)
-        self.assertEqual(bob.cfg_start_resolver, False)
-
-    def test_init_alternate_dns_port(self):
-        bob = BoB(None, 9999)
-        self.assertEqual(bob.verbose, False)
-        self.assertEqual(bob.msgq_socket_file, None)
-        self.assertEqual(bob.dns_port, 9999)
-        self.assertEqual(bob.address, None)
-        self.assertEqual(bob.cc_session, None)
-        self.assertEqual(bob.ccs, None)
-        self.assertEqual(bob.processes, {})
-        self.assertEqual(bob.dead_processes, {})
-        self.assertEqual(bob.runnable, False)
-        self.assertEqual(bob.uid, None)
-        self.assertEqual(bob.username, None)
-        self.assertEqual(bob.nocache, False)
-        self.assertEqual(bob.cfg_start_auth, True)
-        self.assertEqual(bob.cfg_start_resolver, False)
-
-    def test_init_alternate_address(self):
-        bob = BoB(None, 1234, IPAddr('127.127.127.127'))
-        self.assertEqual(bob.verbose, False)
-        self.assertEqual(bob.msgq_socket_file, None)
-        self.assertEqual(bob.dns_port, 1234)
-        self.assertEqual(bob.address.addr, socket.inet_aton('127.127.127.127'))
-        self.assertEqual(bob.cc_session, None)
-        self.assertEqual(bob.ccs, None)
-        self.assertEqual(bob.processes, {})
-        self.assertEqual(bob.dead_processes, {})
-        self.assertEqual(bob.runnable, False)
-        self.assertEqual(bob.uid, None)
-        self.assertEqual(bob.username, None)
-        self.assertEqual(bob.nocache, False)
-        self.assertEqual(bob.cfg_start_auth, True)
-        self.assertEqual(bob.cfg_start_resolver, False)
-
-# Class for testing the Bob.start_all_processes() method call.
-#
-# Although testing that external processes start is outside the scope
-# of the unit test, by overriding the process start methods we can check
-# that the right processes are started depending on the configuration
-# options.
-class StartAllProcessesBob(BoB):
-    def __init__(self):
-        BoB.__init__(self)
-
-# Set flags as to which of the overridden methods has been run.
-        self.msgq = False
-        self.cfgmgr = False
-        self.ccsession = False
-        self.auth = False
-        self.resolver = False
-        self.xfrout = False
-        self.xfrin = False
-        self.zonemgr = False
-        self.stats = False
-        self.cmdctl = False
-
-    def read_bind10_config(self):
-        # Configuration options are set directly
-        pass
-
-    def start_msgq(self, c_channel_env):
-        self.msgq = True
-
-    def start_cfgmgr(self, c_channel_env):
-        self.cfgmgr = True
-
-    def start_ccsession(self, c_channel_env):
-        self.ccsession = True
-
-    def start_auth(self, c_channel_env):
-        self.auth = True
-
-    def start_resolver(self, c_channel_env):
-        self.resolver = True
-
-    def start_xfrout(self, c_channel_env):
-        self.xfrout = True
-
-    def start_xfrin(self, c_channel_env):
-        self.xfrin = True
-
-    def start_zonemgr(self, c_channel_env):
-        self.zonemgr = True
-
-    def start_stats(self, c_channel_env):
-        self.stats = True
-
-    def start_cmdctl(self, c_channel_env):
-        self.cmdctl = True
-
-# Check that the start_all_processes method starts the right combination
-# of processes.
-class TestStartAllProcessesBob(unittest.TestCase):
-    def check_preconditions(self, bob):
-        self.assertEqual(bob.msgq, False)
-        self.assertEqual(bob.cfgmgr, False)
-        self.assertEqual(bob.ccsession, False)
-        self.assertEqual(bob.auth, False)
-        self.assertEqual(bob.resolver, False)
-        self.assertEqual(bob.xfrout, False)
-        self.assertEqual(bob.xfrin, False)
-        self.assertEqual(bob.zonemgr, False)
-        self.assertEqual(bob.stats, False)
-        self.assertEqual(bob.cmdctl, False)
-
-    # Checks the processes started when starting neither auth nor resolver
-    # is specified.
-    def test_start_none(self):
-        # Created Bob and ensure initialization correct
-        bob = StartAllProcessesBob()
-        self.check_preconditions(bob)
-
-        # Start processes and check what was started
-        c_channel_env = {}
-        bob.cfg_start_auth = False
-        bob.cfg_start_resolver = False
-
-        bob.start_all_processes(c_channel_env)
-
-        self.assertEqual(bob.msgq, True)
-        self.assertEqual(bob.cfgmgr, True)
-        self.assertEqual(bob.ccsession, True)
-        self.assertEqual(bob.auth, False)
-        self.assertEqual(bob.resolver, False)
-        self.assertEqual(bob.xfrout, False)
-        self.assertEqual(bob.xfrin, False)
-        self.assertEqual(bob.zonemgr, False)
-        self.assertEqual(bob.stats, True)
-        self.assertEqual(bob.cmdctl, True)
-
-    # Checks the processes started when starting only the auth process
-    def test_start_auth(self):
-        # Created Bob and ensure initialization correct
-        bob = StartAllProcessesBob()
-        self.check_preconditions(bob)
-
-        # Start processes and check what was started
-        c_channel_env = {}
-        bob.cfg_start_auth = True
-        bob.cfg_start_resolver = False
-
-        bob.start_all_processes(c_channel_env)
-
-        self.assertEqual(bob.msgq, True)
-        self.assertEqual(bob.cfgmgr, True)
-        self.assertEqual(bob.ccsession, True)
-        self.assertEqual(bob.auth, True)
-        self.assertEqual(bob.resolver, False)
-        self.assertEqual(bob.xfrout, True)
-        self.assertEqual(bob.xfrin, True)
-        self.assertEqual(bob.zonemgr, True)
-        self.assertEqual(bob.stats, True)
-        self.assertEqual(bob.cmdctl, True)
-
-    # Checks the processes started when starting only the resolver process
-    def test_start_resolver(self):
-        # Created Bob and ensure initialization correct
-        bob = StartAllProcessesBob()
-        self.check_preconditions(bob)
-
-        # Start processes and check what was started
-        c_channel_env = {}
-        bob.cfg_start_auth = False
-        bob.cfg_start_resolver = True
-
-        bob.start_all_processes(c_channel_env)
-
-        self.assertEqual(bob.msgq, True)
-        self.assertEqual(bob.cfgmgr, True)
-        self.assertEqual(bob.ccsession, True)
-        self.assertEqual(bob.auth, False)
-        self.assertEqual(bob.resolver, True)
-        self.assertEqual(bob.xfrout, False)
-        self.assertEqual(bob.xfrin, False)
-        self.assertEqual(bob.zonemgr, False)
-        self.assertEqual(bob.stats, True)
-        self.assertEqual(bob.cmdctl, True)
-
-    # Checks the processes started when starting both auth and resolver process
-    def test_start_both(self):
-        # Created Bob and ensure initialization correct
-        bob = StartAllProcessesBob()
-        self.check_preconditions(bob)
-
-        # Start processes and check what was started
-        c_channel_env = {}
-        bob.cfg_start_auth = True
-        bob.cfg_start_resolver = True
-
-        bob.start_all_processes(c_channel_env)
-
-        self.assertEqual(bob.msgq, True)
-        self.assertEqual(bob.cfgmgr, True)
-        self.assertEqual(bob.ccsession, True)
-        self.assertEqual(bob.auth, True)
-        self.assertEqual(bob.resolver, True)
-        self.assertEqual(bob.xfrout, True)
-        self.assertEqual(bob.xfrin, True)
-        self.assertEqual(bob.zonemgr, True)
-        self.assertEqual(bob.stats, True)
-        self.assertEqual(bob.cmdctl, True)
-
-
-if __name__ == '__main__':
-    unittest.main()

+ 463 - 0
src/bin/bind10/tests/bind10_test.py.in

@@ -0,0 +1,463 @@
+from bind10 import ProcessInfo, BoB, dump_pid, unlink_pid_file
+
+# XXX: environment tests are currently disabled, because the preprocessor
+#      setup we now have complicates the environment
+
+import unittest
+import sys
+import os
+import signal
+import socket
+from isc.net.addr import IPAddr
+
+class TestProcessInfo(unittest.TestCase):
+    def setUp(self):
+        # redirect stdout to a pipe so we can check that our
+        # process spawning is doing the right thing with stdout
+        self.old_stdout = os.dup(sys.stdout.fileno())
+        self.pipes = os.pipe()
+        os.dup2(self.pipes[1], sys.stdout.fileno())
+        os.close(self.pipes[1])
+        # note that we use dup2() to restore the original stdout
+        # to the main program ASAP in each test... this prevents
+        # hangs reading from the child process (as the pipe is only
+        # open in the child), and also ensures clean test output
+
+    def tearDown(self):
+        # clean up our stdout munging
+        os.dup2(self.old_stdout, sys.stdout.fileno())
+        os.close(self.pipes[0])
+
+    def test_init(self):
+        pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ])
+        os.dup2(self.old_stdout, sys.stdout.fileno())
+        self.assertEqual(pi.name, 'Test Process')
+        self.assertEqual(pi.args, [ '/bin/echo', 'foo' ])
+#        self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
+#                                   'PYTHON_EXEC': os.environ['PYTHON_EXEC'] })
+        self.assertEqual(pi.dev_null_stdout, False)
+        self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
+        self.assertNotEqual(pi.process, None)
+        self.assertTrue(type(pi.pid) is int)
+
+#    def test_setting_env(self):
+#        pi = ProcessInfo('Test Process', [ '/bin/true' ], env={'FOO': 'BAR'})
+#        os.dup2(self.old_stdout, sys.stdout.fileno())
+#        self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
+#                                   'PYTHON_EXEC': os.environ['PYTHON_EXEC'],
+#                                   'FOO': 'BAR' })
+
+    def test_setting_null_stdout(self):
+        pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ],
+                         dev_null_stdout=True)
+        os.dup2(self.old_stdout, sys.stdout.fileno())
+        self.assertEqual(pi.dev_null_stdout, True)
+        self.assertEqual(os.read(self.pipes[0], 100), b"")
+
+    def test_respawn(self):
+        pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ])
+        # wait for old process to work...
+        self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
+        # respawn it
+        old_pid = pi.pid
+        pi.respawn()
+        os.dup2(self.old_stdout, sys.stdout.fileno())
+        # make sure the new one started properly
+        self.assertEqual(pi.name, 'Test Process')
+        self.assertEqual(pi.args, [ '/bin/echo', 'foo' ])
+#        self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
+#                                   'PYTHON_EXEC': os.environ['PYTHON_EXEC'] })
+        self.assertEqual(pi.dev_null_stdout, False)
+        self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
+        self.assertNotEqual(pi.process, None)
+        self.assertTrue(type(pi.pid) is int)
+        self.assertNotEqual(pi.pid, old_pid)
+
+class TestBoB(unittest.TestCase):
+    def test_init(self):
+        bob = BoB()
+        self.assertEqual(bob.verbose, False)
+        self.assertEqual(bob.msgq_socket_file, None)
+        self.assertEqual(bob.cc_session, None)
+        self.assertEqual(bob.ccs, None)
+        self.assertEqual(bob.processes, {})
+        self.assertEqual(bob.dead_processes, {})
+        self.assertEqual(bob.runnable, False)
+        self.assertEqual(bob.uid, None)
+        self.assertEqual(bob.username, None)
+        self.assertEqual(bob.nocache, False)
+        self.assertEqual(bob.cfg_start_auth, True)
+        self.assertEqual(bob.cfg_start_resolver, False)
+
+    def test_init_alternate_socket(self):
+        bob = BoB("alt_socket_file")
+        self.assertEqual(bob.verbose, False)
+        self.assertEqual(bob.msgq_socket_file, "alt_socket_file")
+        self.assertEqual(bob.cc_session, None)
+        self.assertEqual(bob.ccs, None)
+        self.assertEqual(bob.processes, {})
+        self.assertEqual(bob.dead_processes, {})
+        self.assertEqual(bob.runnable, False)
+        self.assertEqual(bob.uid, None)
+        self.assertEqual(bob.username, None)
+        self.assertEqual(bob.nocache, False)
+        self.assertEqual(bob.cfg_start_auth, True)
+        self.assertEqual(bob.cfg_start_resolver, False)
+
+# Class for testing the BoB start/stop components routines.
+#
+# Although testing that external processes start is outside the scope
+# of the unit test, by overriding the process start methods we can check
+# that the right processes are started depending on the configuration
+# options.
+class StartStopCheckBob(BoB):
+    def __init__(self):
+        BoB.__init__(self)
+
+        # Set flags showing which of the overridden methods have been run.
+        self.msgq = False
+        self.cfgmgr = False
+        self.ccsession = False
+        self.auth = False
+        self.resolver = False
+        self.xfrout = False
+        self.xfrin = False
+        self.zonemgr = False
+        self.stats = False
+        self.cmdctl = False
+        self.c_channel_env = {}
+
+    def read_bind10_config(self):
+        # Configuration options are set directly
+        pass
+
+    def start_msgq(self, c_channel_env):
+        self.msgq = True
+
+    def start_cfgmgr(self, c_channel_env):
+        self.cfgmgr = True
+
+    def start_ccsession(self, c_channel_env):
+        self.ccsession = True
+
+    def start_auth(self, c_channel_env):
+        self.auth = True
+
+    def start_resolver(self, c_channel_env):
+        self.resolver = True
+
+    def start_xfrout(self, c_channel_env):
+        self.xfrout = True
+
+    def start_xfrin(self, c_channel_env):
+        self.xfrin = True
+
+    def start_zonemgr(self, c_channel_env):
+        self.zonemgr = True
+
+    def start_stats(self, c_channel_env):
+        self.stats = True
+
+    def start_cmdctl(self, c_channel_env):
+        self.cmdctl = True
+
+    # We don't really use all of these stop_ methods yet. But someone might
+    # add a stop_ method to BoB later, and we want it overridden here in case
+    # they forget to update the tests.
+    def stop_msgq(self):
+        self.msgq = False
+
+    def stop_cfgmgr(self):
+        self.cfgmgr = False
+
+    def stop_ccsession(self):
+        self.ccsession = False
+
+    def stop_auth(self):
+        self.auth = False
+
+    def stop_resolver(self):
+        self.resolver = False
+
+    def stop_xfrout(self):
+        self.xfrout = False
+
+    def stop_xfrin(self):
+        self.xfrin = False
+
+    def stop_zonemgr(self):
+        self.zonemgr = False
+
+    def stop_stats(self):
+        self.stats = False
+
+    def stop_cmdctl(self):
+        self.cmdctl = False
+
+class TestStartStopProcessesBob(unittest.TestCase):
+    """
+    Check that the start_all_processes method starts the right combination
+    of processes, and that processes are started and stopped as the
+    configuration changes.
+    """
+    def check_started(self, bob, core, auth, resolver):
+        """
+        Check that the right sets of services are started. The ones that
+        should be running are specified by the core, auth and resolver
+        parameters (they are groups of processes, e.g. auth means b10-auth,
+        b10-xfrout, b10-xfrin and b10-zonemgr).
+        """
+        self.assertEqual(bob.msgq, core)
+        self.assertEqual(bob.cfgmgr, core)
+        self.assertEqual(bob.ccsession, core)
+        self.assertEqual(bob.auth, auth)
+        self.assertEqual(bob.resolver, resolver)
+        self.assertEqual(bob.xfrout, auth)
+        self.assertEqual(bob.xfrin, auth)
+        self.assertEqual(bob.zonemgr, auth)
+        self.assertEqual(bob.stats, core)
+        self.assertEqual(bob.cmdctl, core)
+
+    def check_preconditions(self, bob):
+        self.check_started(bob, False, False, False)
+
+    def check_started_none(self, bob):
+        """
+        Check that the situation matches a configuration where no servers
+        should be started. The core processes still need to be running.
+        """
+        self.check_started(bob, True, False, False)
+
+    def check_started_both(self, bob):
+        """
+        Check that the situation matches a configuration where both servers
+        (auth and resolver) are enabled.
+        """
+        self.check_started(bob, True, True, True)
+
+    def check_started_auth(self, bob):
+        """
+        Check that the set of processes needed to run only auth is started.
+        """
+        self.check_started(bob, True, True, False)
+
+    def check_started_resolver(self, bob):
+        """
+        Check that the set of processes needed to run only the resolver is started.
+        """
+        self.check_started(bob, True, False, True)
+
+    # Checks the processes started when starting neither auth nor resolver
+    # is specified.
+    def test_start_none(self):
+        # Create BoB and ensure correct initialization
+        bob = StartStopCheckBob()
+        self.check_preconditions(bob)
+
+        # Start processes and check what was started
+        bob.cfg_start_auth = False
+        bob.cfg_start_resolver = False
+
+        bob.start_all_processes()
+        self.check_started_none(bob)
+
+    # Checks the processes started when starting only the auth process
+    def test_start_auth(self):
+        # Create BoB and ensure correct initialization
+        bob = StartStopCheckBob()
+        self.check_preconditions(bob)
+
+        # Start processes and check what was started
+        bob.cfg_start_auth = True
+        bob.cfg_start_resolver = False
+
+        bob.start_all_processes()
+
+        self.check_started_auth(bob)
+
+    # Checks the processes started when starting only the resolver process
+    def test_start_resolver(self):
+        # Create BoB and ensure correct initialization
+        bob = StartStopCheckBob()
+        self.check_preconditions(bob)
+
+        # Start processes and check what was started
+        bob.cfg_start_auth = False
+        bob.cfg_start_resolver = True
+
+        bob.start_all_processes()
+
+        self.check_started_resolver(bob)
+
+    # Checks the processes started when starting both auth and resolver process
+    def test_start_both(self):
+        # Create BoB and ensure correct initialization
+        bob = StartStopCheckBob()
+        self.check_preconditions(bob)
+
+        # Start processes and check what was started
+        bob.cfg_start_auth = True
+        bob.cfg_start_resolver = True
+
+        bob.start_all_processes()
+
+        self.check_started_both(bob)
+
+    def test_config_start(self):
+        """
+        Test that the configuration starts and stops processes according
+        to configuration changes.
+        """
+
+        # Create BoB and ensure correct initialization
+        bob = StartStopCheckBob()
+        self.check_preconditions(bob)
+
+        # Start processes (nothing much should be started, as in
+        # test_start_none)
+        bob.cfg_start_auth = False
+        bob.cfg_start_resolver = False
+
+        bob.start_all_processes()
+        bob.runnable = True
+        self.check_started_none(bob)
+
+        # Enable both at once
+        bob.config_handler({'start_auth': True, 'start_resolver': True})
+        self.check_started_both(bob)
+
+        # Not touched by empty change
+        bob.config_handler({})
+        self.check_started_both(bob)
+
+        # Not touched by change to the same configuration
+        bob.config_handler({'start_auth': True, 'start_resolver': True})
+        self.check_started_both(bob)
+
+        # Turn them both off again
+        bob.config_handler({'start_auth': False, 'start_resolver': False})
+        self.check_started_none(bob)
+
+        # Not touched by empty change
+        bob.config_handler({})
+        self.check_started_none(bob)
+
+        # Not touched by change to the same configuration
+        bob.config_handler({'start_auth': False, 'start_resolver': False})
+        self.check_started_none(bob)
+
+        # Start and stop auth separately
+        bob.config_handler({'start_auth': True})
+        self.check_started_auth(bob)
+
+        bob.config_handler({'start_auth': False})
+        self.check_started_none(bob)
+
+        # Start and stop resolver separately
+        bob.config_handler({'start_resolver': True})
+        self.check_started_resolver(bob)
+
+        bob.config_handler({'start_resolver': False})
+        self.check_started_none(bob)
+
+        # Alternate
+        bob.config_handler({'start_auth': True})
+        self.check_started_auth(bob)
+
+        bob.config_handler({'start_auth': False, 'start_resolver': True})
+        self.check_started_resolver(bob)
+
+        bob.config_handler({'start_auth': True, 'start_resolver': False})
+        self.check_started_auth(bob)
+
+    def test_config_start_once(self):
+        """
+        Tests that a process is started only once.
+        """
+        # Create BoB and ensure correct initialization
+        bob = StartStopCheckBob()
+        self.check_preconditions(bob)
+
+        # Start processes (both)
+        bob.cfg_start_auth = True
+        bob.cfg_start_resolver = True
+
+        bob.start_all_processes()
+        bob.runnable = True
+        self.check_started_both(bob)
+
+        bob.start_auth = lambda: self.fail("Started auth again")
+        bob.start_xfrout = lambda: self.fail("Started xfrout again")
+        bob.start_xfrin = lambda: self.fail("Started xfrin again")
+        bob.start_zonemgr = lambda: self.fail("Started zonemgr again")
+        bob.start_resolver = lambda: self.fail("Started resolver again")
+
+        # Ask to start them again. Nothing should be started, as they already are.
+        bob.config_handler({'start_auth': True})
+        bob.config_handler({'start_resolver': True})
+
+    def test_config_not_started_early(self):
+        """
+        Test that processes are not started by the config handler before
+        startup.
+        """
+        bob = StartStopCheckBob()
+        self.check_preconditions(bob)
+
+        bob.start_auth = lambda: self.fail("Started auth again")
+        bob.start_xfrout = lambda: self.fail("Started xfrout again")
+        bob.start_xfrin = lambda: self.fail("Started xfrin again")
+        bob.start_zonemgr = lambda: self.fail("Started zonemgr again")
+        bob.start_resolver = lambda: self.fail("Started resolver again")
+
+        bob.config_handler({'start_auth': True, 'start_resolver': True})
+
+class TestPIDFile(unittest.TestCase):
+    def setUp(self):
+        self.pid_file = '@builddir@' + os.sep + 'bind10.pid'
+        if os.path.exists(self.pid_file):
+            os.unlink(self.pid_file)
+
+    def tearDown(self):
+        if os.path.exists(self.pid_file):
+            os.unlink(self.pid_file)
+
+    def check_pid_file(self):
+        # dump PID to the file, and confirm the content is correct
+        dump_pid(self.pid_file)
+        my_pid = os.getpid()
+        self.assertEqual(my_pid, int(open(self.pid_file, "r").read()))
+
+    def test_dump_pid(self):
+        self.check_pid_file()
+
+        # make sure any existing content will be removed
+        open(self.pid_file, "w").write('dummy data\n')
+        self.check_pid_file()
+
+    def test_unlink_pid_file_notexist(self):
+        dummy_data = 'dummy_data\n'
+        open(self.pid_file, "w").write(dummy_data)
+        unlink_pid_file("no_such_pid_file")
+        # the file passed to unlink_pid_file() doesn't exist, so the
+        # original content of our PID file should remain intact.
+        self.assertEqual(dummy_data, open(self.pid_file, "r").read())
+
+    def test_dump_pid_with_none(self):
+        # Check the behavior of dump_pid() and unlink_pid_file() with None.
+        # These should be no-ops.
+        dump_pid(None)
+        self.assertFalse(os.path.exists(self.pid_file))
+
+        dummy_data = 'dummy_data\n'
+        open(self.pid_file, "w").write(dummy_data)
+        unlink_pid_file(None)
+        self.assertEqual(dummy_data, open(self.pid_file, "r").read())
+
+    def test_dump_pid_failure(self):
+        # the attempt to open the file will fail, which should result in an exception.
+        self.assertRaises(IOError, dump_pid,
+                          'nonexistent_dir' + os.sep + 'bind10.pid')
+
+if __name__ == '__main__':
+    unittest.main()
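
The stdout juggling in TestProcessInfo above is the subtlest part of this file.
The same technique in isolation looks like the following minimal sketch, which
uses subprocess.Popen directly instead of ProcessInfo (so everything
ProcessInfo-specific is left out):

    import os
    import subprocess
    import sys

    # Point stdout at a pipe, spawn a child that inherits it, then restore
    # stdout *before* reading, exactly as setUp()/tearDown() above do.
    saved_stdout = os.dup(sys.stdout.fileno())
    read_end, write_end = os.pipe()
    os.dup2(write_end, sys.stdout.fileno())
    os.close(write_end)
    try:
        child = subprocess.Popen(['/bin/echo', 'foo'])  # inherits the redirected stdout
        child.wait()
    finally:
        # restoring stdout early prevents hangs: our own copy of the pipe's
        # write end would otherwise keep os.read() from ever seeing EOF
        os.dup2(saved_stdout, sys.stdout.fileno())
        os.close(saved_stdout)
    print(os.read(read_end, 100))   # b'foo\n'
    os.close(read_end)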

+ 6 - 5
src/bin/bindctl/Makefile.am

@@ -5,12 +5,13 @@ man_MANS = bindctl.1
 
 
 EXTRA_DIST = $(man_MANS) bindctl.xml
 
 
-python_PYTHON = __init__.py bindcmd.py cmdparse.py exception.py moduleinfo.py mycollections.py
+python_PYTHON = __init__.py bindcmd.py cmdparse.py exception.py moduleinfo.py \
+		mycollections.py
 pythondir = $(pyexecdir)/bindctl
 
 
-bindctldir = $(DESTDIR)$(pkgdatadir)
+bindctldir = $(pkgdatadir)
 
 
-CLEANFILES = bindctl
+CLEANFILES = bindctl bindctl_main.pyc
 
 
 if ENABLE_MAN
 
 
@@ -19,8 +20,8 @@ bindctl.1: bindctl.xml
 
 
 endif
 
 
-bindctl: bindctl-source.py
+bindctl: bindctl_main.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
 	       -e "s|@@SYSCONFDIR@@|@sysconfdir@|" \
-	       -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bindctl-source.py >$@
+	       -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bindctl_main.py >$@
 	chmod a+x $@
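
The bindctl rule above is nothing more than a template substitution; roughly
the following Python does the same job (the substitution values here are
invented examples, the real ones come from configure):

    import os

    # Rough equivalent of the sed rule: copy bindctl_main.py, filling in the
    # build-time placeholders, then make the result executable.
    substitutions = {
        '@@PYTHONPATH@@': '/usr/local/lib/python3/site-packages',
        '@@SYSCONFDIR@@': '/usr/local/etc',
        '@@LIBEXECDIR@@': '/usr/local/libexec/bind10',
    }

    with open('bindctl_main.py') as src:
        text = src.read()
    for placeholder, value in substitutions.items():
        text = text.replace(placeholder, value)
    with open('bindctl', 'w') as dst:
        dst.write(text)
    os.chmod('bindctl', 0o755)   # the "chmod a+x $@" step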

+ 165 - 95
src/bin/bindctl/bindcmd.py

@@ -51,7 +51,6 @@ except ImportError:
     my_readline = sys.stdin.readline
 
 
 CSV_FILE_NAME = 'default_user.csv'
-FAIL_TO_CONNECT_WITH_CMDCTL = "Fail to connect with b10-cmdctl module, is it running?"
 CONFIG_MODULE_NAME = 'config'
 CONST_BINDCTL_HELP = """
 usage: <module name> <command name> [param1 = value1 [, param2 = value2]]
@@ -88,20 +87,29 @@ class ValidatedHTTPSConnection(http.client.HTTPSConnection):
 class BindCmdInterpreter(Cmd):
     """simple bindctl example."""
 
 
-    def __init__(self, server_port = 'localhost:8080', pem_file = None):
+    def __init__(self, server_port='localhost:8080', pem_file=None,
+                 csv_file_dir=None):
         Cmd.__init__(self)
         self.location = ""
         self.prompt_end = '> '
-        self.prompt = self.prompt_end
+        if sys.stdin.isatty():
+            self.prompt = self.prompt_end
+        else:
+            self.prompt = ""
         self.ruler = '-'
         self.modules = OrderedDict()
-        self.add_module_info(ModuleInfo("help", desc = "Get help for bindctl"))
+        self.add_module_info(ModuleInfo("help", desc = "Get help for bindctl."))
         self.server_port = server_port
         self.conn = ValidatedHTTPSConnection(self.server_port,
                                              ca_certs=pem_file)
         self.session_id = self._get_session_id()
         self.config_data = None
-        
+        if csv_file_dir is not None:
+            self.csv_file_dir = csv_file_dir
+        else:
+            self.csv_file_dir = pwd.getpwnam(getpass.getuser()).pw_dir + \
+                os.sep + '.bind10' + os.sep
+
     def _get_session_id(self):
         '''Generate one session id for the connection. '''
         rand = os.urandom(16)
@@ -119,8 +127,8 @@ class BindCmdInterpreter(Cmd):
 
 
             self.cmdloop()
         except FailToLogin as err:
-            print(err)
+            # error already printed when this was raised, ignoring
-            print(FAIL_TO_CONNECT_WITH_CMDCTL)
+            pass
         except KeyboardInterrupt:
             print('\nExit from bindctl')
 
 
@@ -173,9 +181,7 @@ class BindCmdInterpreter(Cmd):
         time, username and password saved in 'default_user.csv' will be
         used first.
         '''
-        csv_file_dir = pwd.getpwnam(getpass.getuser()).pw_dir
+        users = self._get_saved_user_info(self.csv_file_dir, CSV_FILE_NAME)
-        csv_file_dir += os.sep + '.bind10' + os.sep
-        users = self._get_saved_user_info(csv_file_dir, CSV_FILE_NAME)
         for row in users:
             param = {'username': row[0], 'password' : row[1]}
             try:
@@ -209,7 +215,8 @@ class BindCmdInterpreter(Cmd):
                 raise FailToLogin()
 
 
             if response.status == http.client.OK:
-                self._save_user_info(username, passwd, csv_file_dir, CSV_FILE_NAME)
+                self._save_user_info(username, passwd, self.csv_file_dir,
+                                     CSV_FILE_NAME)
                 return True
 
 
     def _update_commands(self):
@@ -270,8 +277,10 @@ class BindCmdInterpreter(Cmd):
         return line
 
 
     def postcmd(self, stop, line):
-        '''Update the prompt after every command'''
+        '''Update the prompt after every command, but only if we
-        self.prompt = self.location + self.prompt_end
+           have a tty as output'''
+        if sys.stdin.isatty():
+            self.prompt = self.location + self.prompt_end
         return stop
 
 
     def _prepare_module_commands(self, module_spec):
@@ -375,7 +384,14 @@ class BindCmdInterpreter(Cmd):
         if cmd.command == "help" or ("help" in cmd.params.keys()):
         if cmd.command == "help" or ("help" in cmd.params.keys()):
             self._handle_help(cmd)
             self._handle_help(cmd)
         elif cmd.module == CONFIG_MODULE_NAME:
         elif cmd.module == CONFIG_MODULE_NAME:
-            self.apply_config_cmd(cmd)
+            try:
+                self.apply_config_cmd(cmd)
+            except isc.cc.data.DataTypeError as dte:
+                print("Error: " + str(dte))
+            except isc.cc.data.DataNotFoundError as dnfe:
+                print("Error: " + str(dnfe))
+            except KeyError as ke:
+                print("Error: missing " + str(ke))
         else:
             self.apply_cmd(cmd)
 
 
@@ -396,9 +412,24 @@ class BindCmdInterpreter(Cmd):
 
 
     def do_help(self, name):
         print(CONST_BINDCTL_HELP)
-        for k in self.modules.keys():
+        for k in self.modules.values():
-            print("\t", self.modules[k])
+            n = k.get_name()
-                
+            if len(n) >= CONST_BINDCTL_HELP_INDENT_WIDTH:
+                print("    %s" % n)
+                print(textwrap.fill(k.get_desc(),
+                      initial_indent="            ",
+                      subsequent_indent="    " +
+                      " " * CONST_BINDCTL_HELP_INDENT_WIDTH,
+                      width=70))
+            else:
+                print(textwrap.fill("%s%s%s" %
+                    (k.get_name(),
+                     " "*(CONST_BINDCTL_HELP_INDENT_WIDTH - len(k.get_name())),
+                     k.get_desc()),
+                    initial_indent="    ",
+                    subsequent_indent="    " +
+                    " " * CONST_BINDCTL_HELP_INDENT_WIDTH,
+                    width=70))
     
     
     def onecmd(self, line):
         if line == 'EOF' or line.lower() == "quit":
@@ -411,7 +442,19 @@ class BindCmdInterpreter(Cmd):
         Cmd.onecmd(self, line)
 
 
     def remove_prefix(self, list, prefix):
-        return [(val[len(prefix):]) for val in list]
+        """Removes the prefix already entered, and all elements from the
+           list that don't match it"""
+        if prefix.startswith('/'):
+            prefix = prefix[1:]
+
+        new_list = []
+        for val in list:
+            if val.startswith(prefix):
+                new_val = val[len(prefix):]
+                if new_val.startswith("/"):
+                    new_val = new_val[1:]
+                new_list.append(new_val)
+        return new_list
 
 
     def complete(self, text, state):
         if 0 == state:
@@ -502,8 +545,7 @@ class BindCmdInterpreter(Cmd):
             self._validate_cmd(cmd)
             self._handle_cmd(cmd)
         except (IOError, http.client.HTTPException) as err:
-            print('Error!', err)
+            print('Error: ', err)
-            print(FAIL_TO_CONNECT_WITH_CMDCTL)
         except BindCtlException as err:
             print("Error! ", err)
             self._print_correct_usage(err)
@@ -541,87 +583,115 @@ class BindCmdInterpreter(Cmd):
            Raises a KeyError if the command was not complete
         '''
         identifier = self.location
-        try:
+        if 'identifier' in cmd.params:
-            if 'identifier' in cmd.params:
+            if not identifier.endswith("/"):
-                if not identifier.endswith("/"):
+                identifier += "/"
-                    identifier += "/"
+            if cmd.params['identifier'].startswith("/"):
-                if cmd.params['identifier'].startswith("/"):
+                identifier = cmd.params['identifier']
-                    identifier = cmd.params['identifier']
+            else:
-                else:
+                if cmd.params['identifier'].startswith('['):
-                    identifier += cmd.params['identifier']
+                    identifier = identifier[:-1]
-
+                identifier += cmd.params['identifier']
-                # Check if the module is known; for unknown modules
+
-                # we currently deny setting preferences, as we have
+            # Check if the module is known; for unknown modules
-                # no way yet to determine if they are ok.
+            # we currently deny setting preferences, as we have
-                module_name = identifier.split('/')[1]
+            # no way yet to determine if they are ok.
-                if self.config_data is None or \
+            module_name = identifier.split('/')[1]
-                   not self.config_data.have_specification(module_name):
+            if module_name != "" and (self.config_data is None or \
-                    print("Error: Module '" + module_name + "' unknown or not running")
+               not self.config_data.have_specification(module_name)):
-                    return
+                print("Error: Module '" + module_name + "' unknown or not running")
+                return
 
 
-            if cmd.command == "show":
+        if cmd.command == "show":
-                values = self.config_data.get_value_maps(identifier)
+            # check if we have the 'all' argument
-                for value_map in values:
+            show_all = False
-                    line = value_map['name']
+            if 'argument' in cmd.params:
-                    if value_map['type'] in [ 'module', 'map', 'list' ]:
+                if cmd.params['argument'] == 'all':
-                        line += "/"
+                    show_all = True
-                    else:
+                elif 'identifier' not in cmd.params:
-                        line += ":\t" + json.dumps(value_map['value'])
+                    # no 'all', no identifier, assume this is the
-                    line += "\t" + value_map['type']
+                    #identifier
-                    line += "\t"
+                    identifier += cmd.params['argument']
-                    if value_map['default']:
-                        line += "(default)"
-                    if value_map['modified']:
-                        line += "(modified)"
-                    print(line)
-            elif cmd.command == "add":
-                self.config_data.add_value(identifier, cmd.params['value'])
-            elif cmd.command == "remove":
-                if 'value' in cmd.params:
-                    self.config_data.remove_value(identifier, cmd.params['value'])
                 else:
-                    self.config_data.remove_value(identifier, None)
+                    print("Error: unknown argument " + cmd.params['argument'] + ", or multiple identifiers given")
-            elif cmd.command == "set":
+                    return
-                if 'identifier' not in cmd.params:
+            values = self.config_data.get_value_maps(identifier, show_all)
-                    print("Error: missing identifier or value")
+            for value_map in values:
+                line = value_map['name']
+                if value_map['type'] in [ 'module', 'map' ]:
+                    line += "/"
+                elif value_map['type'] == 'list' \
+                     and value_map['value'] != []:
+                    # do not print content of non-empty lists if
+                    # we have more data to show
+                    line += "/"
                 else:
-                    parsed_value = None
+                    line += "\t" + json.dumps(value_map['value'])
-                    try:
+                line += "\t" + value_map['type']
-                        parsed_value = json.loads(cmd.params['value'])
+                line += "\t"
-                    except Exception as exc:
+                if value_map['default']:
-                        # ok could be an unquoted string, interpret as such
+                    line += "(default)"
-                        parsed_value = cmd.params['value']
+                if value_map['modified']:
-                    self.config_data.set_value(identifier, parsed_value)
+                    line += "(modified)"
-            elif cmd.command == "unset":
+                print(line)
-                self.config_data.unset(identifier)
+        elif cmd.command == "show_json":
-            elif cmd.command == "revert":
+            if identifier == "":
-                self.config_data.clear_local_changes()
+                print("Need at least the module to show the configuration in JSON format")
-            elif cmd.command == "commit":
+            else:
-                self.config_data.commit()
+                data, default = self.config_data.get_value(identifier)
-            elif cmd.command == "diff":
+                print(json.dumps(data))
-                print(self.config_data.get_local_changes());
+        elif cmd.command == "add":
-            elif cmd.command == "go":
+            if 'value' in cmd.params:
-                self.go(identifier)
+                self.config_data.add_value(identifier, cmd.params['value'])
-        except isc.cc.data.DataTypeError as dte:
+            else:
-            print("Error: " + str(dte))
+                self.config_data.add_value(identifier)
-        except isc.cc.data.DataNotFoundError as dnfe:
+        elif cmd.command == "remove":
-            print("Error: " + identifier + " not found")
+            if 'value' in cmd.params:
-        except KeyError as ke:
+                self.config_data.remove_value(identifier, cmd.params['value'])
-            print("Error: missing " + str(ke))
+            else:
-            raise ke
+                self.config_data.remove_value(identifier, None)
+        elif cmd.command == "set":
+            if 'identifier' not in cmd.params:
+                print("Error: missing identifier or value")
+            else:
+                parsed_value = None
+                try:
+                    parsed_value = json.loads(cmd.params['value'])
+                except Exception as exc:
+                    # ok could be an unquoted string, interpret as such
+                    parsed_value = cmd.params['value']
+                self.config_data.set_value(identifier, parsed_value)
+        elif cmd.command == "unset":
+            self.config_data.unset(identifier)
+        elif cmd.command == "revert":
+            self.config_data.clear_local_changes()
+        elif cmd.command == "commit":
+            self.config_data.commit()
+        elif cmd.command == "diff":
+            print(self.config_data.get_local_changes())
+        elif cmd.command == "go":
+            self.go(identifier)
 
 
     def go(self, identifier):
     def go(self, identifier):
         '''Handles the config go command, change the 'current' location
+           within the configuration tree. '..' will be interpreted as
-        # this is just to see if it exists
+           'up one level'.'''
-        self.config_data.get_value(identifier)
+        id_parts = isc.cc.data.split_identifier(identifier)
-        # some sanitizing
+
-        identifier = identifier.replace("//", "/")
+        new_location = ""
-        if not identifier.startswith("/"):
+        for id_part in id_parts:
-            identifier = "/" + identifier
+            if (id_part == ".."):
-        if identifier.endswith("/"):
+                # go 'up' one level
-            identifier = identifier[:-1]
+                new_location, a, b = new_location.rpartition("/")
-        self.location = identifier
+            else:
+                new_location += "/" + id_part
+        # check if exists, if not, revert and error
+        v,d = self.config_data.get_value(new_location)
+        if v is None:
+            print("Error: " + identifier + " not found")
+            return
+
+        self.location = new_location
 
 
     def apply_cmd(self, cmd):
         '''Handles a general module command'''
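
The new go() logic above resolves '..' components one level at a time. A
standalone sketch of just that resolution step, using a plain '/' split
instead of isc.cc.data.split_identifier and an invented identifier:

    # Approximation of the '..' handling in go(); not the real code path.
    def resolve_location(identifier):
        new_location = ""
        for part in identifier.strip("/").split("/"):
            if part == "..":
                # go 'up' one level, exactly as go() does
                new_location, _, _ = new_location.rpartition("/")
            elif part != "":
                new_location += "/" + part
        return new_location

    print(resolve_location("Boss/start_auth/../start_resolver"))
    # -> /Boss/start_resolver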

+ 20 - 1
src/bin/bindctl/bindctl.xml

@@ -51,6 +51,7 @@
       <arg><option>--address <replaceable>address</replaceable></option></arg>
       <arg><option>--help</option></arg>
       <arg><option>--certificate-chain <replaceable>file</replaceable></option></arg>
+      <arg><option>--csv-file-dir <replaceable>file</replaceable></option></arg>
       <arg><option>--port <replaceable>number</replaceable></option></arg>
       <arg><option>--version</option></arg>
     </cmdsynopsis>
@@ -110,6 +111,22 @@
       </varlistentry>
 
 
       <varlistentry>
+        <term>
+	  <option>--csv-file-dir</option> <replaceable>file</replaceable>
+	</term>
+
+        <listitem>
+          <para>
+	    The directory name in which the user/password CSV file
+            is stored (see AUTHENTICATION).
+	    By default this option doesn't have any value,
+	    in which case the ".bind10" directory under the user's
+            home directory will be used.
+          </para>
+         </listitem>
+      </varlistentry>
+
+      <varlistentry>
         <term><option>-h</option>,
           <option>--help</option></term>
         <listitem><para>
@@ -148,8 +165,10 @@
     <para>
       The tool will authenticate using a username and password.
       On the first successful login, it will save the details to
-      <filename>~/.bind10/default_user.csv</filename>
+      a comma-separated-value (CSV) file
       which will be used for later uses of <command>bindctl</command>.
+      The file name is <filename>default_user.csv</filename>
+      located under the directory specified by the --csv-file-dir option.
     </para>
 
 
 <!-- TODO: mention HTTPS? -->
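
The AUTHENTICATION text above, together with the way bindcmd.py reads the
saved rows (row[0] is the username, row[1] the password), implies a very
simple file layout. A rough sketch of reading and writing such a file; the
real helpers in bindcmd.py may differ in detail:

    import csv
    import os

    # Default location matches the documented fallback: ~/.bind10/default_user.csv
    csv_file_dir = os.path.join(os.path.expanduser('~'), '.bind10')
    csv_path = os.path.join(csv_file_dir, 'default_user.csv')

    def save_user(username, password):
        os.makedirs(csv_file_dir, exist_ok=True)
        with open(csv_path, 'w', newline='') as f:
            csv.writer(f).writerow([username, password])

    def load_users():
        if not os.path.exists(csv_path):
            return []
        with open(csv_path, newline='') as f:
            return [row for row in csv.reader(f) if row]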

+ 50 - 41
src/bin/bindctl/bindctl-source.py.in

@@ -31,53 +31,62 @@ isc.util.process.rename()
 # This is the version that gets displayed to the user.
 # The VERSION string consists of the module name, the module version
 # number, and the overall BIND 10 version number (set in configure.ac).
-VERSION = "bindctl 20101201 (BIND 10 @PACKAGE_VERSION@)"
+VERSION = "bindctl 20110217 (BIND 10 @PACKAGE_VERSION@)"
+
+DEFAULT_IDENTIFIER_DESC = "The identifier specifies the config item. Child elements are separated with the '/' character. List indices can be specified with '[i]', where i is an integer specifying the index, starting with 0. Examples: 'Boss/start_auth', 'Recurse/listen_on[0]/address'. If no identifier is given, shows the item at the current location."
 
 
 def prepare_config_commands(tool):
     '''Prepare fixed commands for local configuration editing'''
-    module = ModuleInfo(name = CONFIG_MODULE_NAME, desc = "Configuration commands")
+    module = ModuleInfo(name = CONFIG_MODULE_NAME, desc = "Configuration commands.")
-    cmd = CommandInfo(name = "show", desc = "Show configuration")
+    cmd = CommandInfo(name = "show", desc = "Show configuration.")
-    param = ParamInfo(name = "identifier", type = "string", optional=True)
+    param = ParamInfo(name = "argument", type = "string", optional=True, desc = "If you specify the argument 'all' (before the identifier), recursively show all child elements for the given identifier.")
+    cmd.add_param(param)
+    param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
+    cmd.add_param(param)
+    module.add_command(cmd)
+
+    cmd = CommandInfo(name = "show_json", desc = "Show full configuration in JSON format.")
+    param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
     cmd.add_param(param)
     module.add_command(cmd)
 
 
-    cmd = CommandInfo(name = "add", desc = "Add entry to configuration list")
+    cmd = CommandInfo(name = "add", desc = "Add an entry to configuration list. If no value is given, a default value is added.")
-    param = ParamInfo(name = "identifier", type = "string", optional=True)
+    param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
     cmd.add_param(param)
-    param = ParamInfo(name = "value", type = "string", optional=False)
+    param = ParamInfo(name = "value", type = "string", optional=True, desc = "Specifies a value to add to the list. It must be in correct JSON format and complete.")
     cmd.add_param(param)
     module.add_command(cmd)
 
 
-    cmd = CommandInfo(name = "remove", desc = "Remove entry from configuration list")
+    cmd = CommandInfo(name = "remove", desc = "Remove entry from configuration list.")
-    param = ParamInfo(name = "identifier", type = "string", optional=True)
+    param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
     cmd.add_param(param)
-    param = ParamInfo(name = "value", type = "string", optional=True)
+    param = ParamInfo(name = "value", type = "string", optional=True, desc = "Specifies a value to remove from the list. It must be in correct JSON format and complete.")
     cmd.add_param(param)
     module.add_command(cmd)
 
 
-    cmd = CommandInfo(name = "set", desc = "Set a configuration value")
+    cmd = CommandInfo(name = "set", desc = "Set a configuration value.")
-    param = ParamInfo(name = "identifier", type = "string", optional=True)
+    param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
     cmd.add_param(param)
-    param = ParamInfo(name = "value", type = "string", optional=False)
+    param = ParamInfo(name = "value", type = "string", optional=False, desc = "Specifies a value to set. It must be in correct JSON format and complete.")
     cmd.add_param(param)
     module.add_command(cmd)
 
 
-    cmd = CommandInfo(name = "unset", desc = "Unset a configuration value")
+    cmd = CommandInfo(name = "unset", desc = "Unset a configuration value (i.e. revert to the default, if any).")
-    param = ParamInfo(name = "identifier", type = "string", optional=False)
+    param = ParamInfo(name = "identifier", type = "string", optional=False, desc = DEFAULT_IDENTIFIER_DESC)
     cmd.add_param(param)
     module.add_command(cmd)
 
 
-    cmd = CommandInfo(name = "diff", desc = "Show all local changes")
+    cmd = CommandInfo(name = "diff", desc = "Show all local changes that have not been committed.")
     module.add_command(cmd)
 
 
-    cmd = CommandInfo(name = "revert", desc = "Revert all local changes")
+    cmd = CommandInfo(name = "revert", desc = "Revert all local changes.")
     module.add_command(cmd)
 
 
-    cmd = CommandInfo(name = "commit", desc = "Commit all local changes")
+    cmd = CommandInfo(name = "commit", desc = "Commit all local changes.")
     module.add_command(cmd)
 
 
-    cmd = CommandInfo(name = "go", desc = "Go to a specific configuration part")
+    cmd = CommandInfo(name = "go", desc = "Go to a specific configuration part.")
-    param = ParamInfo(name = "identifier", type="string", optional=False)
+    param = ParamInfo(name = "identifier", type="string", optional=False, desc = DEFAULT_IDENTIFIER_DESC)
     cmd.add_param(param)
     module.add_command(cmd)
 
 
@@ -102,28 +111,28 @@ def check_addr(option, opt_str, value, parser):
     parser.values.addr = value
 
 
 def set_bindctl_options(parser):
-    parser.add_option('-p', '--port', dest = 'port', type = 'int',
+    parser.add_option('-p', '--port', dest='port', type='int',
-                      action = 'callback', callback=check_port,
+                      action='callback', callback=check_port,
-                      default = '8080', help = 'port for cmdctl of bind10')
+                      default='8080', help='port for cmdctl of bind10')
 
 
-    parser.add_option('-a', '--address', dest = 'addr', type = 'string',
+    parser.add_option('-a', '--address', dest='addr', type='string',
-                      action = 'callback', callback=check_addr,
+                      action='callback', callback=check_addr,
-                      default = '127.0.0.1', help = 'IP address for cmdctl of bind10')
+                      default='127.0.0.1', help='IP address for cmdctl of bind10')
 
 
-    parser.add_option('-c', '--certificate-chain', dest = 'cert_chain', 
+    parser.add_option('-c', '--certificate-chain', dest='cert_chain',
-                      type = 'string', action = 'store',
+                      type='string', action='store',
-                      help = 'PEM formatted server certificate validation chain file')
+                      help='PEM formatted server certificate validation chain file')
-
-if __name__ == '__main__':
-    try:
-        parser = OptionParser(version = VERSION)
-        set_bindctl_options(parser)
-        (options, args) = parser.parse_args()
-        server_addr = options.addr + ':' + str(options.port)
-        tool = BindCmdInterpreter(server_addr, pem_file=options.cert_chain)
-        prepare_config_commands(tool)
-        tool.run()
-    except Exception as e:
-        print(e, "\nFailed to connect with b10-cmdctl module, is it running?")
 
 
+    parser.add_option('--csv-file-dir', dest='csv_file_dir', type='string',
+                      default=None, action='store',
+                      help='Directory to store the password CSV file')
 
 
+if __name__ == '__main__':
+    parser = OptionParser(version = VERSION)
+    set_bindctl_options(parser)
+    (options, args) = parser.parse_args()
+    server_addr = options.addr + ':' + str(options.port)
+    tool = BindCmdInterpreter(server_addr, pem_file=options.cert_chain,
+                              csv_file_dir=options.csv_file_dir)
+    prepare_config_commands(tool)
+    tool.run()

+ 55 - 1
src/bin/bindctl/cmdparse.py

@@ -33,6 +33,7 @@ param_value_str  = "(?P<param_value>[^\'\" ][^, ]+)"
 param_value_with_quota_str  = "[\"\'](?P<param_value>.+?)(?<!\\\)[\"\']"
 next_params_str = "(?P<blank>\s*)(?P<comma>,?)(?P<next_params>.*)$"
 
 
+
 PARAM_WITH_QUOTA_PATTERN = re.compile(param_name_str + 
                                       param_value_with_quota_str + 
                                       next_params_str)
@@ -40,8 +41,58 @@ PARAM_PATTERN = re.compile(param_name_str + param_value_str + next_params_str)
 # Used for module and command name
 NAME_PATTERN = re.compile("^\s*(?P<name>[\w]+)(?P<blank>\s*)(?P<others>.*)$")
 
 
+# this removes all whitespace in the given string, except when
+# between " quotes
+_remove_unquoted_whitespace = \
+    lambda text:'"'.join( it if i%2 else ''.join(it.split())
+        for i,it in enumerate(text.split('"'))  )
+
+
+def _remove_list_and_map_whitespace(text):
+    """Returns a string where the whitespace between matching [ and ]
+       is removed, unless quoted"""
+    # regular expression aren't really the right tool, since we may have
+    # nested structures
+    result = []
+    start_pos = 0
+    pos = 0
+    list_count = 0
+    map_count = 0
+    cur_start_list_pos = None
+    cur_start_map_pos = None
+    for i in text:
+        if i == '[' and map_count == 0:
+            if list_count == 0:
+                result.append(text[start_pos:pos + 1])
+                cur_start_list_pos = pos + 1
+            list_count = list_count + 1
+        elif i == ']' and map_count == 0:
+            if list_count > 0:
+                list_count = list_count - 1
+                if list_count == 0:
+                    result.append(_remove_unquoted_whitespace(text[cur_start_list_pos:pos + 1]))
+                    start_pos = pos + 1
+        if i == '{' and list_count == 0:
+            if map_count == 0:
+                result.append(text[start_pos:pos + 1])
+                cur_start_map_pos = pos + 1
+            map_count = map_count + 1
+        elif i == '}' and list_count == 0:
+            if map_count > 0:
+                map_count = map_count - 1
+                if map_count == 0:
+                    result.append(_remove_unquoted_whitespace(text[cur_start_map_pos:pos + 1]))
+                    start_pos = pos + 1
+        
+
+        pos = pos + 1
+    if start_pos <= len(text):
+        result.append(text[start_pos:len(text)])
+    return "".join(result)
+    
+    
 class BindCmdParse:
-    """ This class will parse the command line usr input into three part
+    """ This class will parse the command line user input into three parts:
     module name, command, parameters
     the first two parts are strings and parameter is one hash, 
     parameters part is optional
@@ -86,9 +137,12 @@ class BindCmdParse:
 
 
             self._parse_params(param_str)
 
 
+    def _remove_list_whitespace(self, text):
+        return ""
 
 
     def _parse_params(self, param_text):
         """convert a=b,c=d into one hash """
+        param_text = _remove_list_and_map_whitespace(param_text)
         
         
         # Check parameter name "help"
         param = NAME_PATTERN.match(param_text)
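
The effect of the new whitespace stripping is easiest to see on an example:
whitespace inside [] and {} is removed so the parameter regexes can treat a
JSON value as a single token, while quoted strings keep their spaces. The
import path below is assumed; _remove_list_and_map_whitespace is the
module-level helper added above:

    from bindctl.cmdparse import _remove_list_and_map_whitespace

    print(_remove_list_and_map_whitespace('value = [1, 2, "a b"]'))
    # -> value = [1,2,"a b"]
    print(_remove_list_and_map_whitespace('value = {"a": 1, "b": [2, 3]}'))
    # -> value = {"a":1,"b":[2,3]}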

+ 57 - 11
src/bin/bindctl/moduleinfo.py

@@ -16,6 +16,8 @@
 """This module holds classes representing modules, commands and
 """This module holds classes representing modules, commands and
    parameters for use in bindctl"""
    parameters for use in bindctl"""
 
 
+import textwrap
+
 try:
     from collections import OrderedDict
 except ImportError:
@@ -30,6 +32,9 @@ MODULE_NODE_NAME = 'module'
 COMMAND_NODE_NAME = 'command'
 PARAM_NODE_NAME = 'param'
 
 
+# this is used to align the descriptions in help output
+CONST_BINDCTL_HELP_INDENT_WIDTH=12
+
 
 
 class ParamInfo:
     """One parameter of one command.
@@ -52,6 +57,12 @@ class ParamInfo:
     def __str__(self):
         return str("\t%s <type: %s> \t(%s)" % (self.name, self.type, self.desc))
 
 
+    def get_name(self):
+        return "%s <type: %s>" % (self.name, self.type)
+
+    def get_desc(self):
+        return self.desc
+
 class CommandInfo:
     """One command which is provided by one bind10 module, it has zero
        or more parameters
@@ -63,13 +74,18 @@ class CommandInfo:
         self.params = OrderedDict()
         # Set default parameter "help"
         self.add_param(ParamInfo("help", 
-                                  desc = "Get help for command",
+                                  desc = "Get help for command.",
                                   optional = True))

     def __str__(self):
         return str("%s \t(%s)" % (self.name, self.desc))
-        
 
 
+    def get_name(self):
+        return self.name
+
+    def get_desc(self):
+        return self.desc
+    
     def add_param(self, paraminfo):
         """Add a ParamInfo object to this CommandInfo"""
         self.params[paraminfo.name] = paraminfo
@@ -144,22 +160,30 @@ class CommandInfo:
         del params["help"]
         del params["help"]
 
 
         if len(params) == 0:
-            print("\tNo parameters for the command")
+            print("No parameters for the command")
             return
         
         
-        print("\n\tMandatory parameters:")
+        print("\nMandatory parameters:")
         mandatory_infos = []
         for info in params.values():
             if not info.is_optional:
-                print("\t", info)
+                print("    %s" % info.get_name())
+                print(textwrap.fill(info.get_desc(),
+                      initial_indent="        ",
+                      subsequent_indent="        ",
+                      width=70))
                 mandatory_infos.append(info)
 
 
         optional_infos = [info for info in params.values() 
                           if info not in mandatory_infos]
         if len(optional_infos) > 0:
-            print("\n\tOptional parameters:")      
+            print("\nOptional parameters:")      
             for info in optional_infos:
-                    print("\t", info)
+                print("    %s" % info.get_name())
+                print(textwrap.fill(info.get_desc(),
+                      initial_indent="        ",
+                      subsequent_indent="        ",
+                      width=70))
 
 
 
 
 class ModuleInfo:
@@ -172,11 +196,17 @@ class ModuleInfo:
         self.desc = desc
         self.commands = OrderedDict()         
         self.add_command(CommandInfo(name = "help", 
-                                     desc = "Get help for module"))
+                                     desc = "Get help for module."))
         
     def __str__(self):
         return str("%s \t%s" % (self.name, self.desc))
-        
+
+    def get_name(self):
+        return self.name
+
+    def get_desc(self):
+        return self.desc
+
     def add_command(self, command_info):
         """Add a CommandInfo to this ModuleInfo."""
         self.commands[command_info.name] = command_info
@@ -201,8 +231,24 @@ class ModuleInfo:
     def module_help(self):
         """Prints the help info for this module to stdout"""
         print("Module ", self, "\nAvailable commands:")
-        for k in self.commands.keys():
-            print("\t", self.commands[k])
+        for k in self.commands.values():
+            n = k.get_name()
+            if len(n) >= CONST_BINDCTL_HELP_INDENT_WIDTH:
+                print("    %s" % n)
+                print(textwrap.fill(k.get_desc(),
+                      initial_indent="            ",
+                      subsequent_indent="    " +
+                      " " * CONST_BINDCTL_HELP_INDENT_WIDTH,
+                      width=70))
+            else:
+                print(textwrap.fill("%s%s%s" %
+                    (k.get_name(),
+                     " "*(CONST_BINDCTL_HELP_INDENT_WIDTH - len(k.get_name())),
+                     k.get_desc()),
+                    initial_indent="    ",
+                    subsequent_indent="    " +
+                    " " * CONST_BINDCTL_HELP_INDENT_WIDTH,
+                    width=70))
             
     def command_help(self, command):
         """Prints the help info for the command with the given name.

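The new help output above lines command names up in a fixed-width column and wraps descriptions with textwrap. A standalone sketch of the same layout idea follows; CONST_BINDCTL_HELP_INDENT_WIDTH is defined elsewhere in bindctl, so the value 12 is assumed here purely for illustration.

    import textwrap

    # Assumed value; the real constant lives in bindctl's moduleinfo module.
    CONST_BINDCTL_HELP_INDENT_WIDTH = 12

    def print_command_help(name, desc):
        """Mimic the two-column help layout sketched in the diff above."""
        if len(name) >= CONST_BINDCTL_HELP_INDENT_WIDTH:
            # Name too wide: print it on its own line, description below.
            print("    %s" % name)
            print(textwrap.fill(desc,
                  initial_indent="    " + " " * CONST_BINDCTL_HELP_INDENT_WIDTH,
                  subsequent_indent="    " + " " * CONST_BINDCTL_HELP_INDENT_WIDTH,
                  width=70))
        else:
            # Pad the name to the indent width so descriptions line up.
            print(textwrap.fill(name.ljust(CONST_BINDCTL_HELP_INDENT_WIDTH) + desc,
                  initial_indent="    ",
                  subsequent_indent="    " + " " * CONST_BINDCTL_HELP_INDENT_WIDTH,
                  width=70))

    print_command_help("help", "Get help for module.")
    print_command_help("a_rather_long_command_name", "Does something elaborate.")

Names shorter than the column width share a line with their description; longer names push the wrapped description onto the following lines.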
+ 2 - 2
src/bin/bindctl/tests/Makefile.am

@@ -1,5 +1,5 @@
 PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
-PYTESTS = bindctl_test.py
+PYTESTS = bindctl_test.py cmdparse_test.py
 EXTRA_DIST = $(PYTESTS)
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -11,6 +11,6 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_srcdir)/src/bin \
+	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bindctl:$(abs_top_srcdir)/src/bin  \
	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done

+ 144 - 8
src/bin/bindctl/tests/bindctl_test.py

@@ -17,6 +17,12 @@
 import unittest
 import isc.cc.data
 import os
+import pwd
+import getpass
+from optparse import OptionParser
+from isc.config.config_data import ConfigData, MultiConfigData
+from isc.config.module_spec import ModuleSpec
+from bindctl_main import set_bindctl_options
 from bindctl import cmdparse
 from bindctl import bindcmd
 from bindctl.moduleinfo import *
@@ -238,14 +244,97 @@ class TestNameSequence(unittest.TestCase):
             assert self.random_names[i] == module_names[i+1]
             i = i + 1

-    def test_apply_cfg_command(self):
+# tiny class to fake a UIModuleCCSession, but only the config data
+# parts for the next set of tests
+class FakeCCSession(MultiConfigData):
+    def __init__(self):
+        self._local_changes = {}
+        self._current_config = {}
+        self._specifications = {}
+        self.add_foo_spec()
+
+    def add_foo_spec(self):
+        spec = { "module_name": "foo",
+                 "config_data": [
+                 { "item_name": "an_int",
+                   "item_type": "integer",
+                   "item_optional": False,
+                   "item_default": 1
+                 },
+                 { "item_name": "a_list",
+                   "item_type": "list",
+                   "item_optional": False,
+                   "item_default": [],
+                   "list_item_spec":
+                   { "item_name": "a_string",
+                     "item_type": "string",
+                     "item_optional": False,
+                     "item_default": "bar"
+                   }
+                 }
+                 ]
+               }
+        self.set_specification(ModuleSpec(spec))
+    
+
+class TestConfigCommands(unittest.TestCase):
+    def setUp(self):
+        self.tool = bindcmd.BindCmdInterpreter()
+        mod_info = ModuleInfo(name = "foo")
+        self.tool.add_module_info(mod_info)
+        self.tool.config_data = FakeCCSession()
+        
+    def test_apply_cfg_command_int(self):
         self.tool.location = '/'
-        cmd = cmdparse.BindCmdParse("config set identifier=\"foo/bar\" value=\"5\"")
+
+        self.assertEqual((1, MultiConfigData.DEFAULT),
+                         self.tool.config_data.get_value("/foo/an_int"))
+
+        cmd = cmdparse.BindCmdParse("config set identifier=\"foo/an_int\" value=\"5\"")
         self.tool.apply_config_cmd(cmd)
-    
-class FakeBindCmdInterpreter(bindcmd.BindCmdInterpreter):
-    def __init__(self):
-        pass
+        self.assertEqual((5, MultiConfigData.LOCAL),
+                         self.tool.config_data.get_value("/foo/an_int"))
+
+        # this should raise a DataNotFoundError
+        cmd = cmdparse.BindCmdParse("config set identifier=\"foo/bar\" value=\"[]\"")
+        self.assertRaises(isc.cc.data.DataNotFoundError, self.tool.apply_config_cmd, cmd)
+
+        # this should raise a DataTypeError
+        cmd = cmdparse.BindCmdParse("config set identifier=\"foo/an_int\" value=\"[]\"")
+        self.assertRaises(isc.cc.data.DataTypeError, self.tool.apply_config_cmd, cmd)
+
+    # this is a very specific one for use with a set of list tests
+    # to try out the flexibility of the parser (only in the next test)
+    def clt(self, full_cmd_string, item_value):
+        cmd = cmdparse.BindCmdParse(full_cmd_string)
+        self.tool.apply_config_cmd(cmd)
+        self.assertEqual(([item_value], MultiConfigData.LOCAL),
+                         self.tool.config_data.get_value("/foo/a_list"))
+
+    def test_apply_cfg_command_list(self):
+        self.tool.location = '/'
+
+        self.assertEqual(([], MultiConfigData.DEFAULT),
+                         self.tool.config_data.get_value("/foo/a_list"))
+
+        self.clt("config set identifier=\"foo/a_list\" value=[\"a\"]", "a")
+        self.clt("config set identifier=\"foo/a_list\" value =[\"b\"]", "b")
+        self.clt("config set identifier=\"foo/a_list\" value= [\"c\"]", "c")
+        self.clt("config set identifier=\"foo/a_list\" value = [\"d\"]", "d")
+        self.clt("config set identifier =\"foo/a_list\" value=[\"e\"]", "e")
+        self.clt("config set identifier= \"foo/a_list\" value=[\"f\"]", "f")
+        self.clt("config set identifier = \"foo/a_list\" value=[\"g\"]", "g")
+        self.clt("config set identifier = \"foo/a_list\" value = [\"h\"]", "h")
+        self.clt("config set identifier = \"foo/a_list\" value=[\"i\" ]", "i")
+        self.clt("config set identifier = \"foo/a_list\" value=[ \"j\"]", "j")
+        self.clt("config set identifier = \"foo/a_list\" value=[ \"k\" ]", "k")
+
+        # this should raise a DataTypeError
+        cmd = cmdparse.BindCmdParse("config set identifier=\"foo/a_list\" value=\"a\"")
+        self.assertRaises(isc.cc.data.DataTypeError, self.tool.apply_config_cmd, cmd)
+        
+        cmd = cmdparse.BindCmdParse("config set identifier=\"foo/a_list\" value=[1]")
+        self.assertRaises(isc.cc.data.DataTypeError, self.tool.apply_config_cmd, cmd)
 
 
 class TestBindCmdInterpreter(unittest.TestCase):
 
@@ -257,9 +346,22 @@ class TestBindCmdInterpreter(unittest.TestCase):
         writer.writerow(['name2'])
         csvfile.close()
 
 
+    def test_csv_file_dir(self):
+        # Checking default value
+        if "HOME" in os.environ:
+            home_dir = os.environ["HOME"]
+        else:
+            home_dir = pwd.getpwnam(getpass.getuser()).pw_dir
+        self.assertEqual(home_dir + os.sep + '.bind10' + os.sep,
+                         bindcmd.BindCmdInterpreter().csv_file_dir)
+
+        new_csv_dir = '/something/different/'
+        custom_cmd = bindcmd.BindCmdInterpreter(csv_file_dir=new_csv_dir)
+        self.assertEqual(new_csv_dir, custom_cmd.csv_file_dir)
+
     def test_get_saved_user_info(self):
-        cmd = FakeBindCmdInterpreter()
-        users = cmd._get_saved_user_info('/notexist', 'cvs_file.cvs')
+        cmd = bindcmd.BindCmdInterpreter()
+        users = cmd._get_saved_user_info('/notexist', 'csv_file.csv')
         self.assertEqual([], users)
         
         csvfilename = 'csv_file.csv'
@@ -268,6 +370,40 @@ class TestBindCmdInterpreter(unittest.TestCase):
         self.assertEqual([], users)
         os.remove(csvfilename)

+
+class TestCommandLineOptions(unittest.TestCase):
+    class FakeParserError(Exception):
+        """An exception thrown from FakeOptionParser on parser error.
+        """
+        pass
+
+    class FakeOptionParser(OptionParser):
+        """This fake class emulates the OptionParser class with customized
+        error handling for the convenience of tests.
+        """
+        def __init__(self):
+            OptionParser.__init__(self)
+
+        def error(self, msg):
+            raise TestCommandLineOptions.FakeParserError
+
+    def setUp(self):
+        self.parser = self.FakeOptionParser()
+        set_bindctl_options(self.parser)
+
+    def test_csv_file_dir(self):
+        # by default the option is "undefined"
+        (options, _) = self.parser.parse_args([])
+        self.assertEqual(None, options.csv_file_dir)
+
+        # specify the option, valid case.
+        (options, _) = self.parser.parse_args(['--csv-file-dir', 'some_dir'])
+        self.assertEqual('some_dir', options.csv_file_dir)
+
+        # missing option arg; should trigger parser error.
+        self.assertRaises(self.FakeParserError, self.parser.parse_args,
+                          ['--csv-file-dir'])
+
 if __name__== "__main__":
     unittest.main()
     

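The FakeOptionParser above relies on the fact that optparse routes every parse failure through OptionParser.error(), which normally prints usage and calls sys.exit(). Overriding it to raise keeps failures assertable in-process. A minimal, self-contained sketch of the same pattern follows; the --csv-file-dir option is registered directly here rather than through set_bindctl_options, so the snippet stands on its own.

    import unittest
    from optparse import OptionParser

    class ParserError(Exception):
        """Raised instead of exiting, so tests can assert on parse failures."""
        pass

    class TestableOptionParser(OptionParser):
        # optparse funnels all bad input through error(); raising here keeps
        # the test process alive instead of exiting.
        def error(self, msg):
            raise ParserError(msg)

    class ExampleTest(unittest.TestCase):
        def test_missing_argument(self):
            parser = TestableOptionParser()
            parser.add_option("--csv-file-dir", dest="csv_file_dir")
            # A valid invocation parses normally...
            options, _ = parser.parse_args(["--csv-file-dir", "some_dir"])
            self.assertEqual("some_dir", options.csv_file_dir)
            # ...while a missing argument now raises instead of exiting.
            self.assertRaises(ParserError, parser.parse_args, ["--csv-file-dir"])

    if __name__ == "__main__":
        unittest.main()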
+ 88 - 0
src/bin/bindctl/tests/cmdparse_test.py

@@ -0,0 +1,88 @@
+# Copyright (C) 2009  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+
+import unittest
+from bindctl import cmdparse
+
+class TestCmdParse(unittest.TestCase):
+
+    def test_remove_unquoted_whitespace(self):
+        self.assertEqual(cmdparse._remove_unquoted_whitespace("a"), "a")
+        self.assertEqual(cmdparse._remove_unquoted_whitespace(" a"), "a")
+        self.assertEqual(cmdparse._remove_unquoted_whitespace("a "), "a")
+        self.assertEqual(cmdparse._remove_unquoted_whitespace(" a "), "a")
+        self.assertNotEqual(cmdparse._remove_unquoted_whitespace("a"), "a ")
+        self.assertNotEqual(cmdparse._remove_unquoted_whitespace(" a"), " a")
+        self.assertNotEqual(cmdparse._remove_unquoted_whitespace("a "), "a ")
+        self.assertNotEqual(cmdparse._remove_unquoted_whitespace(" a "), " a ")
+        self.assertNotEqual(cmdparse._remove_unquoted_whitespace(" a "), "b")
+
+        self.assertEqual(cmdparse._remove_unquoted_whitespace("\"abc\""), "\"abc\"")
+        self.assertEqual(cmdparse._remove_unquoted_whitespace(" \"abc\""), "\"abc\"")
+        self.assertEqual(cmdparse._remove_unquoted_whitespace("\"abc\" "), "\"abc\"")
+        self.assertEqual(cmdparse._remove_unquoted_whitespace(" \"abc\" "), "\"abc\"")
+        
+        self.assertEqual(cmdparse._remove_unquoted_whitespace("\" abc\""), "\" abc\"")
+        self.assertEqual(cmdparse._remove_unquoted_whitespace(" \"a bc\""), "\"a bc\"")
+        self.assertEqual(cmdparse._remove_unquoted_whitespace("\"ab c\" "), "\"ab c\"")
+        self.assertEqual(cmdparse._remove_unquoted_whitespace(" \"abc \" "), "\"abc \"")
+        self.assertEqual(cmdparse._remove_unquoted_whitespace(" \" a b c \" "), "\" a b c \"")
+        
+        self.assertEqual(cmdparse._remove_unquoted_whitespace("a\" abc\"a"), "a\" abc\"a")
+        self.assertEqual(cmdparse._remove_unquoted_whitespace("a \"a bc\"a"), "a\"a bc\"a")
+        self.assertEqual(cmdparse._remove_unquoted_whitespace("a\"ab c\" a"), "a\"ab c\"a")
+        self.assertEqual(cmdparse._remove_unquoted_whitespace("a \"abc \" a"), "a\"abc \"a")
+        self.assertEqual(cmdparse._remove_unquoted_whitespace("a \" a b c \" a"), "a\" a b c \"a")
+
+    # short-hand function to make the set of tests more readable
+    def rws(self, a, b):
+        self.assertEqual(cmdparse._remove_list_and_map_whitespace(a), b)
+
+    def test_remove_list_whitespace(self):
+        self.rws("a", "a")
+        self.rws(" a ", " a ")
+        self.rws(" [a] ", " [a] ")
+        self.rws(" [ a] ", " [a] ")
+        self.rws(" [ a ] ", " [a] ")
+        self.rws(" [ a b c ] ", " [abc] ")
+        self.rws(" [ a \"b c\" ] ", " [a\"b c\"] ")
+        self.rws("a [ a \"b c\" ] a", "a [a\"b c\"] a")
+        self.rws("a] [ a \"b c\" ] a", "a] [a\"b c\"] a")
+        self.rws(" [ a [b c] ] ", " [a[bc]] ")
+        self.rws(" [ a b][ c d ] ", " [ab][cd] ")
+        self.rws(" [ a b] [ c d ] ", " [ab] [cd] ")
+        
+        self.rws("a", "a")
+        self.rws(" a ", " a ")
+        self.rws(" {a} ", " {a} ")
+        self.rws(" { a} ", " {a} ")
+        self.rws(" { a } ", " {a} ")
+        self.rws(" { a b c } ", " {abc} ")
+        self.rws(" { a \"b c\" } ", " {a\"b c\"} ")
+        self.rws("a { a \"b c\" } a", "a {a\"b c\"} a")
+        self.rws("a} { a \"b c\" } a", "a} {a\"b c\"} a")
+        self.rws(" { a {b c} } ", " {a{bc}} ")
+        self.rws(" { a b}{ c d } ", " {ab}{cd} ")
+        self.rws(" { a b} { c d } ", " {ab} {cd} ")
+
+        self.rws(" [ a b]{ c d } ", " [ab]{cd} ")
+        self.rws(" [ a b{ c d }] ", " [ab{cd}] ")
+        self.rws(" [ a b{ \"c d\" }] ", " [ab{\"c d\"}] ")
+        
+
+if __name__== "__main__":
+    unittest.main()
+    

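The cases above pin down the intended behaviour: whitespace is stripped only outside double-quoted substrings, and preserved inside them. One rough way to get that behaviour, shown here as a reference model rather than the project's actual parser, is to split on quote characters and strip only the unquoted pieces:

    def remove_unquoted_whitespace(text):
        """Drop whitespace that is not inside double-quoted substrings.

        A rough stand-in for cmdparse._remove_unquoted_whitespace(): after
        splitting on '"', the odd-numbered pieces are the quoted segments and
        keep their spaces, everything else has whitespace removed.
        """
        parts = text.split('"')
        out = []
        for i, part in enumerate(parts):
            if i % 2 == 1:          # inside quotes: keep as-is
                out.append(part)
            else:                   # outside quotes: drop all whitespace
                out.append("".join(part.split()))
        return '"'.join(out)

    # Matches the expectations exercised in the tests above.
    assert remove_unquoted_whitespace(' a ') == 'a'
    assert remove_unquoted_whitespace(' "a bc"') == '"a bc"'
    assert remove_unquoted_whitespace('a "abc " a') == 'a"abc "a'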
+ 0 - 1
src/bin/cfgmgr/Makefile.am

@@ -19,7 +19,6 @@ b10-cfgmgr.8: b10-cfgmgr.xml
 
 
 endif
 
-# TODO: does this need $$(DESTDIR) also?
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
 b10-cfgmgr: b10-cfgmgr.py
 	$(SED) "s|@@PYTHONPATH@@|@pyexecdir@|" b10-cfgmgr.py >$@

+ 11 - 3
src/bin/cfgmgr/b10-cfgmgr.py.in

@@ -26,10 +26,18 @@ import os
 isc.util.process.rename()
 
 # If B10_FROM_SOURCE is set in the environment, we use data files
-# from a directory relative to that, otherwise we use the ones
-# installed on the system
+# from a directory relative to the value of that variable, or, if defined,
+# relative to the value of B10_FROM_SOURCE_LOCALSTATEDIR.  Otherwise
+# we use the ones installed on the system.
+# B10_FROM_SOURCE_LOCALSTATEDIR is specifically intended to be used for
+# tests where we want to use various types of configuration within the test
+# environment.  (We may want to make it even more generic so that the path is
+# passed from the boss process)
 if "B10_FROM_SOURCE" in os.environ:
-    DATA_PATH = os.environ["B10_FROM_SOURCE"]
+    if "B10_FROM_SOURCE_LOCALSTATEDIR" in os.environ:
+        DATA_PATH = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"]
+    else:
+        DATA_PATH = os.environ["B10_FROM_SOURCE"]
 else:
     PREFIX = "@prefix@"
     DATA_PATH = "@localstatedir@/@PACKAGE@".replace("${prefix}", PREFIX)

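The precedence introduced above is: B10_FROM_SOURCE_LOCALSTATEDIR wins when both variables are set, B10_FROM_SOURCE is used otherwise, and the installed localstatedir path applies when neither is present. A small sketch of that rule, with made-up paths and a placeholder default:

    import os

    def pick_data_path(environ, default="/usr/local/var/bind10"):
        """Mirror the precedence shown in the diff above (illustration only)."""
        if "B10_FROM_SOURCE" in environ:
            # The test-only override takes priority when both are present.
            if "B10_FROM_SOURCE_LOCALSTATEDIR" in environ:
                return environ["B10_FROM_SOURCE_LOCALSTATEDIR"]
            return environ["B10_FROM_SOURCE"]
        return default

    # Hypothetical values, just to show the precedence:
    env = {"B10_FROM_SOURCE": "/home/user/bind10"}
    assert pick_data_path(env) == "/home/user/bind10"
    env["B10_FROM_SOURCE_LOCALSTATEDIR"] = "/tmp/bind10-test-var"
    assert pick_data_path(env) == "/tmp/bind10-test-var"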
+ 1 - 2
src/bin/cmdctl/Makefile.am

@@ -4,7 +4,7 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
 
 
 pkglibexec_SCRIPTS = b10-cmdctl
 
-b10_cmdctldir = $(DESTDIR)$(pkgdatadir)
+b10_cmdctldir = $(pkgdatadir)
 
 # NOTE: this will overwrite on install
 # So these generic copies are placed in share/bind10 instead of to etc
@@ -33,7 +33,6 @@ endif
 cmdctl.spec: cmdctl.spec.pre
 	$(SED) -e "s|@@SYSCONFDIR@@|$(sysconfdir)|" cmdctl.spec.pre >$@
 
-# TODO: does this need $$(DESTDIR) also?
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
 b10-cmdctl: cmdctl.py
 	$(SED) "s|@@PYTHONPATH@@|@pyexecdir@|" cmdctl.py >$@

+ 5 - 2
src/bin/host/host.cc

@@ -70,12 +70,15 @@ host_lookup(const char* const name, const char* const type) {
     msg.toWire(renderer);
 
     struct addrinfo hints, *res;
-    int e;
     memset(&hints, 0, sizeof(hints));
     hints.ai_family = AF_UNSPEC;
     hints.ai_socktype = SOCK_DGRAM;
     hints.ai_flags = 0; // not using AI_NUMERICHOST in case to bootstrap
-    e = getaddrinfo(server, server_port, &hints, &res);
+    if (getaddrinfo(server, server_port, &hints, &res) != 0) {
+        cerr << "address/port conversion for " << server << ":"
+             << server_port << " failed" << endl;
+        return (1);
+    }
 
     if (verbose) {
         cout << "Trying \"" << name << "\"\n";

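The host change above now reports a failed address/port conversion instead of ignoring getaddrinfo()'s return value. For comparison only, the equivalent check written against Python's standard socket module (not BIND 10 code) looks like this:

    import socket
    import sys

    def resolve_server(server, port):
        """Return the first usable (family, sockaddr) pair, or exit with an error."""
        try:
            # UDP datagram sockets, any address family -- same hints as above.
            infos = socket.getaddrinfo(server, port, socket.AF_UNSPEC,
                                       socket.SOCK_DGRAM)
        except socket.gaierror as err:
            print("address/port conversion for %s:%s failed: %s"
                  % (server, port, err), file=sys.stderr)
            sys.exit(1)
        family, _, _, _, sockaddr = infos[0]
        return family, sockaddr

    # Example (assumes the name is resolvable from the local host):
    # print(resolve_server("192.0.2.1", 53))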
+ 0 - 1
src/bin/msgq/Makefile.am

@@ -16,7 +16,6 @@ b10-msgq.8: msgq.xml
 
 
 endif
 
-# TODO: does this need $$(DESTDIR) also?
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
 b10-msgq: msgq.py
 	$(SED) "s|@@PYTHONPATH@@|@pyexecdir@|" msgq.py >$@

+ 1 - 1
src/bin/msgq/msgq.py.in

@@ -40,7 +40,7 @@ isc.util.process.rename()
 # This is the version that gets displayed to the user.
 # The VERSION string consists of the module name, the module version
 # number, and the overall BIND 10 version number (set in configure.ac).
-VERSION = "b10-msgq 20100818 (BIND 10 @PACKAGE_VERSION@)"
+VERSION = "b10-msgq 20110127 (BIND 10 @PACKAGE_VERSION@)"
 
 class MsgQReceiveError(Exception): pass
 
 

+ 5 - 1
src/bin/resolver/Makefile.am

@@ -48,11 +48,15 @@ b10_resolver_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 b10_resolver_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
 b10_resolver_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
 b10_resolver_LDADD += $(top_builddir)/src/lib/log/liblog.la
+b10_resolver_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
+b10_resolver_LDADD += $(top_builddir)/src/lib/cache/libcache.la
+b10_resolver_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
+b10_resolver_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
 b10_resolver_LDADD += $(top_builddir)/src/bin/auth/change_user.o
 b10_resolver_LDFLAGS = -pthread
 
 # TODO: config.h.in is wrong because doesn't honor pkgdatadir
 # and can't use @datadir@ because doesn't expand default ${prefix}
-b10_resolverdir = $(DESTDIR)$(pkgdatadir)
+b10_resolverdir = $(pkgdatadir)
 b10_resolver_DATA = resolver.spec
 
 

+ 26 - 25
src/bin/resolver/b10-resolver.8

@@ -2,12 +2,12 @@
 .\"     Title: b10-resolver
 .\"     Title: b10-resolver
 .\"    Author: [FIXME: author] [see http://docbook.sf.net/el/author]
 .\"    Author: [FIXME: author] [see http://docbook.sf.net/el/author]
 .\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
 .\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\"      Date: January 19, 2011
+.\"      Date: February 17, 2011
 .\"    Manual: BIND10
 .\"    Manual: BIND10
 .\"    Source: BIND10
 .\"    Source: BIND10
 .\"  Language: English
 .\"  Language: English
 .\"
 .\"
-.TH "B10\-RESOLVER" "8" "January 19, 2011" "BIND10" "BIND10"
+.TH "B10\-RESOLVER" "8" "February 17, 2011" "BIND10" "BIND10"
 .\" -----------------------------------------------------------------
 .\" -----------------------------------------------------------------
 .\" * set default formatting
 .\" * set default formatting
 .\" -----------------------------------------------------------------
 .\" -----------------------------------------------------------------
@@ -39,23 +39,6 @@ will exit\&.
 .PP
 .PP
 It also receives its configurations from
 It also receives its configurations from
 \fBb10-cfgmgr\fR(8)\&.
 \fBb10-cfgmgr\fR(8)\&.
-.if n \{\
-.sp
-.\}
-.RS 4
-.it 1 an-trap
-.nr an-no-space-flag 1
-.nr an-break-flag 1
-.br
-.ps +1
-\fBNote\fR
-.ps -1
-.br
-.PP
-This prototype version only supports forwarding\&. Future versions will introduce full recursion, cache, lookup of local authoritative data (as in
-\fBb10\-auth\fR), and DNSSEC validation\&.
-.sp .5v
-.RE
 .SH "OPTIONS"
 .SH "OPTIONS"
 .PP
 .PP
 The arguments are as follows:
 The arguments are as follows:
@@ -91,15 +74,34 @@ to listen on\&. The list items are the
 \fIaddress\fR
 \fIaddress\fR
 string and
 string and
 \fIport\fR
 \fIport\fR
-number\&. The defaults are address ::1 port 5300 and address 127\&.0\&.0\&.1 port 5300\&.
+number\&. The defaults are address ::1 port 53 and address 127\&.0\&.0\&.1 port 53\&.
 .PP
 .PP
 
 
 \fIretries\fR
 \fIretries\fR
-is the number of times to retry (resend query) after a timeout\&. The default is 0 (do not retry)\&.
+is the number of times to retry (resend query) after a query timeout (\fItimeout_query\fR)\&. The default is 3\&.
+.PP
+
+\fIroot_addresses\fR
+is a list of addresses and ports for
+\fBb10\-resolver\fR
+to use directly as root servers to start resolving\&. The list items are the
+\fIaddress\fR
+string and
+\fIport\fR
+number\&. If empty, a hardcoded address for F\-root (192\&.5\&.5\&.241) is used\&.
+.PP
+
+\fItimeout_client\fR
+is the number of milliseconds to wait before timing out the incoming client query\&. If set to \-1, this timeout is disabled\&. The default is 4000\&. After this timeout, a SERVFAIL is sent back to the client asking the question\&. (The lookup may continue after the timeout, but a later answer is not returned for the now\-past query\&.)
 .PP
 .PP
 
 
-\fItimeout\fR
+\fItimeout_lookup\fR
-is the number of milliseconds to wait for answer\&. If set to \-1, the timeout is disabled\&. The default is 2000\&.
+is the number of milliseconds before it stops trying the query\&. If set to \-1, this timeout is disabled\&. The default is 30000\&.
+.PP
+
+
+\fItimeout_query\fR
+is the number of milliseconds to wait before it retries a query\&. If set to \-1, this timeout is disabled\&. The default is 2000\&.
 .PP
 .PP
 The configuration command is:
 The configuration command is:
 .PP
 .PP
@@ -119,8 +121,7 @@ BIND 10 Guide\&.
 .PP
 .PP
 The
 The
 \fBb10\-resolver\fR
 \fBb10\-resolver\fR
-daemon was first coded in September 2010\&. The initial implementation only provided forwarding\&.
+daemon was first coded in September 2010\&. The initial implementation only provided forwarding\&. Iteration was introduced in January 2011\&.
-
 
 
 .SH "COPYRIGHT"
 .SH "COPYRIGHT"
 .br
 .br

+ 45 - 13
src/bin/resolver/b10-resolver.xml

@@ -20,7 +20,7 @@
 <refentry>
 <refentry>
 
 
   <refentryinfo>
   <refentryinfo>
-    <date>January 19, 2011</date>
+    <date>February 17, 2011</date>
   </refentryinfo>
   </refentryinfo>
 
 
   <refmeta>
   <refmeta>
@@ -69,11 +69,13 @@
 <citerefentry><refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum></citerefentry>.
 <citerefentry><refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum></citerefentry>.
     </para>
     </para>
 
 
+<!--
     <note><para>
     <note><para>
-      This prototype version only supports forwarding.  Future versions
+      Future versions will introduce lookup of local authoritative
-      will introduce full recursion, cache, lookup of local authoritative
+      data (as in <command>b10-auth</command>) and DNSSEC validation.
-      data (as in <command>b10-auth</command>), and DNSSEC validation.
     </para></note>
     </para></note>
+-->
+
   </refsect1>
   </refsect1>
 
 
   <refsect1>
   <refsect1>
@@ -128,7 +130,7 @@ port
 -->
 -->
     </para>
     </para>
 
 
-<!-- trac386:
+<!-- trac384:
 
 
 once that is merged you can for instance do 'config add Resolver/forward_addresses { "port": 123 } and it will fill in the rest (in this case ::1 for the address)
 once that is merged you can for instance do 'config add Resolver/forward_addresses { "port": 123 } and it will fill in the rest (in this case ::1 for the address)
 
 
@@ -139,19 +141,50 @@ once that is merged you can for instance do 'config add Resolver/forward_address
       <command>b10-resolver</command> to listen on.
       <command>b10-resolver</command> to listen on.
       The list items are the <varname>address</varname> string
       The list items are the <varname>address</varname> string
       and <varname>port</varname> number.
       and <varname>port</varname> number.
-      The defaults are address ::1 port 5300 and
+      The defaults are address ::1 port 53 and
-      address 127.0.0.1 port 5300.
+      address 127.0.0.1 port 53.
+<!-- TODO: but defaults are not used, Trac #518 -->
     </para>
     </para>
 
 
     <para>
     <para>
       <varname>retries</varname> is the number of times to retry
       <varname>retries</varname> is the number of times to retry
-      (resend query) after a timeout.
+      (resend query) after a query timeout
-      The default is 0 (do not retry).
+      (<varname>timeout_query</varname>).
+      The default is 3.
+    </para>
+
+    <para>
+      <varname>root_addresses</varname> is a list of addresses and ports
+      for <command>b10-resolver</command> to use directly as
+      root servers to start resolving.
+      The list items are the <varname>address</varname> string
+      and <varname>port</varname> number.
+      If empty, a hardcoded address for F-root (192.5.5.241) is used.
+    </para>
+
+    <para>
+      <varname>timeout_client</varname> is the number of milliseconds
+      to wait before timing out the incoming client query.
+      If set to -1, this timeout is disabled.
+      The default is 4000.
+      After this timeout, a SERVFAIL is sent back to the client asking
+      the question.
+      (The lookup may continue after the timeout, but a later answer
+      is not returned for the now-past query.)
+    </para>
+
+    <para>
+      <varname>timeout_lookup</varname> is the number of milliseconds
+      before it stops trying the query.
+      If set to -1, this timeout is disabled.
+      The default is 30000.
     </para>
     </para>
 
 
     <para>
     <para>
-      <varname>timeout</varname> is the number of milliseconds to
+<!-- previous timeout was renamed to timeout_query -->
-      wait for answer. If set to -1, the timeout is disabled.
+      <varname>timeout_query</varname> is the number of milliseconds to
+      wait before it retries a query.
+      If set to -1, this timeout is disabled.
       The default is 2000.
       The default is 2000.
     </para>
     </para>
 
 
@@ -200,9 +233,8 @@ once that is merged you can for instance do 'config add Resolver/forward_address
     <para>
     <para>
       The <command>b10-resolver</command> daemon was first coded in
       The <command>b10-resolver</command> daemon was first coded in
       September 2010. The initial implementation only provided
       September 2010. The initial implementation only provided
-      forwarding.
+      forwarding. Iteration was introduced in January 2011.
 <!-- TODO: document when caching was added -->
 <!-- TODO: document when caching was added -->
-<!-- TODO: document when iteration was added -->
 <!-- TODO: document when validation was added -->
 <!-- TODO: document when validation was added -->
     </para>
     </para>
   </refsect1>
   </refsect1>

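Taken together, the settings documented above bound the time spent on a query: each upstream attempt waits timeout_query milliseconds and is resent up to retries times, the client receives SERVFAIL at timeout_client, and the whole lookup is abandoned at timeout_lookup. A back-of-the-envelope check using the stated defaults (one possible reading of the text above, not normative):

    # Defaults as documented above (milliseconds).
    TIMEOUT_QUERY = 2000    # per-query retry interval
    TIMEOUT_CLIENT = 4000   # SERVFAIL sent back to the client after this
    TIMEOUT_LOOKUP = 30000  # lookup abandoned entirely after this
    RETRIES = 3

    # Worst case spent on a single unresponsive server: the original send
    # plus RETRIES resends, each waiting TIMEOUT_QUERY.
    worst_single_server = (1 + RETRIES) * TIMEOUT_QUERY
    print("per-server worst case: %d ms" % worst_single_server)      # 8000 ms

    # The client still hears back (with SERVFAIL) after TIMEOUT_CLIENT,
    # even though the lookup may keep running until TIMEOUT_LOOKUP.
    print("client answered after at most: %d ms" % TIMEOUT_CLIENT)
    print("lookup abandoned after at most: %d ms" % TIMEOUT_LOOKUP)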
+ 49 - 5
src/bin/resolver/main.cc

@@ -45,6 +45,9 @@
 #include <resolver/spec_config.h>
 #include <resolver/spec_config.h>
 #include <resolver/resolver.h>
 #include <resolver/resolver.h>
 
 
+#include <cache/resolver_cache.h>
+#include <nsas/nameserver_address_store.h>
+
 #include <log/dummylog.h>
 #include <log/dummylog.h>
 
 
 using namespace std;
 using namespace std;
@@ -56,11 +59,10 @@ using namespace asiolink;
 
 
 namespace {
 namespace {
 
 
-// Default port current 5300 for testing purposes
 static const string PROGRAM = "Resolver";
 static const string PROGRAM = "Resolver";
 
 
 IOService io_service;
 IOService io_service;
-static Resolver *resolver;
+static boost::shared_ptr<Resolver> resolver;
 
 
 ConstElementPtr
 ConstElementPtr
 my_config_handler(ConstElementPtr new_config) {
 my_config_handler(ConstElementPtr new_config) {
@@ -136,15 +138,58 @@ main(int argc, char* argv[]) {
             specfile = string(RESOLVER_SPECFILE_LOCATION);
             specfile = string(RESOLVER_SPECFILE_LOCATION);
         }
         }
 
 
-        resolver = new Resolver();
+        resolver = boost::shared_ptr<Resolver>(new Resolver());
         dlog("Server created.");
         dlog("Server created.");
 
 
         SimpleCallback* checkin = resolver->getCheckinProvider();
         SimpleCallback* checkin = resolver->getCheckinProvider();
         DNSLookup* lookup = resolver->getDNSLookupProvider();
         DNSLookup* lookup = resolver->getDNSLookupProvider();
         DNSAnswer* answer = resolver->getDNSAnswerProvider();
         DNSAnswer* answer = resolver->getDNSAnswerProvider();
 
 
+        isc::nsas::NameserverAddressStore nsas(resolver);
+        resolver->setNameserverAddressStore(nsas);
+
+        isc::cache::ResolverCache cache;
+        resolver->setCache(cache);
+        
+        // TODO priming query, remove root from direct
+        // Fake a priming query result here (TODO2 how to flag non-expiry?)
+        // propagation to runningquery. And check for forwarder mode?
+        isc::dns::QuestionPtr root_question(new isc::dns::Question(
+                                            isc::dns::Name("."),
+                                            isc::dns::RRClass::IN(),
+                                            isc::dns::RRType::NS()));
+        isc::dns::RRsetPtr root_ns_rrset(new isc::dns::RRset(isc::dns::Name("."), 
+                                         isc::dns::RRClass::IN(),
+                                         isc::dns::RRType::NS(),
+                                         isc::dns::RRTTL(8888)));
+        root_ns_rrset->addRdata(isc::dns::rdata::createRdata(isc::dns::RRType::NS(),
+                                                             isc::dns::RRClass::IN(),
+                                                             "l.root-servers.net."));
+        isc::dns::RRsetPtr root_a_rrset(new isc::dns::RRset(isc::dns::Name("l.root-servers.net"), 
+                                        isc::dns::RRClass::IN(),
+                                        isc::dns::RRType::A(),
+                                        isc::dns::RRTTL(8888)));
+        root_a_rrset->addRdata(isc::dns::rdata::createRdata(isc::dns::RRType::A(),
+                                                             isc::dns::RRClass::IN(),
+                                                             "199.7.83.42"));
+        isc::dns::RRsetPtr root_aaaa_rrset(new isc::dns::RRset(isc::dns::Name("l.root-servers.net"), 
+                                        isc::dns::RRClass::IN(),
+                                        isc::dns::RRType::AAAA(),
+                                        isc::dns::RRTTL(8888)));
+        root_aaaa_rrset->addRdata(isc::dns::rdata::createRdata(isc::dns::RRType::AAAA(),
+                                                             isc::dns::RRClass::IN(),
+                                                             "2001:500:3::42"));
+        isc::dns::MessagePtr priming_result(new isc::dns::Message(isc::dns::Message::RENDER));
+        priming_result->addQuestion(root_question);
+        priming_result->addRRset(isc::dns::Message::SECTION_ANSWER, root_ns_rrset);
+        priming_result->addRRset(isc::dns::Message::SECTION_ADDITIONAL, root_a_rrset);
+        priming_result->addRRset(isc::dns::Message::SECTION_ADDITIONAL, root_aaaa_rrset);
+        cache.update(*priming_result);
+        cache.update(root_ns_rrset);
+        cache.update(root_a_rrset);
+        cache.update(root_aaaa_rrset);
+        
         DNSService dns_service(io_service, checkin, lookup, answer);
         DNSService dns_service(io_service, checkin, lookup, answer);
-
         resolver->setDNSService(dns_service);
         resolver->setDNSService(dns_service);
         dlog("IOService created.");
         dlog("IOService created.");
 
 
@@ -173,7 +218,6 @@ main(int argc, char* argv[]) {
 
 
     delete config_session;
     delete config_session;
     delete cc_session;
     delete cc_session;
-    delete resolver;
 
 
     return (ret);
     return (ret);
 }
 }

+ 77 - 126
src/bin/resolver/resolver.cc

@@ -39,6 +39,9 @@
 #include <dns/rrttl.h>
 #include <dns/rrttl.h>
 #include <dns/message.h>
 #include <dns/message.h>
 #include <dns/messagerenderer.h>
 #include <dns/messagerenderer.h>
+#include <server_common/portconfig.h>
+
+#include <resolve/recursive_query.h>
 
 
 #include <log/dummylog.h>
 #include <log/dummylog.h>
 
 
@@ -52,8 +55,7 @@ using namespace isc::data;
 using namespace isc::config;
 using namespace isc::config;
 using isc::log::dlog;
 using isc::log::dlog;
 using namespace asiolink;
 using namespace asiolink;
-
+using namespace isc::server_common::portconfig;
-typedef pair<string, uint16_t> addr_t;
 
 
 class ResolverImpl {
 class ResolverImpl {
 private:
 private:
@@ -74,10 +76,15 @@ public:
         queryShutdown();
         queryShutdown();
     }
     }
 
 
-    void querySetup(DNSService& dnss) {
+    void querySetup(DNSService& dnss,
+                    isc::nsas::NameserverAddressStore& nsas,
+                    isc::cache::ResolverCache& cache)
+    {
         assert(!rec_query_); // queryShutdown must be called first
         assert(!rec_query_); // queryShutdown must be called first
         dlog("Query setup");
         dlog("Query setup");
-        rec_query_ = new RecursiveQuery(dnss, upstream_,
+        rec_query_ = new RecursiveQuery(dnss, 
+                                        nsas, cache,
+                                        upstream_,
                                         upstream_root_,
                                         upstream_root_,
                                         query_timeout_,
                                         query_timeout_,
                                         client_timeout_,
                                         client_timeout_,
@@ -96,14 +103,14 @@ public:
         }
         }
     }
     }
 
 
-    void setForwardAddresses(const vector<addr_t>& upstream,
+    void setForwardAddresses(const AddressList& upstream,
         DNSService *dnss)
         DNSService *dnss)
     {
     {
         upstream_ = upstream;
         upstream_ = upstream;
         if (dnss) {
         if (dnss) {
             if (!upstream_.empty()) {
             if (!upstream_.empty()) {
                 dlog("Setting forward addresses:");
                 dlog("Setting forward addresses:");
-                BOOST_FOREACH(const addr_t& address, upstream) {
+                BOOST_FOREACH(const AddressPair& address, upstream) {
                     dlog(" " + address.first + ":" +
                     dlog(" " + address.first + ":" +
                         boost::lexical_cast<string>(address.second));
                         boost::lexical_cast<string>(address.second));
                 }
                 }
@@ -113,14 +120,14 @@ public:
         }
         }
     }
     }
 
 
-    void setRootAddresses(const vector<addr_t>& upstream_root,
+    void setRootAddresses(const AddressList& upstream_root,
                           DNSService *dnss)
                           DNSService *dnss)
     {
     {
         upstream_root_ = upstream_root;
         upstream_root_ = upstream_root;
         if (dnss) {
         if (dnss) {
             if (!upstream_root_.empty()) {
             if (!upstream_root_.empty()) {
                 dlog("Setting root addresses:");
                 dlog("Setting root addresses:");
-                BOOST_FOREACH(const addr_t& address, upstream_root) {
+                BOOST_FOREACH(const AddressPair& address, upstream_root) {
                     dlog(" " + address.first + ":" +
                     dlog(" " + address.first + ":" +
                         boost::lexical_cast<string>(address.second));
                         boost::lexical_cast<string>(address.second));
                 }
                 }
@@ -129,7 +136,7 @@ public:
             }
             }
         }
         }
     }
     }
-
+    
     void resolve(const isc::dns::QuestionPtr& question,
     void resolve(const isc::dns::QuestionPtr& question,
         const isc::resolve::ResolverInterface::CallbackPtr& callback);
         const isc::resolve::ResolverInterface::CallbackPtr& callback);
 
 
@@ -144,11 +151,11 @@ public:
     /// These members are public because Resolver accesses them directly.
     /// These members are public because Resolver accesses them directly.
     ModuleCCSession* config_session_;
     ModuleCCSession* config_session_;
     /// Addresses of the root nameserver(s)
     /// Addresses of the root nameserver(s)
-    vector<addr_t> upstream_root_;
+    AddressList upstream_root_;
     /// Addresses of the forward nameserver
     /// Addresses of the forward nameserver
-    vector<addr_t> upstream_;
+    AddressList upstream_;
     /// Addresses we listen on
     /// Addresses we listen on
-    vector<addr_t> listen_;
+    AddressList listen_;
 
 
     /// Timeout for outgoing queries in milliseconds
     /// Timeout for outgoing queries in milliseconds
     int query_timeout_;
     int query_timeout_;
@@ -182,9 +189,11 @@ public:
     MessagePtr message_;
     MessagePtr message_;
 };
 };
 
 
+
+// TODO: REMOVE, USE isc::resolve::MakeErrorMessage?
 void
 void
-makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
+makeErrorMessage(MessagePtr message, MessagePtr answer_message,
-                 const Rcode& rcode)
+                 OutputBufferPtr buffer, const Rcode& rcode)
 {
 {
     // extract the parameters that should be kept.
     // extract the parameters that should be kept.
     // XXX: with the current implementation, it's not easy to set EDNS0
     // XXX: with the current implementation, it's not easy to set EDNS0
@@ -195,6 +204,12 @@ makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
     const Opcode& opcode = message->getOpcode();
     const Opcode& opcode = message->getOpcode();
     vector<QuestionPtr> questions;
     vector<QuestionPtr> questions;
 
 
+    // answer_message is actually ignored right now,
+    // see the comment in #607
+    answer_message->setRcode(rcode);
+    answer_message->setOpcode(opcode);
+    answer_message->setQid(qid);
+
     // If this is an error to a query or notify, we should also copy the
     // If this is an error to a query or notify, we should also copy the
     // question section.
     // question section.
     if (opcode == Opcode::QUERY() || opcode == Opcode::NOTIFY()) {
     if (opcode == Opcode::QUERY() || opcode == Opcode::NOTIFY()) {
@@ -256,25 +271,16 @@ public:
         const qid_t qid = query_message->getQid();
         const qid_t qid = query_message->getQid();
         const bool rd = query_message->getHeaderFlag(Message::HEADERFLAG_RD);
         const bool rd = query_message->getHeaderFlag(Message::HEADERFLAG_RD);
         const bool cd = query_message->getHeaderFlag(Message::HEADERFLAG_CD);
         const bool cd = query_message->getHeaderFlag(Message::HEADERFLAG_CD);
-        const Opcode& opcode = query_message->getOpcode();
+        
-
+        // The opcode and question section should have already been set,
-        // Fill in the final details of the answer message
+        // fill in the final details of the answer message
         answer_message->setQid(qid);
         answer_message->setQid(qid);
-        answer_message->setOpcode(opcode);
 
 
         answer_message->setHeaderFlag(Message::HEADERFLAG_QR);
         answer_message->setHeaderFlag(Message::HEADERFLAG_QR);
         answer_message->setHeaderFlag(Message::HEADERFLAG_RA);
         answer_message->setHeaderFlag(Message::HEADERFLAG_RA);
-        if (rd) {
+        answer_message->setHeaderFlag(Message::HEADERFLAG_RD, rd);
-            answer_message->setHeaderFlag(Message::HEADERFLAG_RD);
+        answer_message->setHeaderFlag(Message::HEADERFLAG_CD, cd);
-        }
-        if (cd) {
-            answer_message->setHeaderFlag(Message::HEADERFLAG_CD);
-        }
 
 
-        vector<QuestionPtr> questions;
-        questions.assign(query_message->beginQuestion(), query_message->endQuestion());
-        for_each(questions.begin(), questions.end(), QuestionInserter(answer_message));
-        
         // Now we can clear the buffer and render the new message into it
         // Now we can clear the buffer and render the new message into it
         buffer->clear();
         buffer->clear();
         MessageRenderer renderer(*buffer);
         MessageRenderer renderer(*buffer);
@@ -343,6 +349,19 @@ Resolver::setDNSService(asiolink::DNSService& dnss) {
 }
 }
 
 
 void
 void
+Resolver::setNameserverAddressStore(isc::nsas::NameserverAddressStore& nsas)
+{
+    nsas_ = &nsas;
+}
+
+void
+Resolver::setCache(isc::cache::ResolverCache& cache)
+{
+    cache_ = &cache;
+}
+
+
+void
 Resolver::setConfigSession(ModuleCCSession* config_session) {
 Resolver::setConfigSession(ModuleCCSession* config_session) {
     impl_->config_session_ = config_session;
     impl_->config_session_ = config_session;
 }
 }
@@ -392,12 +411,14 @@ Resolver::processMessage(const IOMessage& io_message,
     } catch (const DNSProtocolError& error) {
     } catch (const DNSProtocolError& error) {
         dlog(string("returning ") + error.getRcode().toText() + ": " + 
         dlog(string("returning ") + error.getRcode().toText() + ": " + 
             error.what());
             error.what());
-        makeErrorMessage(query_message, buffer, error.getRcode());
+        makeErrorMessage(query_message, answer_message,
+                         buffer, error.getRcode());
         server->resume(true);
         server->resume(true);
         return;
         return;
     } catch (const Exception& ex) {
     } catch (const Exception& ex) {
         dlog(string("returning SERVFAIL: ") + ex.what());
         dlog(string("returning SERVFAIL: ") + ex.what());
-        makeErrorMessage(query_message, buffer, Rcode::SERVFAIL());
+        makeErrorMessage(query_message, answer_message,
+                         buffer, Rcode::SERVFAIL());
         server->resume(true);
         server->resume(true);
         return;
         return;
     } // other exceptions will be handled at a higher layer.
     } // other exceptions will be handled at a higher layer.
@@ -407,28 +428,34 @@ Resolver::processMessage(const IOMessage& io_message,
     // Perform further protocol-level validation.
     // Perform further protocol-level validation.
     bool sendAnswer = true;
     bool sendAnswer = true;
     if (query_message->getOpcode() == Opcode::NOTIFY()) {
     if (query_message->getOpcode() == Opcode::NOTIFY()) {
-        makeErrorMessage(query_message, buffer, Rcode::NOTAUTH());
+        makeErrorMessage(query_message, answer_message,
+                         buffer, Rcode::NOTAUTH());
         dlog("Notify arrived, but we are not authoritative");
         dlog("Notify arrived, but we are not authoritative");
     } else if (query_message->getOpcode() != Opcode::QUERY()) {
     } else if (query_message->getOpcode() != Opcode::QUERY()) {
         dlog("Unsupported opcode (got: " + query_message->getOpcode().toText() +
         dlog("Unsupported opcode (got: " + query_message->getOpcode().toText() +
             ", expected: " + Opcode::QUERY().toText());
             ", expected: " + Opcode::QUERY().toText());
-        makeErrorMessage(query_message, buffer, Rcode::NOTIMP());
+        makeErrorMessage(query_message, answer_message,
+                         buffer, Rcode::NOTIMP());
     } else if (query_message->getRRCount(Message::SECTION_QUESTION) != 1) {
     } else if (query_message->getRRCount(Message::SECTION_QUESTION) != 1) {
         dlog("The query contained " +
         dlog("The query contained " +
             boost::lexical_cast<string>(query_message->getRRCount(
             boost::lexical_cast<string>(query_message->getRRCount(
             Message::SECTION_QUESTION) + " questions, exactly one expected"));
             Message::SECTION_QUESTION) + " questions, exactly one expected"));
-        makeErrorMessage(query_message, buffer, Rcode::FORMERR());
+        makeErrorMessage(query_message, answer_message,
+                         buffer, Rcode::FORMERR());
     } else {
     } else {
         ConstQuestionPtr question = *query_message->beginQuestion();
         ConstQuestionPtr question = *query_message->beginQuestion();
         const RRType &qtype = question->getType();
         const RRType &qtype = question->getType();
         if (qtype == RRType::AXFR()) {
         if (qtype == RRType::AXFR()) {
             if (io_message.getSocket().getProtocol() == IPPROTO_UDP) {
             if (io_message.getSocket().getProtocol() == IPPROTO_UDP) {
-                makeErrorMessage(query_message, buffer, Rcode::FORMERR());
+                makeErrorMessage(query_message, answer_message,
+                                 buffer, Rcode::FORMERR());
             } else {
             } else {
-                makeErrorMessage(query_message, buffer, Rcode::NOTIMP());
+                makeErrorMessage(query_message, answer_message,
+                                 buffer, Rcode::NOTIMP());
             }
             }
         } else if (qtype == RRType::IXFR()) {
         } else if (qtype == RRType::IXFR()) {
-            makeErrorMessage(query_message, buffer, Rcode::NOTIMP());
+            makeErrorMessage(query_message, answer_message,
+                             buffer, Rcode::NOTIMP());
         } else {
         } else {
             // The RecursiveQuery object will post the "resume" event to the
             // The RecursiveQuery object will post the "resume" event to the
             // DNSServer when an answer arrives, so we don't have to do it now.
             // DNSServer when an answer arrives, so we don't have to do it now.
@@ -460,46 +487,6 @@ ResolverImpl::processNormalQuery(const Question& question,
     rec_query_->resolve(question, answer_message, buffer, server);
     rec_query_->resolve(question, answer_message, buffer, server);
 }
 }
 
 
-namespace {
-
-vector<addr_t>
-parseAddresses(ConstElementPtr addresses) {
-    vector<addr_t> result;
-    if (addresses) {
-        if (addresses->getType() == Element::list) {
-            for (size_t i(0); i < addresses->size(); ++ i) {
-                ConstElementPtr addrPair(addresses->get(i));
-                ConstElementPtr addr(addrPair->get("address"));
-                ConstElementPtr port(addrPair->get("port"));
-                if (!addr || ! port) {
-                    isc_throw(BadValue, "Address must contain both the IP"
-                        "address and port");
-                }
-                try {
-                    IOAddress(addr->stringValue());
-                    if (port->intValue() < 0 ||
-                        port->intValue() > 0xffff) {
-                        isc_throw(BadValue, "Bad port value (" <<
-                            port->intValue() << ")");
-                    }
-                    result.push_back(addr_t(addr->stringValue(),
-                        port->intValue()));
-                }
-                catch (const TypeError &e) { // Better error message
-                    isc_throw(TypeError,
-                        "Address must be a string and port an integer");
-                }
-            }
-        } else if (addresses->getType() != Element::null) {
-            isc_throw(TypeError,
-                "root_addresses, forward_addresses, and listen_on config element must be a list");
-        }
-    }
-    return (result);
-}
-
-}
-
 ConstElementPtr
 ConstElementPtr
 Resolver::updateConfig(ConstElementPtr config) {
 Resolver::updateConfig(ConstElementPtr config) {
     dlog("New config comes: " + config->toWire());
     dlog("New config comes: " + config->toWire());
@@ -507,11 +494,14 @@ Resolver::updateConfig(ConstElementPtr config) {
     try {
     try {
         // Parse forward_addresses
         // Parse forward_addresses
         ConstElementPtr rootAddressesE(config->get("root_addresses"));
         ConstElementPtr rootAddressesE(config->get("root_addresses"));
-        vector<addr_t> rootAddresses(parseAddresses(rootAddressesE));
+        AddressList rootAddresses(parseAddresses(rootAddressesE,
+                                                    "root_addresses"));
         ConstElementPtr forwardAddressesE(config->get("forward_addresses"));
         ConstElementPtr forwardAddressesE(config->get("forward_addresses"));
-        vector<addr_t> forwardAddresses(parseAddresses(forwardAddressesE));
+        AddressList forwardAddresses(parseAddresses(forwardAddressesE,
+                                                       "forward_addresses"));
         ConstElementPtr listenAddressesE(config->get("listen_on"));
         ConstElementPtr listenAddressesE(config->get("listen_on"));
-        vector<addr_t> listenAddresses(parseAddresses(listenAddressesE));
+        AddressList listenAddresses(parseAddresses(listenAddressesE,
+                                                      "listen_on"));
         bool set_timeouts(false);
         int qtimeout = impl_->query_timeout_;
         int ctimeout = impl_->client_timeout_;
@@ -574,7 +564,7 @@ Resolver::updateConfig(ConstElementPtr config) {

         if (need_query_restart) {
             impl_->queryShutdown();
-            impl_->querySetup(*dnss_);
+            impl_->querySetup(*dnss_, *nsas_, *cache_);
         }
         return (isc::config::createAnswer());
     } catch (const isc::Exception& error) {
@@ -584,13 +574,13 @@ Resolver::updateConfig(ConstElementPtr config) {
 }

 void
-Resolver::setForwardAddresses(const vector<addr_t>& addresses)
+Resolver::setForwardAddresses(const AddressList& addresses)
 {
     impl_->setForwardAddresses(addresses, dnss_);
 }

 void
-Resolver::setRootAddresses(const vector<addr_t>& addresses)
+Resolver::setRootAddresses(const AddressList& addresses)
 {
     impl_->setRootAddresses(addresses, dnss_);
 }
@@ -600,58 +590,19 @@ Resolver::isForwarding() const {
     return (!impl_->upstream_.empty());
 }

-vector<addr_t>
+AddressList
 Resolver::getForwardAddresses() const {
     return (impl_->upstream_);
 }

-vector<addr_t>
+AddressList
 Resolver::getRootAddresses() const {
     return (impl_->upstream_root_);
 }

-namespace {
-
-void
-setAddresses(DNSService *service, const vector<addr_t>& addresses) {
-    service->clearServers();
-    BOOST_FOREACH(const addr_t &address, addresses) {
-        service->addServer(address.second, address.first);
-    }
-}
-
-}
-
 void
-Resolver::setListenAddresses(const vector<addr_t>& addresses) {
+Resolver::setListenAddresses(const AddressList& addresses) {
-    try {
+    installListenAddresses(addresses, impl_->listen_, *dnss_);
-        dlog("Setting listen addresses:");
-        BOOST_FOREACH(const addr_t& addr, addresses) {
-            dlog(" " + addr.first + ":" +
-                        boost::lexical_cast<string>(addr.second));
-        }
-        setAddresses(dnss_, addresses);
-        impl_->listen_ = addresses;
-    }
-    catch (const exception& e) {
-        /*
-         * We couldn't set it. So return it back. If that fails as well,
-         * we have a problem.
-         *
-         * If that fails, bad luck, but we are useless anyway, so just die
-         * and let boss start us again.
-         */
-        dlog(string("Unable to set new address: ") + e.what(),true);
-        try {
-            setAddresses(dnss_, impl_->listen_);
-        }
-        catch (const exception& e2) {
-            dlog(string("Unable to recover from error;"),true);
-            dlog(string("Rollback failed with: ") + e2.what(),true);
-            abort();
-        }
-        throw e; // Let it fly a little bit further
-    }
 }

 void
@@ -687,7 +638,7 @@ Resolver::getRetries() const {
     return impl_->retries_;
 }

-vector<addr_t>
+AddressList
 Resolver::getListenAddresses() const {
     return (impl_->listen_);
 }
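
The rewritten setListenAddresses() above delegates to installListenAddresses(), presumably a helper from the new server_common library (the resolver test Makefile below starts linking libserver_common.la). The removed code shows the behaviour any such helper has to preserve: apply the new listen addresses and, on failure, roll back to the previous ones before rethrowing. A minimal self-contained C++ sketch of that pattern; the Service type and installAddresses() name are illustrative stand-ins, not the BIND 10 API:

    #include <cstddef>
    #include <cstdlib>
    #include <stdexcept>
    #include <string>
    #include <utility>
    #include <vector>

    // Illustrative stand-ins for DNSService and AddressList.
    typedef std::pair<std::string, unsigned short> Address;
    typedef std::vector<Address> Addresses;

    struct Service {
        std::vector<Address> active;
        void clearServers() { active.clear(); }
        void addServer(unsigned short port, const std::string& addr) {
            if (addr == "bad") {                 // simulate a failure to bind
                throw std::runtime_error("cannot listen on " + addr);
            }
            active.push_back(Address(addr, port));
        }
    };

    // Switch 'service' to 'wanted'; on failure roll back to 'current' and
    // rethrow so the caller can report the configuration error.
    void installAddresses(Service& service, Addresses& current,
                          const Addresses& wanted) {
        try {
            service.clearServers();
            for (size_t i = 0; i < wanted.size(); ++i) {
                service.addServer(wanted[i].second, wanted[i].first);
            }
            current = wanted;                    // remember the new configuration
        } catch (const std::exception&) {
            try {
                service.clearServers();          // restore the previous listeners
                for (size_t i = 0; i < current.size(); ++i) {
                    service.addServer(current[i].second, current[i].first);
                }
            } catch (const std::exception&) {
                abort();                         // unrecoverable; let boss restart us
            }
            throw;                               // report the original failure
        }
    }

    int main() {
        Service service;
        Addresses current;
        Addresses good;
        good.push_back(Address("127.0.0.1", 5300));
        installAddresses(service, current, good);      // succeeds
        Addresses broken;
        broken.push_back(Address("bad", 53));
        try {
            installAddresses(service, current, broken);
        } catch (const std::exception&) {
            // 'service' is still listening on 127.0.0.1:5300
        }
        return (0);
    }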

+ 21 - 0
src/bin/resolver/resolver.h

@@ -24,6 +24,9 @@

 #include <asiolink/asiolink.h>

+#include <nsas/nameserver_address_store.h>
+#include <cache/resolver_cache.h>
+
 #include <resolve/resolver_interface.h>

 class ResolverImpl;
@@ -86,10 +89,26 @@ public:

     /// \brief Assign an ASIO IO Service queue to this Resolver object
     void setDNSService(asiolink::DNSService& dnss);
+
+    /// \brief Assign a NameserverAddressStore to this Resolver object
+    void setNameserverAddressStore(isc::nsas::NameserverAddressStore &nsas);
+
+    /// \brief Assign a cache to this Resolver object
+    void setCache(isc::cache::ResolverCache& cache);

     /// \brief Return this object's ASIO IO Service queue
     asiolink::DNSService& getDNSService() const { return (*dnss_); }

+    /// \brief Returns this object's NSAS
+    isc::nsas::NameserverAddressStore& getNameserverAddressStore() const {
+        return *nsas_;
+    };
+
+    /// \brief Returns this object's ResolverCache
+    isc::cache::ResolverCache& getResolverCache() const {
+        return *cache_;
+    };
+
     /// \brief Return pointer to the DNS Lookup callback function
     asiolink::DNSLookup* getDNSLookupProvider() { return (dns_lookup_); }

@@ -208,6 +227,8 @@ private:
     asiolink::SimpleCallback* checkin_;
     asiolink::DNSLookup* dns_lookup_;
     asiolink::DNSAnswer* dns_answer_;
+    isc::nsas::NameserverAddressStore* nsas_;
+    isc::cache::ResolverCache* cache_;
 };

 #endif // __RESOLVER_H
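
The nsas_ and cache_ members added above are plain pointers: the Resolver only borrows a NameserverAddressStore and a ResolverCache that are created and owned elsewhere, and hands them on to querySetup() (see the resolver.cc hunk above). A minimal self-contained sketch of that non-owning setter/getter pattern, using stand-in types rather than the real isc::nsas and isc::cache classes:

    #include <cassert>
    #include <cstddef>

    struct NameserverAddressStore {};   // stand-in for isc::nsas::NameserverAddressStore
    struct ResolverCache {};            // stand-in for isc::cache::ResolverCache

    class Resolver {
    public:
        Resolver() : nsas_(NULL), cache_(NULL) {}
        // Only a pointer is kept; the caller retains ownership and must keep
        // the objects alive for the resolver's lifetime.
        void setNameserverAddressStore(NameserverAddressStore& nsas) { nsas_ = &nsas; }
        void setCache(ResolverCache& cache) { cache_ = &cache; }
        NameserverAddressStore& getNameserverAddressStore() const { return (*nsas_); }
        ResolverCache& getResolverCache() const { return (*cache_); }
    private:
        NameserverAddressStore* nsas_;  // not owned
        ResolverCache* cache_;          // not owned
    };

    int main() {
        NameserverAddressStore nsas;
        ResolverCache cache;
        Resolver resolver;
        resolver.setNameserverAddressStore(nsas);
        resolver.setCache(cache);
        assert(&resolver.getNameserverAddressStore() == &nsas);
        assert(&resolver.getResolverCache() == &cache);
        return (0);
    }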

+ 20 - 20
src/bin/resolver/resolver.spec.pre.in

@@ -6,48 +6,48 @@
      {
        "item_name": "timeout_query",
        "item_type": "integer",
-        "item_optional": False,
+        "item_optional": false,
        "item_default": 2000
      },
      {
        "item_name": "timeout_client",
        "item_type": "integer",
-        "item_optional": False,
+        "item_optional": false,
        "item_default": 4000
      },
      {
        "item_name": "timeout_lookup",
        "item_type": "integer",
-        "item_optional": False,
+        "item_optional": false,
        "item_default": 30000
      },
      {
        "item_name": "retries",
        "item_type": "integer",
-        "item_optional": False,
+        "item_optional": false,
        "item_default": 3
      },
      {
        "item_name": "forward_addresses",
        "item_type": "list",
-        "item_optional": True,
+        "item_optional": true,
        "item_default": [],
        "list_item_spec" : {
          "item_name": "address",
          "item_type": "map",
-          "item_optional": False,
+          "item_optional": false,
          "item_default": {},
          "map_item_spec": [
            {
              "item_name": "address",
              "item_type": "string",
-              "item_optional": False,
+              "item_optional": false,
              "item_default": "::1"
            },
            {
              "item_name": "port",
              "item_type": "integer",
-              "item_optional": False,
+              "item_optional": false,
              "item_default": 53
            }
          ]
@@ -56,24 +56,24 @@
      {
        "item_name": "root_addresses",
        "item_type": "list",
-        "item_optional": True,
+        "item_optional": true,
        "item_default": [],
        "list_item_spec" : {
          "item_name": "address",
          "item_type": "map",
-          "item_optional": False,
+          "item_optional": false,
          "item_default": {},
          "map_item_spec": [
            {
              "item_name": "address",
              "item_type": "string",
-              "item_optional": False,
+              "item_optional": false,
              "item_default": "::1"
            },
            {
              "item_name": "port",
              "item_type": "integer",
-              "item_optional": False,
+              "item_optional": false,
              "item_default": 53
            }
          ]
@@ -82,34 +82,34 @@
      {
        "item_name": "listen_on",
        "item_type": "list",
-        "item_optional": False,
+        "item_optional": false,
        "item_default": [
          {
            "address": "::1",
-            "port": 5300
+            "port": 53
          },
          {
            "address": "127.0.0.1",
-            "port": 5300
+            "port": 53
-          },
+          }
        ],
        "list_item_spec": {
          "item_name": "address",
          "item_type": "map",
-          "item_optional": False,
+          "item_optional": false,
          "item_default": {},
          "map_item_spec": [
            {
              "item_name": "address",
              "item_type": "string",
-              "item_optional": False,
+              "item_optional": false,
              "item_default": "::1"
            },
            {
              "item_name": "port",
              "item_type": "integer",
-              "item_optional": False,
+              "item_optional": false,
-              "item_default": 5300
+              "item_default": 53
            }
          ]
        }

+ 4 - 0
src/bin/resolver/tests/Makefile.am

@@ -37,6 +37,10 @@ run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
 run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
 run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
+run_unittests_LDADD += $(top_builddir)/src/lib/cache/libcache.la
+run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
+run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la

 # Note the ordering matters: -Wno-... must follow -Wextra (defined in
 # B10_CXXFLAGS

+ 24 - 92
src/bin/resolver/tests/resolver_config_unittest.cc

@@ -24,6 +24,7 @@

 #include <dns/tests/unittest_util.h>
 #include <testutils/srv_test.h>
+#include <testutils/portconfig.h>

 using namespace std;
 using namespace isc::data;
@@ -42,7 +43,7 @@ class ResolverConfig : public ::testing::Test {
         {
             server.setDNSService(dnss);
         }
-        void invalidTest(const string &JOSN);
+        void invalidTest(const string &JSON, const string& name);
 };

 TEST_F(ResolverConfig, forwardAddresses) {
@@ -122,117 +123,48 @@ TEST_F(ResolverConfig, rootAddressConfig) {
 }

 void
-ResolverConfig::invalidTest(const string &JOSN) {
+ResolverConfig::invalidTest(const string &JSON, const string& name) {
-    ElementPtr config(Element::fromJSON(JOSN));
+    isc::testutils::portconfig::configRejected(server, JSON, name);
-    EXPECT_FALSE(server.updateConfig(config)->equals(
-        *isc::config::createAnswer())) << "Accepted config " << JOSN << endl;
 }

 TEST_F(ResolverConfig, invalidForwardAddresses) {
     // Try torturing it with some invalid inputs
     invalidTest("{"
         "\"forward_addresses\": \"error\""
-        "}");
+        "}", "Invalid type");
     invalidTest("{"
         "\"forward_addresses\": [{}]"
-        "}");
+        "}", "Empty element");
     invalidTest("{"
         "\"forward_addresses\": [{"
         "   \"port\": 1.5,"
         "   \"address\": \"192.0.2.1\""
-        "}]}");
+        "}]}", "Float port");
     invalidTest("{"
         "\"forward_addresses\": [{"
         "   \"port\": -5,"
         "   \"address\": \"192.0.2.1\""
-        "}]}");
+        "}]}", "Negative port");
     invalidTest("{"
         "\"forward_addresses\": [{"
         "   \"port\": 53,"
         "   \"address\": \"bad_address\""
-        "}]}");
+        "}]}", "Bad address");
 }

+// Try setting the addresses directly
 TEST_F(ResolverConfig, listenAddresses) {
-    // Default value should be fully recursive
+    isc::testutils::portconfig::listenAddresses(server);
-    EXPECT_TRUE(server.getListenAddresses().empty());
-
-    // Try putting there some addresses
-    vector<pair<string, uint16_t> > addresses;
-    addresses.push_back(pair<string, uint16_t>("127.0.0.1", 5321));
-    addresses.push_back(pair<string, uint16_t>("::1", 5321));
-    server.setListenAddresses(addresses);
-    EXPECT_EQ(2, server.getListenAddresses().size());
-    EXPECT_EQ("::1", server.getListenAddresses()[1].first);
-
-    // Is it independent from what we do with the vector later?
-    addresses.clear();
-    EXPECT_EQ(2, server.getListenAddresses().size());
-
-    // Did it return to fully recursive?
-    server.setListenAddresses(addresses);
-    EXPECT_TRUE(server.getListenAddresses().empty());
 }

-TEST_F(ResolverConfig, DISABLED_listenAddressConfig) {
+// Try setting some addresses and a rollback
-    // Try putting there some address
+TEST_F(ResolverConfig, listenAddressConfig) {
-    ElementPtr config(Element::fromJSON("{"
+    isc::testutils::portconfig::listenAddressConfig(server);
-        "\"listen_on\": ["
-        "   {"
-        "       \"address\": \"127.0.0.1\","
-        "       \"port\": 5321"
-        "   }"
-        "]"
-        "}"));
-    ConstElementPtr result(server.updateConfig(config));
-    EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
-    ASSERT_EQ(1, server.getListenAddresses().size());
-    EXPECT_EQ("127.0.0.1", server.getListenAddresses()[0].first);
-    EXPECT_EQ(5321, server.getListenAddresses()[0].second);
-
-    // As this is example address, the machine should not have it on
-    // any interface
-    // FIXME: This test aborts, because it tries to rollback and
-    //     it is impossible, since the sockets are not closed.
-    //     Once #388 is solved, enable this test.
-    config = Element::fromJSON("{"
-        "\"listen_on\": ["
-        "   {"
-        "       \"address\": \"192.0.2.0\","
-        "       \"port\": 5321"
-        "   }"
-        "]"
-        "}");
-    result = server.updateConfig(config);
-    EXPECT_FALSE(result->equals(*isc::config::createAnswer()));
-    ASSERT_EQ(1, server.getListenAddresses().size());
-    EXPECT_EQ("127.0.0.1", server.getListenAddresses()[0].first);
-    EXPECT_EQ(5321, server.getListenAddresses()[0].second);
 }

+// Try some invalid configs are rejected
 TEST_F(ResolverConfig, invalidListenAddresses) {
-    // Try torturing it with some invalid inputs
+    isc::testutils::portconfig::invalidListenAddressConfig(server);
-    invalidTest("{"
-        "\"listen_on\": \"error\""
-        "}");
-    invalidTest("{"
-        "\"listen_on\": [{}]"
-        "}");
-    invalidTest("{"
-        "\"listen_on\": [{"
-        "   \"port\": 1.5,"
-        "   \"address\": \"192.0.2.1\""
-        "}]}");
-    invalidTest("{"
-        "\"listen_on\": [{"
-        "   \"port\": -5,"
-        "   \"address\": \"192.0.2.1\""
-        "}]}");
-    invalidTest("{"
-        "\"listen_on\": [{"
-        "   \"port\": 53,"
-        "   \"address\": \"bad_address\""
-        "}]}");
 }

 // Just test it sets and gets the values correctly
@@ -267,28 +199,28 @@ TEST_F(ResolverConfig, timeoutsConfig) {
 TEST_F(ResolverConfig, invalidTimeoutsConfig) {
     invalidTest("{"
         "\"timeout_query\": \"error\""
-        "}");
+        "}", "Wrong query element type");
     invalidTest("{"
         "\"timeout_query\": -2"
-        "}");
+        "}", "Negative query timeout");
     invalidTest("{"
         "\"timeout_client\": \"error\""
-        "}");
+        "}", "Wrong client element type");
     invalidTest("{"
         "\"timeout_client\": -2"
-        "}");
+        "}", "Negative client timeout");
     invalidTest("{"
         "\"timeout_lookup\": \"error\""
-        "}");
+        "}", "Wrong lookup element type");
     invalidTest("{"
         "\"timeout_lookup\": -2"
-        "}");
+        "}", "Negative lookup timeout");
     invalidTest("{"
         "\"retries\": \"error\""
-        "}");
+        "}", "Wrong retries element type");
     invalidTest("{"
         "\"retries\": -1"
-        "}");
+        "}", "Negative number of retries");
 }

 }

+ 21 - 0
src/bin/resolver/tests/resolver_unittest.cc

@@ -96,6 +96,27 @@ TEST_F(ResolverTest, AXFRFail) {
                 QR_FLAG, 1, 0, 0, 0);
 }

+TEST_F(ResolverTest, IXFRFail) {
+    UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::IXFR());
+    createRequestPacket(request_message, IPPROTO_TCP);
+    // IXFR is not implemented and should always send NOTIMP.
+    server.processMessage(*io_message,
+                          parse_message,
+                          response_message,
+                          response_obuffer,
+                          &dnsserv);
+    EXPECT_TRUE(dnsserv.hasAnswer());
+    // the second check is what we'll need in the end (with the values
+    // from the first one), but right now the first one is for what
+    // will actually be returned to the client
+    headerCheck(*parse_message, default_qid, Rcode::NOTIMP(), opcode.getCode(),
+                QR_FLAG, 1, 0, 0, 0);
+    headerCheck(*response_message, default_qid, Rcode::NOTIMP(), opcode.getCode(),
+                0, 0, 0, 0, 0);
+}
+
 TEST_F(ResolverTest, notifyFail) {
     // Notify should always return NOTAUTH
     request_message.clear(Message::RENDER);

+ 1 - 2
src/bin/stats/Makefile.am

@@ -5,7 +5,7 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
 pkglibexec_SCRIPTS = b10-stats
 noinst_SCRIPTS = b10-stats_stub

-b10_statsdir = $(DESTDIR)$(pkgdatadir)
+b10_statsdir = $(pkgdatadir)
 b10_stats_DATA = stats.spec

 CLEANFILES = stats.spec b10-stats stats.pyc stats.pyo b10-stats_stub stats_stub.pyc stats_stub.pyo
@@ -23,7 +23,6 @@ endif
 stats.spec: stats.spec.pre
 	$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" stats.spec.pre >$@

-# TODO: does this need $$(DESTDIR) also?
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
 b10-stats: stats.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \

+ 1 - 2
src/bin/usermgr/Makefile.am

@@ -1,6 +1,6 @@
 sbin_SCRIPTS = b10-cmdctl-usermgr

-b10_cmdctl_usermgrdir = $(DESTDIR)$(pkgdatadir)
+b10_cmdctl_usermgrdir = $(pkgdatadir)

 CLEANFILES=	b10-cmdctl-usermgr

@@ -14,7 +14,6 @@ b10-cmdctl-usermgr.8: b10-cmdctl-usermgr.xml

 endif

-# TODO: does this need $$(DESTDIR) also?
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
 b10-cmdctl-usermgr: b10-cmdctl-usermgr.py
 	$(SED) "s|@@PYTHONPATH@@|@pyexecdir@|" b10-cmdctl-usermgr.py >$@

+ 1 - 2
src/bin/xfrin/Makefile.am

@@ -4,7 +4,7 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@

 pkglibexec_SCRIPTS = b10-xfrin

-b10_xfrindir = $(DESTDIR)$(pkgdatadir)
+b10_xfrindir = $(pkgdatadir)
 b10_xfrin_DATA = xfrin.spec

 CLEANFILES = b10-xfrin xfrin.pyc 
@@ -20,7 +20,6 @@ b10-xfrin.8: b10-xfrin.xml

 endif

-# TODO: does this need $$(DESTDIR) also?
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
 b10-xfrin: xfrin.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \

+ 1 - 2
src/bin/xfrout/Makefile.am

@@ -4,7 +4,7 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@

 pkglibexec_SCRIPTS = b10-xfrout

-b10_xfroutdir = $(DESTDIR)$(pkgdatadir)
+b10_xfroutdir = $(pkgdatadir)
 b10_xfrout_DATA = xfrout.spec

 CLEANFILES=	b10-xfrout xfrout.pyc xfrout.spec
@@ -23,7 +23,6 @@ endif
 xfrout.spec: xfrout.spec.pre
 	$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrout.spec.pre >$@

-# TODO: does this need $$(DESTDIR) also?
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
 b10-xfrout: xfrout.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \

+ 36 - 16
src/bin/xfrout/tests/xfrout_test.py

@@ -85,23 +85,12 @@ class TestXfroutSession(unittest.TestCase):
         return msg

     def setUp(self):
-        request = MySocket(socket.AF_INET,socket.SOCK_STREAM)
+        self.sock = MySocket(socket.AF_INET,socket.SOCK_STREAM)
         self.log = isc.log.NSLogger('xfrout', '',  severity = 'critical', log_to_console = False )
-        (self.write_sock, self.read_sock) = socket.socketpair()
+        self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(), self.log)
-        self.xfrsess = MyXfroutSession(request, None, None, self.log, self.read_sock)
-        self.xfrsess.server = Dbserver()
         self.mdata = bytes(b'\xd6=\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\xfc\x00\x01')
-        self.sock = MySocket(socket.AF_INET,socket.SOCK_STREAM)
         self.soa_record = (4, 3, 'example.com.', 'com.example.', 3600, 'SOA', None, 'master.example.com. admin.example.com. 1234 3600 1800 2419200 7200')

-    def test_receive_query_message(self):
-        send_msg = b"\xd6=\x00\x00\x00\x01\x00"
-        msg_len = struct.pack('H', socket.htons(len(send_msg)))
-        self.write_sock.send(msg_len)
-        self.write_sock.send(send_msg)
-        recv_msg = self.xfrsess._receive_query_message(self.read_sock)
-        self.assertEqual(recv_msg, send_msg)
-
     def test_parse_query_message(self):
         [get_rcode, get_msg] = self.xfrsess._parse_query_message(self.mdata)
         self.assertEqual(get_rcode.to_text(), "NOERROR")
@@ -121,6 +110,29 @@ class TestXfroutSession(unittest.TestCase):
         get_msg = self.sock.read_msg()
         self.assertEqual(get_msg.get_rcode().to_text(), "NXDOMAIN")

+    def test_send_message(self):
+        msg = self.getmsg()
+        msg.make_response()
+        # soa record data with different cases
+        soa_record = (4, 3, 'Example.com.', 'com.Example.', 3600, 'SOA', None, 'master.Example.com. admin.exAmple.com. 1234 3600 1800 2419200 7200')
+        rrset_soa = self.xfrsess._create_rrset_from_db_record(soa_record)
+        msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+        self.xfrsess._send_message(self.sock, msg)
+        send_out_data = self.sock.readsent()[2:]
+
+        # CASE_INSENSITIVE compression mode
+        render = MessageRenderer();
+        render.set_length_limit(XFROUT_MAX_MESSAGE_SIZE)
+        msg.to_wire(render)
+        self.assertNotEqual(render.get_data(), send_out_data)
+
+        # CASE_SENSITIVE compression mode
+        render.clear()
+        render.set_compress_mode(MessageRenderer.CASE_SENSITIVE)
+        render.set_length_limit(XFROUT_MAX_MESSAGE_SIZE)
+        msg.to_wire(render)
+        self.assertEqual(render.get_data(), send_out_data)
+
     def test_clear_message(self):
         msg = self.getmsg()
         qid = msg.get_qid()
@@ -134,7 +146,6 @@ class TestXfroutSession(unittest.TestCase):
         self.assertTrue(msg.get_header_flag(Message.HEADERFLAG_AA))

     def test_reply_query_with_format_error(self):
-
         msg = self.getmsg()
         self.xfrsess._reply_query_with_format_error(msg, self.sock)
         get_msg = self.sock.read_msg()
@@ -249,11 +260,11 @@ class TestXfroutSession(unittest.TestCase):
         self.xfrsess._zone_has_soa = zone_empty
         def false_func():
             return False
-        self.xfrsess.server.increase_transfers_counter = false_func
+        self.xfrsess._server.increase_transfers_counter = false_func
         self.assertEqual(self.xfrsess._check_xfrout_available(True).to_text(), "REFUSED")
         def true_func():
             return True
-        self.xfrsess.server.increase_transfers_counter = true_func
+        self.xfrsess._server.increase_transfers_counter = true_func
         self.assertEqual(self.xfrsess._check_xfrout_available(True).to_text(), "NOERROR")

     def test_dns_xfrout_start_formerror(self):
@@ -323,8 +334,17 @@ class MyUnixSockServer(UnixSockServer):

 class TestUnixSockServer(unittest.TestCase):
     def setUp(self):
+        self.write_sock, self.read_sock = socket.socketpair()
         self.unix = MyUnixSockServer()

+    def test_receive_query_message(self):
+        send_msg = b"\xd6=\x00\x00\x00\x01\x00"
+        msg_len = struct.pack('H', socket.htons(len(send_msg)))
+        self.write_sock.send(msg_len)
+        self.write_sock.send(send_msg)
+        recv_msg = self.unix._receive_query_message(self.read_sock)
+        self.assertEqual(recv_msg, send_msg)
+
     def test_updata_config_data(self):
         self.unix.update_config_data({'transfers_out':10 })
         self.assertEqual(self.unix._max_transfers_out, 10)

+ 116 - 76
src/bin/xfrout/xfrout.py.in

@@ -50,7 +50,11 @@ isc.util.process.rename()
 if "B10_FROM_BUILD" in os.environ:
     SPECFILE_PATH = os.environ["B10_FROM_BUILD"] + "/src/bin/xfrout"
     AUTH_SPECFILE_PATH = os.environ["B10_FROM_BUILD"] + "/src/bin/auth"
-    UNIX_SOCKET_FILE= os.environ["B10_FROM_BUILD"] + "/auth_xfrout_conn"
+    if "B10_FROM_SOURCE_LOCALSTATEDIR" in os.environ:
+        UNIX_SOCKET_FILE = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"] + \
+            "/auth_xfrout_conn"
+    else:
+        UNIX_SOCKET_FILE = os.environ["B10_FROM_BUILD"] + "/auth_xfrout_conn"
 else:
     PREFIX = "@prefix@"
     DATAROOTDIR = "@datarootdir@"
@@ -73,75 +77,25 @@ def get_rrset_len(rrset):
     return len(bytes)


-class XfroutSession(BaseRequestHandler):
+class XfroutSession():
-    def __init__(self, request, client_address, server, log, sock):
+    def __init__(self, sock_fd, request_data, server, log):
         # The initializer for the superclass may call functions
         # that need _log to be set, so we set it first
+        self._sock_fd = sock_fd
+        self._request_data = request_data
+        self._server = server
         self._log = log
-        self._shutdown_sock = sock
+        self.handle()
-        BaseRequestHandler.__init__(self, request, client_address, server)

     def handle(self):
-        '''Handle a request until shutdown or xfrout client is closed.'''
+        ''' Handle a xfrout query, send xfrout response '''
-        # check self.server._shutdown_event to ensure the real shutdown comes.
+        try:
-        # Linux could trigger a spurious readable event on the _shutdown_sock 
+            self.dns_xfrout_start(self._sock_fd, self._request_data)
-        # due to a bug, so we need perform a double check. 
+            #TODO, avoid catching all exceptions
-        while not self.server._shutdown_event.is_set(): # Check if xfrout is shutdown
+        except Exception as e:
-            try:
+            self._log.log_message("error", str(e))
-                (rlist, wlist, xlist) = select.select([self._shutdown_sock, self.request], [], [])
-            except select.error as e:
-                if e.args[0] == errno.EINTR:
-                    (rlist, wlist, xlist) = ([], [], [])
-                    continue
-                else:
-                    self._log.log_message("error", "Error with select(): %s" %e)
-                    break
-            # self.server._shutdown_evnet will be set by now, if it is not a false
-            # alarm
-            if self._shutdown_sock in rlist:
-                continue
-
-            sock_fd = recv_fd(self.request.fileno())
-
-            if sock_fd < 0:
-                # This may happen when one xfrout process try to connect to
-                # xfrout unix socket server, to check whether there is another
-                # xfrout running.
-                if sock_fd == XFR_FD_RECEIVE_FAIL:
-                    self._log.log_message("error", "Failed to receive the file descriptor for XFR connection")
-                break
-
-            # receive query msg
-            msgdata = self._receive_query_message(self.request)
-            if not msgdata:
-                break
-
-            try:
-                self.dns_xfrout_start(sock_fd, msgdata)
-                #TODO, avoid catching all exceptions
-            except Exception as e:
-                self._log.log_message("error", str(e))
-
-            os.close(sock_fd)
-
-    def _receive_query_message(self, sock):
-        ''' receive query message from sock'''
-        # receive data length
-        data_len = sock.recv(2)
-        if not data_len:
-            return None
-        msg_len = struct.unpack('!H', data_len)[0]
-        # receive data
-        recv_size = 0
-        msgdata = b''
-        while recv_size < msg_len:
-            data = sock.recv(msg_len - recv_size)
-            if not data:
-                return None
-            recv_size += len(data)
-            msgdata += data

-        return msgdata
+        os.close(self._sock_fd)

     def _parse_query_message(self, mdata):
         ''' parse query message to [socket,message]'''
@@ -170,6 +124,9 @@ class XfroutSession(BaseRequestHandler):

     def _send_message(self, sock_fd, msg):
         render = MessageRenderer()
+        # As defined in RFC5936 section3.4, perform case-preserving name
+        # compression for AXFR message.
+        render.set_compress_mode(MessageRenderer.CASE_SENSITIVE)
         render.set_length_limit(XFROUT_MAX_MESSAGE_SIZE)
         msg.to_wire(render)
         header_len = struct.pack('H', socket.htons(render.get_length()))
@@ -192,7 +149,6 @@ class XfroutSession(BaseRequestHandler):
         msg.set_rcode(Rcode.FORMERR())
         self._send_message(sock_fd, msg)

-
     def _zone_has_soa(self, zone):
         '''Judge if the zone has an SOA record.'''
         # In some sense, the SOA defines a zone.
@@ -200,7 +156,7 @@ class XfroutSession(BaseRequestHandler):
         # specific zone, we need to judge if the zone has an SOA record;
         # if not, we consider the zone has incomplete data, so xfrout can't
         # serve for it.
-        if sqlite3_ds.get_zone_soa(zone, self.server.get_db_file()):
+        if sqlite3_ds.get_zone_soa(zone, self._server.get_db_file()):
             return True

         return False
@@ -212,7 +168,7 @@ class XfroutSession(BaseRequestHandler):
         # authority for the specific zone.
         # TODO: should get zone's configuration from cfgmgr or other place
         # in future.
-        return sqlite3_ds.zone_exist(zonename, self.server.get_db_file())
+        return sqlite3_ds.zone_exist(zonename, self._server.get_db_file())

     def _check_xfrout_available(self, zone_name):
         '''Check if xfr request can be responsed.
@@ -231,7 +187,7 @@ class XfroutSession(BaseRequestHandler):
             return Rcode.SERVFAIL()

         #TODO, check allow_transfer
-        if not self.server.increase_transfers_counter():
+        if not self._server.increase_transfers_counter():
             return Rcode.REFUSED()

         return Rcode.NOERROR()
@@ -257,7 +213,7 @@ class XfroutSession(BaseRequestHandler):
         except Exception as err:
             self._log.log_message("error", str(err))

-        self.server.decrease_transfers_counter()
+        self._server.decrease_transfers_counter()
         return


@@ -304,14 +260,14 @@ class XfroutSession(BaseRequestHandler):
        #TODO, there should be a better way to insert rrset.
         msg.make_response()
         msg.set_header_flag(Message.HEADERFLAG_AA)
-        soa_record = sqlite3_ds.get_zone_soa(zone_name, self.server.get_db_file())
+        soa_record = sqlite3_ds.get_zone_soa(zone_name, self._server.get_db_file())
         rrset_soa = self._create_rrset_from_db_record(soa_record)
         msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)

         message_upper_len = get_rrset_len(rrset_soa)

-        for rr_data in sqlite3_ds.get_zone_datas(zone_name, self.server.get_db_file()):
+        for rr_data in sqlite3_ds.get_zone_datas(zone_name, self._server.get_db_file()):
-            if  self.server._shutdown_event.is_set(): # Check if xfrout is shutdown
+            if  self._server._shutdown_event.is_set(): # Check if xfrout is shutdown
                 self._log.log_message("info", "xfrout process is being shutdown")
                 return

@@ -353,9 +309,93 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
         self.update_config_data(config_data)
         self._cc = cc

-    def finish_request(self, request, client_address):
+    def _receive_query_message(self, sock):
+        ''' receive request message from sock'''
+        # receive data length
+        data_len = sock.recv(2)
+        if not data_len:
+            return None
+        msg_len = struct.unpack('!H', data_len)[0]
+        # receive data
+        recv_size = 0
+        msgdata = b''
+        while recv_size < msg_len:
+            data = sock.recv(msg_len - recv_size)
+            if not data:
+                return None
+            recv_size += len(data)
+            msgdata += data
+
+        return msgdata
+
+    def handle_request(self):
+        ''' Enable server handle a request until shutdown or auth is closed.'''
+        try:
+            request, client_address = self.get_request()
+        except socket.error:
+            self._log.log_message("error", "Failed to fetch request")
+            return
+
+        # Check self._shutdown_event to ensure the real shutdown comes.
+        # Linux could trigger a spurious readable event on the _read_sock
+        # due to a bug, so we need perform a double check.
+        while not self._shutdown_event.is_set(): # Check if xfrout is shutdown
+            try:
+                (rlist, wlist, xlist) = select.select([self._read_sock, request], [], [])
+            except select.error as e:
+                if e.args[0] == errno.EINTR:
+                    (rlist, wlist, xlist) = ([], [], [])
+                    continue
+                else:
+                    self._log.log_message("error", "Error with select(): %s" %e)
+                    break
+
+            # self.server._shutdown_event will be set by now, if it is not a false
+            # alarm
+            if self._read_sock in rlist:
+                continue
+
+            try:
+                self.process_request(request)
+            except:
+                self._log.log_message("error", "Exception happened during processing of %s"
+                                      % str(client_address))
+                break
+
+    def _handle_request_noblock(self):
+        """Override the function _handle_request_noblock(), it creates a new
+        thread to handle requests for each auth"""
+        td = threading.Thread(target=self.handle_request)
+        td.setDaemon(True)
+        td.start()
+
+    def process_request(self, request):
+        """Receive socket fd and query message from auth, then
+        start a new thread to process the request."""
+        sock_fd = recv_fd(request.fileno())
+        if sock_fd < 0:
+            # This may happen when one xfrout process try to connect to
+            # xfrout unix socket server, to check whether there is another
+            # xfrout running.
+            if sock_fd == XFR_FD_RECEIVE_FAIL:
+                self._log.log_message("error", "Failed to receive the file descriptor for XFR connection")
+            return
+
+        # receive request msg
+        request_data = self._receive_query_message(request)
+        if not request_data:
+            return
+
+        t = threading.Thread(target = self.finish_request,
+                             args = (sock_fd, request_data))
+        if self.daemon_threads:
+            t.daemon = True
+        t.start()
+
+
+    def finish_request(self, sock_fd, request_data):
         '''Finish one request by instantiating RequestHandlerClass.'''
-        self.RequestHandlerClass(request, client_address, self, self._log, self._read_sock)
+        self.RequestHandlerClass(sock_fd, request_data, self, self._log)

     def _remove_unused_sock_file(self, sock_file):
         '''Try to remove the socket file. If the file is being used
@@ -373,7 +413,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
             try:
                 os.unlink(sock_file)
             except OSError as err:
-                self._log.log_message("error", '[b10-xfrout] Fail to remove file %s: %s\n' % (sock_file, err))
+                self._log.log_message("error", "[b10-xfrout] Fail to remove file %s: %s\n" % (sock_file, err))
                 sys.exit(0)

     def _sock_file_in_use(self, sock_file):
@@ -394,7 +434,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
         try:
             os.unlink(self._sock_file)
         except Exception as e:
-            self._log.log_message("error", str(e))
+            self._log.log_message('error', str(e))

     def update_config_data(self, new_config):
         '''Apply the new config setting of xfrout module. '''
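
b10-auth and b10-xfrout exchange each transfer request over the auth_xfrout_conn UNIX socket using a simple frame: a two-byte network-order length followed by the raw DNS message (_send_message() packs it with struct.pack('H', socket.htons(...)); _receive_query_message() above unpacks it with struct.unpack('!H', ...)). A C++ sketch of reading one such frame from a connected descriptor, given only to illustrate the framing, not code from the tree:

    #include <arpa/inet.h>     // ntohs()
    #include <sys/socket.h>    // recv()
    #include <sys/types.h>
    #include <stdint.h>
    #include <string>

    // Read exactly 'len' bytes; false means EOF or a read error.
    static bool readAll(int fd, void* buffer, size_t len) {
        char* p = static_cast<char*>(buffer);
        size_t done = 0;
        while (done < len) {
            const ssize_t n = recv(fd, p + done, len - done, 0);
            if (n <= 0) {
                return (false);
            }
            done += n;
        }
        return (true);
    }

    // Read one frame: 2-byte big-endian length, then that many payload bytes.
    bool readFrame(int fd, std::string& payload) {
        uint16_t wire_len;
        if (!readAll(fd, &wire_len, sizeof(wire_len))) {
            return (false);
        }
        const uint16_t len = ntohs(wire_len);
        payload.resize(len);
        return (len == 0 || readAll(fd, &payload[0], len));
    }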
         '''Apply the new config setting of xfrout module. '''

+ 1 - 1
src/bin/zonemgr/Makefile.am

@@ -4,7 +4,7 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
 
 
 pkglibexec_SCRIPTS = b10-zonemgr
 pkglibexec_SCRIPTS = b10-zonemgr
 
 
-b10_zonemgrdir = $(DESTDIR)$(pkgdatadir)
+b10_zonemgrdir = $(pkgdatadir)
 b10_zonemgr_DATA = zonemgr.spec
 b10_zonemgr_DATA = zonemgr.spec
 
 
 CLEANFILES = b10-zonemgr zonemgr.pyc zonemgr.spec
 CLEANFILES = b10-zonemgr zonemgr.pyc zonemgr.spec

+ 15 - 0
src/cppcheck-suppress.lst

@@ -0,0 +1,15 @@
+// On some systems cppcheck produces false alarms about 'missing includes'.
+// the following two will suppress, depending on the cppcheck version
+debug
+missingInclude
+// This is a template, and should be excluded from the check
+unreadVariable:src/lib/dns/rdata/template.cc:59
+// These three trigger warnings due to the incomplete implementation.  This is
+// our problem, but we need to suppress the warnings for now.
+functionConst:src/lib/cache/resolver_cache.h
+functionConst:src/lib/cache/message_cache.h
+functionConst:src/lib/cache/rrset_cache.h
+// Intentional self assignment tests.  Suppress warning about them.
+selfAssignment:src/lib/dns/tests/name_unittest.cc:292
+selfAssignment:src/lib/dns/tests/rdata_unittest.cc:227
+selfAssignment:src/lib/dns/tests/tsigkey_unittest.cc:104

+ 2 - 2
src/lib/Makefile.am

@@ -1,2 +1,2 @@
-SUBDIRS = exceptions dns cc config datasrc python xfr bench log \
+SUBDIRS = exceptions dns cc config python xfr bench log asiolink \
-          resolve asiolink testutils nsas cache
+          nsas cache resolve testutils datasrc server_common

+ 25 - 19
src/lib/asiolink/Makefile.am

@@ -12,34 +12,40 @@ CLEANFILES = *.gcno *.gcda
 # have some code fragments that would hit gcc's unused-parameter warning,
 # which would make the build fail with -Werror (our default setting).
 lib_LTLIBRARIES = libasiolink.la
-libasiolink_la_SOURCES = asiolink.h
+libasiolink_la_SOURCES  = asiolink.h
-libasiolink_la_SOURCES += io_service.cc io_service.h
+libasiolink_la_SOURCES += asiolink_utilities.h
-libasiolink_la_SOURCES += dns_service.cc dns_service.h
+libasiolink_la_SOURCES += asiodef.cc asiodef.h
-libasiolink_la_SOURCES += dns_server.h
-libasiolink_la_SOURCES += dns_lookup.h
 libasiolink_la_SOURCES += dns_answer.h
-libasiolink_la_SOURCES += simple_callback.h
+libasiolink_la_SOURCES += dns_lookup.h
-libasiolink_la_SOURCES += interval_timer.h interval_timer.cc
+libasiolink_la_SOURCES += dns_server.h
-libasiolink_la_SOURCES += recursive_query.h recursive_query.cc
+libasiolink_la_SOURCES += dns_service.cc dns_service.h
-libasiolink_la_SOURCES += io_socket.cc io_socket.h
+libasiolink_la_SOURCES += dummy_io_cb.h
-libasiolink_la_SOURCES += io_message.h
+libasiolink_la_SOURCES += interval_timer.cc interval_timer.h
 libasiolink_la_SOURCES += io_address.cc io_address.h
+libasiolink_la_SOURCES += io_asio_socket.h
 libasiolink_la_SOURCES += io_endpoint.cc io_endpoint.h
-libasiolink_la_SOURCES += udp_endpoint.h udp_socket.h
+libasiolink_la_SOURCES += io_error.h
-libasiolink_la_SOURCES += udp_server.h udp_server.cc
+libasiolink_la_SOURCES += io_fetch.cc io_fetch.h
-libasiolink_la_SOURCES += udp_query.h udp_query.cc
+libasiolink_la_SOURCES += io_message.h
-libasiolink_la_SOURCES += tcp_endpoint.h tcp_socket.h
+libasiolink_la_SOURCES += qid_gen.cc qid_gen.h
-libasiolink_la_SOURCES += tcp_server.h tcp_server.cc
+libasiolink_la_SOURCES += io_service.h io_service.cc
+libasiolink_la_SOURCES += io_socket.h io_socket.cc
+libasiolink_la_SOURCES += simple_callback.h
+libasiolink_la_SOURCES += tcp_endpoint.h
+libasiolink_la_SOURCES += tcp_server.cc tcp_server.h
+libasiolink_la_SOURCES += tcp_socket.h
+libasiolink_la_SOURCES += udp_endpoint.h
+libasiolink_la_SOURCES += udp_server.cc udp_server.h
+libasiolink_la_SOURCES += udp_socket.h
+
+EXTRA_DIST = asiodef.msg
+
 # Note: the ordering matters: -Wno-... must follow -Wextra (defined in
 # B10_CXXFLAGS)
 libasiolink_la_CXXFLAGS = $(AM_CXXFLAGS)
-if USE_GXX
-libasiolink_la_CXXFLAGS += -Wno-unused-parameter
-endif
 if USE_CLANGPP
 # Same for clang++, but we need to turn off -Werror completely.
 libasiolink_la_CXXFLAGS += -Wno-error
 endif
 libasiolink_la_CPPFLAGS = $(AM_CPPFLAGS)
 libasiolink_la_LIBADD = $(top_builddir)/src/lib/log/liblog.la
-libasiolink_la_LIBADD += $(top_builddir)/src/lib/resolve/libresolve.la

+ 80 - 1
src/lib/asiolink/README

@@ -33,7 +33,7 @@ This is intended to simplify development a bit, since it allows the
 routines to be written in a straightfowrard step-step-step fashion rather
 than as a complex chain of separate handler functions.

-Coroutine objects (i.e., UDPServer, TCPServer and UDPQuery) are objects
+Coroutine objects (i.e., UDPServer, TCPServer and IOFetch) are objects
 with reenterable operator() members.  When an instance of one of these
 classes is called as a function, it resumes at the position where it left
 off.  Thus, a UDPServer can issue an asynchronous I/O call and specify
@@ -101,3 +101,82 @@ when the answer has arrived.  In simplified form, the DNSQuery routine is:
 Currently, DNSQuery is only implemented for UDP queries.  In future work
 it will be necessary to write code to fall back to TCP when circumstances
 require it.
+
+
+Upstream Fetches
+================
+Upstream fetches (queries by the resolver on behalf of a client) are made
+using a slightly-modified version of the pattern described above.
+
+Sockets
+-------
+First, it will be useful to understand the class hierarchy used in the
+fetch logic:
+
+        IOSocket
+           |
+      IOAsioSocket
+           |
+     +-----+-----+                
+     |           |
+UDPSocket    TCPSocket
+
+IOSocket is a wrapper class for a socket and is used by the authoritative
+server code.  It is an abstract base class, providing little more than the ability to hold the socket and to return the protocol in use.
+
+Built on this is IOAsioSocket, which adds the open, close, asyncSend and
+asyncReceive methods.  This is a template class, which takes as template
+argument the class of the object that will be used as the callback when the
+asynchronous operation completes. This object can be of any type, but must
+include an operator() method with the signature:
+
+   operator()(asio::error_code ec, size_t length)
+
+... the two arguments being the status of the completed I/O operation and
+the number of bytes transferred. (In the case of the open method, the second
+argument will be zero.)
+
+Finally, the TCPSocket and UDPSocket classes provide the body of the
+asynchronous operations.
+
+Fetch Sequence
+--------------
+The fetch is implemented by the IOFetch class, which takes as argument the
+protocol to use.  The sequence is:
+
+  REENTER:
+    render the question into a wire-format query packet
+    open()                           // Open socket and optionally connect
+    if (! synchronous) {
+        YIELD;
+    }
+    YIELD asyncSend(query)           // Send query 
+    do {
+        YIELD asyncReceive(response) // Read response
+    } while (! complete(response))
+    close()                          // Drop connection and close socket
+    server->resume
+
+The open() method opens a socket for use.  On TCP, it also makes a
+connection to the remote end.  So under UDP the operation will complete
+immediately, but under TCP it could take a long time.  One solution would be
+for the open operation to post an event to the I/O queue; then both cases
+could be regarded as being equivalent, with the completion being signalled
+by the posting of the completion event.  However UDP is the most common case
+and that would involve extra overhead.  So the open() returns a status
+indicating whether the operation completed asynchronously.  If it did, the
+code yields back to the coroutine; if not the yield is bypassed.
+
+The asynchronous send is straightforward, invoking the underlying ASIO
+function.  (Note that the address/port is supplied to both the open() and
+asyncSend() methods - it is used by the TCPSocket in open() and by the
+UDPSocket in asyncSend().)
+
+The asyncReceive() method issues an asynchronous read and waits for completion.
+The fetch object keeps track of the amount of data received so far and when
+the receive completes it calls a method on the socket to determine if the
+entire message has been received.  (This will always be the case for UDP.  On
+TCP though, the message is preceded by a count field as several reads may be
+required to read all the data.)  The fetch loops until all the data is read.
+
+Finally, the socket is closed and the server called to resume operation.

+ 37 - 0
src/lib/asiolink/asiodef.cc

@@ -0,0 +1,37 @@
+// File created from asiodef.msg on Mon Feb 28 17:15:30 2011
+
+#include <cstddef>
+#include <log/message_types.h>
+#include <log/message_initializer.h>
+
+namespace asiolink {
+
+extern const isc::log::MessageID ASIO_FETCHCOMP = "FETCHCOMP";
+extern const isc::log::MessageID ASIO_FETCHSTOP = "FETCHSTOP";
+extern const isc::log::MessageID ASIO_OPENSOCK = "OPENSOCK";
+extern const isc::log::MessageID ASIO_RECVSOCK = "RECVSOCK";
+extern const isc::log::MessageID ASIO_RECVTMO = "RECVTMO";
+extern const isc::log::MessageID ASIO_SENDSOCK = "SENDSOCK";
+extern const isc::log::MessageID ASIO_UNKORIGIN = "UNKORIGIN";
+extern const isc::log::MessageID ASIO_UNKRESULT = "UNKRESULT";
+
+} // namespace asiolink
+
+namespace {
+
+const char* values[] = {
+    "FETCHCOMP", "upstream fetch to %s(%d) has now completed",
+    "FETCHSTOP", "upstream fetch to %s(%d) has been stopped",
+    "OPENSOCK", "error %d opening %s socket to %s(%d)",
+    "RECVSOCK", "error %d reading %s data from %s(%d)",
+    "RECVTMO", "receive timeout while waiting for data from %s(%d)",
+    "SENDSOCK", "error %d sending data using %s to %s(%d)",
+    "UNKORIGIN", "unknown origin for ASIO error code %d (protocol: %s, address %s)",
+    "UNKRESULT", "unknown result (%d) when IOFetch::stop() was executed for I/O to %s(%d)",
+    NULL
+};
+
+const isc::log::MessageInitializer initializer(values);
+
+} // Anonymous namespace
+

+ 21 - 0
src/lib/asiolink/asiodef.h

@@ -0,0 +1,21 @@
+// File created from asiodef.msg on Mon Feb 28 17:15:30 2011
+
+#ifndef __ASIODEF_H
+#define __ASIODEF_H
+
+#include <log/message_types.h>
+
+namespace asiolink {
+
+extern const isc::log::MessageID ASIO_FETCHCOMP;
+extern const isc::log::MessageID ASIO_FETCHSTOP;
+extern const isc::log::MessageID ASIO_OPENSOCK;
+extern const isc::log::MessageID ASIO_RECVSOCK;
+extern const isc::log::MessageID ASIO_RECVTMO;
+extern const isc::log::MessageID ASIO_SENDSOCK;
+extern const isc::log::MessageID ASIO_UNKORIGIN;
+extern const isc::log::MessageID ASIO_UNKRESULT;
+
+} // namespace asiolink
+
+#endif // __ASIODEF_H

+ 56 - 0
src/lib/asiolink/asiodef.msg

@@ -0,0 +1,56 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$PREFIX ASIO_
+$NAMESPACE asiolink
+
+FETCHCOMP   upstream fetch to %s(%d) has now completed
++ A debug message, this records that the upstream fetch (a query made by the
++ resolver on behalf of its client) to the specified address has completed.
+
+FETCHSTOP   upstream fetch to %s(%d) has been stopped
++ An external component has requested the halting of an upstream fetch.  This
++ is an allowed operation, and the message should only appear if debug is
++ enabled.
+
+OPENSOCK    error %d opening %s socket to %s(%d)
++ The asynchronous I/O code encountered an error when trying to open a socket
++ of the specified protocol in order to send a message to the target address.
++ The number of the system error that caused the problem is given in the
++ message.
+
+RECVSOCK    error %d reading %s data from %s(%d)
++ The asynchronous I/O code encountered an error when trying to read data from
++ the specified address on the given protocol.  The number of the system
++ error that caused the problem is given in the message.
+
+SENDSOCK    error %d sending data using %s to %s(%d)
++ The asynchronous I/O code encountered an error when trying to send data to
++ the specified address on the given protocol.  The number of the system
++ error that caused the problem is given in the message.
+
+RECVTMO     receive timeout while waiting for data from %s(%d)
++ An upstream fetch from the specified address timed out.  This may happen for
++ any number of reasons and is most probably a problem at the remote server
++ or a problem on the network.  The message will only appear if debug is
++ enabled.
+
+UNKORIGIN  unknown origin for ASIO error code %d (protocol: %s, address %s)
++ This message should not appear; if it does, it indicates an internal error.
++ Please enter a bug report.
+
+UNKRESULT  unknown result (%d) when IOFetch::stop() was executed for I/O to %s(%d)
++ The termination method of the resolver's upstream fetch class was called with
++ an unknown result code (which is given in the message).  This message should
++ not appear and may indicate an internal error.  Please enter a bug report.
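For orientation, the identifiers generated from this file (declared in asiodef.h above and defined in asiodef.cc) are passed straight to the logger together with the arguments named in the format strings, exactly as io_fetch.cc does later in this diff. A minimal, hypothetical sketch:

    #include <log/logger.h>
    #include <asiolink/asiodef.h>

    // Illustrative only: report a socket-open failure using the generated
    // ASIO_OPENSOCK message ("error %d opening %s socket to %s(%d)").
    void reportOpenError(int errcode, const char* proto,
                         const char* addr, int port) {
        isc::log::Logger logger("asiolink");
        logger.error(asiolink::ASIO_OPENSOCK, errcode, proto, addr, port);
    }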

+ 1 - 19
src/lib/asiolink/asiolink.h

@@ -25,13 +25,13 @@
 #include <asiolink/dns_lookup.h>
 #include <asiolink/dns_answer.h>
 #include <asiolink/simple_callback.h>
-#include <asiolink/recursive_query.h>
 #include <asiolink/interval_timer.h>
 
 #include <asiolink/io_address.h>
 #include <asiolink/io_endpoint.h>
 #include <asiolink/io_message.h>
 #include <asiolink/io_socket.h>
+#include <asiolink/io_error.h>
 
 /// \namespace asiolink
 /// \brief A wrapper interface for the ASIO library.
@@ -83,22 +83,4 @@
 /// the placeholder of callback handlers:
 /// http://think-async.com/Asio/asio-1.3.1/doc/asio/reference/asio_handler_allocate.html
 
-namespace asiolink {
-
-
-/// \brief An exception that is thrown if an error occurs within the IO
-/// module.  This is mainly intended to be a wrapper exception class for
-/// ASIO specific exceptions.
-class IOError : public isc::Exception {
-public:
-    IOError(const char* file, size_t line, const char* what) :
-        isc::Exception(file, line, what) {}
-};
-
-
-}      // asiolink
 #endif // __ASIOLINK_H
-
-// Local Variables: 
-// mode: c++
-// End: 

+ 61 - 0
src/lib/asiolink/asiolink_utilities.h

@@ -0,0 +1,61 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __ASIOLINK_UTILITIES_H
+#define __ASIOLINK_UTILITIES_H
+
+#include <stdint.h>             // for uint8_t/uint16_t used below
+#include <cstddef>
+
+namespace asiolink {
+
+/// \brief Read Unsigned 16-Bit Integer from Buffer
+///
+/// This is essentially a copy of the isc::dns::InputBuffer::readUint16.  It
+/// should really be moved into a separate library.
+///
+/// \param buffer Data buffer at least two bytes long of which the first two
+///        bytes are assumed to represent a 16-bit integer in network-byte
+///        order.
+///
+/// \return Value of 16-bit integer
+inline uint16_t
+readUint16(const void* buffer) {
+    const uint8_t* byte_buffer = static_cast<const uint8_t*>(buffer);
+
+    uint16_t result = (static_cast<uint16_t>(byte_buffer[0])) << 8;
+    result |= static_cast<uint16_t>(byte_buffer[1]);
+
+    return (result);
+}
+
+/// \brief Write Unsigned 16-Bit Integer to Buffer
+///
+/// This is essentially a copy of isc::dns::OutputBuffer::writeUint16.  It
+/// should really be moved into a separate library.
+///
+/// \param value 16-bit value to convert
+/// \param buffer Data buffer at least two bytes long into which the 16-bit
+///        value is written in network-byte order.
+
+inline void
+writeUint16(uint16_t value, void* buffer) {
+    uint8_t* byte_buffer = static_cast<uint8_t*>(buffer);
+
+    byte_buffer[0] = static_cast<uint8_t>((value & 0xff00U) >> 8);
+    byte_buffer[1] = static_cast<uint8_t>(value & 0x00ffU);
+}
+
+} // namespace asiolink
+
+#endif // __ASIOLINK_UTILITIES_H
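As a quick check of the two helpers above, the following standalone snippet (not part of the patch) packs and unpacks a value in network byte order - the same operation the TCP length-count handling elsewhere in this branch relies on:

    #include <cassert>
    #include <stdint.h>
    #include <asiolink/asiolink_utilities.h>

    int main() {
        uint8_t wire[2];
        asiolink::writeUint16(0x1234, wire);        // big-endian: 0x12, 0x34
        assert(wire[0] == 0x12 && wire[1] == 0x34);
        assert(asiolink::readUint16(wire) == 0x1234);
        return (0);
    }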

+ 8 - 5
src/lib/asiolink/dns_server.h

@@ -21,7 +21,7 @@ namespace asiolink {
 
 /// \brief The \c DNSServer class is a wrapper (and base class) for
 /// classes which provide DNS server functionality.
-/// 
+///
 /// The classes derived from this one, \c TCPServer and \c UDPServer,
 /// act as the interface layer between clients sending queries, and
 /// functions defined elsewhere that provide answers to those queries.
@@ -42,10 +42,10 @@ namespace asiolink {
 /// when "forking", and that instances will be posted as ASIO handler
 /// objects, which are always copied.
 ///
-/// Because these objects are frequently copied, it is recommended 
+/// Because these objects are frequently copied, it is recommended
 /// that derived classes be kept small to reduce copy overhead.
 class DNSServer {
-protected: 
+protected:
     ///
     /// \name Constructors and destructors
     ///
@@ -66,7 +66,7 @@ public:
     /// the ones in the derived class.  This makes it possible to pass
     /// instances of derived classes as references to this base class
    /// without losing access to derived class data.
-    /// 
+    ///
    //@{
     /// \brief The funtion operator
     virtual void operator()(asio::error_code ec = asio::error_code(),
@@ -75,6 +75,9 @@ public:
         (*self_)(ec, length);
     }
 
+    /// \brief Stop the currently running server
+    virtual void stop() { self_->stop();}
+
     /// \brief Resume processing of the server coroutine after an 
     /// asynchronous call (e.g., to the DNS Lookup provider) has completed.
     ///
@@ -84,7 +87,7 @@ public:
 
     /// \brief Indicate whether the server is able to send an answer
     /// to a query.
-    /// 
+    ///
     /// This is presently used only for testing purposes.
     virtual bool hasAnswer() { return (self_->hasAnswer()); }
 

+ 13 - 5
src/lib/asiolink/dns_service.cc

@@ -12,13 +12,19 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <unistd.h>             // for some IPC/network system calls
+
+#include <boost/lexical_cast.hpp>
+
 #include <config.h>
 
-// unistd is needed for asio.hpp with SunStudio
+#include <log/dummylog.h>
-#include <unistd.h>
 
 #include <asio.hpp>
-
+#include <asiolink/dns_service.h>
+#include <asiolink/io_service.h>
 #include <asiolink/io_service.h>
 #include <asiolink/tcp_server.h>
 #include <asiolink/udp_server.h>
@@ -26,6 +32,7 @@
 #include <log/dummylog.h>
 
 #include <boost/lexical_cast.hpp>
+#include <boost/foreach.hpp>
 
 using isc::log::dlog;
 
@@ -182,8 +189,9 @@ DNSService::addServer(uint16_t port, const std::string& address) {
 
 void
 DNSService::clearServers() {
-    // FIXME: This does not work, it does not close the socket.
+    BOOST_FOREACH(const DNSServiceImpl::DNSServerPtr& s, impl_->servers_) {
-    // How is it done?
+        s->stop();
+    }
     impl_->servers_.clear();
 }
 

+ 7 - 1
src/lib/asiolink/dns_service.h

@@ -26,13 +26,13 @@ class DNSLookup;
 class DNSAnswer;
 class DNSServiceImpl;
 
+/// \brief Handle DNS Queries
 ///
 /// DNSService is the service that handles DNS queries and answers with
 /// a given IOService. This class is mainly intended to hold all the
 /// logic that is shared between the authoritative and the recursive
 /// server implementations. As such, it handles asio, including config
 /// updates (through the 'Checkinprovider'), and listening sockets.
-/// 
 class DNSService {
     ///
     /// \name Constructors and Destructor
@@ -97,6 +97,12 @@ public:
     /// It will eventually be removed once the wrapper interface is
     /// generalized.
     asio::io_service& get_io_service() { return io_service_.get_io_service(); }
+
+    /// \brief Return the IO Service Object
+    ///
+    /// \return IOService object for this DNS service.
+    asiolink::IOService& getIOService() { return (io_service_);}
+
 private:
     DNSServiceImpl* impl_;
     IOService& io_service_;

+ 59 - 0
src/lib/asiolink/dummy_io_cb.h

@@ -0,0 +1,59 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DUMMY_IO_CB_H
+#define __DUMMY_IO_CB_H
+
+#include <iostream>
+
+#include <asio/error.hpp>
+#include <asio/error_code.hpp>
+
+namespace asiolink {
+
+/// \brief Asynchronous I/O Completion Callback
+///
+/// The two socket classes (UDPSocket and TCPSocket) require that the I/O
+/// completion callback function have an operator() method with the appropriate
+/// signature.  The classes are templates, any class with that method and
+/// signature can be passed as the callback object - there is no need for a
+/// base class defining the interface.  However, some users of the socket
+/// classes do not use the asynchronous I/O operations, yet have to supply a
+/// template parameter.  This is the reason for this class - it is the dummy
+/// template parameter.
+
+class DummyIOCallback {
+public:
+
+    /// \brief Asynchronous I/O callback method
+    ///
+    /// \param error Unused
+    void operator()(asio::error_code)
+    {
+        // TODO: log an error if this method ever gets called.
+    }
+
+    /// \brief Asynchronous I/O callback method
+    ///
+    /// \param error Unused
+    /// \param length Unused
+    void operator()(asio::error_code, size_t)
+    {
+        // TODO: log an error if this method ever gets called.
+    }
+};
+
+} // namespace asiolink
+
+#endif // __DUMMY_IO_CB_H
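An illustrative (hypothetical) use of the class: it exists only to satisfy the callback template parameter of the socket classes when the asynchronous operations are never invoked, for example:

    #include <asiolink/dummy_io_cb.h>
    #include <asiolink/udp_socket.h>    // UDPSocket<C>, used by io_fetch.cc below

    // The callbacks of this socket type are never called, so the do-nothing
    // DummyIOCallback is sufficient as the template argument.
    typedef asiolink::UDPSocket<asiolink::DummyIOCallback> PlainUDPSocket;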

+ 5 - 5
src/lib/asiolink/interval_timer.cc

@@ -14,18 +14,18 @@
 
 #include <config.h>
 
-// unistd is needed for asio.hpp with SunStudio
+#include <unistd.h>             // for some IPC/network system calls
-#include <unistd.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
 
-#include <asio.hpp>
+#include <boost/bind.hpp>
 
 #include <exceptions/exceptions.h>
 
+#include <asio.hpp>
 #include <asiolink/interval_timer.h>
 #include <asiolink/io_service.h>
 
-#include <boost/bind.hpp>
-
 namespace asiolink {
 
 class IntervalTimerImpl {

+ 1 - 1
src/lib/asiolink/interval_timer.h

@@ -37,7 +37,7 @@ struct IntervalTimerImpl;
 /// The function calls the call back function set by \c setup() and updates
 /// the timer to expire in (now + interval) milliseconds.
 /// The type of call back function is \c void(void).
-/// 
+///
 /// The call back function will not be called if the instance of this class is
 /// destroyed before the timer is expired.
 ///

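For context, a minimal usage sketch of the timer documented above (hypothetical and not part of the patch; it assumes the IntervalTimer(IOService&) constructor and the setup(callback, interval_ms) call implied by the comment):

    #include <asiolink/asiolink.h>

    void tick() {
        // invoked every interval until the timer or the IOService is destroyed
    }

    void runTimer() {
        asiolink::IOService io_service;
        asiolink::IntervalTimer timer(io_service);  // assumed constructor
        timer.setup(tick, 1000);                    // void(void) callback, 1000 ms
        io_service.run();
    }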
+ 4 - 1
src/lib/asiolink/io_address.cc

@@ -20,7 +20,10 @@
 
 #include <asio.hpp>
 
-#include <asiolink/asiolink.h>
+#include <exceptions/exceptions.h>
+#include <asiolink/io_address.h>
+#include <asiolink/io_error.h>
+
 
 using namespace asio;
 using asio::ip::udp;

+ 3 - 7
src/lib/asiolink/io_address.h

@@ -12,8 +12,8 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
-#ifndef __IOADDRESS_H
+#ifndef __IO_ADDRESS_H
-#define __IOADDRESS_H 1
+#define __IO_ADDRESS_H 1
 
 // IMPORTANT NOTE: only very few ASIO headers files can be included in
 // this file.  In particular, asio.hpp should never be included here.
@@ -120,8 +120,4 @@ private:
 };
 
 }      // asiolink
-#endif // __IOADDRESS_H
+#endif // __IO_ADDRESS_H
-
-// Local Variables: 
-// mode: c++
-// End: 

+ 399 - 0
src/lib/asiolink/io_asio_socket.h

@@ -0,0 +1,399 @@
+// Copyright (C) 2010  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __IO_ASIO_SOCKET_H
+#define __IO_ASIO_SOCKET_H 1
+
+// IMPORTANT NOTE: only very few ASIO headers files can be included in
+// this file.  In particular, asio.hpp should never be included here.
+// See the description of the namespace below.
+#include <unistd.h>             // for some network system calls
+
+#include <functional>
+#include <string>
+
+#include <exceptions/exceptions.h>
+#include <coroutine.h>
+
+#include <dns/buffer.h>
+
+#include <asiolink/io_error.h>
+#include <asiolink/io_socket.h>
+
+
+namespace asiolink {
+
+/// \brief Socket not open
+///
+/// Thrown on an attempt to do read/write to a socket that is not open.
+class SocketNotOpen : public IOError {
+public:
+    SocketNotOpen(const char* file, size_t line, const char* what) :
+        IOError(file, line, what) {}
+};
+
+/// \brief Error setting socket options
+///
+/// Thrown if attempt to change socket options fails.
+class SocketSetError : public IOError {
+public:
+    SocketSetError(const char* file, size_t line, const char* what) :
+        IOError(file, line, what) {}
+};
+
+/// \brief Buffer overflow
+///
+/// Thrown if an attempt is made to receive into an area beyond the end of
+/// the receive data buffer.
+class BufferOverflow : public IOError {
+public:
+    BufferOverflow(const char* file, size_t line, const char* what) :
+        IOError(file, line, what) {}
+};
+
+/// Forward declaration of an IOEndpoint
+class IOEndpoint;
+
+
+/// \brief I/O Socket with asynchronous operations
+///
+/// This class is a wrapper for the ASIO socket classes such as
+/// \c ip::tcp::socket and \c ip::udp::socket.
+///
+/// This is the basic IOSocket with additional operations - open, send, receive
+/// and close.  Depending on how the asiolink code develops, it may be a
+/// temporary class: its main use is to add the template parameter needed for
+/// the derived classes UDPSocket and TCPSocket but without changing the
+/// signature of the more basic IOSocket class.
+///
+/// We may revisit this decision when we generalize the wrapper and more
+/// modules use it.  Also, at that point we may define a separate (visible)
+/// derived class for testing purposes rather than providing factory methods
+/// (i.e., getDummy variants below).
+///
+/// TODO: Check if IOAsioSocket class is still needed
+///
+/// \param C Template parameter identifying type of the callback object.
+
+template <typename C>
+class IOAsioSocket : public IOSocket {
+    ///
+    /// \name Constructors and Destructor
+    ///
+    /// Note: The copy constructor and the assignment operator are
+    /// intentionally defined as private, making this class non-copyable.
+    //@{
+private:
+    IOAsioSocket(const IOAsioSocket<C>& source);
+    IOAsioSocket& operator=(const IOAsioSocket<C>& source);
+protected:
+    /// \brief The default constructor.
+    ///
+    /// This is intentionally defined as \c protected as this base class
+    /// should never be instantiated (except as part of a derived class).
+    IOAsioSocket() {}
+public:
+    /// The destructor.
+    virtual ~IOAsioSocket() {}
+    //@}
+
+    /// \brief Return the "native" representation of the socket.
+    ///
+    /// In practice, this is the file descriptor of the socket for UNIX-like
+    /// systems so the current implementation simply uses \c int as the type of
+    /// the return value. We may need to revisit this decision later.
+    ///
+    /// In general, the application should avoid using this method; it
+    /// essentially discloses an implementation specific "handle" that can
+    /// change the internal state of the socket (consider what would happen if
+    /// the application closes it, for example).  But we sometimes need to
+    /// perform very low-level operations that requires the native
+    /// representation.  Passing the file descriptor to a different process is
+    /// one example.  This method is provided as a necessary evil for such
+    /// limited purposes.
+    ///
+    /// This method never throws an exception.
+    ///
+    /// \return The native representation of the socket.  This is the socket
+    ///         file descriptor for UNIX-like systems.
+    virtual int getNative() const = 0;
+
+    /// \brief Return the transport protocol of the socket.
+    ///
+    /// Currently, it returns \c IPPROTO_UDP for UDP sockets, and
+    /// \c IPPROTO_TCP for TCP sockets.
+    ///
+    /// This method never throws an exception.
+    ///
+    /// \return \c IPPROTO_UDP for UDP sockets, \c IPPROTO_TCP for TCP sockets
+    virtual int getProtocol() const = 0;
+
+    /// \brief Is Open() synchronous?
+    ///
+    /// On a TCP socket, an "open" operation is a call to the socket's "open()"
+    /// method followed by a connection to the remote system: it is an
+    /// asynchronous operation.  On a UDP socket, it is just a call to "open()"
+    /// and completes synchronously.
+    ///
+    /// For TCP, signalling of the completion of the operation is done
+    /// by calling the callback function in the normal way.  This could be done
+    /// for UDP (by posting an event on the event queue); however, that will
+    /// incur additional overhead in the most common case.  So we give the
+    /// caller the choice for calling this open() method synchronously or
+    /// asynchronously.
+    ///
+    /// Owing to the way that the stackless coroutines are implemented, we need
+    /// to know _before_ executing the "open" function whether or not it is
+    /// asynchronous.  So this method is called to provide that information.
+    ///
+    /// (The reason there is a need to know is because the call to open() passes
+    /// in the state of the coroutine at the time the call is made.  On an
+    /// asynchronous I/O, we need to set the state to point to the statement
+    /// after the call to open() _before_ we pass the coroutine to the open()
+    /// call.  Unfortunately, the macros that set the state of the coroutine
+    /// also yield control - which we don't want to do if the open is
+    /// synchronous.  Hence we need to know before we make the call to open()
+    /// whether that call will complete asynchronously.)
+    virtual bool isOpenSynchronous() const = 0;
+
+    /// \brief Open AsioSocket
+    ///
+    /// Opens the socket for asynchronous I/O.  The open will complete
+    /// synchronously on UDP or asynchronously on TCP (in which case a callback
+    /// will be queued).
+    ///
+    /// \param endpoint Pointer to the endpoint object.  This is ignored for
+    ///        a UDP socket (the target is specified in the send call), but
+    ///        should be of type TCPEndpoint for a TCP connection.
+    /// \param callback I/O Completion callback, called when the operation has
+    ///        completed, but only if the operation was asynchronous. (It is
+    ///        ignored on a UDP socket.)
+    virtual void open(const IOEndpoint* endpoint, C& callback) = 0;
+
+    /// \brief Send Asynchronously
+    ///
+    /// This corresponds to async_send_to() for UDP sockets and async_send()
+    /// for TCP.  In both cases an endpoint argument is supplied indicating the
+    /// target of the send - this is ignored for TCP.
+    ///
+    /// \param data Data to send
+    /// \param length Length of data to send
+    /// \param endpoint Target of the send
+    /// \param callback Callback object.
+    virtual void asyncSend(const void* data, size_t length,
+                           const IOEndpoint* endpoint, C& callback) = 0;
+
+    /// \brief Receive Asynchronously
+    ///
+    /// This corresponds to async_receive_from() for UDP sockets and
+    /// async_receive() for TCP.  In both cases, an endpoint argument is
+    /// supplied to receive the source of the communication.  For TCP it will
+    /// be filled in with details of the connection.
+    ///
+    /// \param data Buffer to receive incoming message
+    /// \param length Length of the data buffer
+    /// \param offset Offset into buffer where data is to be put.  Although the
+    ///        offset could be implied by adjusting "data" and "length"
+    ///        appropriately, using this argument allows data to be specified as
+    ///        "const void*" - the overhead of converting it to a pointer to a
+    ///        set of bytes is hidden away here.
+    /// \param endpoint Source of the communication
+    /// \param callback Callback object
+    virtual void asyncReceive(void* data, size_t length, size_t offset,
+                              IOEndpoint* endpoint, C& callback) = 0;
+
+    /// \brief Processes received data
+    ///
+    /// In the IOFetch code, data is received into a staging buffer before being
+    /// copied into the target buffer.  (This is because (a) we don't know how
+    /// much data we will be receiving, so don't know how to size the output
+    /// buffer and (b) TCP data is preceded by a two-byte count field that needs
+    /// to be discarded before being returned to the user.)
+    ///
+    /// An additional consideration is that TCP data is not received in one
+    /// I/O - it may take a number of I/Os - each receiving any non-zero number
+    /// of bytes - to read the entire message.
+    ///
+    /// So the IOFetch code has to loop until it determines that all the data
+    /// has been read.  This is where this method comes in.  It has several
+    /// functions:
+    ///
+    /// - It checks if the received data is complete.
+    /// - If data is not complete, decides if the next set of data is to go into
+    ///   the start of the staging buffer or at some offset into it.  (This
+    ///   simplifies the case we could have in a TCP receive where the two-byte
+    ///   count field is received in one-byte chunks: we put off interpreting
+    ///   the count until we have all of it.  The alternative - copying the
+    ///   data to the output buffer and interpreting the count from there -
+    ///   would require moving the data in the output buffer by two bytes before
+    ///   returning it to the caller.)
+    /// - Copies data from the staging buffer into the output buffer.
+    ///
+    /// This functionality mainly applies to TCP receives.  For UDP, all the
+    /// data is received in one I/O, so this just copies the data into the
+    /// output buffer.
+    ///
+    /// \param staging Pointer to the start of the staging buffer.
+    /// \param length Amount of data in the staging buffer.
+    /// \param cumulative Amount of data received before the staging buffer is
+    ///        processed (this includes the TCP count field if appropriate).
+    ///        The value should be set to zero before the receive loop is
+    ///        entered, and it will be updated by this method as required.
+    /// \param offset Offset into the staging buffer where the next read should
+    ///        put the received data.  It should be set to zero before the first
+    ///        call and may be updated by this method.
+    /// \param expected Expected amount of data to be received.  This is
+    ///        really the TCP count field and is set to that value when enough
+    ///        of a TCP message is received.  It should be initialized to -1
+    ///        before the first read is executed.
+    /// \param outbuff Output buffer.  Data in the staging buffer may be copied
+    ///        to this output buffer in the call.
+    ///
+    /// \return true if the receive is complete, false if another receive is
+    ///         needed.  This is always true for UDP, but for TCP involves
+    ///         checking the amount of data received so far against the amount
+    ///         expected (as indicated by the two-byte count field).  If this
+    ///         method returns false, another read should be queued and data
+    ///         should be read into the staging buffer at offset given by the
+    ///         "offset" parameter.
+    virtual bool processReceivedData(const void* staging, size_t length,
+                                     size_t& cumulative, size_t& offset,
+                                     size_t& expected,
+                                     isc::dns::OutputBufferPtr& outbuff) = 0;
+
+    /// \brief Cancel I/O On AsioSocket
+    virtual void cancel() = 0;
+
+    /// \brief Close socket
+    virtual void close() = 0;
+};
+
+
+/// \brief The \c DummyAsioSocket class is a concrete derived class of
+/// \c IOAsioSocket that is not associated with any real socket.
+///
+/// This main purpose of this class is tests, where it may be desirable to
+/// instantiate an \c IOAsioSocket object without involving system resource
+/// allocation such as real network sockets.
+///
+/// \param C Template parameter identifying type of the callback object.
+
+template <typename C>
+class DummyAsioSocket : public IOAsioSocket<C> {
+private:
+    DummyAsioSocket(const DummyAsioSocket<C>& source);
+    DummyAsioSocket& operator=(const DummyAsioSocket<C>& source);
+public:
+    /// \brief Constructor from the protocol number.
+    ///
+    /// The protocol must validly identify a standard network protocol.
+    /// For example, to specify TCP \c protocol must be \c IPPROTO_TCP.
+    ///
+    /// \param protocol The network protocol number for the socket.
+    DummyAsioSocket(const int protocol) : protocol_(protocol) {}
+
+    /// \brief A dummy derived method of \c IOAsioSocket::getNative().
+    ///
+    /// \return Always returns -1 as the object is not associated with a real
+    /// (native) socket.
+    virtual int getNative() const { return (-1); }
+
+    /// \brief A dummy derived method of \c IOAsioSocket::getProtocol().
+    ///
+    /// \return Protocol socket was created with
+    virtual int getProtocol() const { return (protocol_); }
+
+
+    /// \brief Is socket opening synchronous?
+    ///
+    /// \return true - it is for this class.
+    bool isOpenSynchronous() const {
+        return true;
+    }
+
+    /// \brief Open AsioSocket
+    ///
+    /// A call that is a no-op on UDP sockets, this opens a connection to the
+    /// system identified by the given endpoint.
+    ///
+    /// \param endpoint Unused
+    /// \param callback Unused
+    virtual void open(const IOEndpoint*, C&) {
+    }
+
+    /// \brief Send Asynchronously
+    ///
+    /// Must be supplied as it is abstract in the base class.
+    ///
+    /// \param data Unused
+    /// \param length Unused
+    /// \param endpoint Unused
+    /// \param callback Unused
+    virtual void asyncSend(const void*, size_t, const IOEndpoint*, C&) {
+    }
+
+    /// \brief Receive Asynchronously
+    ///
+    /// Must be supplied as it is abstract in the base class.
+    ///
+    /// \param data Unused
+    /// \param length Unused
+    /// \param offset Unused
+    /// \param endpoint Unused
+    /// \param callback Unused
+    virtual void asyncReceive(void* data, size_t, size_t, IOEndpoint*, C&) {
+    }
+
+    /// \brief Checks if the data received is complete.
+    ///
+    /// \param staging Unused
+    /// \param length Unused
+    /// \param cumulative Unused
+    /// \param offset Unused.
+    /// \param expected Unused.
+    /// \param outbuff Unused.
+    ///
+    /// \return Always true
+    virtual bool processReceivedData(const void* staging, size_t length,
+                                     size_t& cumulative, size_t& offset,
+                                     size_t& expected,
+                                     isc::dns::OutputBufferPtr& outbuff)
+    {
+        return (true);
+    }
+
+
+    /// \brief Cancel I/O On AsioSocket
+    ///
+    /// Must be supplied as it is abstract in the base class.
+    virtual void cancel() {
+    }
+
+    /// \brief Close socket
+    ///
+    /// Must be supplied as it is abstract in the base class.
+    virtual void close() {
+    }
+
+private:
+    const int protocol_;
+};
+
+} // namespace asiolink
+
+#endif // __IO_ASIO_SOCKET_H
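To make the interface concrete, the dummy implementation above can be exercised directly; a hypothetical snippet (the real open/send/receive sequence, including the synchronous-open shortcut, is what io_fetch.cc drives later in this diff):

    #include <netinet/in.h>                 // for IPPROTO_UDP
    #include <asiolink/dummy_io_cb.h>
    #include <asiolink/io_asio_socket.h>

    void exerciseInterface() {
        // A UDP-style socket opens synchronously, so no completion callback
        // is queued and the caller does not need to yield.
        asiolink::DummyAsioSocket<asiolink::DummyIOCallback> socket(IPPROTO_UDP);
        asiolink::DummyIOCallback callback;
        if (socket.isOpenSynchronous()) {
            socket.open(NULL, callback);    // endpoint is ignored by the dummy
        }
    }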

+ 3 - 1
src/lib/asiolink/io_endpoint.cc

@@ -20,7 +20,9 @@
 
 #include <asio.hpp>
 
-#include <asiolink/asiolink.h>
+#include <asiolink/io_address.h>
+#include <asiolink/io_error.h>
+#include <asiolink/io_endpoint.h>
 #include <asiolink/tcp_endpoint.h>
 #include <asiolink/udp_endpoint.h>
 

+ 3 - 7
src/lib/asiolink/io_endpoint.h

@@ -12,8 +12,8 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
-#ifndef __IOENDPOINT_H
+#ifndef __IO_ENDPOINT_H
-#define __IOENDPOINT_H 1
+#define __IO_ENDPOINT_H 1
 
 // IMPORTANT NOTE: only very few ASIO headers files can be included in
 // this file.  In particular, asio.hpp should never be included here.
@@ -115,8 +115,4 @@ public:
 };
 
 }      // asiolink
-#endif // __IOENDPOINT_H
+#endif // __IO_ENDPOINT_H
-
-// Local Variables: 
-// mode: c++
-// End: 

+ 35 - 0
src/lib/asiolink/io_error.h

@@ -0,0 +1,35 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#ifndef __IO_ERROR_H
+#define __IO_ERROR_H
+
+#include <exceptions/exceptions.h>
+
+namespace asiolink {
+
+/// \brief An exception that is thrown if an error occurs within the IO
+/// module.  This is mainly intended to be a wrapper exception class for
+/// ASIO specific exceptions.
+class IOError : public isc::Exception {
+public:
+    IOError(const char* file, size_t line, const char* what) :
+        isc::Exception(file, line, what) {}
+};
+
+
+}      // asiolink
+
+#endif // __IO_ERROR_H
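A brief illustration of how the wrapper exception is raised and caught (hypothetical snippet; isc_throw is the macro from exceptions/exceptions.h that fills in the file and line):

    #include <iostream>
    #include <exceptions/exceptions.h>
    #include <asiolink/io_error.h>

    void openOrThrow(bool ok) {
        if (!ok) {
            isc_throw(asiolink::IOError, "could not open socket");
        }
    }

    void demo() {
        try {
            openOrThrow(false);
        } catch (const asiolink::IOError& ex) {
            std::cerr << ex.what() << std::endl;   // prints the message text
        }
    }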

+ 355 - 0
src/lib/asiolink/io_fetch.cc

@@ -0,0 +1,355 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include <unistd.h>             // for some IPC/network system calls
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+#include <boost/bind.hpp>
+#include <boost/scoped_ptr.hpp>
+#include <boost/date_time/posix_time/posix_time_types.hpp>
+
+#include <dns/message.h>
+#include <dns/messagerenderer.h>
+#include <dns/opcode.h>
+#include <dns/rcode.h>
+#include <log/logger.h>
+
+#include <asiolink/qid_gen.h>
+
+#include <asio.hpp>
+#include <asio/deadline_timer.hpp>
+
+#include <asiolink/asiodef.h>
+#include <asiolink/io_address.h>
+#include <asiolink/io_asio_socket.h>
+#include <asiolink/io_endpoint.h>
+#include <asiolink/io_fetch.h>
+#include <asiolink/io_service.h>
+#include <asiolink/tcp_endpoint.h>
+#include <asiolink/tcp_socket.h>
+#include <asiolink/udp_endpoint.h>
+#include <asiolink/udp_socket.h>
+
+using namespace asio;
+using namespace isc::dns;
+using namespace isc::log;
+using namespace std;
+
+namespace asiolink {
+
+/// Use the ASIO logger
+
+isc::log::Logger logger("asiolink");
+
+/// \brief IOFetch Data
+///
+/// The data for IOFetch is held in a separate struct pointed to by a shared_ptr
+/// object.  This is because the IOFetch object will be copied often (it is used
+/// as a coroutine and passed as a callback to many async_*() functions) and we
+/// want to keep the same data.  Organising the data in this way keeps copying
+/// to a minimum.
+struct IOFetchData {
+
+    // The first two members are scoped pointers to a base class because what is
+    // actually instantiated depends on whether the fetch is over UDP or TCP,
+    // which is not known until construction of the IOFetch.  Use of a scoped
+    // pointer here is merely to ensure deletion when the data object is deleted.
+    boost::scoped_ptr<IOAsioSocket<IOFetch> > socket;
+                                            ///< Socket to use for I/O
+    boost::scoped_ptr<IOEndpoint> remote;   ///< Where the fetch was sent
+    isc::dns::Question          question;   ///< Question to be asked
+    isc::dns::OutputBufferPtr   msgbuf;     ///< Wire buffer for question
+    isc::dns::OutputBufferPtr   received;   ///< Received data put here
+    IOFetch::Callback*          callback;   ///< Called on I/O Completion
+    asio::deadline_timer        timer;      ///< Timer to measure timeouts
+    IOFetch::Protocol           protocol;   ///< Protocol being used
+    size_t                      cumulative; ///< Cumulative received amount
+    size_t                      expected;   ///< Expected amount of data
+    size_t                      offset;     ///< Offset to receive data
+    bool                        stopped;    ///< Have we stopped running?
+    int                         timeout;    ///< Timeout in ms
+
+    // In case we need to log an error, the origin of the last asynchronous
+    // I/O is recorded.  To save time and simplify the code, this is recorded
+    // as the ID of the error message that would be generated if the I/O failed.
+    // This means that we must make sure that all possible "origins" take the
+    // same arguments in their message in the same order.
+    isc::log::MessageID         origin;     ///< Origin of last asynchronous I/O
+    uint8_t                     staging[IOFetch::STAGING_LENGTH];
+                                            ///< Temporary array for received data
+
+    /// \brief Constructor
+    ///
+    /// Just fills in the data members of the IOFetchData structure
+    ///
+    /// \param proto Either IOFetch::TCP or IOFetch::UDP.
+    /// \param service I/O Service object to handle the asynchronous
+    ///        operations.
+    /// \param query DNS question to send to the upstream server.
+    /// \param address IP address of upstream server
+    /// \param port Port to use for the query
+    /// \param buff Output buffer into which the response (in wire format)
+    ///        is written (if a response is received).
+    /// \param cb Callback object containing the callback to be called
+    ///        when we terminate.  The caller is responsible for managing this
+    ///        object and deleting it if necessary.
+    /// \param wait Timeout for the fetch (in ms).
+    ///
+    /// TODO: May need to alter constructor (see comment 4 in Trac ticket #554)
+    IOFetchData(IOFetch::Protocol proto, IOService& service,
+        const isc::dns::Question& query, const IOAddress& address,
+        uint16_t port, isc::dns::OutputBufferPtr& buff, IOFetch::Callback* cb,
+        int wait)
+        :
+        socket((proto == IOFetch::UDP) ?
+            static_cast<IOAsioSocket<IOFetch>*>(
+                new UDPSocket<IOFetch>(service)) :
+            static_cast<IOAsioSocket<IOFetch>*>(
+                new TCPSocket<IOFetch>(service))
+            ),
+        remote((proto == IOFetch::UDP) ?
+            static_cast<IOEndpoint*>(new UDPEndpoint(address, port)) :
+            static_cast<IOEndpoint*>(new TCPEndpoint(address, port))
+            ),
+        question(query),
+        msgbuf(new isc::dns::OutputBuffer(512)),
+        received(buff),
+
+        callback(cb),
+        timer(service.get_io_service()),
+        protocol(proto),
+        cumulative(0),
+        expected(0),
+        offset(0),
+        stopped(false),
+        timeout(wait),
+        origin(ASIO_UNKORIGIN),
+        staging()
+    {}
+};
+
+/// IOFetch Constructor - just initialize the private data
+
+IOFetch::IOFetch(Protocol protocol, IOService& service,
+    const isc::dns::Question& question, const IOAddress& address, uint16_t port,
+    OutputBufferPtr& buff, Callback* cb, int wait)
+    :
+    data_(new IOFetchData(protocol, service, question, address,
+        port, buff, cb, wait))
+{
+}
+
+// Return protocol in use.
+
+IOFetch::Protocol
+IOFetch::getProtocol() const {
+    return (data_->protocol);
+}
+
+/// The function operator is implemented with the "stackless coroutine"
+/// pattern; see internal/coroutine.h for details.
+
+void
+IOFetch::operator()(asio::error_code ec, size_t length) {
+
+    if (data_->stopped) {
+        return;
+    } else if (ec) {
+        logIOFailure(ec);
+        return;
+    }
+
+    CORO_REENTER (this) {
+
+        /// Generate the upstream query and render it to wire format
+        /// This is done in a different scope to allow inline variable
+        /// declarations.
+        {
+            Message msg(Message::RENDER);
+            msg.setQid(QidGenerator::getInstance().generateQid());
+            msg.setOpcode(Opcode::QUERY());
+            msg.setRcode(Rcode::NOERROR());
+            msg.setHeaderFlag(Message::HEADERFLAG_RD);
+            msg.addQuestion(data_->question);
+            MessageRenderer renderer(*data_->msgbuf);
+            msg.toWire(renderer);
+        }
+
+        // If we time out, we stop, which cancels outstanding I/Os and
+        // shuts down everything.
+        if (data_->timeout != -1) {
+            data_->timer.expires_from_now(boost::posix_time::milliseconds(
+                data_->timeout));
+            data_->timer.async_wait(boost::bind(&IOFetch::stop, *this,
+                TIME_OUT));
+        }
+
+        // Open a connection to the target system.  For speed, if the operation
+        // is synchronous (i.e. UDP operation) we bypass the yield.
+        data_->origin = ASIO_OPENSOCK;
+        if (data_->socket->isOpenSynchronous()) {
+            data_->socket->open(data_->remote.get(), *this);
+        } else {
+            CORO_YIELD data_->socket->open(data_->remote.get(), *this);
+        }
+
+        // Begin an asynchronous send, and then yield.  When the send completes,
+        // we will resume immediately after this point.
+        data_->origin = ASIO_SENDSOCK;
+        CORO_YIELD data_->socket->asyncSend(data_->msgbuf->getData(),
+            data_->msgbuf->getLength(), data_->remote.get(), *this);
+
+        // Now receive the response.  Since TCP may not receive the entire
+        // message in one operation, we need to loop until we have received
+        // it. (This can't be done within the asyncReceive() method because
+        // each I/O operation will be done asynchronously and between each one
+        // we need to yield ... and we *really* don't want to set up another
+        // coroutine within that method.)  So after each receive (and yield),
+        // we check if the operation is complete and if not, loop to read again.
+        //
+        // Another concession to TCP is that the amount of data is contained in
+        // the first two bytes.  This leads to two problems:
+        //
+        // a) We don't want those bytes in the return buffer.
+        // b) They may not both arrive in the first I/O.
+        //
+        // So... we need to loop until we have at least two bytes, then store
+        // the expected amount of data.  Then we need to loop until we have
+        // received all the data before copying it back to the user's buffer.
+        // And we want to minimise the amount of copying...
+
+        data_->origin = ASIO_RECVSOCK;
+        data_->cumulative = 0;          // No data yet received
+        data_->offset = 0;              // First data into start of buffer
+        do {
+            CORO_YIELD data_->socket->asyncReceive(data_->staging,
+                                                   static_cast<size_t>(STAGING_LENGTH),
+                                                   data_->offset,
+                                                   data_->remote.get(), *this);
+        } while (!data_->socket->processReceivedData(data_->staging, length,
+                                                     data_->cumulative, data_->offset,
+                                                     data_->expected, data_->received));
+
+        // Finished with this socket, so close it.  This will not generate an
+        // I/O error, but reset the origin to unknown in case we change this.
+        data_->origin = ASIO_UNKORIGIN;
+        data_->socket->close();
+
+        /// We are done
+        stop(SUCCESS);
+    }
+}
+
+// Function that stops the coroutine sequence.  It is called either when the
+// query finishes or when the timer times out.  Either way, it sets the
+// "stopped_" flag and cancels anything that is in progress.
+//
+// As the function may be entered multiple times as things wind down, it checks
+// if the stopped_ flag is already set.  If it is, the call is a no-op.
+
+void
+IOFetch::stop(Result result) {
+
+    if (!data_->stopped) {
+
+        // Mark the fetch as stopped to prevent other completion callbacks
+        // (invoked because of the calls to cancel()) from executing the
+        // cancel calls again.
+        //
+        // In a single threaded environment, the callbacks won't be invoked
+        // until this one completes. In a multi-threaded environment, they may
+        // well be, in which case the testing (and setting) of the stopped_
+        // variable should be done inside a mutex (and the stopped_ variable
+        // declared as "volatile").
+        //
+        // The numeric arguments indicate the debug level, with the lower
+        // numbers indicating the most important information.  The relative
+        // values are somewhat arbitrary.
+        //
+        // Although Logger::debug checks the debug flag internally, doing it
+        // below before calling Logger::debug avoids the overhead of a string
+        // conversion in the common case when debug is not enabled.
+        //
+        // TODO: Update testing of stopped_ if threads are used.
+        data_->stopped = true;
+        switch (result) {
+            case TIME_OUT:
+                if (logger.isDebugEnabled(1)) {
+                    logger.debug(20, ASIO_RECVTMO,
+                                 data_->remote->getAddress().toText().c_str(),
+                                 static_cast<int>(data_->remote->getPort()));
+                }
+                break;
+
+            case SUCCESS:
+                if (logger.isDebugEnabled(50)) {
+                    logger.debug(30, ASIO_FETCHCOMP,
+                                 data_->remote->getAddress().toText().c_str(),
+                                 static_cast<int>(data_->remote->getPort()));
+                }
+                break;
+
+            case STOPPED:
+                // Fetch has been stopped for some other reason.  This is
+                // allowed but as it is unusual it is logged, but with a lower
+                // debug level than a timeout (which is totally normal).
+                logger.debug(1, ASIO_FETCHSTOP,
+                             data_->remote->getAddress().toText().c_str(),
+                             static_cast<int>(data_->remote->getPort()));
+                break;
+
+            default:
+                logger.error(ASIO_UNKRESULT, static_cast<int>(result),
+                             data_->remote->getAddress().toText().c_str(),
+                             static_cast<int>(data_->remote->getPort()));
+        }
+
+        // Stop requested, so cancel any I/Os on the socket, shut it down,
+        // and cancel the timer.
+        data_->socket->cancel();
+        data_->socket->close();
+
+        data_->timer.cancel();
+
+        // Execute the I/O completion callback (if present).
+        if (data_->callback) {
+            (*(data_->callback))(result);
+        }
+    }
+}
+
+// Log an error - called on I/O failure
+
+void IOFetch::logIOFailure(asio::error_code ec) {
+
+    // Should only get here with a known error code.
+    assert((data_->origin == ASIO_OPENSOCK) ||
+           (data_->origin == ASIO_SENDSOCK) ||
+           (data_->origin == ASIO_RECVSOCK) ||
+           (data_->origin == ASIO_UNKORIGIN));
+
+    static const char* PROTOCOL[2] = {"TCP", "UDP"};
+    logger.error(data_->origin,
+                 ec.value(),
+                 ((data_->remote->getProtocol() == IPPROTO_TCP) ?
+                     PROTOCOL[0] : PROTOCOL[1]),
+                 data_->remote->getAddress().toText().c_str(),
+                 static_cast<int>(data_->remote->getPort()));
+}
+
+} // namespace asiolink
+

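For reference, the completion callback invoked from stop() above is just a functor taking the fetch Result; a minimal sketch (the class name is hypothetical, the Callback interface is declared in io_fetch.h below):

    #include <asiolink/io_fetch.h>

    // Records the final result of a fetch - handy in tests or as a building
    // block for a resolver-side completion handler.
    class RecordingCallback : public asiolink::IOFetch::Callback {
    public:
        RecordingCallback() : result_(asiolink::IOFetch::NOTSET) {}
        virtual void operator()(asiolink::IOFetch::Result result) {
            result_ = result;
        }
        asiolink::IOFetch::Result getResult() const { return (result_); }
    private:
        asiolink::IOFetch::Result result_;
    };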
+ 179 - 0
src/lib/asiolink/io_fetch.h

@@ -0,0 +1,179 @@
+// Copyright (C) 2010  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __IO_FETCH_H
+#define __IO_FETCH_H 1
+
+#include <config.h>
+
+#include <boost/shared_array.hpp>
+#include <boost/shared_ptr.hpp>
+#include <boost/date_time/posix_time/posix_time_types.hpp>
+
+#include <coroutine.h>
+
+#include <asio/error_code.hpp>
+
+#include <dns/buffer.h>
+#include <dns/question.h>
+
+namespace asiolink {
+
+// Forward declarations
+class IOAddress;
+class IOFetchData;
+class IOService;
+
+/// \brief Upstream Fetch Processing
+///
+/// IOFetch is the class used to send upstream fetches and to handle responses.
+///
+
+class IOFetch : public coroutine {
+public:
+    /// \brief Protocol to use on the fetch
+    enum Protocol {
+        UDP = 0,
+        TCP = 1
+    };
+
+    /// \brief Origin of Asynchronous I/O Call
+    ///
+    /// Indicates what initiated an asynchronous I/O call and used in deciding
+    /// what error message to output if the I/O fails.
+    enum Origin {
+        NONE = 0,           ///< No asynchronous call outstanding
+        OPEN = 1,
+        SEND = 2,
+        RECEIVE = 3,
+        CLOSE = 4
+    };
+
+    /// \brief Result of Upstream Fetch
+    ///
+    /// Note that this applies to the status of I/Os in the fetch - a fetch
+    /// that resulted in a packet being received from the server is a SUCCESS,
+    /// even if the contents of the packet indicate that some error occurred.
+    enum Result {
+        SUCCESS = 0,        ///< Success, fetch completed
+        TIME_OUT = 1,       ///< Failure, fetch timed out
+        STOPPED = 2,        ///< Control code, fetch has been stopped
+        NOTSET = 3          ///< For testing, indicates value not set
+    };
+
+    // The next enum is a "trick" to allow constants to be defined in a class
+    // declaration.
+
+    /// \brief Integer Constants
+    enum {
+        STAGING_LENGTH = 8192   ///< Size of staging buffer
+    };
+
+    /// \brief I/O Fetch Callback
+    ///
+    /// Class of callback object for when the fetch itself has completed - an
+    /// object of this class is passed to the IOFetch constructor and its
+    /// operator() method called when the fetch completes.
+    ///
+    /// Note the difference between the two operator() methods:
+    /// - IOFetch::operator() callback is called when an asynchronous I/O has
+    ///   completed.
+    /// - IOFetch::Callback::operator() is called when an upstream fetch - which
+    ///   may have involved several asynchronous I/O operations - has completed.
+    ///
+    /// This is an abstract class.
+    class Callback {
+    public:
+        /// \brief Default Constructor
+        Callback()
+        {}
+
+        /// \brief Virtual Destructor
+        virtual ~Callback()
+        {}
+
+        /// \brief Callback method
+        ///
+        /// This is the method called when the fetch completes.
+        ///
+        /// \param result Result of the fetch
+        virtual void operator()(Result result) = 0;
+    };
+
+    /// \brief Constructor.
+    ///
+    /// Creates the object that will handle the upstream fetch.
+    ///
+    /// TODO: Need to randomise the source port
+    ///
+    /// \param protocol Fetch protocol, either IOFetch::TCP or IOFetch::UDP
+    /// \param service I/O Service object to handle the asynchronous
+    ///     operations.
+    /// \param question DNS question to send to the upstream server.
+    /// \param buff Output buffer into which the response (in wire format)
+    ///     is written (if a response is received).
+    /// \param cb Callback object containing the callback to be called
+    ///     when we terminate.  The caller is responsible for managing this
+    ///     object and deleting it if necessary.
+    /// \param address IP address of upstream server
+    /// \param port Port to which to connect on the upstream server
+    /// (default = 53)
+    /// \param wait Timeout for the fetch (in ms).  The default value of
+    ///     -1 indicates no timeout.
+    IOFetch(Protocol protocol, IOService& service,
+        const isc::dns::Question& question, const IOAddress& address,
+        uint16_t port, isc::dns::OutputBufferPtr& buff, Callback* cb,
+        int wait = -1);
+
+    /// \brief Return Current Protocol
+    ///
+    /// \return Protocol associated with this IOFetch object.
+    Protocol getProtocol() const;
+
+    /// \brief Coroutine entry point
+    ///
+    /// The operator() method is the method in which the coroutine code enters
+    /// this object when an operation has been completed.
+    ///
+    /// \param ec Error code, the result of the last asynchronous I/O operation.
+    /// \param length Amount of data received on the last asynchronous read
+    void operator()(asio::error_code ec = asio::error_code(), size_t length = 0);
+
+    /// \brief Terminate query
+    ///
+    /// This method can be called at any point.  It terminates the current
+    /// query with the specified reason.
+    ///
+    /// \param reason Reason for terminating the query
+    void stop(Result reason = STOPPED);
+
+private:
+    /// \brief Log I/O Failure
+    ///
+    /// Records an I/O failure to the log file
+    ///
+    /// \param ec ASIO error code
+    void logIOFailure(asio::error_code ec);
+
+    // Member variables.  All data is in a structure pointed to by a shared
+    // pointer.  The IOFetch object is copied a number of times during its
+    // life, and only requiring a pointer to be copied reduces overhead.
+    boost::shared_ptr<IOFetchData>  data_;   ///< Private data
+
+};
+
+} // namespace asiolink
+
+#endif // __IO_FETCH_H
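A minimal usage sketch of the IOFetch interface declared above (illustrative only, not part of this changeset: the callback class name, the extra include paths and the use of post() to start the fetch are assumptions based on the old UDPQuery pattern):

    #include <asiolink/io_fetch.h>      // added by this change; other paths assumed
    #include <asiolink/io_service.h>
    #include <dns/buffer.h>
    #include <dns/question.h>           // dns/name.h, dns/rrclass.h, dns/rrtype.h also needed

    // Callback that simply records the result of the fetch.
    class ExampleCallback : public asiolink::IOFetch::Callback {
    public:
        ExampleCallback() : result_(asiolink::IOFetch::NOTSET) {}
        virtual void operator()(asiolink::IOFetch::Result result) {
            result_ = result;               // SUCCESS, TIME_OUT or STOPPED
        }
        asiolink::IOFetch::Result result_;
    };

    void startExampleFetch(asiolink::IOService& service, ExampleCallback& callback) {
        isc::dns::Question question(isc::dns::Name("example.org"),
                                    isc::dns::RRClass::IN(),
                                    isc::dns::RRType::A());
        isc::dns::OutputBufferPtr buffer(new isc::dns::OutputBuffer(512));
        asiolink::IOFetch fetch(asiolink::IOFetch::UDP, service, question,
                                asiolink::IOAddress("192.0.2.1"), 53,
                                buffer, &callback, 2000);   // 2000 ms timeout
        service.get_io_service().post(fetch);               // assumed start mechanism
    }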

+ 4 - 7
src/lib/asiolink/io_message.h

@@ -12,8 +12,8 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
-#ifndef __IOMESSAGE_H
+#ifndef __IO_MESSAGE_H
-#define __IOMESSAGE_H 1
+#define __IO_MESSAGE_H 1
 
 // IMPORTANT NOTE: only very few ASIO headers files can be included in
 // this file.  In particular, asio.hpp should never be included here.
@@ -46,6 +46,7 @@ class IOMessage {
     ///
     /// \name Constructors and Destructor
     ///
+
     /// Note: The copy constructor and the assignment operator are
     /// intentionally defined as private, making this class non-copyable.
     //@{
@@ -96,8 +97,4 @@ private:
 
 
 }      // asiolink
-#endif // __IOMESSAGE_H
+#endif // __IO_MESSAGE_H
-
-// Local Variables: 
-// mode: c++
-// End: 

+ 5 - 4
src/lib/asiolink/io_service.cc

@@ -11,13 +11,14 @@
 // LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
-#include <config.h>
 
-// unistd is needed for asio.hpp with SunStudio
+#include <netinet/in.h>
-#include <unistd.h>
+#include <sys/socket.h>
+#include <unistd.h>             // for some IPC/network system calls
 
-#include <asio.hpp>
+#include <config.h>
 
+#include <asio.hpp>
 #include <asiolink/io_service.h>
 
 namespace asiolink {

+ 4 - 7
src/lib/asiolink/io_socket.h

@@ -12,8 +12,8 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
-#ifndef __IOSOCKET_H
+#ifndef __IO_SOCKET_H
-#define __IOSOCKET_H 1
+#define __IO_SOCKET_H 1
 
 // IMPORTANT NOTE: only very few ASIO headers files can be included in
 // this file.  In particular, asio.hpp should never be included here.
@@ -119,9 +119,6 @@ public:
     static IOSocket& getDummyTCPSocket();
 };
 
-}      // asiolink
+} // namespace asiolink
-#endif // __IOSOCKET_H
 
-// Local Variables: 
+#endif // __IO_SOCKET_H
-// mode: c++
-// End: 

+ 54 - 0
src/lib/asiolink/qid_gen.cc

@@ -0,0 +1,54 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// qid_gen defines a generator for query id's
+//
+// We probably want to merge this with the weighted random in the nsas
+// (and other parts where we need randomness, perhaps another thing
+// for a general libutil?)
+
+#include <asiolink/qid_gen.h>
+
+#include <sys/time.h>
+
+namespace {
+    asiolink::QidGenerator qid_generator_instance;
+}
+
+namespace asiolink {
+
+QidGenerator&
+QidGenerator::getInstance() {
+    return (qid_generator_instance);
+}
+
+QidGenerator::QidGenerator() : dist_(0, 65535),
+                               vgen_(generator_, dist_)
+{
+    seed();
+}
+
+void
+QidGenerator::seed() {
+    struct timeval tv;
+    gettimeofday(&tv, 0);
+    generator_.seed((tv.tv_sec * 1000000) + tv.tv_usec);
+}
+
+isc::dns::qid_t
+QidGenerator::generateQid() {
+    return (vgen_());
+}
+
+} // namespace asiolink

+ 85 - 0
src/lib/asiolink/qid_gen.h

@@ -0,0 +1,85 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// qid_gen defines a generator for query id's
+//
+// We probably want to merge this with the weighted random in the nsas
+// (and other parts where we need randomness, perhaps another thing
+// for a general libutil?)
+
+#ifndef __QID_GEN_H
+#define __QID_GEN_H
+
+#include <dns/message.h>
+#include <boost/random/mersenne_twister.hpp>
+#include <boost/random/uniform_int.hpp>
+#include <boost/random/variate_generator.hpp>
+
+
+namespace asiolink {
+
+/// This class generates Qids for outgoing queries
+///
+/// It is implemented as a singleton; the public way to access it
+/// is to call getInstance().generateQid().
+///
+/// It automatically seeds itself with the current time when it is first
+/// used.
+class QidGenerator {
+public:
+    /// \brief Returns the singleton instance of the QidGenerator
+    ///
+    /// Returns a reference to the singleton instance of the generator
+    static QidGenerator& getInstance();
+
+    /// \brief Default constructor
+    ///
+    /// It is recommended that getInstance is used rather than creating
+    /// separate instances of this class.
+    ///
+    /// The constructor automatically seeds the generator with the
+    /// current time.
+    QidGenerator();
+
+    /// Generate a Qid
+    ///
+    /// \return A random Qid
+    isc::dns::qid_t generateQid();
+
+    /// \brief Seeds the QidGenerator (based on the current time)
+    ///
+    /// This is automatically called by the constructor
+    void seed();
+
+private:
+    // "Mersenne Twister: A 623-dimensionally equidistributed
+    // uniform pseudo-random number generator", Makoto Matsumoto and
+    // Takuji Nishimura, ACM Transactions on Modeling and Computer
+    // Simulation: Special Issue on Uniform Random Number Generation,
+    // Vol. 8, No. 1, January 1998, pp. 3-30.
+    //
+    // mt19937 is an implementation of one of the pseudo random
+    // generators described in this paper.
+    boost::mt19937 generator_;
+
+    // For qid's we want a uniform distribution
+    boost::uniform_int<> dist_;
+
+    boost::variate_generator<boost::mt19937&, boost::uniform_int<> > vgen_;
+};
+
+
+} // namespace asiolink
+
+#endif // __QID_GEN_H
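For reference, a one-line usage sketch of the generator declared above (query_message is assumed to be an isc::dns::Message being rendered for an outgoing query):

    #include <asiolink/qid_gen.h>

    // Stamp the outgoing query with a freshly generated, randomly seeded QID.
    query_message.setQid(asiolink::QidGenerator::getInstance().generateQid());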

+ 0 - 460
src/lib/asiolink/recursive_query.cc

@@ -1,460 +0,0 @@
-// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <config.h>
-
-#include <stdlib.h>
-
-// unistd is needed for asio.hpp with SunStudio
-#include <unistd.h>
-
-#include <asio.hpp>
-
-#include <asiolink/recursive_query.h>
-#include <asiolink/dns_service.h>
-#include <asiolink/udp_query.h>
-
-#include <log/dummylog.h>
-
-#include <boost/lexical_cast.hpp>
-#include <boost/bind.hpp>
-
-#include <dns/question.h>
-#include <dns/message.h>
-
-#include <resolve/resolve.h>
-
-using isc::log::dlog;
-using namespace isc::dns;
-
-namespace asiolink {
-
-typedef std::vector<std::pair<std::string, uint16_t> > AddressVector;
-
-// Here we do not use the typedef above, as the SunStudio compiler
-// mishandles this in its name mangling, and wouldn't compile.
-// We can probably use a typedef, but need to move it to a central
-// location and use it consistently.
-RecursiveQuery::RecursiveQuery(DNSService& dns_service,
-    const std::vector<std::pair<std::string, uint16_t> >& upstream,
-    const std::vector<std::pair<std::string, uint16_t> >& upstream_root,
-    int query_timeout, int client_timeout, int lookup_timeout,
-    unsigned retries) :
-    dns_service_(dns_service), upstream_(new AddressVector(upstream)),
-    upstream_root_(new AddressVector(upstream_root)),
-    query_timeout_(query_timeout), client_timeout_(client_timeout),
-    lookup_timeout_(lookup_timeout), retries_(retries)
-{}
-
-namespace {
-
-typedef std::pair<std::string, uint16_t> addr_t;
-
-/*
- * This is a query in progress. When a new query is made, this one holds
- * the context information about it, like how many times we are allowed
- * to retry on failure, what to do when we succeed, etc.
- *
- * Used by RecursiveQuery::sendQuery.
- */
-class RunningQuery : public UDPQuery::Callback {
-private:
-    // The io service to handle async calls
-    asio::io_service& io_;
-
-    // Info for (re)sending the query (the question and destination)
-    Question question_;
-
-    // This is where we build and store our final answer
-    MessagePtr answer_message_;
-
-    // currently we use upstream as the current list of NS records
-    // we should differentiate between forwarding and resolving
-    boost::shared_ptr<AddressVector> upstream_;
-
-    // root servers...just copied over to the zone_servers_
-    boost::shared_ptr<AddressVector> upstream_root_;
-
-    // Buffer to store the result.
-    OutputBufferPtr buffer_;
-
-    // Server to notify when we succeed or fail
-    //shared_ptr<DNSServer> server_;
-    isc::resolve::ResolverInterface::CallbackPtr resolvercallback_;
-
-    // To prevent both unreasonably long cname chains and cname loops,
-    // we simply keep a counter of the number of CNAMEs we have
-    // followed so far (and error if it exceeds RESOLVER_MAX_CNAME_CHAIN
-    // from lib/resolve/response_classifier.h)
-    unsigned cname_count_;
-
-    /*
-     * TODO Do something more clever with timeouts. In the long term, some
-     *     computation of average RTT, increase with each retry, etc.
-     */
-    // Timeout information
-    int query_timeout_;
-    unsigned retries_;
-
-    // normal query state
-
-    // Not using NSAS at this moment, so we keep a list
-    // of 'current' zone servers
-    std::vector<addr_t> zone_servers_;
-
-    // Update the question that will be sent to the server
-    void setQuestion(const Question& new_question) {
-        question_ = new_question;
-    }
-
-    // TODO: replace by our wrapper
-    asio::deadline_timer client_timer;
-    asio::deadline_timer lookup_timer;
-
-    size_t queries_out_;
-
-    // If we timed out ourselves (lookup timeout), stop issuing queries
-    bool done_;
-
-    // If we have a client timeout, we send back an answer, but don't
-    // stop. We use this variable to make sure we don't send another
-    // answer if we do find one later (or if we have a lookup_timeout)
-    bool answer_sent_;
-
-    // (re)send the query to the server.
-    void send() {
-        const int uc = upstream_->size();
-        const int zs = zone_servers_.size();
-        buffer_->clear();
-        if (uc > 0) {
-            int serverIndex = rand() % uc;
-            dlog("Sending upstream query (" + question_.toText() +
-                ") to " + upstream_->at(serverIndex).first);
-            UDPQuery query(io_, question_,
-                upstream_->at(serverIndex).first,
-                upstream_->at(serverIndex).second, buffer_, this,
-                query_timeout_);
-            ++queries_out_;
-            io_.post(query);
-        } else if (zs > 0) {
-            int serverIndex = rand() % zs;
-            dlog("Sending query to zone server (" + question_.toText() +
-                ") to " + zone_servers_.at(serverIndex).first);
-            UDPQuery query(io_, question_,
-                zone_servers_.at(serverIndex).first,
-                zone_servers_.at(serverIndex).second, buffer_, this,
-                query_timeout_);
-            ++queries_out_;
-            io_.post(query);
-        } else {
-            dlog("Error, no upstream servers to send to.");
-        }
-    }
-    
-    // This function is called by operator() if there is an actual
-    // answer from a server and we are in recursive mode
-    // depending on the contents, we go on recursing or return
-    //
-    // Note that the footprint may change as this function may
-    // need to append data to the answer we are building later.
-    //
-    // returns true if we are done (either we have an answer or an
-    //              error message)
-    // returns false if we are not done
-    bool handleRecursiveAnswer(const Message& incoming) {
-        dlog("Handle response");
-        // In case we get a CNAME, we store the target
-        // here (classify() will set it when it walks through
-        // the cname chain to verify it).
-        Name cname_target(question_.getName());
-        
-        isc::resolve::ResponseClassifier::Category category =
-            isc::resolve::ResponseClassifier::classify(
-                question_, incoming, cname_target, cname_count_, true);
-
-        bool found_ns_address = false;
-
-        switch (category) {
-        case isc::resolve::ResponseClassifier::ANSWER:
-        case isc::resolve::ResponseClassifier::ANSWERCNAME:
-            // Done. copy and return.
-            isc::resolve::copyResponseMessage(incoming, answer_message_);
-            return true;
-            break;
-        case isc::resolve::ResponseClassifier::CNAME:
-            dlog("Response is CNAME!");
-            // (unfinished) CNAME. We set our question_ to the CNAME
-            // target, then start over at the beginning (for now, that
-            // is, we reset our 'current servers' to the root servers).
-            if (cname_count_ >= RESOLVER_MAX_CNAME_CHAIN) {
-                // just give up
-                dlog("CNAME chain too long");
-                isc::resolve::makeErrorMessage(answer_message_,
-                                               Rcode::SERVFAIL());
-                return true;
-            }
-
-            answer_message_->appendSection(Message::SECTION_ANSWER,
-                                           incoming);
-            setZoneServersToRoot();
-
-            question_ = Question(cname_target, question_.getClass(),
-                                 question_.getType());
-
-            dlog("Following CNAME chain to " + question_.toText());
-            send();
-            return false;
-            break;
-        case isc::resolve::ResponseClassifier::NXDOMAIN:
-            // NXDOMAIN, just copy and return.
-            isc::resolve::copyResponseMessage(incoming, answer_message_);
-            return true;
-            break;
-        case isc::resolve::ResponseClassifier::REFERRAL:
-            // Referral. For now we just take the first glue address
-            // we find and continue with that
-            zone_servers_.clear();
-
-            for (RRsetIterator rrsi = incoming.beginSection(Message::SECTION_ADDITIONAL);
-                 rrsi != incoming.endSection(Message::SECTION_ADDITIONAL) && !found_ns_address;
-                 rrsi++) {
-                ConstRRsetPtr rrs = *rrsi;
-                if (rrs->getType() == RRType::A()) {
-                    // found address
-                    RdataIteratorPtr rdi = rrs->getRdataIterator();
-                    // just use the first for now
-                    if (!rdi->isLast()) {
-                        std::string addr_str = rdi->getCurrent().toText();
-                        dlog("[XX] first address found: " + addr_str);
-                        // now we have one address, simply
-                        // resend that exact same query
-                        // to that address and yield, when it
-                        // returns, loop again.
-                        
-                        // TODO should use NSAS
-                        zone_servers_.push_back(addr_t(addr_str, 53));
-                        found_ns_address = true;
-                    }
-                }
-            }
-            if (found_ns_address) {
-                // next resolver round
-                send();
-                return false;
-            } else {
-                dlog("[XX] no ready-made addresses in additional. need nsas.");
-                // TODO this will result in answering with the delegation. oh well
-                isc::resolve::copyResponseMessage(incoming, answer_message_);
-                return true;
-            }
-            break;
-        case isc::resolve::ResponseClassifier::EMPTY:
-        case isc::resolve::ResponseClassifier::EXTRADATA:
-        case isc::resolve::ResponseClassifier::INVNAMCLASS:
-        case isc::resolve::ResponseClassifier::INVTYPE:
-        case isc::resolve::ResponseClassifier::MISMATQUEST:
-        case isc::resolve::ResponseClassifier::MULTICLASS:
-        case isc::resolve::ResponseClassifier::NOTONEQUEST:
-        case isc::resolve::ResponseClassifier::NOTRESPONSE:
-        case isc::resolve::ResponseClassifier::NOTSINGLE:
-        case isc::resolve::ResponseClassifier::OPCODE:
-        case isc::resolve::ResponseClassifier::RCODE:
-        case isc::resolve::ResponseClassifier::TRUNCATED:
-            // Should we try a different server rather than SERVFAIL?
-            isc::resolve::makeErrorMessage(answer_message_,
-                                           Rcode::SERVFAIL());
-            return true;
-            break;
-        }
-        // should not be reached. assert here?
-        dlog("[FATAL] unreachable code");
-        return true;
-    }
-    
-public:
-    RunningQuery(asio::io_service& io,
-        const Question &question,
-        MessagePtr answer_message,
-        boost::shared_ptr<AddressVector> upstream,
-        boost::shared_ptr<AddressVector> upstream_root,
-        OutputBufferPtr buffer,
-        isc::resolve::ResolverInterface::CallbackPtr cb,
-        int query_timeout, int client_timeout, int lookup_timeout,
-        unsigned retries) :
-        io_(io),
-        question_(question),
-        answer_message_(answer_message),
-        upstream_(upstream),
-        upstream_root_(upstream_root),
-        buffer_(buffer),
-        resolvercallback_(cb),
-        cname_count_(0),
-        query_timeout_(query_timeout),
-        retries_(retries),
-        client_timer(io),
-        lookup_timer(io),
-        queries_out_(0),
-        done_(false),
-        answer_sent_(false)
-    {
-        // Setup the timer to stop trying (lookup_timeout)
-        if (lookup_timeout >= 0) {
-            lookup_timer.expires_from_now(
-                boost::posix_time::milliseconds(lookup_timeout));
-            lookup_timer.async_wait(boost::bind(&RunningQuery::stop, this, false));
-        }
-        
-        // Setup the timer to send an answer (client_timeout)
-        if (client_timeout >= 0) {
-            client_timer.expires_from_now(
-                boost::posix_time::milliseconds(client_timeout));
-            client_timer.async_wait(boost::bind(&RunningQuery::clientTimeout, this));
-        }
-        
-        // should use NSAS for root servers
-        // Adding root servers if not a forwarder
-        if (upstream_->empty()) {
-            setZoneServersToRoot();
-        }
-
-        send();
-    }
-
-    void setZoneServersToRoot() {
-        zone_servers_.clear();
-        if (upstream_root_->empty()) { //if no root ips given, use this
-            zone_servers_.push_back(addr_t("192.5.5.241", 53));
-        } else {
-            // copy the list
-            dlog("Size is " + 
-                boost::lexical_cast<std::string>(upstream_root_->size()) + 
-                "\n");
-            for(AddressVector::iterator it = upstream_root_->begin();
-                it < upstream_root_->end(); ++it) {
-            zone_servers_.push_back(addr_t(it->first,it->second));
-            dlog("Put " + zone_servers_.back().first + "into root list\n");
-            }
-        }
-    }
-    virtual void clientTimeout() {
-        // Return a SERVFAIL, but do not stop until
-        // we have an answer or timeout ourselves
-        isc::resolve::makeErrorMessage(answer_message_,
-                                       Rcode::SERVFAIL());
-        if (!answer_sent_) {
-            answer_sent_ = true;
-            resolvercallback_->success(answer_message_);
-        }
-    }
-
-    virtual void stop(bool resume) {
-        // if we cancel our timers, we will still get an event for
-        // that, so we cannot delete ourselves just yet (those events
-        // would be bound to a deleted object)
-        // cancel them one by one, both cancels should get us back
-        // here again.
-        // same goes if we have an outstanding query (can't delete
-        // until that one comes back to us)
-        done_ = true;
-        if (resume && !answer_sent_) {
-            answer_sent_ = true;
-            resolvercallback_->success(answer_message_);
-        } else {
-            resolvercallback_->failure();
-        }
-        if (lookup_timer.cancel() != 0) {
-            return;
-        }
-        if (client_timer.cancel() != 0) {
-            return;
-        }
-        if (queries_out_ > 0) {
-            return;
-        }
-        delete this;
-    }
-
-    // This function is used as callback from DNSQuery.
-    virtual void operator()(UDPQuery::Result result) {
-        // XXX is this the place for TCP retry?
-        --queries_out_;
-        if (!done_ && result != UDPQuery::TIME_OUT) {
-            // we got an answer
-            Message incoming(Message::PARSE);
-            InputBuffer ibuf(buffer_->getData(), buffer_->getLength());
-            incoming.fromWire(ibuf);
-
-            if (upstream_->size() == 0 &&
-                incoming.getRcode() == Rcode::NOERROR()) {
-                done_ = handleRecursiveAnswer(incoming);
-            } else {
-                isc::resolve::copyResponseMessage(incoming, answer_message_);
-                done_ = true;
-            }
-            
-            if (done_) {
-                stop(true);
-            }
-        } else if (!done_ && retries_--) {
-            // We timed out, but we have some retries, so send again
-            dlog("Timeout, resending query");
-            send();
-        } else {
-            // out of retries, give up for now
-            stop(false);
-        }
-    }
-};
-
-}
-
-void
-RecursiveQuery::resolve(const QuestionPtr& question,
-    const isc::resolve::ResolverInterface::CallbackPtr callback)
-{
-    asio::io_service& io = dns_service_.get_io_service();
-
-    MessagePtr answer_message(new Message(Message::RENDER));
-    OutputBufferPtr buffer(new OutputBuffer(0));
-    
-    // It will delete itself when it is done
-    new RunningQuery(io, *question, answer_message, upstream_,
-                     upstream_root_, buffer, callback, query_timeout_,
-                     client_timeout_, lookup_timeout_, retries_);
-}
-
-void
-RecursiveQuery::resolve(const Question& question,
-                        MessagePtr answer_message,
-                        OutputBufferPtr buffer,
-                        DNSServer* server)
-{
-    // XXX: eventually we will need to be able to determine whether
-    // the message should be sent via TCP or UDP, or sent initially via
-    // UDP and then fall back to TCP on failure, but for the moment
-    // we're only going to handle UDP.
-    asio::io_service& io = dns_service_.get_io_service();
-
-    isc::resolve::ResolverInterface::CallbackPtr crs(
-        new isc::resolve::ResolverCallbackServer(server));
-    
-    // It will delete itself when it is done
-    new RunningQuery(io, question, answer_message, upstream_, upstream_root_,
-                         buffer, crs, query_timeout_, client_timeout_,
-                         lookup_timeout_, retries_);
-}
-
-
-
-} // namespace asiolink

+ 38 - 23
src/lib/asiolink/tcp_endpoint.h

@@ -24,32 +24,33 @@
 namespace asiolink {
 
 /// \brief The \c TCPEndpoint class is a concrete derived class of
-/// \c IOEndpoint that represents an endpoint of a TCP connection.
+/// \c IOEndpoint that represents an endpoint of a TCP packet.
 ///
-/// In the current implementation, an object of this class is always
+/// Other notes about \c TCPEndpoint apply to this class, too.
-/// instantiated within the wrapper routines.  Applications are expected to
-/// get access to the object via the abstract base class, \c IOEndpoint.
-/// This design may be changed when we generalize the wrapper interface.
-///
-/// Note: this implementation is optimized for the case where this object
-/// is created from an ASIO endpoint object in a receiving code path
-/// by avoiding to make a copy of the base endpoint.  For TCP it may not be
-/// a big deal, but when we receive UDP packets at a high rate, the copy
-/// overhead might be significant.
 class TCPEndpoint : public IOEndpoint {
 public:
     ///
-    /// \name Constructors and Destructor
+    /// \name Constructors and Destructor.
     ///
     //@{
+
+    /// \brief Default Constructor
+    ///
+    /// Creates an internal endpoint.  This is expected to be set by some
+    /// external call.
+    TCPEndpoint() :
+        asio_endpoint_placeholder_(new asio::ip::tcp::endpoint()),
+        asio_endpoint_(*asio_endpoint_placeholder_)
+    {}
+
     /// \brief Constructor from a pair of address and port.
     ///
     /// \param address The IP address of the endpoint.
     /// \param port The TCP port number of the endpoint.
     TCPEndpoint(const IOAddress& address, const unsigned short port) :
         asio_endpoint_placeholder_(
-            new asio::ip::tcp::endpoint(
+            new asio::ip::tcp::endpoint(asio::ip::address::from_string(address.toText()),
-                asio::ip::address::from_string(address.toText()), port)),
+                              port)),
         asio_endpoint_(*asio_endpoint_placeholder_)
     {}
 
@@ -59,39 +60,53 @@ public:
     /// corresponding ASIO class, \c tcp::endpoint.
     ///
     /// \param asio_endpoint The ASIO representation of the TCP endpoint.
-    TCPEndpoint(const asio::ip::tcp::endpoint& asio_endpoint) :
+    TCPEndpoint(asio::ip::tcp::endpoint& asio_endpoint) :
         asio_endpoint_placeholder_(NULL), asio_endpoint_(asio_endpoint)
     {}
 
+    /// \brief Constructor from an ASIO TCP endpoint.
+    ///
+    /// This constructor is designed to be an efficient wrapper for the
+    /// corresponding ASIO class, \c tcp::endpoint.
+    ///
+    /// \param asio_endpoint The ASIO representation of the TCP endpoint.
+    TCPEndpoint(const asio::ip::tcp::endpoint& asio_endpoint) :
+        asio_endpoint_placeholder_(new asio::ip::tcp::endpoint(asio_endpoint)),
+        asio_endpoint_(*asio_endpoint_placeholder_)
+    {}
+
     /// \brief The destructor.
-    ~TCPEndpoint() { delete asio_endpoint_placeholder_; }
+    virtual ~TCPEndpoint() { delete asio_endpoint_placeholder_; }
     //@}
 
-    IOAddress getAddress() const {
+    virtual IOAddress getAddress() const {
         return (asio_endpoint_.address());
     }
 
-    uint16_t getPort() const {
+    virtual uint16_t getPort() const {
         return (asio_endpoint_.port());
     }
 
-    short getProtocol() const {
+    virtual short getProtocol() const {
         return (asio_endpoint_.protocol().protocol());
     }
 
-    short getFamily() const {
+    virtual short getFamily() const {
         return (asio_endpoint_.protocol().family());
     }
 
     // This is not part of the exosed IOEndpoint API but allows
     // direct access to the ASIO implementation of the endpoint
-    const asio::ip::tcp::endpoint& getASIOEndpoint() const {
+    inline const asio::ip::tcp::endpoint& getASIOEndpoint() const {
+        return (asio_endpoint_);
+    }
+    inline asio::ip::tcp::endpoint& getASIOEndpoint() {
         return (asio_endpoint_);
     }
 
 private:
-    const asio::ip::tcp::endpoint* asio_endpoint_placeholder_;
+    asio::ip::tcp::endpoint* asio_endpoint_placeholder_;
-    const asio::ip::tcp::endpoint& asio_endpoint_;
+    asio::ip::tcp::endpoint& asio_endpoint_;
 };
 
 }      // namespace asiolink
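A short sketch of how the revised TCPEndpoint constructors above are used (illustrative only; the io_address.h path and construction of IOAddress from a string are assumed from the existing asiolink API):

    #include <asiolink/io_address.h>    // path assumed
    #include <asiolink/tcp_endpoint.h>

    // Endpoint known in advance: build it from an address and port.
    asiolink::TCPEndpoint remote(asiolink::IOAddress("192.0.2.1"), 53);
    // remote.getAddress().toText() == "192.0.2.1", remote.getPort() == 53

    // Endpoint to be filled in later, e.g. by TCPSocket<C>::asyncReceive(),
    // which overwrites the internal ASIO endpoint with the peer's details.
    asiolink::TCPEndpoint peer;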

+ 47 - 12
src/lib/asiolink/tcp_server.cc

@@ -14,18 +14,19 @@
 
 #include <config.h>
 
-#include <boost/shared_array.hpp>
+#include <netinet/in.h>
-
+#include <sys/socket.h>
-// unistd is needed for asio.hpp with SunStudio
+#include <unistd.h>             // for some IPC/network system calls
-#include <unistd.h>
+#include <errno.h>
 
-#include <asio.hpp>
+#include <boost/shared_array.hpp>
 
 #include <log/dummylog.h>
 
+#include <asio.hpp>
+#include <asiolink/dummy_io_cb.h>
 #include <asiolink/tcp_endpoint.h>
 #include <asiolink/tcp_socket.h>
-
 #include <asiolink/tcp_server.h>
 
 
@@ -46,7 +47,7 @@ TCPServer::TCPServer(io_service& io_service,
                      const SimpleCallback* checkin,
                      const DNSLookup* lookup,
                      const DNSAnswer* answer) :
-    io_(io_service), done_(false),
+    io_(io_service), done_(false), stopped_by_hand_(false),
     checkin_callback_(checkin), lookup_callback_(lookup),
     answer_callback_(answer)
 {
@@ -65,9 +66,16 @@ TCPServer::TCPServer(io_service& io_service,
 
 void
 TCPServer::operator()(error_code ec, size_t length) {
-    /// Because the coroutine reeentry block is implemented as
+    /// Because the coroutine reentry block is implemented as
     /// a switch statement, inline variable declarations are not
     /// permitted.  Certain variables used below can be declared here.
+
+    /// If user has stopped the server, we won't enter the
+    /// coroutine body, just return
+    if (stopped_by_hand_) {
+        return;
+    }
+
     boost::array<const_buffer,2> bufs;
     OutputBuffer lenbuf(TCP_MESSAGE_LENGTHSIZE);
 
@@ -76,11 +84,21 @@ TCPServer::operator()(error_code ec, size_t length) {
             /// Create a socket to listen for connections
             socket_.reset(new tcp::socket(acceptor_->get_io_service()));
 
-            /// Wait for new connections. In the event of error,
+            /// Wait for new connections. In the event of non-fatal error,
             /// try again
             do {
                 CORO_YIELD acceptor_->async_accept(*socket_, *this);
-            } while (!ec);
+                // Abort on fatal errors
+                // TODO: Log error?
+                if (ec) {
+                    using namespace asio::error;
+                    if (ec.value() != would_block && ec.value() != try_again &&
+                        ec.value() != connection_aborted &&
+                        ec.value() != interrupted) {
+                        return;
+                    }
+                }
+            } while (ec);
 
             /// Fork the coroutine by creating a copy of this one and
             /// scheduling it on the ASIO service queue.  The parent
@@ -103,7 +121,7 @@ TCPServer::operator()(error_code ec, size_t length) {
         /// Now read the message itself. (This is done in a different scope
         /// to allow inline variable declarations.)
         CORO_YIELD {
-            InputBuffer dnsbuffer((const void *) data_.get(), length);
+            InputBuffer dnsbuffer(data_.get(), length);
             uint16_t msglen = dnsbuffer.readUint16();
             async_read(*socket_, asio::buffer(data_.get(), msglen), *this);
         }
@@ -118,7 +136,14 @@ TCPServer::operator()(error_code ec, size_t length) {
         // that would quickly generate an IOMessage object without
         // all these calls to "new".)
         peer_.reset(new TCPEndpoint(socket_->remote_endpoint()));
-        iosock_.reset(new TCPSocket(*socket_));
+
+        // The TCP socket class has been extended with asynchronous functions
+        // and takes as a template parameter a completion callback class.  As
+        // TCPServer does not use these extended functions (only those defined
+        // in the IOSocket base class) - but needs a TCPSocket to get hold of
+        // the underlying Boost TCP socket - DummyIOCallback is used.  This
+        // provides the appropriate operator() but is otherwise functionless.
+        iosock_.reset(new TCPSocket<DummyIOCallback>(*socket_));
         io_message_.reset(new IOMessage(data_.get(), length, *iosock_, *peer_));
         bytes_ = length;
 
@@ -181,6 +206,16 @@ TCPServer::asyncLookup() {
                         answer_message_, respbuf_, this);
 }
 
+void TCPServer::stop() {
+    // server should not be stopped twice
+    if (stopped_by_hand_) {
+        return;
+    }
+
+    stopped_by_hand_ = true;
+    acceptor_->close();
+    socket_->close();
+}
 /// Post this coroutine on the ASIO service queue so that it will
 /// resume processing where it left off.  The 'done' parameter indicates
 /// whether there is an answer to return to the client.

+ 4 - 0
src/lib/asiolink/tcp_server.h

@@ -43,6 +43,7 @@ public:
     void operator()(asio::error_code ec = asio::error_code(),
                     size_t length = 0);
     void asyncLookup();
+    void stop();
     void resume(const bool done);
     bool hasAnswer() { return (done_); }
     int value() { return (get_value()); }
@@ -106,6 +107,9 @@ private:
     size_t bytes_;
     bool done_;
 
+    // whether user has stopped the server
+    bool stopped_by_hand_;
+
     // Callback functions provided by the caller
     const SimpleCallback* checkin_callback_;
     const DNSLookup* lookup_callback_;

+ 380 - 16
src/lib/asiolink/tcp_socket.h

@@ -19,34 +19,398 @@
 #error "asio.hpp must be included before including this, see asiolink.h as to why"
 #endif
 
-#include <asiolink/io_socket.h>
+#include <log/dummylog.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <unistd.h>             // for some IPC/network system calls
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+
+#include <boost/bind.hpp>
+#include <boost/numeric/conversion/cast.hpp>
+
+#include <config.h>
+
+#include <dns/buffer.h>
+
+#include <asiolink/asiolink_utilities.h>
+#include <asiolink/io_asio_socket.h>
+#include <asiolink/io_endpoint.h>
+#include <asiolink/io_service.h>
+#include <asiolink/tcp_endpoint.h>
 
 namespace asiolink {
 
-/// \brief The \c TCPSocket class is a concrete derived class of
+/// \brief Buffer Too Large
-/// \c IOSocket that represents a TCP socket.
 ///
-/// In the current implementation, an object of this class is always
+/// Thrown on an attempt to send a buffer > 64k
-/// instantiated within the wrapper routines.  Applications are expected to
+class BufferTooLarge : public IOError {
-/// get access to the object via the abstract base class, \c IOSocket.
+public:
-/// This design may be changed when we generalize the wrapper interface.
+    BufferTooLarge(const char* file, size_t line, const char* what) :
-class TCPSocket : public IOSocket {
+        IOError(file, line, what) {}
+};
+
+/// \brief The \c TCPSocket class is a concrete derived class of \c IOAsioSocket
+/// that represents a TCP socket.
+///
+/// \param C Callback type
+template <typename C>
+class TCPSocket : public IOAsioSocket<C> {
 private:
-    TCPSocket(const TCPSocket& source);
+    /// \brief Class is non-copyable
-    TCPSocket& operator=(const TCPSocket& source);
+    TCPSocket(const TCPSocket&);
+    TCPSocket& operator=(const TCPSocket&);
+
 public:
+
     /// \brief Constructor from an ASIO TCP socket.
     ///
-    /// \param socket The ASIO representation of the TCP socket.
+    /// \param socket The ASIO representation of the TCP socket.  It is assumed
-    TCPSocket(asio::ip::tcp::socket& socket) : socket_(socket) {}
+    ///        that the caller will open and close the socket, so these
+    ///        operations are a no-op for that socket.
+    TCPSocket(asio::ip::tcp::socket& socket);
+
+    /// \brief Constructor
+    ///
+    /// Used when the TCPSocket is being asked to manage its own internal
+    /// socket.  In this case, the open() and close() methods are used.
+    ///
+    /// \param service I/O Service object used to manage the socket.
+    TCPSocket(IOService& service);
+
+    /// \brief Destructor
+    virtual ~TCPSocket();
+
+    /// \brief Return file descriptor of underlying socket
+    virtual int getNative() const {
+        return (socket_.native());
+    }
+
+    /// \brief Return protocol of socket
+    virtual int getProtocol() const {
+        return (IPPROTO_TCP);
+    }
+
+    /// \brief Is "open()" synchronous?
+    ///
+    /// Indicates that the opening of a TCP socket is asynchronous.
+    virtual bool isOpenSynchronous() const {
+        return (false);
+    }
+
+    /// \brief Open Socket
+    ///
+    /// Opens the TCP socket.  This is an asynchronous operation, completion of
+    /// which will be signalled via a call to the callback function.
+    ///
+    /// \param endpoint Endpoint to which the socket will connect.
+    /// \param callback Callback object.
+    virtual void open(const IOEndpoint* endpoint, C& callback);
+
+    /// \brief Send Asynchronously
+    ///
+    /// Calls the underlying socket's async_send() method to send a packet of
+    /// data asynchronously to the remote endpoint.  The callback will be called
+    /// on completion.
+    ///
+    /// \param data Data to send
+    /// \param length Length of data to send
+    /// \param endpoint Target of the send. (Unused for a TCP socket because
+    ///        that was determined when the connection was opened.)
+    /// \param callback Callback object.
+    virtual void asyncSend(const void* data, size_t length,
+                           const IOEndpoint* endpoint, C& callback);
+
+    /// \brief Receive Asynchronously
+    ///
+    /// Calls the underlying socket's async_receive() method to read a packet
+    /// of data from a remote endpoint.  Arrival of the data is signalled via a
+    /// call to the callback function.
+    ///
+    /// \param data Buffer to receive incoming message
+    /// \param length Length of the data buffer
+    /// \param offset Offset into buffer where data is to be put
+    /// \param endpoint Source of the communication
+    /// \param callback Callback object
+    virtual void asyncReceive(void* data, size_t length, size_t offset,
+                              IOEndpoint* endpoint, C& callback);
+
+    /// \brief Process received data packet
+    ///
+    /// See the description of IOAsioSocket::receiveComplete for a complete
+    /// description of this method.
+    ///
+    /// \param staging Pointer to the start of the staging buffer.
+    /// \param length Amount of data in the staging buffer.
+    /// \param cumulative Amount of data received before the staging buffer is
+    ///        processed.
+    /// \param offset Unused.
+    /// \param expected Unused.
+    /// \param outbuff Output buffer.  Data in the staging buffer is copied
+    ///        to this output buffer in the call.
+    ///
+    /// \return Always true
+    virtual bool processReceivedData(const void* staging, size_t length,
+                                     size_t& cumulative, size_t& offset,
+                                     size_t& expected,
+                                     isc::dns::OutputBufferPtr& outbuff);
+
+    /// \brief Cancel I/O On Socket
+    virtual void cancel();
+
+    /// \brief Close socket
+    virtual void close();
 
 
-    int getNative() const { return (socket_.native()); }
-    int getProtocol() const { return (IPPROTO_TCP); }
 
 
 private:
 private:
-    asio::ip::tcp::socket& socket_;
+    // Two variables to hold the socket - a socket and a pointer to it.  This
+    // handles the case where a socket is passed to the TCPSocket on
+    // construction, or where it is asked to manage its own socket.
+    asio::ip::tcp::socket*      socket_ptr_;    ///< Pointer to own socket
+    asio::ip::tcp::socket&      socket_;        ///< Socket
+    bool                        isopen_;        ///< true when socket is open
+
+    // TODO: Remove temporary buffer
+    // The current implementation copies the buffer passed to asyncSend() into
+    // a temporary buffer and precedes it with a two-byte count field.  As
+    // ASIO should really be just about sending and receiving data, the TCP
+    // code should not do this.  If the protocol using this requires a two-byte
+    // count, it should add it before calling this code.  (This may be best
+    // achieved by altering isc::dns::buffer to have pairs of methods:
+    // getLength()/getTCPLength(), getData()/getTCPData(), with the getTCPXxx()
+    // methods taking into account a two-byte count field.)
+    //
+    // The option of sending the data in two operations, the count followed by
+    // the data was discounted as that would lead to two callbacks which would
+    // cause problems with the stackless coroutine code.
+    isc::dns::OutputBufferPtr   send_buffer_;   ///< Send buffer
 };
 };
 
 
+// Constructor - caller manages socket
+
+template <typename C>
+TCPSocket<C>::TCPSocket(asio::ip::tcp::socket& socket) :
+    socket_ptr_(NULL), socket_(socket), isopen_(true), send_buffer_()
+{
+}
+
+// Constructor - create socket on the fly
+
+template <typename C>
+TCPSocket<C>::TCPSocket(IOService& service) :
+    socket_ptr_(new asio::ip::tcp::socket(service.get_io_service())),
+    socket_(*socket_ptr_), isopen_(false)
+{
+}
+
+// Destructor.  Only delete the socket if we are managing it.
+
+template <typename C>
+TCPSocket<C>::~TCPSocket()
+{
+    delete socket_ptr_;
+}
+
+// Open the socket.
+
+template <typename C> void
+TCPSocket<C>::open(const IOEndpoint* endpoint, C& callback) {
+
+    // Ignore opens on already-open socket.  Don't throw a failure because
+    // of uncertainties as to what precedes what when using asynchronous I/O.
+    // It also allows us to treat a passed-in socket as a self-managed socket.
+    if (!isopen_) {
+        if (endpoint->getFamily() == AF_INET) {
+            socket_.open(asio::ip::tcp::v4());
+        }
+        else {
+            socket_.open(asio::ip::tcp::v6());
+        }
+        isopen_ = true;
+
+        // Set options on the socket:
+
+        // Reuse address - allow the socket to bind to a port even if the port
+        // is in the TIMED_WAIT state.
+        socket_.set_option(asio::socket_base::reuse_address(true));
+    }
+
+    // Upconvert to a TCPEndpoint.  We need to do this because although
+    // IOEndpoint is the base class of UDPEndpoint and TCPEndpoint, it does not
+    // contain a method for getting at the underlying endpoint type - that is in
+    // the derived class and the two classes differ on return type.
+    assert(endpoint->getProtocol() == IPPROTO_TCP);
+    const TCPEndpoint* tcp_endpoint =
+        static_cast<const TCPEndpoint*>(endpoint);
+
+    // Connect to the remote endpoint.  On success, the handler will be
+    // called (with one argument - the length argument will default to
+    // zero).
+    socket_.async_connect(tcp_endpoint->getASIOEndpoint(), callback);
+}
+
+// Send a message.  Should never do this if the socket is not open, so throw
+// an exception if this is the case.
+
+template <typename C> void
+TCPSocket<C>::asyncSend(const void* data, size_t length,
+    const IOEndpoint*, C& callback)
+{
+    if (isopen_) {
+
+        // Need to copy the data into a temporary buffer and precede it with
+        // a two-byte count field.
+        // TODO: arrange for the buffer passed to be preceded by the count
+        try {
+            // Ensure it fits into 16 bits
+            uint16_t count = boost::numeric_cast<uint16_t>(length);
+
+            // Copy data into a buffer preceded by the count field.
+            send_buffer_.reset(new isc::dns::OutputBuffer(length + 2));
+            send_buffer_->writeUint16(count);
+            send_buffer_->writeData(data, length);
+
+            // ... and send it
+            socket_.async_send(asio::buffer(send_buffer_->getData(),
+                               send_buffer_->getLength()), callback);
+        } catch (boost::numeric::bad_numeric_cast& e) {
+            isc_throw(BufferTooLarge,
+                      "attempt to send buffer larger than 64kB");
+        }
+
+    } else {
+        isc_throw(SocketNotOpen,
+            "attempt to send on a TCP socket that is not open");
+    }
+}
+
+// Receive a message. Note that the "offset" argument is used as an index
+// into the buffer in order to decide where to put the data.  It is up to the
+// caller to initialize the data to zero
+template <typename C> void
+TCPSocket<C>::asyncReceive(void* data, size_t length, size_t offset,
+    IOEndpoint* endpoint, C& callback)
+{
+    if (isopen_) {
+        // Upconvert to a TCPEndpoint.  We need to do this because although
+        // IOEndpoint is the base class of UDPEndpoint and TCPEndpoint, it
+        // does not contain a method for getting at the underlying endpoint
+        // type - that is in the derived class and the two classes differ on
+        // return type.
+        assert(endpoint->getProtocol() == IPPROTO_TCP);
+        TCPEndpoint* tcp_endpoint = static_cast<TCPEndpoint*>(endpoint);
+
+        // Write the endpoint details from the communications link.  Ideally
+        // we should make IOEndpoint assignable, but this runs in to all sorts
+        // of problems concerning the management of the underlying Boost
+        // endpoint (e.g. if it is not self-managed, is the copied one
+        // self-managed?) The most pragmatic solution is to let Boost take care
+        // of everything and copy details of the underlying endpoint.
+        tcp_endpoint->getASIOEndpoint() = socket_.remote_endpoint();
+
+        // Ensure we can write into the buffer and if so, set the pointer to
+        // where the data will be written.
+        if (offset >= length) {
+            isc_throw(BufferOverflow, "attempt to read into area beyond end of "
+                                      "TCP receive buffer");
+        }
+        void* buffer_start = static_cast<void*>(static_cast<uint8_t*>(data) + offset);
+
+        // ... and kick off the read.
+        socket_.async_receive(asio::buffer(buffer_start, length - offset), callback);
+
+    } else {
+        isc_throw(SocketNotOpen,
+            "attempt to receive from a TCP socket that is not open");
+    }
+}
+
+// Is the receive complete?
+
+template <typename C> bool
+TCPSocket<C>::processReceivedData(const void* staging, size_t length,
+                                  size_t& cumulative, size_t& offset,
+                                  size_t& expected,
+                                  isc::dns::OutputBufferPtr& outbuff)
+{
+    // Point to the data in the staging buffer and note how much there is.
+    const uint8_t* data = static_cast<const uint8_t*>(staging);
+    size_t data_length = length;
+
+    // Is the number is "expected" valid?  It won't be unless we have received
+    // at least two bytes of data in total for this set of receives.
+    if (cumulative < 2) {
+
+        // "expected" is not valid.  Did this read give us enough data to
+        // work it out?
+        cumulative += length;
+        if (cumulative < 2) {
+
+            // Nope, still not valid.  This must have been the first packet and
+            // was only one byte long.  Tell the fetch code to read the next
+            // packet into the staging buffer beyond the data that is already
+            // there so that the next time we are called we have a complete
+            // TCP count.
+            offset = cumulative;
+            return (false);
+        }
+
+        // Have enough data to interpret the packet count, so do so now.
+        expected = readUint16(data);
+
+        // We have two bytes less of data to process.  Point to the start of the
+        // data and adjust the packet size.  Note that at this point,
+        // "cumulative" is the true amount of data in the staging buffer, not
+        // "length".
+        data += 2;
+        data_length = cumulative - 2;
+    } else {
+
+        // Update total amount of data received.
+        cumulative += length;
+    }
+
+    // Regardless of anything else, the next read goes into the start of the
+    // staging buffer.
+    offset = 0;
+
+    // Work out how much data we still have to put in the output buffer. (This
+    // could be zero if we have just interpreted the TCP count and that was
+    // set to zero.)
+    if (expected >= outbuff->getLength()) {
+
+        // Still need data in the output packet.  Copy what we can from the
+        // staging buffer to the output buffer.
+        size_t copy_amount = std::min(expected - outbuff->getLength(), data_length);
+        outbuff->writeData(data, copy_amount);
+    }
+
+    // We can now say if we have all the data.
+    return (expected == outbuff->getLength());
+}
+
+// Cancel I/O on the socket.  No-op if the socket is not open.
+
+template <typename C> void
+TCPSocket<C>::cancel() {
+    if (isopen_) {
+        socket_.cancel();
+    }
+}
+
+// Close the socket down.  Can only do this if the socket is open and we are
+// managing it ourself.
+
+template <typename C> void
+TCPSocket<C>::close() {
+    if (isopen_ && socket_ptr_) {
+        socket_.close();
+        isopen_ = false;
+    }
+}
+
+} // namespace asiolink
 
-}      // namespace asiolink
 #endif // __TCP_SOCKET_H
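A worked example of the processReceivedData() logic above (a sketch, not part of the commit; DummyIOCallback from dummy_io_cb.h is assumed to be usable as the callback template parameter, as in tcp_server.cc):

    #include <stdint.h>

    #include <asiolink/dummy_io_cb.h>
    #include <asiolink/io_service.h>
    #include <asiolink/tcp_socket.h>
    #include <dns/buffer.h>

    void exampleProcessReceivedData() {
        asiolink::IOService service;
        asiolink::TCPSocket<asiolink::DummyIOCallback> socket(service);

        // One complete TCP DNS message in the staging buffer: a two-byte
        // count (0x0004) followed by four bytes of payload.
        const uint8_t staging[] = { 0x00, 0x04, 'd', 'a', 't', 'a' };

        size_t cumulative = 0;      // nothing processed before this call
        size_t offset = 0;
        size_t expected = 0;
        isc::dns::OutputBufferPtr outbuff(new isc::dns::OutputBuffer(16));

        // The two-byte count is stripped, the four payload bytes are appended
        // to outbuff, and the call reports that the message is now complete.
        const bool complete = socket.processReceivedData(staging, sizeof(staging),
                                                         cumulative, offset,
                                                         expected, outbuff);
        // complete == true, expected == 4, outbuff->getLength() == 4, offset == 0
    }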

+ 20 - 11
src/lib/asiolink/tests/Makefile.am

@@ -15,24 +15,33 @@ CLEANFILES = *.gcno *.gcda
 TESTS =
 if HAVE_GTEST
 TESTS += run_unittests
-run_unittests_SOURCES = $(top_srcdir)/src/lib/dns/tests/unittest_util.h
+run_unittests_SOURCES  = run_unittests.cc
+run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.h
 run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
-run_unittests_SOURCES += udp_query_unittest.cc
+run_unittests_SOURCES += asiolink_utilities_unittest.cc
-run_unittests_SOURCES += ioaddress_unittest.cc
+run_unittests_SOURCES += io_address_unittest.cc
-run_unittests_SOURCES += ioendpoint_unittest.cc
+run_unittests_SOURCES += io_endpoint_unittest.cc
-run_unittests_SOURCES += iosocket_unittest.cc
+run_unittests_SOURCES += io_fetch_unittest.cc
+run_unittests_SOURCES += io_socket_unittest.cc
 run_unittests_SOURCES += io_service_unittest.cc
 run_unittests_SOURCES += interval_timer_unittest.cc
-run_unittests_SOURCES += recursive_query_unittest.cc
+run_unittests_SOURCES += tcp_endpoint_unittest.cc
-run_unittests_SOURCES += run_unittests.cc
+run_unittests_SOURCES += tcp_socket_unittest.cc
+run_unittests_SOURCES += udp_endpoint_unittest.cc
+run_unittests_SOURCES += udp_socket_unittest.cc
+run_unittests_SOURCES += qid_gen_unittest.cc
+
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS) $(LOG4CXX_LDFLAGS)
+
-run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDADD  = $(GTEST_LDADD)
 run_unittests_LDADD += $(SQLITE_LIBS)
 run_unittests_LDADD += $(SQLITE_LIBS)
-run_unittests_LDADD +=  $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
-run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
 run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
 run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
 run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+
 # Note: the ordering matters: -Wno-... must follow -Wextra (defined in
 # Note: the ordering matters: -Wno-... must follow -Wextra (defined in
 # B10_CXXFLAGS)
 # B10_CXXFLAGS)
 run_unittests_CXXFLAGS = $(AM_CXXFLAGS)
 run_unittests_CXXFLAGS = $(AM_CXXFLAGS)

+ 74 - 0
src/lib/asiolink/tests/asiolink_utilities_unittest.cc

@@ -0,0 +1,74 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+/// \brief Test of asiolink utilities
+///
+/// Tests the functionality of the asiolink utilities code by comparing it
+/// with the equivalent methods in isc::dns::[Input/Output]Buffer.
+
+#include <cstddef>
+
+#include <gtest/gtest.h>
+
+#include <dns/buffer.h>
+#include <asiolink/asiolink_utilities.h>
+
+using namespace asiolink;
+using namespace isc::dns;
+
+TEST(asioutil, readUint16) {
+
+    // Reference buffer
+    uint8_t data[2];
+    isc::dns::InputBuffer buffer(data, sizeof(data));
+
+    // Avoid possible compiler warnings by only setting uint8_t variables to
+    // uint8_t values.
+    uint8_t i8 = 0;
+    uint8_t j8 = 0;
+    for (int i = 0; i < (2 << 8); ++i, ++i8) {
+        for (int j = 0; j < (2 << 8); ++j, ++j8) {
+            data[0] = i8;
+            data[1] = j8;
+            buffer.setPosition(0);
+            EXPECT_EQ(buffer.readUint16(), readUint16(data));
+        }
+    }
+}
+
+
+TEST(asioutil, writeUint16) {
+
+    // Reference buffer
+    isc::dns::OutputBuffer buffer(2);
+    uint8_t test[2];
+
+    // Avoid possible compiler warnings by only setting uint16_t variables to
+    // uint16_t values.
+    uint16_t i16 = 0;
+    for (uint32_t i = 0; i < (2 << 16); ++i, ++i16) {
+
+        // Write the reference data
+        buffer.clear();
+        buffer.writeUint16(i16);
+
+        // ... and the test data
+        writeUint16(i16, test);
+
+        // ... and compare
+        const uint8_t* ref = static_cast<const uint8_t*>(buffer.getData());
+        EXPECT_EQ(ref[0], test[0]);
+        EXPECT_EQ(ref[1], test[1]);
+    }
+}
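The two tests above check the asiolink 16-bit read/write helpers against the isc::dns buffer classes. For orientation, here is a plausible shape for those helpers, a sketch only: the real asiolink_utilities.h may differ in details (for example the writeUint16 return value), but the behaviour being verified is a big-endian read and write on a raw two-byte buffer.

#include <cassert>
#include <cstdint>

// Read a 16-bit value held in network (big-endian) byte order.
inline uint16_t readUint16(const void* buffer) {
    const uint8_t* byte = static_cast<const uint8_t*>(buffer);
    return ((static_cast<uint16_t>(byte[0]) << 8) | byte[1]);
}

// Write a 16-bit value into the buffer in network byte order; return a
// pointer to the byte following the data just written.
inline uint8_t* writeUint16(uint16_t value, void* buffer) {
    uint8_t* byte = static_cast<uint8_t*>(buffer);
    byte[0] = static_cast<uint8_t>((value >> 8) & 0xff);
    byte[1] = static_cast<uint8_t>(value & 0xff);
    return (byte + 2);
}

int main() {
    uint8_t data[2];
    writeUint16(0x1234, data);
    assert(data[0] == 0x12 && data[1] == 0x34);
    assert(readUint16(data) == 0x1234);
    return (0);
}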

+ 6 - 2
src/lib/asiolink/tests/interval_timer_unittest.cc

@@ -15,6 +15,7 @@
 #include <config.h>
 #include <gtest/gtest.h>
 
+#include <asio.hpp>
 #include <asiolink/asiolink.h>
 
 #include <boost/date_time/posix_time/posix_time_types.hpp>
@@ -32,7 +33,9 @@ using namespace asiolink;
 // or not.
 class IntervalTimerTest : public ::testing::Test {
 protected:
-    IntervalTimerTest() : io_service_() {}
+    IntervalTimerTest() :
+        io_service_(), timer_called_(false), timer_cancel_success_(false)
+    {}
     ~IntervalTimerTest() {}
     class TimerCallBack : public std::unary_function<void, void> {
     public:
@@ -63,7 +66,8 @@ protected:
         TimerCallBackCancelDeleter(IntervalTimerTest* test_obj,
                                    IntervalTimer* timer,
                                    TimerCallBackCounter& counter)
-            : test_obj_(test_obj), timer_(timer), counter_(counter), count_(0)
+            : test_obj_(test_obj), timer_(timer), counter_(counter), count_(0),
+              prev_counter_(-1)
         {}
         void operator()() {
             ++count_;

+ 7 - 1
src/lib/asiolink/tests/ioaddress_unittest.cc

@@ -15,7 +15,8 @@
 #include <config.h>
 #include <gtest/gtest.h>
 
-#include <asiolink/asiolink.h>
+#include <asiolink/io_error.h>
+#include <asiolink/io_address.h>
 
 using namespace asiolink;
 
@@ -55,3 +56,8 @@ TEST(IOAddressTest, Equality) {
     EXPECT_TRUE(IOAddress("2001:db8::1234") != IOAddress("192.0.2.3"));
     EXPECT_FALSE(IOAddress("2001:db8::1234") == IOAddress("192.0.2.3"));
 }
+
+TEST(IOAddressTest, Family) {
+    EXPECT_EQ(AF_INET, IOAddress("192.0.2.1").getFamily());
+    EXPECT_EQ(AF_INET6, IOAddress("2001:0DB8:0:0::0012").getFamily());
+}
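The new Family test relies on getFamily() returning the usual AF_INET/AF_INET6 constants. A short usage sketch follows; the hostPrefixLength helper is hypothetical, toText() is assumed to be the existing IOAddress string accessor, and linking against libasiolink is assumed.

#include <iostream>
#include <sys/socket.h>             // for AF_INET / AF_INET6

#include <asiolink/io_address.h>

using asiolink::IOAddress;

// Returns a prefix length suitable for a host route: /32 for IPv4,
// /128 for IPv6, chosen by the address family of the argument.
int hostPrefixLength(const IOAddress& address) {
    return ((address.getFamily() == AF_INET) ? 32 : 128);
}

int main() {
    const IOAddress v4("192.0.2.1");
    const IOAddress v6("2001:db8::1234");
    std::cout << v4.toText() << "/" << hostPrefixLength(v4) << std::endl;  // 192.0.2.1/32
    std::cout << v6.toText() << "/" << hostPrefixLength(v6) << std::endl;  // 2001:db8::1234/128
    return (0);
}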

+ 0 - 0
src/lib/asiolink/tests/ioendpoint_unittest.cc


Some files were not shown because too many files changed in this diff