
Merge branch 'master' into trac1239

Conflicts:
	ChangeLog
	src/bin/dhcp4/tests/Makefile.am
	src/bin/dhcp6/tests/Makefile.am
	src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
	src/lib/dhcp/libdhcp++.cc
	src/lib/dhcp/tests/pkt4_unittest.cc
Tomek Mrugalski, 13 years ago
Parent commit: 3e3ba93026
100 files changed, 6272 additions and 1767 deletions
  1. ChangeLog  +161 -16
  2. configure.ac  +88 -28
  3. doc/guide/bind10-guide.html  +193 -48
  4. doc/guide/bind10-guide.txt  +182 -23
  5. doc/guide/bind10-guide.xml  +214 -34
  6. doc/guide/bind10-messages.html  +388 -94
  7. doc/guide/bind10-messages.xml  +458 -91
  8. src/bin/Makefile.am  +2 -2
  9. src/bin/auth/Makefile.am  +1 -0
  10. src/bin/auth/auth_config.cc  +7 -7
  11. src/bin/auth/auth_srv.cc  +15 -14
  12. src/bin/auth/auth_srv.h  +1 -1
  13. src/bin/auth/benchmarks/Makefile.am  +2 -1
  14. src/bin/auth/command.cc  +2 -3
  15. src/bin/auth/query.cc  +33 -3
  16. src/bin/auth/query.h  +12 -0
  17. src/bin/auth/statistics.cc  +39 -22
  18. src/bin/auth/statistics.h  +14 -8
  19. src/bin/auth/tests/Makefile.am  +1 -0
  20. src/bin/auth/tests/auth_srv_unittest.cc  +12 -13
  21. src/bin/auth/tests/query_unittest.cc  +115 -11
  22. src/bin/auth/tests/statistics_unittest.cc  +19 -20
  23. src/bin/bind10/bind10.8  +201 -19
  24. src/bin/bind10/bind10.xml  +199 -1
  25. src/bin/bind10/bind10_messages.mes  +11 -0
  26. src/bin/bind10/bind10_src.py.in  +255 -98
  27. src/bin/bind10/bob.spec  +2 -2
  28. src/bin/bind10/run_bind10.sh.in  +1 -1
  29. src/bin/bind10/tests/bind10_test.py.in  +472 -6
  30. src/bin/cfgmgr/plugins/logging.spec  +1 -1
  31. src/bin/ddns/Makefile.am  +42 -0
  32. src/bin/ddns/b10-ddns.8  +97 -0
  33. src/bin/ddns/b10-ddns.xml  +161 -0
  34. src/bin/ddns/ddns.py.in  +209 -0
  35. src/bin/ddns/ddns.spec  +42 -0
  36. src/bin/ddns/ddns_messages.mes  +66 -0
  37. src/bin/ddns/tests/Makefile.am  +28 -0
  38. src/bin/ddns/tests/ddns_test.py  +142 -0
  39. src/bin/dhcp4/Makefile.am  +1 -2
  40. src/bin/dhcp4/dhcp4_srv.cc  +7 -7
  41. src/bin/dhcp4/dhcp4_srv.h  +13 -9
  42. src/bin/dhcp4/tests/Makefile.am  +3 -2
  43. src/bin/dhcp4/tests/dhcp4_srv_unittest.cc  +2 -9
  44. src/bin/dhcp4/tests/dhcp4_unittests.cc  +1 -1
  45. src/bin/dhcp6/Makefile.am  +1 -1
  46. src/bin/dhcp6/tests/Makefile.am  +1 -1
  47. src/bin/resolver/resolver.cc  +14 -10
  48. src/bin/resolver/tests/Makefile.am  +1 -1
  49. src/bin/resolver/tests/response_scrubber_unittest.cc  +1 -3
  50. src/bin/xfrin/tests/Makefile.am  +1 -0
  51. src/bin/xfrin/tests/xfrin_test.py  +280 -52
  52. src/bin/xfrin/xfrin.py.in  +228 -109
  53. src/bin/xfrin/xfrin_messages.mes  +62 -7
  54. src/bin/xfrout/b10-xfrout.8  +13 -0
  55. src/bin/xfrout/b10-xfrout.xml  +11 -11
  56. src/bin/xfrout/tests/xfrout_test.py.in  +170 -14
  57. src/bin/xfrout/xfrout.py.in  +44 -14
  58. src/bin/zonemgr/b10-zonemgr.xml  +10 -16
  59. src/bin/zonemgr/tests/Makefile.am  +1 -0
  60. src/bin/zonemgr/tests/zonemgr_test.py  +82 -54
  61. src/bin/zonemgr/zonemgr.py.in  +42 -9
  62. src/lib/Makefile.am  +1 -1
  63. src/lib/acl/dns.cc  +9 -9
  64. src/lib/acl/tests/acl_test.cc  +2 -3
  65. src/lib/acl/tests/ip_check_unittest.cc  +4 -4
  66. src/lib/acl/tests/loader_test.cc  +26 -25
  67. src/lib/acl/tests/logic_check_test.cc  +11 -10
  68. src/lib/asiodns/dns_lookup.h  +3 -1
  69. src/lib/asiodns/dns_server.h  +3 -1
  70. src/lib/asiodns/dns_service.cc  +2 -2
  71. src/lib/asiodns/io_fetch.cc  +11 -11
  72. src/lib/asiodns/io_fetch.h  +7 -3
  73. src/lib/asiodns/tcp_server.cc  +1 -1
  74. src/lib/asiodns/tests/dns_server_unittest.cc  +1 -1
  75. src/lib/asiodns/tests/io_fetch_unittest.cc  +10 -5
  76. src/lib/asiodns/udp_server.cc  +1 -1
  77. src/lib/asiodns/udp_server.h  +1 -1
  78. src/lib/asiolink/Makefile.am  +3 -0
  79. src/lib/asiolink/io_address.cc  +1 -1
  80. src/lib/asiolink/io_service.cc  +2 -2
  81. src/lib/asiolink/simple_callback.h  +3 -1
  82. src/lib/asiolink/tcp_socket.h  +1 -1
  83. src/lib/asiolink/tests/io_endpoint_unittest.cc  +1 -2
  84. src/lib/bench/benchmark.h  +1 -1
  85. src/lib/bench/benchmark_util.cc  +1 -1
  86. src/lib/cache/resolver_cache.cc  +6 -3
  87. src/lib/cc/data.cc  +9 -5
  88. src/lib/cc/session.cc  +1 -1
  89. src/lib/cc/tests/data_unittests.cc  +11 -0
  90. src/lib/cryptolink/Makefile.am  +2 -1
  91. src/lib/cryptolink/tests/Makefile.am  +2 -2
  92. src/lib/cryptolink/tests/crypto_unittests.cc  +2 -1
  93. src/lib/datasrc/Makefile.am  +11 -2
  94. src/lib/datasrc/database.cc  +427 -259
  95. src/lib/datasrc/database.h  +639 -483
  96. src/lib/datasrc/datasrc_config.h.pre.in  +31 -0
  97. src/lib/datasrc/datasrc_messages.mes  +87 -20
  98. src/lib/datasrc/factory.cc  +51 -2
  99. src/lib/datasrc/factory.h  +10 -1
  100. src/lib/datasrc/rbtree.h  +0 -0

+ 161 - 16
ChangeLog

@@ -6,6 +6,147 @@
 	same link.
 	(Trac #1239, #1240, git TBD)
 
+349.	[bug]		dvv
+	resolver: If an upstream server responds with FORMERR to an EDNS query,
+	try querying it without EDNS.
+	(Trac #1386, git 99ad0292af284a246fff20b3702fbd7902c45418)
+
+348.	[bug]		stephen
+	By default the logging output stream is now flushed after each write.
+	This fixes a problem seen on some systems where the log output from
+	different processes was jumbled up.  Flushing can be disabled by setting
+	the appropriate option in the logging configuration.
+	(Trac #1405, git 2f0aa20b44604b671e6bde78815db39381e563bf)
+
+347.	[bug]		jelte
+	Fixed a bug where adding Zonemgr/secondary_zones without explicitly
+	setting the class value of the added zone resulted in a cryptic
+	error in bindctl ("Error: class"). It will now correctly default to
+	IN if not set. This also adds better checks on the name and class
+	values, and better errors if they are bad.
+	(Trac #1414, git 7b122af8489acf0f28f935a19eca2c5509a3677f)
+
+346.	[build]*		jreed
+	Renamed libdhcp to libdhcp++.
+	(Trac #1446, git d394e64f4c44f16027b1e62b4ac34e054b49221d)
+
+345.	[func]		tomek
+	dhcp4: Dummy DHCPv4 component implemented. Currently it does
+	nothing useful, except providing skeleton implementation that can
+	be expanded in the future.
+	(Trac #992, git d6e33479365c8f8f62ef2b9aa5548efe6b194601)
+
+344.	[func]		y-aharen
+	src/lib/statistics: Added statistics counter library for entire server
+	items and per zone items. Also, modified b10-auth to use it. It is
+	also intended to use in the other modules such as b10-resolver.
+	(Trac #510, git afddaf4c5718c2a0cc31f2eee79c4e0cc625499f)
+
+343.	[func]		jelte
+	Added IXFR-out system tests, based on the first two test sets of
+	http://bind10.isc.org/wiki/IxfrSystemTests.
+	(Trac #1314, git 1655bed624866a766311a01214597db01b4c7cec)
+
+342.	[bug]		stephen
+	In the resolver, a FORMERR received from an upstream nameserver
+	now results in a SERVFAIL being returned as a response to the original
+	query.  Additional debug messages added to distinguish between
+	different errors in packets received from upstream nameservers.
+	(Trac #1383, git 9b2b249d23576c999a65d8c338e008cabe45f0c9)
+
+341.	[func]		tomek
+	libdhcp++: Support for handling both IPv4 and IPv6 added.
+	Also added support for binding IPv4 sockets.
+	(Trac #1238, git 86a4ce45115dab4d3978c36dd2dbe07edcac02ac)
+
+340.	[build]		jelte
+	Fixed several linker issues related to recent gcc versions, botan
+	and gtest.
+	(Trac #1442, git 91fb141bfb3aadfdf96f13e157a26636f6e9f9e3)
+
+339.	[bug]		jinmei
+	libxfr, used by b10-auth to share TCP sockets with b10-xfrout,
+	incorrectly propagated ASIO specific exceptions to the application
+	if the given file name was too long.  This could lead to
+	unexpected shut down of b10-auth.
+	(Trac #1387, git a5e9d9176e9c60ef20c0f5ef59eeb6838ed47ab2)
+
+338.	[bug]		jinmei
+	b10-xfrin didn't check SOA serials of SOA and IXFR responses,
+	which resulted in unnecessary transfer or unexpected IXFR
+	timeouts (these issues were not overlooked but deferred to be
+	fixed until #1278 was completed).  Validation on responses to SOA
+	queries were tightened, too.
+	(Trac #1299, git 6ff03bb9d631023175df99248e8cc0cda586c30a)
+
+337.	[func]		tomek
+	libdhcp++: Support for DHCPv4 option that can store a single
+	address or a list of IPv4 addresses added. Support for END option
+	added.
+	(Trac #1350, git cc20ff993da1ddb1c6e8a98370438b45a2be9e0a)
+
+336.	[func]		jelte
+	libdns++ (and its python wrapper) now includes a class Serial, for 
+	SOA SERIAL comparison and addition. Operations on instances of this 
+	class follow the specification from RFC 1982. 
+	Rdata::SOA::getSerial() now returns values of this type (and not 
+	uint32_t).
+	(Trac #1278, git 2ae72d76c74f61a67590722c73ebbf631388acbd)
+
+335.	[bug]*		jelte
+	The DataSourceClientContainer class that dynamically loads 
+	datasource backend libraries no longer provides just a .so file name 
+	to its call to dlopen(), but passes it an absolute path. This means 
+	that it is no longer an system implementation detail that depends on 
+	that it is no longer a system implementation detail that depends on 
+	options (for instance, when test-running a new build while a 
+	different version is installed).
+	These loadable libraries are also no longer installed in the default 
+	library path, but in a subdirectory of the libexec directory of the 
+	target ($prefix/libexec/[version]/backends).
+	This also removes the need to handle b10-xfin and b10-xfrout as 
+	'special' hardcoded components, and they are now started as regular 
+	components as dictated by the configuration of the boss process.
+	(Trac #1292, git 83ce13c2d85068a1bec015361e4ef8c35590a5d0)
+
+334.	[bug]		jinmei
+	b10-xfrout could potentially create an overflow response message
+	(exceeding the 64KB max) or could create unnecessarily small
+	messages.  The former was actually unlikely to happen due to the
+	effect of name compression, and the latter was marginal and at least
+	shouldn't cause an interoperability problem, but these were still
+	potential problems and were fixed.
+	(Trac #1389, git 3fdce88046bdad392bd89ea656ec4ac3c858ca2f)
+
+333.	[bug]		dvv
+	Solaris needs "-z now" to force non-lazy binding and prevent
+	g++ static initialization code from deadlocking.
+	(Trac #1439, git c789138250b33b6b08262425a08a2a0469d90433)
+
+332.	[bug]		vorner
+	C++ exceptions in the isc.dns.Rdata wrapper are now converted
+	to python ones instead of just aborting the interpreter.
+	(Trac #1407, git 5b64e839be2906b8950f5b1e42a3fadd72fca033)
+
+bind10-devel-20111128 released on November 28, 2011
+
+331.	[bug]		shane
+	Fixed a bug in data source library where a zone with more labels
+	than an out-of-bailiwick name server would cause an exception to
+	be raised.
+	(Trac #1430, git 81f62344db074bc5eea3aaf3682122fdec6451ad)
+
+330.	[bug]		jelte
+	Fixed a bug in b10-auth where it would sometimes fail because it
+	tried to check for queued msgq messages before the session was
+	fully running.
+	(git c35d0dde3e835fc5f0a78fcfcc8b76c74bc727ca)
+
+329.	[doc]		vorner, jreed
+	Document the bind10 run control configuration in guide and
+	manual page.
+	(Trac #1341, git c1171699a2b501321ab54207ad26e5da2b092d63)
+
 328.	[func]		jelte
 	b10-auth now passes IXFR requests on to b10-xfrout, and no longer
 	responds to them with NOTIMPL.
@@ -17,7 +158,7 @@
 	always respond to IXFR requests according to RFC1995).
 	(Trac #1371 and #1372, git 80c131f5b0763753d199b0fb9b51f10990bcd92b)
 
-326.	[build]*	jinmei
+326.	[build]*		jinmei
 	Added a check script for the SQLite3 schema version.  It will be
 	run at the beginning of 'make install', and if it detects an old
 	version of schema, installation will stop.  You'll then need to
@@ -71,29 +212,33 @@
 
 319.	[func]		naokikambe
 	b10-stats-httpd was updated. In addition of the access to all
-	statistics items of all modules, the specified item or the items of the
-	specified module name can be accessed. For example, the URI requested
-	by using the feature is showed as "/bind10/statistics/xml/Auth" or
+	statistics items of all modules, the specified item or the items
+	of the specified module name can be accessed.  For example, the
+	URI requested by using the feature is showed as
+	"/bind10/statistics/xml/Auth" or
 	"/bind10/statistics/xml/Auth/queries.tcp". The list of all possible
-	module names and all possible item names can be showed in the root
-	document, whose URI is "/bind10/statistics/xml". This change is not
-	only for the XML documents but also is for the XSD and XSL documents.
+	module names and all possible item names can be showed in the
+	root document, whose URI is "/bind10/statistics/xml".  This change
+	is not only for the XML documents but also is for the XSD and
+	XSL documents.
 	(Trac #917, git b34bf286c064d44746ec0b79e38a6177d01e6956)
 
-318.    [func]		stephen
-	Add C++ API for accessing zone difference information in database-based
-	data sources.
+318.	[func]		stephen
+	Add C++ API for accessing zone difference information in
+	database-based data sources.
 	(Trac #1330, git 78770f52c7f1e7268d99e8bfa8c61e889813bb33)
 
-317.    [func]		vorner
-	datasrc: the getUpdater method of DataSourceClient supports an optional
-	'journaling' parameter to indicate the generated updater to store diffs.
-	The database based derived class implements this extension.
+317.	[func]		vorner
+	datasrc: the getUpdater method of DataSourceClient supports an
+	optional 'journaling' parameter to indicate the generated updater
+	to store diffs.  The database based derived class implements this
+	extension.
 	(Trac #1331, git 713160c9bed3d991a00b2ea5e7e3e7714d79625d)
 
 316.	[func]*		vorner
-	The configuration of what parts of the system run is more flexible now.
-	Everything that should run must have an entry in Boss/components.
+	The configuration of what parts of the system run is more
+	flexible now.  Everything that should run must have an
+	entry in Boss/components.
 	(Trac #213, git 08e1873a3593b4fa06754654d22d99771aa388a6)
 
 315.	[func]		tomek
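
ChangeLog entry 336 above refers to SOA SERIAL comparison and addition as specified by RFC 1982. As a rough, hypothetical illustration of that arithmetic (this is not the libdns++ Serial class or its API; serial_gt and serial_add are made-up names), a 32-bit serial compares and wraps like this:

    # Sketch of RFC 1982 serial-number arithmetic with SERIAL_BITS = 32.
    # Not BIND 10 code; function names are hypothetical.

    def serial_gt(s1, s2, bits=32):
        """True if serial s1 is 'greater than' s2 in the RFC 1982 sense."""
        half = 2 ** (bits - 1)
        return s1 != s2 and ((s1 < s2 and s2 - s1 > half) or
                             (s1 > s2 and s1 - s2 < half))

    def serial_add(s, n, bits=32):
        """Add n (0 <= n < 2^(bits-1)) to serial s, wrapping modulo 2^bits."""
        return (s + n) % (2 ** bits)

    # The serial after 0xFFFFFFFF wraps to 0 yet still compares as newer,
    # which is why plain uint32_t comparison is not enough for SOA serials.
    assert serial_add(0xFFFFFFFF, 1) == 0
    assert serial_gt(0, 0xFFFFFFFF)
    assert not serial_gt(3, 5)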

+ 88 - 28
configure.ac

@@ -2,7 +2,7 @@
 # Process this file with autoconf to produce a configure script.
 
 AC_PREREQ([2.59])
-AC_INIT(bind10-devel, 20111021, bind10-dev@isc.org)
+AC_INIT(bind10-devel, 20111129, bind10-dev@isc.org)
 AC_CONFIG_SRCDIR(README)
 AM_INIT_AUTOMAKE
 AC_CONFIG_HEADERS([config.h])
@@ -96,6 +96,8 @@ case "$host" in
 	# Solaris requires special definitions to get some standard libraries
 	# (e.g. getopt(3)) available with common used header files.
 	CPPFLAGS="$CPPFLAGS -D_XPG4_2 -D__EXTENSIONS__"
+	# "now" binding is necessary to prevent deadlocks in C++ static initialization code
+	LDFLAGS="$LDFLAGS -z now"
 	;;
 *-apple-darwin*)
 	# libtool doesn't work perfectly with Darwin: libtool embeds the
@@ -478,23 +480,33 @@ else
     fi
 fi
 
-BOTAN_LDFLAGS=`${BOTAN_CONFIG} --libs`
+BOTAN_LIBS=`${BOTAN_CONFIG} --libs`
 BOTAN_INCLUDES=`${BOTAN_CONFIG} --cflags`
 
 # We expect botan-config --libs to contain -L<path_to_libbotan>, but
 # this is not always the case.  As a heuristics workaround we add
-# -L`botan-config --prefix/lib` in this case.  Same for BOTAN_INCLUDES
-# (but using include instead of lib) below.
+# -L`botan-config --prefix/lib` in this case (if not present already).
+# Same for BOTAN_INCLUDES (but using include instead of lib) below.
 if [ $BOTAN_CONFIG --prefix >/dev/null 2>&1 ] ; then
-    echo ${BOTAN_LDFLAGS} | grep -- -L > /dev/null || \
-        BOTAN_LDFLAGS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LDFLAGS}"
+    echo ${BOTAN_LIBS} | grep -- -L > /dev/null || \
+        BOTAN_LIBS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LIBS}"
     echo ${BOTAN_INCLUDES} | grep -- -I > /dev/null || \
         BOTAN_INCLUDES="-I`${BOTAN_CONFIG} --prefix`/include ${BOTAN_INCLUDES}"
 fi
+
+# botan-config script (and the way we call pkg-config) returns -L and -l
+# as one string, but we need them in separate values
+BOTAN_LDFLAGS=
+BOTAN_NEWLIBS=
+for flag in ${BOTAN_LIBS}; do
+    BOTAN_LDFLAGS="${BOTAN_LDFLAGS} `echo $flag | sed -ne '/^\(\-L\)/p'`"
+    BOTAN_LIBS="${BOTAN_LIBS} `echo $flag | sed -ne '/^\(\-l\)/p'`"
+done
+
 # See python_rpath for some info on why we do this
 if test $rpath_available = yes; then
     BOTAN_RPATH=
-    for flag in ${BOTAN_LDFLAGS}; do
+    for flag in ${BOTAN_LIBS}; do
             BOTAN_RPATH="${BOTAN_RPATH} `echo $flag | sed -ne 's/^\(\-L\)/-R/p'`"
     done
 AC_SUBST(BOTAN_RPATH)
@@ -510,13 +522,13 @@ AC_SUBST(BOTAN_RPATH)
 fi
 
 AC_SUBST(BOTAN_LDFLAGS)
+AC_SUBST(BOTAN_LIBS)
 AC_SUBST(BOTAN_INCLUDES)
 
 CPPFLAGS_SAVED=$CPPFLAGS
 CPPFLAGS="$BOTAN_INCLUDES $CPPFLAGS"
-LDFLAGS_SAVED="$LDFLAGS"
-LDFLAGS="$BOTAN_LDFLAGS $LDFLAGS"
-
+LIBS_SAVED="$LIBS"
+LIBS="$LIBS $BOTAN_LIBS"
 AC_CHECK_HEADERS([botan/botan.h],,AC_MSG_ERROR([Missing required header files.]))
 AC_LINK_IFELSE(
         [AC_LANG_PROGRAM([#include <botan/botan.h>
@@ -531,7 +543,7 @@ AC_LINK_IFELSE(
          AC_MSG_ERROR([Needs Botan library 1.8 or higher])]
 )
 CPPFLAGS=$CPPFLAGS_SAVED
-LDFLAGS=$LDFLAGS_SAVED
+LIBS=$LIBS_SAVED
 
 # Check for log4cplus
 log4cplus_path="yes"
@@ -543,7 +555,7 @@ if test "${log4cplus_path}" = "no" ; then
     AC_MSG_ERROR([Need log4cplus])
 elif test "${log4cplus_path}" != "yes" ; then
   LOG4CPLUS_INCLUDES="-I${log4cplus_path}/include"
-  LOG4CPLUS_LDFLAGS="-L${log4cplus_path}/lib"
+  LOG4CPLUS_LIBS="-L${log4cplus_path}/lib"
 else
 # If not specified, try some common paths.
 	log4cplusdirs="/usr/local /usr/pkg /opt /opt/local"
@@ -551,21 +563,21 @@ else
 	do
 		if test -f $d/include/log4cplus/logger.h; then
 			LOG4CPLUS_INCLUDES="-I$d/include"
-			LOG4CPLUS_LDFLAGS="-L$d/lib"
+			LOG4CPLUS_LIBS="-L$d/lib"
 			break
 		fi
 	done
 fi
 
-LOG4CPLUS_LDFLAGS="$LOG4CPLUS_LDFLAGS -llog4cplus $MULTITHREADING_FLAG"
+LOG4CPLUS_LIBS="$LOG4CPLUS_LIBS -llog4cplus $MULTITHREADING_FLAG"
 
-AC_SUBST(LOG4CPLUS_LDFLAGS)
+AC_SUBST(LOG4CPLUS_LIBS)
 AC_SUBST(LOG4CPLUS_INCLUDES)
 
 CPPFLAGS_SAVED=$CPPFLAGS
 CPPFLAGS="$LOG4CPLUS_INCLUDES $CPPFLAGS"
-LDFLAGS_SAVED="$LDFLAGS"
-LDFLAGS="$LOG4CPLUS_LDFLAGS $LDFLAGS"
+LIBS_SAVED="$LIBS"
+LIBS="$LOG4CPLUS_LIBS $LIBS"
 
 AC_CHECK_HEADERS([log4cplus/logger.h],,AC_MSG_ERROR([Missing required header files.]))
 AC_LINK_IFELSE(
@@ -580,7 +592,7 @@ AC_LINK_IFELSE(
 )
 
 CPPFLAGS=$CPPFLAGS_SAVED
-LDFLAGS=$LDFLAGS_SAVED
+LIBS=$LIBS_SAVED
 
 #
 # Configure Boost header path
@@ -673,6 +685,13 @@ else
     AM_CONDITIONAL(NEED_LIBBOOST_THREAD, test "${use_boost_threads}" = "yes")
 fi
 
+# I can't get some of the #include <asio.hpp> right without this
+# TODO: find the real cause of asio/boost wanting pthreads
+# (this currently only occurs for src/lib/cc/session_unittests)
+PTHREAD_LDFLAGS=
+AC_CHECK_LIB(pthread, pthread_create,[ PTHREAD_LDFLAGS=-lpthread ], [])
+AC_SUBST(PTHREAD_LDFLAGS)
+AC_SUBST(MULTITHREADING_FLAG)
 
 #
 # Check availability of gtest, which will be used for unit tests.
@@ -709,6 +728,48 @@ then
 				GTEST_LDFLAGS="-L$dir/lib"
 				GTEST_LDADD="-lgtest"
 				GTEST_FOUND="true"
+				# There is no gtest-config script on this
+				# system, which is supposed to inform us
+				# whether we need pthreads as well (a
+				# gtest compile-time option). So we still
+				# need to test that manually.
+				CPPFLAGS_SAVED="$CPPFLAGS"
+				CPPFLAGS="$CPPFLAGS $GTEST_INCLUDES"
+				LDFLAGS_SAVED="$LDFLAGS"
+				LDFLAGS="$LDFLAGS $GTEST_LDFLAGS"
+				LIBS_SAVED=$LIBS
+				LIBS="$LIBS $GTEST_LDADD"
+				AC_MSG_CHECKING([Checking whether gtest tests need pthreads])
+				# First try to compile without pthreads
+				AC_TRY_LINK([
+					#include <gtest/gtest.h>
+					],[
+						int i = 0;
+						char* c = NULL;
+						::testing::InitGoogleTest(&i, &c);
+						return (0);
+					],
+					[ AC_MSG_RESULT(no) ],
+					[
+						LIBS="$SAVED_LIBS $GTEST_LDADD $PTHREAD_LDFLAGS"
+						# Now try to compile with pthreads
+						AC_TRY_LINK([
+							#include <gtest/gtest.h>
+							],[
+								int i = 0;
+								char* c = NULL;
+								::testing::InitGoogleTest(&i, &c);
+								return (0);
+							],
+							[ AC_MSG_RESULT(yes)
+							  GTEST_LDADD="$GTEST_LDADD $PTHREAD_LDFLAGS"
+							],
+							# Apparently we can't compile it at all
+							[ AC_MSG_ERROR(unable to compile with gtest) ])
+				])
+				CPPFLAGS=$CPPFLAGS_SAVED
+				LDFLAGS=$LDFLAGS_SAVED
+				LIBS=$LIBS_SAVED
 				break
 			fi
 		done
@@ -735,15 +796,6 @@ if test "x$HAVE_PKG_CONFIG" = "xno" ; then
 fi
 PKG_CHECK_MODULES(SQLITE, sqlite3 >= 3.3.9, enable_features="$enable_features SQLite3")
 
-# I can't get some of the #include <asio.hpp> right without this
-# TODO: find the real cause of asio/boost wanting pthreads
-# (this currently only occurs for src/lib/cc/session_unittests)
-PTHREAD_LDFLAGS=
-AC_CHECK_LIB(pthread, pthread_create,[ PTHREAD_LDFLAGS=-lpthread ], [])
-AC_SUBST(PTHREAD_LDFLAGS)
-
-AC_SUBST(MULTITHREADING_FLAG)
-
 #
 # ASIO: we extensively use it as the C++ event management module.
 #
@@ -838,6 +890,8 @@ AC_CONFIG_FILES([Makefile
                  src/bin/auth/Makefile
                  src/bin/auth/tests/Makefile
                  src/bin/auth/benchmarks/Makefile
+                 src/bin/ddns/Makefile
+                 src/bin/ddns/tests/Makefile
                  src/bin/dhcp6/Makefile
                  src/bin/dhcp6/tests/Makefile
 		 src/bin/dhcp4/Makefile
@@ -912,6 +966,7 @@ AC_CONFIG_FILES([Makefile
                  src/lib/datasrc/tests/Makefile
                  src/lib/datasrc/tests/testdata/Makefile
                  src/lib/xfr/Makefile
+                 src/lib/xfr/tests/Makefile
                  src/lib/log/Makefile
                  src/lib/log/compiler/Makefile
                  src/lib/log/tests/Makefile
@@ -933,6 +988,8 @@ AC_CONFIG_FILES([Makefile
                  src/lib/util/tests/Makefile
                  src/lib/acl/Makefile
                  src/lib/acl/tests/Makefile
+                 src/lib/statistics/Makefile
+                 src/lib/statistics/tests/Makefile
                  tests/Makefile
                  tests/system/Makefile
                  tests/tools/Makefile
@@ -947,6 +1004,7 @@ AC_OUTPUT([doc/version.ent
            src/bin/cmdctl/run_b10-cmdctl.sh
            src/bin/cmdctl/tests/cmdctl_test
            src/bin/cmdctl/cmdctl.spec.pre
+           src/bin/ddns/ddns.py
            src/bin/xfrin/tests/xfrin_test
            src/bin/xfrin/xfrin.py
            src/bin/xfrin/run_b10-xfrin.sh
@@ -993,6 +1051,7 @@ AC_OUTPUT([doc/version.ent
            src/lib/python/bind10_config.py
            src/lib/cc/session_config.h.pre
            src/lib/cc/tests/session_unittests_config.h
+           src/lib/datasrc/datasrc_config.h.pre
            src/lib/log/tests/console_test.sh
            src/lib/log/tests/destination_test.sh
            src/lib/log/tests/init_logger_test.sh
@@ -1087,8 +1146,9 @@ dnl includes too
   Boost:         ${BOOST_INCLUDES}
   Botan:         ${BOTAN_INCLUDES}
                  ${BOTAN_LDFLAGS}
+                 ${BOTAN_LIBS}
   Log4cplus:     ${LOG4CPLUS_INCLUDES}
-                 ${LOG4CPLUS_LDFLAGS}
+                 ${LOG4CPLUS_LIBS}
   SQLite:        $SQLITE_CFLAGS
                  $SQLITE_LIBS
 

File diff suppressed because it is too large
+ 193 - 48
doc/guide/bind10-guide.html


+ 182 - 23
doc/guide/bind10-guide.txt

@@ -2,7 +2,7 @@
 
 Administrator Reference for BIND 10
 
-   This is the reference guide for BIND 10 version 20110809.
+   This is the reference guide for BIND 10 version 20111021.
 
    Copyright (c) 2010-2011 Internet Systems Consortium, Inc.
 
@@ -12,7 +12,7 @@ Administrator Reference for BIND 10
    Consortium (ISC). It includes DNS libraries and modular components for
    controlling authoritative and recursive DNS servers.
 
-   This is the reference guide for BIND 10 version 20110809. The most
+   This is the reference guide for BIND 10 version 20111021. The most
    up-to-date version of this document (in PDF, HTML, and plain text
    formats), along with other documents for BIND 10, can be found at
    http://bind10.isc.org/docs.
@@ -55,6 +55,8 @@ Administrator Reference for BIND 10
 
                 Starting BIND 10
 
+                Configuration of started processes
+
    4. Command channel
 
    5. Configuration manager
@@ -105,6 +107,10 @@ Administrator Reference for BIND 10
 
                 Logging Message Format
 
+   List of Tables
+
+   3.1.
+
 Chapter 1. Introduction
 
    Table of Contents
@@ -124,7 +130,7 @@ Chapter 1. Introduction
 
   Note
 
-   This guide covers the experimental prototype of BIND 10 version 20110809.
+   This guide covers the experimental prototype of BIND 10 version 20111021.
 
   Note
 
@@ -427,24 +433,28 @@ Chapter 3. Starting BIND10 with bind10
 
    Starting BIND 10
 
+   Configuration of started processes
+
    BIND 10 provides the bind10 command which starts up the required
-   processes. bind10 will also restart processes that exit unexpectedly. This
-   is the only command needed to start the BIND 10 system.
+   processes. bind10 will also restart some processes that exit unexpectedly.
+   This is the only command needed to start the BIND 10 system.
 
    After starting the b10-msgq communications channel, bind10 connects to it,
    runs the configuration manager, and reads its own configuration. Then it
    starts the other modules.
 
-   The b10-msgq and b10-cfgmgr services make up the core. The b10-msgq daemon
-   provides the communication channel between every part of the system. The
-   b10-cfgmgr daemon is always needed by every module, if only to send
-   information about themselves somewhere, but more importantly to ask about
-   their own settings, and about other modules. The bind10 master process
-   will also start up b10-cmdctl for admins to communicate with the system,
-   b10-auth for authoritative DNS service or b10-resolver for recursive name
-   service, b10-stats for statistics collection, b10-xfrin for inbound DNS
-   zone transfers, b10-xfrout for outbound DNS zone transfers, and
-   b10-zonemgr for secondary service.
+   The b10-sockcreator, b10-msgq and b10-cfgmgr services make up the core.
+   The b10-msgq daemon provides the communication channel between every part
+   of the system. The b10-cfgmgr daemon is always needed by every module, if
+   only to send information about themselves somewhere, but more importantly
+   to ask about their own settings, and about other modules. The
+   b10-sockcreator will allocate sockets for the rest of the system.
+
+   In its default configuration, the bind10 master process will also start up
+   b10-cmdctl for admins to communicate with the system, b10-auth for
+   authoritative DNS service, b10-stats for statistics collection, b10-xfrin
+   for inbound DNS zone transfers, b10-xfrout for outbound DNS zone
+   transfers, and b10-zonemgr for secondary service.
 
 Starting BIND 10
 
@@ -457,6 +467,110 @@ Starting BIND 10
    names for the Python-based daemons will be renamed to better identify them
    instead of just "python". This is not needed on some operating systems.
 
+Configuration of started processes
+
+   The processes to be started can be configured, with the exception of the
+   b10-sockcreator, b10-msgq and b10-cfgmgr.
+
+   The configuration is in the Boss/components section. Each element
+   represents one component, which is an abstraction of a process (currently
+   there's also one component which doesn't represent a process). If you
+   didn't want to transfer out at all (your server is a slave only), you
+   would just remove the corresponding component from the set, like this and
+   the process would be stopped immediately (and not started on the next
+   startup):
+
+ > config remove Boss/components b10-xfrout
+ > config commit
+
+   To add a process to the set, let's say the resolver (which is not started by
+   default), you would do this:
+
+ > config add Boss/components b10-resolver
+ > config set Boss/components/b10-resolver/special resolver
+ > config set Boss/components/b10-resolver/kind needed
+ > config set Boss/components/b10-resolver/priority 10
+ > config commit
+
+   Now, what it means. We add an entry called b10-resolver. It is both a name
+   used to reference this component in the configuration and the name of the
+   process to start. Then we set some parameters on how to start it.
+
+   The special one is for components that need some kind of special care
+   during startup or shutdown. Unless specified, the component is started in
+   usual way. This is the list of components that need to be started in a
+   special way, with the value of special used for them:
+
+   Table 3.1.
+
+   +------------------------------------------------------------------------+
+   | Component    | Special  | Description                                  |
+   |--------------+----------+----------------------------------------------|
+   | b10-auth     | auth     | Authoritative server                         |
+   |--------------+----------+----------------------------------------------|
+   | b10-resolver | resolver | The resolver                                 |
+   |--------------+----------+----------------------------------------------|
+   | b10-cmdctl   | cmdctl   | The command control (remote control          |
+   |              |          | interface)                                   |
+   |--------------+----------+----------------------------------------------|
+   | setuid       | setuid   | Virtual component, see below                 |
+   +------------------------------------------------------------------------+
+
+   The kind specifies how a failure of the component should be handled. If it
+   is set to "dispensable" (the default unless you set something else), it
+   will get started again if it fails. If it is set to "needed" and it fails
+   at startup, the whole bind10 shuts down and exits with error exit code.
+   But if it fails some time later, it is just started again. If you set it
+   to "core", you indicate that the system is not usable without the
+   component and if such component fails, the system shuts down no matter
+   when the failure happened. This is the behaviour of the core components
+   (the ones you can't turn off), but you can declare any other components as
+   core as well if you wish (but you can turn these off, they just can't
+   fail).
+
+   The priority defines order in which the components should start. The ones
+   with higher number are started sooner than the ones with lower ones. If
+   you don't set it, 0 (zero) is used as the priority.
+
+   There are other parameters we didn't use in our example. One of them is
+   "address". It is the address used by the component on the b10-msgq message
+   bus. The special components already know their address, but the usual ones
+   don't. The address is by convention the thing after b10-, with the first
+   letter capital (eg. b10-stats would have "Stats" as its address).
+
+   The last one is process. It is the name of the process to be started. It
+   defaults to the name of the component if not set, but you can use this to
+   override it.
+
+  Note
+
+   This system allows you to start the same component multiple times (by
+   including it in the configuration with different names, but the same
+   process setting). However, the rest of the system doesn't expect such
+   situation, so it would probably not do what you want. Such support is yet
+   to be implemented.
+
+  Note
+
+   The configuration is quite powerful, but that includes a lot of space for
+   mistakes. You could turn off the b10-cmdctl, but then you couldn't change
+   it back the usual way, as it would require it to be running (you would
+   have to find and edit the configuration directly). Also, some modules
+   might have dependencies -- b10-stats-httpd needs b10-stats, b10-xfrout
+   needs the b10-auth to be running, etc.
+
+   In short, you should think twice before disabling something here.
+
+   Now, to the mysterious setuid virtual component. If you use the -u option
+   to start the bind10 as root, but change the user later, we need to start
+   the b10-auth or b10-resolver as root (until the socket creator is
+   finished). So we need to specify the time when the switch from root to the
+   given user happens and that's what the setuid component is for. The switch
+   is done at the time the setuid component would be started, if it was a
+   process. The default configuration contains the setuid component with
+   priority 5, b10-auth has 10 to be started before the switch and everything
+   else is without priority, so it is started after the switch.
+
 Chapter 4. Command channel
 
    The BIND 10 components use the b10-msgq message routing daemon to
@@ -739,15 +853,55 @@ Trigger an Incoming Zone Transfer Manually
 Chapter 10. Outbound Zone Transfers
 
    The b10-xfrout process is started by bind10. When the b10-auth
-   authoritative DNS server receives an AXFR request, b10-xfrout sends the
-   zone. This is used to provide master DNS service to share zones to
-   secondary name servers. The b10-xfrout is also used to send NOTIFY
-   messages to slaves.
+   authoritative DNS server receives an AXFR or IXFR request, b10-auth
+   internally forwards the request to b10-xfrout, which handles the rest of
+   request processing. This is used to provide primary DNS service to share
+   zones to secondary name servers. The b10-xfrout is also used to send
+   NOTIFY messages to secondary servers.
+
+   A global or per zone transfer_acl configuration can be used to control
+   accessibility of the outbound zone transfer service. By default,
+   b10-xfrout allows any clients to perform zone transfers for any zones:
+
+ > config show Xfrout/transfer_acl
+ Xfrout/transfer_acl[0]  {"action": "ACCEPT"}    any     (default)
+
+   You can change this to, for example, rejecting all transfer requests by
+   default while allowing requests for the transfer of zone "example.com"
+   from 192.0.2.1 and 2001:db8::1 as follows:
+
+ > config set Xfrout/transfer_acl[0] {"action": "REJECT"}
+ > config add Xfrout/zone_config
+ > config set Xfrout/zone_config[0]/origin "example.com"
+ > config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1"},
+                                                  {"action": "ACCEPT", "from": "2001:db8::1"}]
+ > config commit
+
+  Note
+
+   In the above example the lines for transfer_acl were divided for
+   readability. In the actual input it must be in a single line.
+
+   If you want to require TSIG in access control, a separate TSIG "key ring"
+   must be configured specifically for b10-xfrout as well as a system wide
+   key ring, both containing a consistent set of keys. For example, to change
+   the previous example to allowing requests from 192.0.2.1 signed by a TSIG
+   with a key name of "key.example", you'll need to do this:
+
+ > config set tsig_keys/keys ["key.example:<base64-key>"]
+ > config set Xfrout/tsig_keys/keys ["key.example:<base64-key>"]
+ > config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1", "key": "key.example"}]
+ > config commit
+
+   The first line of configuration defines a system wide key ring. This is
+   necessary because the b10-auth server also checks TSIGs and it uses the
+   system wide configuration.
 
   Note
 
-   The current development release of BIND 10 only supports AXFR. (IXFR is
-   not supported.) Access control is not yet provided.
+   In a future version, b10-xfrout will also use the system wide TSIG
+   configuration. The way to specify zone specific configuration (ACLs, etc)
+   is likely to be changed, too.
 
 Chapter 11. Secondary Manager
 
@@ -777,8 +931,13 @@ Chapter 12. Recursive Name Server
    authoritative or resolver or both. By default, it starts the authoritative
    service. You may change this using bindctl, for example:
 
- > config set Boss/start_auth false
- > config set Boss/start_resolver true
+ > config remove Boss/components b10-xfrout
+ > config remove Boss/components b10-xfrin
+ > config remove Boss/components b10-auth
+ > config add Boss/components b10-resolver
+ > config set Boss/components/b10-resolver/special resolver
+ > config set Boss/components/b10-resolver/kind needed
+ > config set Boss/components/b10-resolver/priority 10
  > config commit
 
    The master bind10 will stop and start the desired services.
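
The "Configuration of started processes" section above describes how the kind setting decides what the boss does when a component fails. A small, hypothetical sketch of that decision logic (not the actual bind10 boss code; handle_failure is an invented name):

    # Sketch of the documented 'kind' semantics; not BIND 10 code.
    def handle_failure(kind, failed_at_startup):
        if kind == "core":
            # Core components can never fail without taking the system down.
            return "shut down bind10"
        if kind == "needed" and failed_at_startup:
            return "shut down bind10 with an error exit code"
        # "dispensable" (the default), or a "needed" component failing later.
        return "restart the component"

    assert handle_failure("dispensable", False) == "restart the component"
    assert handle_failure("needed", True).startswith("shut down")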

+ 214 - 34
doc/guide/bind10-guide.xml

@@ -706,7 +706,7 @@ Debian and Ubuntu:
       BIND 10 provides the <command>bind10</command> command which
       starts up the required processes.
       <command>bind10</command>
-      will also restart processes that exit unexpectedly.
+      will also restart some processes that exit unexpectedly.
       This is the only command needed to start the BIND 10 system.
     </para>
 
@@ -718,17 +718,22 @@ Debian and Ubuntu:
     </para>
 
     <para>
-      The <command>b10-msgq</command> and <command>b10-cfgmgr</command>
+      The <command>b10-sockcreator</command>, <command>b10-msgq</command> and
+      <command>b10-cfgmgr</command>
       services make up the core. The <command>b10-msgq</command> daemon
       provides the communication channel between every part of the system.
       The <command>b10-cfgmgr</command> daemon is always needed by every
       module, if only to send information about themselves somewhere,
       but more importantly to ask about their own settings, and
-      about other modules.
-      The <command>bind10</command> master process will also start up
+      about other modules. The <command>b10-sockcreator</command> will
+      allocate sockets for the rest of the system.
+    </para>
+
+    <para>
+      In its default configuration, the <command>bind10</command>
+      master process will also start up
       <command>b10-cmdctl</command> for admins to communicate with the
-      system, <command>b10-auth</command> for authoritative DNS service or
-      <command>b10-resolver</command> for recursive name service,
+      system, <command>b10-auth</command> for authoritative DNS service,
       <command>b10-stats</command> for statistics collection,
       <command>b10-xfrin</command> for inbound DNS zone transfers,
       <command>b10-xfrout</command> for outbound DNS zone transfers,
@@ -754,6 +759,159 @@ Debian and Ubuntu:
       </note>
 
     </section>
+    <section id="bind10.config">
+      <title>Configuration of started processes</title>
+      <para>
+        The processes to be started can be configured, with the exception
+        of the <command>b10-sockcreator</command>, <command>b10-msgq</command>
+        and <command>b10-cfgmgr</command>.
+      </para>
+
+      <para>
+        The configuration is in the Boss/components section. Each element
+        represents one component, which is an abstraction of a process
+        (currently there's also one component which doesn't represent
+        a process). If you didn't want to transfer out at all (your server
+        is a slave only), you would just remove the corresponding component
+        from the set, like this and the process would be stopped immediately
+        (and not started on the next startup):
+      <screen>&gt; <userinput>config remove Boss/components b10-xfrout</userinput>
+&gt; <userinput>config commit</userinput></screen>
+      </para>
+
+      <para>
+        To add a process to the set, let's say the resolver (which is not started
+        by default), you would do this:
+        <screen>&gt; <userinput>config add Boss/components b10-resolver</userinput>
+&gt; <userinput>config set Boss/components/b10-resolver/special resolver</userinput>
+&gt; <userinput>config set Boss/components/b10-resolver/kind needed</userinput>
+&gt; <userinput>config set Boss/components/b10-resolver/priority 10</userinput>
+&gt; <userinput>config commit</userinput></screen></para>
+
+      <para>
+        Now, what it means. We add an entry called b10-resolver. It is both a
+        name used to reference this component in the configuration and the
+        name of the process to start. Then we set some parameters on how to
+        start it.
+      </para>
+
+      <para>
+        The special one is for components that need some kind of special care
+        during startup or shutdown. Unless specified, the component is started
+        in usual way. This is the list of components that need to be started
+        in a special way, with the value of special used for them:
+        <table>
+          <tgroup cols='3' align='left'>
+          <colspec colname='component'/>
+          <colspec colname='special'/>
+          <colspec colname='description'/>
+          <thead><row><entry>Component</entry><entry>Special</entry><entry>Description</entry></row></thead>
+          <tbody>
+            <row><entry>b10-auth</entry><entry>auth</entry><entry>Authoritative server</entry></row>
+            <row><entry>b10-resolver</entry><entry>resolver</entry><entry>The resolver</entry></row>
+            <row><entry>b10-cmdctl</entry><entry>cmdctl</entry><entry>The command control (remote control interface)</entry></row>
+            <row><entry>setuid</entry><entry>setuid</entry><entry>Virtual component, see below</entry></row>
+            <!-- TODO Either add xfrin and xfrout as well or clean up the workarounds in boss before the release -->
+          </tbody>
+          </tgroup>
+        </table>
+      </para>
+
+      <para>
+	The kind specifies how a failure of the component should
+	be handled.  If it is set to <quote>dispensable</quote>
+	(the default unless you set something else), it will get
+	started again if it fails. If it is set to <quote>needed</quote>
+	and it fails at startup, the whole <command>bind10</command>
+	shuts down and exits with error exit code. But if it fails
+	some time later, it is just started again. If you set it
+	to <quote>core</quote>, you indicate that the system is
+	not usable without the component and if such component
+	fails, the system shuts down no matter when the failure
+	happened.  This is the behaviour of the core components
+	(the ones you can't turn off), but you can declare any
+	other components as core as well if you wish (but you can
+	turn these off, they just can't fail).
+      </para>
+
+      <para>
+        The priority defines order in which the components should start.
+        The ones with higher number are started sooner than the ones with
+        lower ones. If you don't set it, 0 (zero) is used as the priority.
+      </para>
+
+      <para>
+        There are other parameters we didn't use in our example.
+	One of them is <quote>address</quote>. It is the address
+	used by the component on the <command>b10-msgq</command>
+	message bus. The special components already know their
+	address, but the usual ones don't. The address is by
+	convention the thing after <emphasis>b10-</emphasis>, with
+	the first letter capital (eg. <command>b10-stats</command>
+	would have <quote>Stats</quote> as its address).
+<!-- TODO: this should be simplified so we don't even have to document it -->
+      </para>
+
+<!-- TODO: what does "The special components already know their
+address, but the usual ones don't." mean? -->
+
+<!-- TODO: document params when is enabled -->
+
+      <para>
+        The last one is process. It is the name of the process to be started.
+        It defaults to the name of the component if not set, but you can use
+        this to override it.
+      </para>
+
+      <!-- TODO Add parameters when they work, not implemented yet-->
+
+      <note>
+        <para>
+          This system allows you to start the same component multiple times
+          (by including it in the configuration with different names, but the
+          same process setting). However, the rest of the system doesn't expect
+          such situation, so it would probably not do what you want. Such
+          support is yet to be implemented.
+        </para>
+      </note>
+
+      <note>
+        <para>
+	  The configuration is quite powerful, but that includes
+	  a lot of space for mistakes. You could turn off the
+	  <command>b10-cmdctl</command>, but then you couldn't
+	  change it back the usual way, as it would require it to
+	  be running (you would have to find and edit the configuration
+	  directly).  Also, some modules might have dependencies
+	  -- <command>b10-stats-httpd</command> needs
+	  <command>b10-stats</command>, <command>b10-xfrout</command>
+	  needs the <command>b10-auth</command> to be running, etc.
+
+<!-- TODO: should we define dependencies? -->
+
+        </para>
+        <para>
+          In short, you should think twice before disabling something here.
+        </para>
+      </note>
+
+      <para>
+	Now, to the mysterious setuid virtual component. If you
+	use the <command>-u</command> option to start the
+	<command>bind10</command> as root, but change the user
+	later, we need to start the <command>b10-auth</command> or
+	<command>b10-resolver</command> as root (until the socket
+	creator is finished).<!-- TODO --> So we need to specify
+	the time when the switch from root to the given user happens
+	and that's what the setuid component is for. The switch is
+	done at the time the setuid component would be started, if
+	it was a process. The default configuration contains the
+	setuid component with priority 5, <command>b10-auth</command>
+	has 10 to be started before the switch and everything else
+	is without priority, so it is started after the switch.
+      </para>
+
+    </section>
 
   </chapter>
 
@@ -1344,6 +1502,49 @@ what if a NOTIFY is sent?
 
 -->
 
+    <section id="zonemgr">
+      <title>Secondary Manager</title>
+
+      <para>
+        The <command>b10-zonemgr</command> process is started by
+        <command>bind10</command>.
+        It keeps track of SOA refresh, retry, and expire timers
+        and other details for BIND 10 to perform as a slave.
+        When the <command>b10-auth</command> authoritative DNS server
+        receives a NOTIFY message, <command>b10-zonemgr</command>
+        may tell <command>b10-xfrin</command> to do a refresh
+        to start an inbound zone transfer.
+        The secondary manager resets its counters when a new zone is
+        transferred in.
+      </para>
+
+      <note><simpara>
+        Access control (such as allowing notifies) is not yet provided.
+        The primary/secondary service is not yet complete.
+      </simpara></note>
+
+      <para>
+        The following example shows using <command>bindctl</command>
+        to configure the server to be a secondary for the example zone:
+
+      <screen>&gt; <userinput>config add Zonemgr/secondary_zones</userinput>
+&gt; <userinput>config set Zonemgr/secondary_zones[0]/name "<option>example.com</option>"</userinput>
+&gt; <userinput>config set Zonemgr/secondary_zones[0]/class "<option>IN</option>"</userinput>
+&gt; <userinput>config commit</userinput></screen>
+
+<!-- TODO: remove the IN class example above when it is the default -->
+
+      </para>
+
+      <para>
+        If the zone does not exist in the data source already
+        (i.e. no SOA record for it), <command>b10-zonemgr</command>
+        will automatically tell <command>b10-xfrin</command>
+        to transfer the zone in.
+      </para>
+
+    </section>
+
     <section>
       <title>Trigger an Incoming Zone Transfer Manually</title>
 
@@ -1356,7 +1557,6 @@ what if a NOTIFY is sent?
       </para>
     </section>
 
-
 <!-- TODO: can that retransfer be used to identify a new zone? -->
 <!-- TODO: what if doesn't exist at that master IP? -->
 
@@ -1448,31 +1648,6 @@ what is XfroutClient xfr_client??
 
   </chapter>
 
-  <chapter id="zonemgr">
-    <title>Secondary Manager</title>
-
-    <para>
-      The <command>b10-zonemgr</command> process is started by
-      <command>bind10</command>.
-      It keeps track of SOA refresh, retry, and expire timers
-      and other details for BIND 10 to perform as a slave.
-      When the <command>b10-auth</command> authoritative DNS server
-      receives a NOTIFY message, <command>b10-zonemgr</command>
-      may tell <command>b10-xfrin</command> to do a refresh
-      to start an inbound zone transfer.
-      The secondary manager resets its counters when a new zone is
-      transferred in.
-    </para>
-
-    <note><simpara>
-     Access control (such as allowing notifies) is not yet provided.
-     The primary/secondary service is not yet complete.
-    </simpara></note>
-
-<!-- TODO: lots to describe for zonemgr -->
-
-  </chapter>
-
   <chapter id="resolverserver">
     <title>Recursive Name Server</title>
 
@@ -1494,8 +1669,13 @@ what is XfroutClient xfr_client??
       You may change this using <command>bindctl</command>, for example:
 
       <screen>
-&gt; <userinput>config set Boss/start_auth false</userinput>
-&gt; <userinput>config set Boss/start_resolver true</userinput>
+&gt; <userinput>config remove Boss/components b10-xfrout</userinput>
+&gt; <userinput>config remove Boss/components b10-xfrin</userinput>
+&gt; <userinput>config remove Boss/components b10-auth</userinput>
+&gt; <userinput>config add Boss/components b10-resolver</userinput>
+&gt; <userinput>config set Boss/components/b10-resolver/special resolver</userinput>
+&gt; <userinput>config set Boss/components/b10-resolver/kind needed</userinput>
+&gt; <userinput>config set Boss/components/b10-resolver/priority 10</userinput>
 &gt; <userinput>config commit</userinput>
 </screen>
 

File diff suppressed because it is too large
+ 388 - 94
doc/guide/bind10-messages.html


+ 458 - 91
doc/guide/bind10-messages.xml

@@ -573,19 +573,117 @@ needs a dedicated message bus.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="BIND10_CONFIGURATION_START_AUTH">
-<term>BIND10_CONFIGURATION_START_AUTH start authoritative server: %1</term>
+<varlistentry id="BIND10_COMPONENT_FAILED">
+<term>BIND10_COMPONENT_FAILED component %1 (pid %2) failed with %3 exit status</term>
 <listitem><para>
-This message shows whether or not the authoritative server should be
-started according to the configuration.
+The process terminated, but the bind10 boss didn't expect it to, which means
+it must have failed.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="BIND10_CONFIGURATION_START_RESOLVER">
-<term>BIND10_CONFIGURATION_START_RESOLVER start resolver: %1</term>
+<varlistentry id="BIND10_COMPONENT_RESTART">
+<term>BIND10_COMPONENT_RESTART component %1 is about to restart</term>
 <listitem><para>
-This message shows whether or not the resolver should be
-started according to the configuration.
+The named component failed previously and we will try to restart it to provide
+as flawless service as possible, but it should be investigated what happened,
+as it could happen again.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_START">
+<term>BIND10_COMPONENT_START component %1 is starting</term>
+<listitem><para>
+The named component is about to be started by the boss process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_START_EXCEPTION">
+<term>BIND10_COMPONENT_START_EXCEPTION component %1 failed to start: %2</term>
+<listitem><para>
+An exception (mentioned in the message) happened during the startup of the
+named component. The component is not considered started and further actions
+will be taken about it.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_STOP">
+<term>BIND10_COMPONENT_STOP component %1 is being stopped</term>
+<listitem><para>
+A component is about to be asked to stop willingly by the boss.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_UNSATISFIED">
+<term>BIND10_COMPONENT_UNSATISFIED component %1 is required to run and failed</term>
+<listitem><para>
+A component failed for some reason (see previous messages). It is either a core
+component or needed component that was just started. In any case, the system
+can't continue without it and will terminate.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_BUILD">
+<term>BIND10_CONFIGURATOR_BUILD building plan '%1' -&gt; '%2'</term>
+<listitem><para>
+A debug message. This indicates that the configurator is building a plan
+for how to change the configuration from the old one to the new one. This
+does no real work yet; it only plans what needs to be done.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_PLAN_INTERRUPTED">
+<term>BIND10_CONFIGURATOR_PLAN_INTERRUPTED configurator plan interrupted, only %1 of %2 done</term>
+<listitem><para>
+There was an exception during some planned task. The plan will not continue and
+only some tasks of the plan were completed. The rest is aborted. The exception
+will be propagated.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_RECONFIGURE">
+<term>BIND10_CONFIGURATOR_RECONFIGURE reconfiguring running components</term>
+<listitem><para>
+A different configuration of which components should be running is being
+installed. All components that are no longer needed will be stopped and
+newly introduced ones started. This happens at startup, when the configuration
+is read the first time, or when an operator changes configuration of the boss.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_RUN">
+<term>BIND10_CONFIGURATOR_RUN running plan of %1 tasks</term>
+<listitem><para>
+A debug message. The configurator is about to execute a plan of actions it
+computed previously.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_START">
+<term>BIND10_CONFIGURATOR_START bind10 component configurator is starting up</term>
+<listitem><para>
+The part that cares about starting and stopping the right component from the
+boss process is starting up. This happens only once at the startup of the
+boss process. It will start the basic set of processes now (the ones boss
+needs to read the configuration), the rest will be started after the
+configuration is known.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_STOP">
+<term>BIND10_CONFIGURATOR_STOP bind10 component configurator is shutting down</term>
+<listitem><para>
+The part that cares about starting and stopping processes in the boss is
+shutting down. All started components will be shut down now (more precisely,
+asked to terminate on their own; if they fail to comply, other parts of
+the boss process will try to force them).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_TASK">
+<term>BIND10_CONFIGURATOR_TASK performing task %1 on %2</term>
+<listitem><para>
+A debug message. The configurator is about to perform one task of the plan it
+is currently executing on the named component.
 </para></listitem>
 </varlistentry>
 
@@ -632,14 +730,6 @@ running, which needs to be stopped.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="BIND10_MSGQ_DAEMON_ENDED">
-<term>BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down</term>
-<listitem><para>
-The message bus daemon has died. This is a fatal error, since it may
-leave the system in an inconsistent state. BIND10 will now shut down.
-</para></listitem>
-</varlistentry>
-
 <varlistentry id="BIND10_MSGQ_DISAPPEARED">
 <term>BIND10_MSGQ_DISAPPEARED msgq channel disappeared</term>
 <listitem><para>
@@ -649,24 +739,12 @@ inconsistent state of the system, and BIND 10 will now shut down.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="BIND10_PROCESS_ENDED_NO_EXIT_STATUS">
-<term>BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available</term>
-<listitem><para>
-The given process ended unexpectedly, but no exit status is
-available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
-description.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="BIND10_PROCESS_ENDED_WITH_EXIT_STATUS">
-<term>BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3</term>
+<varlistentry id="BIND10_PROCESS_ENDED">
+<term>BIND10_PROCESS_ENDED process %2 of %1 ended with status %3</term>
 <listitem><para>
-The given process ended unexpectedly with the given exit status.
-Depending on which module it was, it may simply be restarted, or it
-may be a problem that will cause the boss module to shut down too.
-The latter happens if it was the message bus daemon, which, if it has
-died suddenly, may leave the system in an inconsistent state. BIND10
-will also shut down now if it has been run with --brittle.
+This indicates that a previously started process has terminated. The process id
+and the component owning the process are indicated, as well as the exit code.
+This message does not distinguish whether the process was supposed to terminate.
 </para></listitem>
 </varlistentry>
 
@@ -740,6 +818,13 @@ The boss module is sending a SIGTERM signal to the given process.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="BIND10_SETUID">
+<term>BIND10_SETUID setting UID to %1</term>
+<listitem><para>
+The boss switches the user it runs as to the given UID.
+</para></listitem>
+</varlistentry>
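
As a reminder of the usual mechanics behind such a message, dropping
privileges in Python means resolving the target user and changing the group
before the UID, since setuid() removes the right to change groups afterwards.
The sketch below is a generic illustration, not the boss's actual code.

    import os
    import pwd

    def drop_privileges(username):
        entry = pwd.getpwnam(username)
        os.setgid(entry.pw_gid)   # must happen before setuid()
        os.setuid(entry.pw_uid)   # the point where BIND10_SETUID would be logged
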
+
 <varlistentry id="BIND10_SHUTDOWN">
 <term>BIND10_SHUTDOWN stopping the server</term>
 <listitem><para>
@@ -774,15 +859,6 @@ looks like a programmer error.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="BIND10_SOCKCREATOR_CRASHED">
-<term>BIND10_SOCKCREATOR_CRASHED the socket creator crashed</term>
-<listitem><para>
-The socket creator terminated unexpectedly. It is not possible to restart it
-(because the boss already gave up root privileges), so the system is going
-to terminate.
-</para></listitem>
-</varlistentry>
-
 <varlistentry id="BIND10_SOCKCREATOR_EOF">
 <term>BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</term>
 <listitem><para>
@@ -846,6 +922,14 @@ The boss forwards a request for a socket to the socket creator.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="BIND10_STARTED_CC">
+<term>BIND10_STARTED_CC started configuration/command session</term>
+<listitem><para>
+Debug message given when BIND 10 has successfully started the object that
+handles configuration and commands.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="BIND10_STARTED_PROCESS">
 <term>BIND10_STARTED_PROCESS started %1</term>
 <listitem><para>
@@ -867,6 +951,14 @@ Informational message on startup that shows the full version.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="BIND10_STARTING_CC">
+<term>BIND10_STARTING_CC starting configuration/command session</term>
+<listitem><para>
+Informational message given when BIND 10 is starting the session object
+that handles configuration and commands.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="BIND10_STARTING_PROCESS">
 <term>BIND10_STARTING_PROCESS starting process %1</term>
 <listitem><para>
@@ -905,10 +997,41 @@ shown, and BIND10 will now shut down.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="BIND10_START_AS_NON_ROOT">
-<term>BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.</term>
+<varlistentry id="BIND10_STARTUP_UNEXPECTED_MESSAGE">
+<term>BIND10_STARTUP_UNEXPECTED_MESSAGE unrecognised startup message %1</term>
 <listitem><para>
-The given module is being started or restarted without root privileges.
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts.  This error is output when a
+message received by the Boss process is recognised as being of the
+correct format but is unexpected.  It may be that processes are starting
+out of sequence.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTUP_UNRECOGNISED_MESSAGE">
+<term>BIND10_STARTUP_UNRECOGNISED_MESSAGE unrecognised startup message %1</term>
+<listitem><para>
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts.  This error is output when a
+message received by the Boss process is not recognised.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_START_AS_NON_ROOT_AUTH">
+<term>BIND10_START_AS_NON_ROOT_AUTH starting b10-auth as a user, not root. This might fail.</term>
+<listitem><para>
+The authoritative server is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_START_AS_NON_ROOT_RESOLVER">
+<term>BIND10_START_AS_NON_ROOT_RESOLVER starting b10-resolver as a user, not root. This might fail.</term>
+<listitem><para>
+The resolver is being started or restarted without root privileges.
 If the module needs these privileges, it may have problems starting.
 Note that this issue should be resolved by the pending 'socket-creator'
 process; once that has been implemented, modules should not need root
@@ -932,6 +1055,20 @@ action will be taken by the boss process.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="BIND10_WAIT_CFGMGR">
+<term>BIND10_WAIT_CFGMGR waiting for configuration manager process to initialize</term>
+<listitem><para>
+The configuration manager process is so critical to operation of BIND 10
+that after starting it, the Boss module will wait for it to initialize
+itself before continuing.  This debug message is produced during the
+wait and may be output zero or more times depending on how long it takes
+the configuration manager to start up.  The total length of time Boss
+will wait for the configuration manager before reporting an error is
+set with the --wait command line switch, which has a default value of
+ten seconds.
+</para></listitem>
+</varlistentry>
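
The behaviour described above is essentially a polling loop bounded by the
--wait timeout. A rough sketch follows; is_cfgmgr_ready is a placeholder for
whatever readiness check the boss really performs.

    import time

    def wait_for_cfgmgr(is_cfgmgr_ready, timeout=10.0, interval=1.0):
        deadline = time.time() + timeout
        while time.time() < deadline:
            if is_cfgmgr_ready():
                return True
            # BIND10_WAIT_CFGMGR would be logged once per iteration here.
            time.sleep(interval)
        return False   # the caller reports an error after the timeout

    print(wait_for_cfgmgr(lambda: True))
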
+
 <varlistentry id="CACHE_ENTRY_MISSING_RRSET">
 <term>CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</term>
 <listitem><para>
@@ -1535,6 +1672,13 @@ certificate file could not be read.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="CMDCTL_STARTED">
+<term>CMDCTL_STARTED cmdctl is listening for connections on %1:%2</term>
+<listitem><para>
+The cmdctl daemon has started and is now listening for connections.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="CMDCTL_STOPPED_BY_KEYBOARD">
 <term>CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
 <listitem><para>
@@ -1909,6 +2053,50 @@ database). The data in database should be checked and fixed.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="DATASRC_DATABASE_JOURNALREADER_END">
+<term>DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5</term>
+<listitem><para>
+This is a debug message indicating that the program (successfully)
+reached the end of the sequence of a zone's differences.  The zone's name
+and class, database name, and the start and end serials are shown in
+the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_JOURNALREADER_NEXT">
+<term>DATASRC_DATABASE_JOURNALREADER_NEXT %1/%2 in %3/%4 on %5</term>
+<listitem><para>
+This is a debug message indicating that the program retrieved one
+difference in a zone's difference sequence and successfully converted
+it to an RRset.  The zone's name and class, database name, and the
+name and RR type of the retrieved diff are shown in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_JOURNALREADER_START">
+<term>DATASRC_DATABASE_JOURNALREADER_START %1/%2 on %3 from %4 to %5</term>
+<listitem><para>
+This is a debug message indicating that the program starts reading
+a zone's difference sequences from a database-based data source.  The
+zone's name and class, database name, and the start and end serials
+are shown in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_JOURNALREADR_BADDATA">
+<term>DATASRC_DATABASE_JOURNALREADR_BADDATA failed to convert a diff to RRset in %1/%2 on %3 between %4 and %5: %6</term>
+<listitem><para>
+This is an error message indicating that a zone's diff is broken and
+the data source library failed to convert it to a valid RRset.  The
+most likely cause of this is that someone has manually modified the
+zone's diff in the database and inserted invalid data as a result.
+The zone's name and class, database name, and the start and end
+serials, and an additional detail of the error are shown in the
+message.  The administrator should examine the diff in the database
+to find any invalid data and fix it.
+</para></listitem>
+</varlistentry>
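
Taken together, the journal reader messages above describe iterating over a
zone's stored differences between two serials: one START, one NEXT per
retrieved diff, and one END (or BADDATA on a broken diff). The sketch below
only illustrates that flow; FakeReader and get_next_diff are invented for the
example and are not the real data source API.

    # Invented stand-in for a journal reader; not the real API.
    class FakeReader:
        def __init__(self, diffs):
            self._diffs = list(diffs)

        def get_next_diff(self):
            return self._diffs.pop(0) if self._diffs else None

    def read_journal(reader):
        diffs = []                          # JOURNALREADER_START
        while True:
            rrset = reader.get_next_diff()  # one JOURNALREADER_NEXT per diff
            if rrset is None:               # JOURNALREADER_END
                return diffs
            diffs.append(rrset)

    print(read_journal(FakeReader(["del SOA 2", "del A", "add SOA 3", "add A"])))
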
+
 <varlistentry id="DATASRC_DATABASE_UPDATER_COMMIT">
 <term>DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3</term>
 <listitem><para>
@@ -2890,6 +3078,20 @@ together, the later one get's overwritten to the earlier one in the sequence.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="LIBXFRIN_NO_JOURNAL">
+<term>LIBXFRIN_NO_JOURNAL disabled journaling for updates to %1 on %2</term>
+<listitem><para>
+An attempt was made to create a Diff object with journaling enabled, but
+the underlying data source didn't support journaling (while still allowing
+updates) and so the created object has it disabled.  At a higher level this
+means that the updates will be applied to the zone but subsequent IXFR requests
+will result in a full zone transfer (i.e., an AXFR-style IXFR).  Unless the
+overhead of the full transfer is an issue this message can be ignored;
+otherwise you may want to check why the journaling wasn't allowed on the
+data source and either fix the issue or use a different type of data source.
+</para></listitem>
+</varlistentry>
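
In other words, journaling is requested but silently downgraded when the data
source cannot record diffs, and the cost only shows up later as AXFR-style
IXFR. A tiny, purely illustrative sketch of that decision (the function and
flag names are made up):

    def effective_journaling(requested, datasrc_supports_journaling):
        if requested and not datasrc_supports_journaling:
            # LIBXFRIN_NO_JOURNAL: updates are still applied, but a later
            # IXFR request will fall back to a full transfer.
            return False
        return requested

    print(effective_journaling(True, datasrc_supports_journaling=False))
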
+
 <varlistentry id="LOGIMPL_ABOVE_MAX_DEBUG">
 <term>LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</term>
 <listitem><para>
@@ -3126,6 +3328,26 @@ to the named output file.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="NOTIFY_OUT_DATASRC_ACCESS_FAILURE">
+<term>NOTIFY_OUT_DATASRC_ACCESS_FAILURE failed to get access to data source: %1</term>
+<listitem><para>
+notify_out failed to get access to one of the configured data sources.
+The detailed error is shown in the log message.  This can be either a
+configuration error or an installation setup failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND">
+<term>NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND Zone %1 is not found</term>
+<listitem><para>
+notify_out attempted to get slave information for a zone, but the zone
+wasn't found in the expected data source.  This shouldn't happen,
+because notify_out first identifies the list of available zones before
+this step.  It therefore indicates either a critical inconsistency in
+the data source or a software bug.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="NOTIFY_OUT_INVALID_ADDRESS">
 <term>NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</term>
 <listitem><para>
@@ -3237,6 +3459,23 @@ is reached.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="NOTIFY_OUT_ZONE_BAD_SOA">
+<term>NOTIFY_OUT_ZONE_BAD_SOA Zone %1 is invalid in terms of SOA</term>
+<listitem><para>
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an SOA RR or has multiple SOA RRs.  A notify message won't
+be sent for such a zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_ZONE_NO_NS">
+<term>NOTIFY_OUT_ZONE_NO_NS Zone %1 doesn't have NS RR</term>
+<listitem><para>
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an NS RR.  A notify message won't be sent for such a zone.
+</para></listitem>
+</varlistentry>
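
The two warnings above boil down to a sanity check before sending notifies:
exactly one SOA RR and at least one NS RR. A minimal illustration (not the
actual notify_out code):

    def zone_can_be_notified(soa_count, ns_count):
        if soa_count != 1:
            return False   # NOTIFY_OUT_ZONE_BAD_SOA
        if ns_count == 0:
            return False   # NOTIFY_OUT_ZONE_NO_NS
        return True

    print(zone_can_be_notified(soa_count=1, ns_count=3))   # True
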
+
 <varlistentry id="NSAS_FIND_NS_ADDRESS">
 <term>NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</term>
 <listitem><para>
@@ -4144,6 +4383,16 @@ be ignored.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="STATHTTPD_SERVER_DATAERROR">
+<term>STATHTTPD_SERVER_DATAERROR HTTP server data error: %1</term>
+<listitem><para>
+An internal error occurred while handling an HTTP request. An HTTP 404
+response will be sent back, and the specific error is printed.  This
+error condition likely indicates that the data corresponding to the
+requested URI is incorrect.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="STATHTTPD_SERVER_ERROR">
 <term>STATHTTPD_SERVER_ERROR HTTP server error: %1</term>
 <listitem><para>
@@ -4518,6 +4767,25 @@ in database connection.  The error is shown in the log message.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="XFRIN_XFR_PROCESS_FAILURE">
+<term>XFRIN_XFR_PROCESS_FAILURE %1 transfer of zone %2/%3 failed: %4</term>
+<listitem><para>
+An XFR session failed outside the main protocol handling.  This
+includes an error at the data source level during the initialization
+phase, an unexpected failure in the network connection setup to the
+master server, or an even more unexpected failure due to unlikely events
+such as memory allocation failure.  Details of the error are shown in
+the log message.  In general these errors are not expected, and
+indicate an installation error or a program bug.  The session handler
+thread tries to clean up all intermediate resources even on these
+errors, but the cleanup may be incomplete.  So, if this log message
+appears continuously, system resource consumption should be checked,
+and you may even want to disable the corresponding transfers.  You may
+also want to file a bug report if this message appears often.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="XFRIN_XFR_TRANSFER_FAILURE">
 <term>XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3</term>
 <listitem><para>
@@ -4526,6 +4794,16 @@ The error is shown in the log message.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="XFRIN_XFR_TRANSFER_FALLBACK">
+<term>XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to AXFR for %1</term>
+<listitem><para>
+The IXFR transfer of the given zone failed. This might happen in many cases,
+such as the remote server not supporting IXFR, the SOA record (or the zone
+itself) being missing locally, or the two sides being out of sync. In many of
+these situations AXFR could still work, so it is tried in case it helps.
+</para></listitem>
+</varlistentry>
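
The fallback is simply "attempt IXFR, retry the same zone with AXFR if that
fails". A hedged sketch, with do_transfer standing in for the real transfer
routine:

    def transfer_with_fallback(do_transfer, zone):
        try:
            return do_transfer(zone, rrtype="IXFR")
        except Exception as error:
            # XFRIN_XFR_TRANSFER_FALLBACK would be logged here.
            print("IXFR of %s failed (%s), retrying with AXFR" % (zone, error))
            return do_transfer(zone, rrtype="AXFR")
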
+
 <varlistentry id="XFRIN_XFR_TRANSFER_STARTED">
 <term>XFRIN_XFR_TRANSFER_STARTED %1 transfer of zone %2 started</term>
 <listitem><para>
@@ -4541,44 +4819,6 @@ The XFR transfer of the given zone was successfully completed.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="XFROUT_AXFR_TRANSFER_DONE">
-<term>XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</term>
-<listitem><para>
-The transfer of the given zone has been completed successfully, or was
-aborted due to a shutdown event.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="XFROUT_AXFR_TRANSFER_ERROR">
-<term>XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</term>
-<listitem><para>
-An uncaught exception was encountered while sending the response to
-an AXFR query. The error message of the exception is included in the
-log message, but this error most likely points to incomplete exception
-handling in the code.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="XFROUT_AXFR_TRANSFER_FAILED">
-<term>XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</term>
-<listitem><para>
-A transfer out for the given zone failed. An error response is sent
-to the client. The given rcode is the rcode that is set in the error
-response. This is either NOTAUTH (we are not authoritative for the
-zone), SERVFAIL (our internal database is missing the SOA record for
-the zone), or REFUSED (the limit of simultaneous outgoing AXFR
-transfers, as specified by the configuration value
-Xfrout/max_transfers_out, has been reached).
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="XFROUT_AXFR_TRANSFER_STARTED">
-<term>XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</term>
-<listitem><para>
-A transfer out of the given zone has started.
-</para></listitem>
-</varlistentry>
-
 <varlistentry id="XFROUT_BAD_TSIG_KEY_STRING">
 <term>XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</term>
 <listitem><para>
@@ -4641,6 +4881,69 @@ system and your specific installation.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="XFROUT_IXFR_MULTIPLE_SOA">
+<term>XFROUT_IXFR_MULTIPLE_SOA IXFR client %1: authority section has multiple SOAs</term>
+<listitem><para>
+An IXFR request was received with more than one SOA RR in the authority
+section.  The xfrout daemon rejects the request with an RCODE of
+FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_JOURNAL_SUPPORT">
+<term>XFROUT_IXFR_NO_JOURNAL_SUPPORT IXFR client %1, %2: journaling not supported in the data source, falling back to AXFR</term>
+<listitem><para>
+An IXFR request was received but the underlying data source did
+not support journaling.  The xfrout daemon fell back to AXFR-style
+IXFR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_SOA">
+<term>XFROUT_IXFR_NO_SOA IXFR client %1: missing SOA</term>
+<listitem><para>
+An IXFR request was received with no SOA RR in the authority section.
+The xfrout daemon rejects the request with an RCODE of FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_VERSION">
+<term>XFROUT_IXFR_NO_VERSION IXFR client %1, %2: version (%3 to %4) not in journal, falling back to AXFR</term>
+<listitem><para>
+An IXFR request was received, but the requested range of differences
+was not found in the data source.  The xfrout daemon fell back to
+AXFR-style IXFR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_ZONE">
+<term>XFROUT_IXFR_NO_ZONE IXFR client %1, %2: zone not found with journal</term>
+<listitem><para>
+The requested zone in IXFR was not found in the data source
+even though the xfrout daemon successfully found the SOA RR of the zone
+in the data source.  This can happen if the administrator removed the
+zone from the data source within the small duration between these
+operations, but it's more likely to be a bug or broken data source.
+Unless you know why this message was logged, and especially if it
+happens often, it's advisable to check whether the data source is
+valid for this zone.  The xfrout daemon considers it a possible,
+though unlikely, event, and returns a response with an RCODE of
+NOTAUTH.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_UPTODATE">
+<term>XFROUT_IXFR_UPTODATE IXFR client %1, %2: client version is new enough (theirs=%3, ours=%4)</term>
+<listitem><para>
+An IXFR request was received, but the client's SOA version is the same as
+or newer than that of the server.  The xfrout server responds to the
+request with the answer section being just one SOA of that version.
+Note: as of this writing the 'newer version' cannot be identified due to
+the lack of support for serial number arithmetic.  This will soon
+be implemented.
+</para></listitem>
+</varlistentry>
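
The arithmetic referred to here is RFC 1982 serial number arithmetic, in
which 32-bit SOA serials are compared modulo 2^32 so that a wrapped serial
still counts as newer. A small sketch of the standard comparison:

    # RFC 1982 comparison: is serial s1 newer than s2 (32-bit serials)?
    def serial_gt(s1, s2, bits=32):
        half = 2 ** (bits - 1)
        diff = (s1 - s2) % (2 ** bits)
        return diff != 0 and diff < half

    print(serial_gt(1, 4294967295))   # True: 1 is newer than 2**32 - 1
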
+
 <varlistentry id="XFROUT_MODULECC_SESSION_ERROR">
 <term>XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1</term>
 <listitem><para>
@@ -4699,21 +5002,36 @@ in the code.
 </varlistentry>
 
 <varlistentry id="XFROUT_QUERY_DROPPED">
-<term>XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped</term>
+<term>XFROUT_QUERY_DROPPED %1 client %2: request to transfer %3 dropped</term>
+<listitem><para>
+The xfrout process silently dropped a request to transfer a zone to the
+given host, as required by the ACLs.  The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_QUERY_QUOTA_EXCCEEDED">
+<term>XFROUT_QUERY_QUOTA_EXCCEEDED %1 client %2: request denied due to quota (%3)</term>
 <listitem><para>
-The xfrout process silently dropped a request to transfer zone to given host.
-This is required by the ACLs. The %1 and %2 represent the zone name and class,
-the %3 and %4 the IP address and port of the peer requesting the transfer.
+The xfr request was rejected because the server was already handling
+the maximum number of allowable transfers, as specified in the transfers_out
+configuration parameter, which is also shown in the log message.  The
+request was immediately answered with an RCODE of REFUSED and terminated.
+This can happen on a busy xfrout server; you may want to increase
+this parameter.  If the server is busy because of requests from
+unexpected clients, you may want to use an ACL to limit transfers to
+legitimate clients.
 </para></listitem>
 </varlistentry>
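
The quota check itself is straightforward: refuse a new transfer once the
number of transfers already in progress has reached the configured limit. A
minimal illustration (transfers_out is the parameter named in the message;
the rest is invented for the example):

    def admit_transfer(in_progress, transfers_out=10):
        if in_progress >= transfers_out:
            # XFROUT_QUERY_QUOTA_EXCCEEDED: answer with RCODE REFUSED.
            return "REFUSED"
        return "OK"

    print(admit_transfer(in_progress=10))   # REFUSED
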
 
 <varlistentry id="XFROUT_QUERY_REJECTED">
-<term>XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected</term>
+<term>XFROUT_QUERY_REJECTED %1 client %2: request to transfer %3 rejected</term>
 <listitem><para>
 The xfrout process rejected (by REFUSED rcode) a request to transfer zone to
-given host. This is because of ACLs. The %1 and %2 represent the zone name and
-class, the %3 and %4 the IP address and port of the peer requesting the
-transfer.
+the given host. This is because of the ACLs.  The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
 </para></listitem>
 </varlistentry>
 
@@ -4792,6 +5110,55 @@ printing this message) will not start.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="XFROUT_XFR_TRANSFER_CHECK_ERROR">
+<term>XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4</term>
+<listitem><para>
+Pre-response check for an incoming XFR request failed unexpectedly.
+The most likely cause of this is a low-level error in the data
+source, but it may also be another, more unlikely, general error such
+as memory shortage.  Some detail of the error is also included in the
+message.  The xfrout server tries to return a SERVFAIL response in this case.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_DONE">
+<term>XFROUT_XFR_TRANSFER_DONE %1 client %2: transfer of %3 complete</term>
+<listitem><para>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_ERROR">
+<term>XFROUT_XFR_TRANSFER_ERROR %1 client %2: error transferring zone %3: %4</term>
+<listitem><para>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_FAILED">
+<term>XFROUT_XFR_TRANSFER_FAILED %1 client %2: transfer of %3 failed, rcode: %4</term>
+<listitem><para>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_STARTED">
+<term>XFROUT_XFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started</term>
+<listitem><para>
+A transfer out of the given zone has started.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="ZONEMGR_CCSESSION_ERROR">
 <term>ZONEMGR_CCSESSION_ERROR command channel session error: %1</term>
 <listitem><para>

+ 2 - 2
src/bin/Makefile.am

@@ -1,4 +1,4 @@
-SUBDIRS = bind10 bindctl cfgmgr loadzone msgq host cmdctl auth xfrin xfrout \
-	usermgr zonemgr stats tests resolver sockcreator dhcp6 dhcp4
+SUBDIRS = bind10 bindctl cfgmgr ddns loadzone msgq host cmdctl auth xfrin \
+	xfrout usermgr zonemgr stats tests resolver sockcreator dhcp4 dhcp6
 
 check-recursive: all-recursive

+ 1 - 0
src/bin/auth/Makefile.am

@@ -71,6 +71,7 @@ b10_auth_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
 b10_auth_LDADD += $(top_builddir)/src/lib/log/liblog.la
 b10_auth_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
 b10_auth_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
+b10_auth_LDADD += $(top_builddir)/src/lib/statistics/libstatistics.la
 b10_auth_LDADD += $(SQLITE_LIBS)
 
 # TODO: config.h.in is wrong because doesn't honor pkgdatadir

+ 7 - 7
src/bin/auth/auth_config.cc

@@ -35,7 +35,6 @@
 #include <server_common/portconfig.h>
 
 using namespace std;
-using boost::shared_ptr;
 using namespace isc::dns;
 using namespace isc::data;
 using namespace isc::datasrc;
@@ -56,7 +55,7 @@ public:
     virtual void commit();
 private:
     AuthSrv& server_;
-    vector<shared_ptr<AuthConfigParser> > datasources_;
+    vector<boost::shared_ptr<AuthConfigParser> > datasources_;
     set<string> configured_sources_;
 };
 
@@ -86,8 +85,8 @@ DatasourcesConfig::build(ConstElementPtr config_value) {
                       datasrc_type->stringValue() << "' already configured");
         }
         
-        shared_ptr<AuthConfigParser> datasrc_config =
-            shared_ptr<AuthConfigParser>(
+        boost::shared_ptr<AuthConfigParser> datasrc_config =
+            boost::shared_ptr<AuthConfigParser>(
                 createAuthConfigParser(server_, string("datasources/") +
                                        datasrc_type->stringValue(),
                                        true));
@@ -109,7 +108,8 @@ DatasourcesConfig::commit() {
     // Currently memory data source for class IN is the only possibility.
     server_.setInMemoryClient(RRClass::IN(), AuthSrv::InMemoryClientPtr());
 
-    BOOST_FOREACH(shared_ptr<AuthConfigParser> datasrc_config, datasources_) {
+    BOOST_FOREACH(boost::shared_ptr<AuthConfigParser> datasrc_config,
+                  datasources_) {
         datasrc_config->commit();
     }
 }
@@ -163,7 +163,7 @@ MemoryDatasourceConfig::build(ConstElementPtr config_value) {
             isc_throw(AuthConfigError, "Missing zone file for zone: "
                       << origin->str());
         }
-        shared_ptr<InMemoryZoneFinder> zone_finder(new
+        boost::shared_ptr<InMemoryZoneFinder> zone_finder(new
                                                    InMemoryZoneFinder(rrclass_,
             Name(origin->stringValue())));
         const result::Result result = memory_client_->addZone(zone_finder);
@@ -327,7 +327,7 @@ configureAuthServer(AuthSrv& server, ConstElementPtr config_set) {
                   "Null pointer is passed to configuration parser");
     }
 
-    typedef shared_ptr<AuthConfigParser> ParserPtr;
+    typedef boost::shared_ptr<AuthConfigParser> ParserPtr;
     vector<ParserPtr> parsers;
     typedef pair<string, ConstElementPtr> ConfigPair;
     try {

+ 15 - 14
src/bin/auth/auth_srv.cc

@@ -76,7 +76,6 @@ using namespace isc::xfr;
 using namespace isc::asiolink;
 using namespace isc::asiodns;
 using namespace isc::server_common::portconfig;
-using boost::shared_ptr;
 
 class AuthSrvImpl {
 private:
@@ -124,7 +123,7 @@ public:
     AddressList listen_addresses_;
 
     /// The TSIG keyring
-    const shared_ptr<TSIGKeyRing>* keyring_;
+    const boost::shared_ptr<TSIGKeyRing>* keyring_;
 
     /// Bind the ModuleSpec object in config_session_ with
     /// isc:config::ModuleSpec::validateStatistics.
@@ -219,20 +218,22 @@ class ConfigChecker : public SimpleCallback {
 public:
     ConfigChecker(AuthSrv* srv) : server_(srv) {}
     virtual void operator()(const IOMessage&) const {
-        if (server_->getConfigSession()->hasQueuedMsgs()) {
-            server_->getConfigSession()->checkCommand();
+        ModuleCCSession* cfg_session = server_->getConfigSession();
+        if (cfg_session != NULL && cfg_session->hasQueuedMsgs()) {
+            cfg_session->checkCommand();
         }
     }
 private:
     AuthSrv* server_;
 };
 
-AuthSrv::AuthSrv(const bool use_cache, AbstractXfroutClient& xfrout_client) :
-    impl_(new AuthSrvImpl(use_cache, xfrout_client)),
-    checkin_(new ConfigChecker(this)),
-    dns_lookup_(new MessageLookup(this)),
-    dns_answer_(new MessageAnswer(this))
-{}
+AuthSrv::AuthSrv(const bool use_cache, AbstractXfroutClient& xfrout_client)
+{
+    impl_ = new AuthSrvImpl(use_cache, xfrout_client);
+    checkin_ = new ConfigChecker(this);
+    dns_lookup_ = new MessageLookup(this);
+    dns_answer_ = new MessageAnswer(this);
+}
 
 void
 AuthSrv::stop() {
@@ -670,9 +671,9 @@ void
 AuthSrvImpl::incCounter(const int protocol) {
     // Increment query counter.
     if (protocol == IPPROTO_UDP) {
-        counters_.inc(AuthCounters::COUNTER_UDP_QUERY);
+        counters_.inc(AuthCounters::SERVER_UDP_QUERY);
     } else if (protocol == IPPROTO_TCP) {
-        counters_.inc(AuthCounters::COUNTER_TCP_QUERY);
+        counters_.inc(AuthCounters::SERVER_TCP_QUERY);
     } else {
         // unknown protocol
         isc_throw(Unexpected, "Unknown protocol: " << protocol);
@@ -765,7 +766,7 @@ bool AuthSrv::submitStatistics() const {
 }
 
 uint64_t
-AuthSrv::getCounter(const AuthCounters::CounterType type) const {
+AuthSrv::getCounter(const AuthCounters::ServerCounterType type) const {
     return (impl_->counters_.getCounter(type));
 }
 
@@ -785,6 +786,6 @@ AuthSrv::setDNSService(isc::asiodns::DNSService& dnss) {
 }
 
 void
-AuthSrv::setTSIGKeyRing(const shared_ptr<TSIGKeyRing>* keyring) {
+AuthSrv::setTSIGKeyRing(const boost::shared_ptr<TSIGKeyRing>* keyring) {
     impl_->keyring_ = keyring;
 }

+ 1 - 1
src/bin/auth/auth_srv.h

@@ -343,7 +343,7 @@ public:
     /// \param type Type of a counter to get the value of
     ///
     /// \return the value of the counter.
-    uint64_t getCounter(const AuthCounters::CounterType type) const;
+    uint64_t getCounter(const AuthCounters::ServerCounterType type) const;
 
     /**
      * \brief Set and get the addresses we listen on.

+ 2 - 1
src/bin/auth/benchmarks/Makefile.am

@@ -32,8 +32,9 @@ query_bench_LDADD += $(top_builddir)/src/lib/cc/libcc.la
 query_bench_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
 query_bench_LDADD += $(top_builddir)/src/lib/log/liblog.la
 query_bench_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
-query_bench_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
 query_bench_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
 query_bench_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
+query_bench_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
+query_bench_LDADD += $(top_builddir)/src/lib/statistics/libstatistics.la
 query_bench_LDADD += $(SQLITE_LIBS)
 

+ 2 - 3
src/bin/auth/command.cc

@@ -32,7 +32,6 @@
 #include <auth/command.h>
 
 using boost::scoped_ptr;
-using boost::shared_ptr;
 using namespace isc::auth;
 using namespace isc::config;
 using namespace isc::data;
@@ -136,7 +135,7 @@ public:
         // that doesn't block other server operations.
         // TODO: we may (should?) want to check the "last load time" and
         // the timestamp of the file and skip loading if the file isn't newer.
-        shared_ptr<InMemoryZoneFinder> zone_finder(
+        boost::shared_ptr<InMemoryZoneFinder> zone_finder(
             new InMemoryZoneFinder(old_zone_finder->getClass(),
                                    old_zone_finder->getOrigin()));
         zone_finder->load(old_zone_finder->getFileName());
@@ -147,7 +146,7 @@ public:
 
 private:
     // zone finder to be updated with the new file.
-    shared_ptr<InMemoryZoneFinder> old_zone_finder;
+    boost::shared_ptr<InMemoryZoneFinder> old_zone_finder;
 
     // A helper private method to parse and validate command parameters.
     // On success, it sets 'old_zone_finder' to the zone to be updated.

+ 33 - 3
src/bin/auth/query.cc

@@ -117,7 +117,6 @@ void
 Query::addNXDOMAINProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
     if (nsec->getRdataCount() == 0) {
         isc_throw(BadNSEC, "NSEC for NXDOMAIN is empty");
-        return;
     }
 
     // Add the NSEC proving NXDOMAIN to the authority section.
@@ -152,7 +151,6 @@ Query::addNXDOMAINProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
     if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
         fresult.rrset->getRdataCount() == 0) {
         isc_throw(BadNSEC, "Unexpected result for wildcard NXDOMAIN proof");
-        return;
     }
 
     // Add the (no-) wildcard proof only when it's different from the NSEC
@@ -178,7 +176,6 @@ Query::addWildcardProof(ZoneFinder& finder) {
     if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
         fresult.rrset->getRdataCount() == 0) {
         isc_throw(BadNSEC, "Unexpected result for wildcard proof");
-        return;
     }
     response_.addRRset(Message::SECTION_AUTHORITY,
                        boost::const_pointer_cast<RRset>(fresult.rrset),
@@ -186,6 +183,33 @@ Query::addWildcardProof(ZoneFinder& finder) {
 }
 
 void
+Query::addWildcardNXRRSETProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
+    // There should be one NSEC RR which was found in the zone to prove
+    // that there is no match for <QNAME,QTYPE> via wildcard expansion.
+    if (nsec->getRdataCount() == 0) {
+        isc_throw(BadNSEC, "NSEC for WILDCARD_NXRRSET is empty");
+    }
+    // Add this NSEC RR to authority section.
+    response_.addRRset(Message::SECTION_AUTHORITY,
+                      boost::const_pointer_cast<RRset>(nsec), dnssec_);
+    
+    const ZoneFinder::FindResult fresult =
+        finder.find(qname_, RRType::NSEC(), NULL,
+                    dnssec_opt_ | ZoneFinder::NO_WILDCARD);
+    if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
+        fresult.rrset->getRdataCount() == 0) {
+        isc_throw(BadNSEC, "Unexpected result for no match QNAME proof");
+    }
+   
+    if (nsec->getName() != fresult.rrset->getName()) {
+        // The second NSEC proves that there is no match for the QNAME
+        // itself; add it only when it differs from the NSEC added above.
+        response_.addRRset(Message::SECTION_AUTHORITY,
+                           boost::const_pointer_cast<RRset>(fresult.rrset),
+                           dnssec_);
+    }
+}
+    
+void
 Query::addAuthAdditional(ZoneFinder& finder) {
     // Fill in authority and addtional sections.
     ZoneFinder::FindResult ns_result = finder.find(finder.getOrigin(),
@@ -355,6 +379,12 @@ Query::process() {
                                        dnssec_);
                 }
                 break;
+            case ZoneFinder::WILDCARD_NXRRSET:
+                addSOA(*result.zone_finder);
+                if (dnssec_ && db_result.rrset) {
+                    addWildcardNXRRSETProof(zfinder, db_result.rrset);
+                }
+                break;
             default:
                 // This is basically a bug of the data source implementation,
                 // but could also happen in the middle of development where

+ 12 - 0
src/bin/auth/query.h

@@ -82,6 +82,18 @@ private:
     /// This corresponds to Section 3.1.3.3 of RFC 4035.
     void addWildcardProof(isc::datasrc::ZoneFinder& finder);
 
+    /// \brief Add one NSEC RR proving that there is no matching QNAME, and one
+    /// NSEC RR proving that there is no matching <QNAME,QTYPE> via wildcard
+    /// expansion.
+    ///
+    /// Add NSEC RRs that prove a WILDCARD_NXRRSET result.
+    /// This corresponds to Section 3.1.3.4 of RFC 4035.
+    /// \param finder The ZoneFinder through which the authority data for the
+    /// query is to be found.
+    /// \param nsec The RRset (NSEC RR) which proves that there is no matching
+    /// <QNAME,QTYPE>.
+    void addWildcardNXRRSETProof(isc::datasrc::ZoneFinder& finder,
+                                 isc::dns::ConstRRsetPtr nsec);
+    
     /// \brief Look up additional data (i.e., address records for the names
     /// included in NS or MX records) and add them to the additional section.
     ///

+ 39 - 22
src/bin/auth/statistics.cc

@@ -18,48 +18,67 @@
 #include <cc/data.h>
 #include <cc/session.h>
 
+#include <statistics/counter.h>
+#include <statistics/counter_dict.h>
+
 #include <sstream>
 #include <iostream>
 
+#include <boost/noncopyable.hpp>
+
 using namespace isc::auth;
+using namespace isc::statistics;
 
 // TODO: We need a namespace ("auth_server"?) to hold
 // AuthSrv and AuthCounters.
 
-class AuthCountersImpl {
-private:
-    // prohibit copy
-    AuthCountersImpl(const AuthCountersImpl& source);
-    AuthCountersImpl& operator=(const AuthCountersImpl& source);
+// TODO: Make use of wrappers like isc::dns::Opcode
+// for counter item type.
+
+class AuthCountersImpl : boost::noncopyable {
 public:
     AuthCountersImpl();
     ~AuthCountersImpl();
-    void inc(const AuthCounters::CounterType type);
+    void inc(const AuthCounters::ServerCounterType type);
+    void inc(const std::string& zone,
+             const AuthCounters::PerZoneCounterType type);
     bool submitStatistics() const;
     void setStatisticsSession(isc::cc::AbstractSession* statistics_session);
     void registerStatisticsValidator
     (AuthCounters::validator_type validator);
     // Currently for testing purpose only
-    uint64_t getCounter(const AuthCounters::CounterType type) const;
+    uint64_t getCounter(const AuthCounters::ServerCounterType type) const;
 private:
-    std::vector<uint64_t> counters_;
+    Counter server_counter_;
+    CounterDictionary per_zone_counter_;
     isc::cc::AbstractSession* statistics_session_;
     AuthCounters::validator_type validator_;
 };
 
 AuthCountersImpl::AuthCountersImpl() :
     // initialize counter
-    // size: AuthCounters::COUNTER_TYPES, initial value: 0
-    counters_(AuthCounters::COUNTER_TYPES, 0),
+    // size of server_counter_: AuthCounters::SERVER_COUNTER_TYPES
+    // size of per_zone_counter_: AuthCounters::PER_ZONE_COUNTER_TYPES
+    server_counter_(AuthCounters::SERVER_COUNTER_TYPES),
+    per_zone_counter_(AuthCounters::PER_ZONE_COUNTER_TYPES),
     statistics_session_(NULL)
-{}
+{
+    per_zone_counter_.addElement("_SERVER_");
+}
 
 AuthCountersImpl::~AuthCountersImpl()
 {}
 
 void
-AuthCountersImpl::inc(const AuthCounters::CounterType type) {
-    ++counters_.at(type);
+AuthCountersImpl::inc(const AuthCounters::ServerCounterType type) {
+    server_counter_.inc(type);
+}
+
+void
+AuthCountersImpl::inc(const std::string& zone,
+                      const AuthCounters::PerZoneCounterType type)
+{
+    per_zone_counter_[zone].inc(type);
 }
 
 bool
@@ -73,9 +92,9 @@ AuthCountersImpl::submitStatistics() const {
                       <<   "{ \"owner\": \"Auth\","
                       <<   "  \"data\":"
                       <<     "{ \"queries.udp\": "
-                      <<     counters_.at(AuthCounters::COUNTER_UDP_QUERY)
+                      <<     server_counter_.get(AuthCounters::SERVER_UDP_QUERY)
                       <<     ", \"queries.tcp\": "
-                      <<     counters_.at(AuthCounters::COUNTER_TCP_QUERY)
+                      <<     server_counter_.get(AuthCounters::SERVER_TCP_QUERY)
                       <<   " }"
                       <<   "}"
                       << "]}";
@@ -126,19 +145,17 @@ AuthCountersImpl::registerStatisticsValidator
 
 // Currently for testing purpose only
 uint64_t
-AuthCountersImpl::getCounter(const AuthCounters::CounterType type) const {
-    return (counters_.at(type));
+AuthCountersImpl::getCounter(const AuthCounters::ServerCounterType type) const {
+    return (server_counter_.get(type));
 }
 
 AuthCounters::AuthCounters() : impl_(new AuthCountersImpl())
 {}
 
-AuthCounters::~AuthCounters() {
-    delete impl_;
-}
+AuthCounters::~AuthCounters() {}
 
 void
-AuthCounters::inc(const AuthCounters::CounterType type) {
+AuthCounters::inc(const AuthCounters::ServerCounterType type) {
     impl_->inc(type);
 }
 
@@ -155,7 +172,7 @@ AuthCounters::setStatisticsSession
 }
 
 uint64_t
-AuthCounters::getCounter(const AuthCounters::CounterType type) const {
+AuthCounters::getCounter(const AuthCounters::ServerCounterType type) const {
     return (impl_->getCounter(type));
 }
 

+ 14 - 8
src/bin/auth/statistics.h

@@ -17,6 +17,7 @@
 
 #include <cc/session.h>
 #include <stdint.h>
+#include <boost/scoped_ptr.hpp>
 
 class AuthCountersImpl;
 
@@ -51,13 +52,18 @@ class AuthCountersImpl;
 /// \todo Consider overhead of \c AuthCounters::inc()
 class AuthCounters {
 private:
-    AuthCountersImpl* impl_;
+    boost::scoped_ptr<AuthCountersImpl> impl_;
 public:
     // Enum for the type of counter
-    enum CounterType {
-        COUNTER_UDP_QUERY = 0,  ///< COUNTER_UDP_QUERY: counter for UDP queries
-        COUNTER_TCP_QUERY = 1,  ///< COUNTER_TCP_QUERY: counter for TCP queries
-        COUNTER_TYPES = 2 ///< The number of defined counters
+    enum ServerCounterType {
+        SERVER_UDP_QUERY,       ///< SERVER_UDP_QUERY: counter for UDP queries
+        SERVER_TCP_QUERY,       ///< SERVER_TCP_QUERY: counter for TCP queries
+        SERVER_COUNTER_TYPES    ///< The number of defined counters
+    };
+    enum PerZoneCounterType {
+        ZONE_UDP_QUERY,         ///< ZONE_UDP_QUERY: counter for UDP queries
+        ZONE_TCP_QUERY,         ///< ZONE_TCP_QUERY: counter for TCP queries
+        PER_ZONE_COUNTER_TYPES  ///< The number of defined counters
     };
     /// The constructor.
     ///
@@ -77,9 +83,9 @@ public:
     ///
     /// \throw std::out_of_range \a type is unknown.
     ///
-    /// usage: counter.inc(CounterType::COUNTER_UDP_QUERY);
+    /// usage: counter.inc(AuthCounters::SERVER_UDP_QUERY);
     /// 
-    void inc(const CounterType type);
+    void inc(const ServerCounterType type);
 
     /// \brief Submit statistics counters to statistics module.
     ///
@@ -130,7 +136,7 @@ public:
     ///
     /// \return the value of the counter specified by \a type.
     ///
-    uint64_t getCounter(const AuthCounters::CounterType type) const;
+    uint64_t getCounter(const AuthCounters::ServerCounterType type) const;
 
     /// \brief A type of validation function for the specification in
     /// isc::config::ModuleSpec.

+ 1 - 0
src/bin/auth/tests/Makefile.am

@@ -65,6 +65,7 @@ run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
 run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
 run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+run_unittests_LDADD += $(top_builddir)/src/lib/statistics/libstatistics.la
 endif
 
 noinst_PROGRAMS = $(TESTS)

+ 12 - 13
src/bin/auth/tests/auth_srv_unittest.cc

@@ -54,7 +54,6 @@ using namespace isc::asiolink;
 using namespace isc::testutils;
 using namespace isc::server_common::portconfig;
 using isc::UnitTestUtil;
-using boost::shared_ptr;
 
 namespace {
 const char* const CONFIG_TESTDB =
@@ -251,7 +250,7 @@ TEST_F(AuthSrvTest, TSIGSigned) {
     createRequestPacket(request_message, IPPROTO_UDP, &context);
 
     // Run the message through the server
-    shared_ptr<TSIGKeyRing> keyring(new TSIGKeyRing);
+    boost::shared_ptr<TSIGKeyRing> keyring(new TSIGKeyRing);
     keyring->add(key);
     server.setTSIGKeyRing(&keyring);
     server.processMessage(*io_message, parse_message, response_obuffer,
@@ -285,7 +284,7 @@ TEST_F(AuthSrvTest, TSIGSignedBadKey) {
     createRequestPacket(request_message, IPPROTO_UDP, &context);
 
     // Process the message, but use a different key there
-    shared_ptr<TSIGKeyRing> keyring(new TSIGKeyRing);
+    boost::shared_ptr<TSIGKeyRing> keyring(new TSIGKeyRing);
     server.setTSIGKeyRing(&keyring);
     server.processMessage(*io_message, parse_message, response_obuffer,
                           &dnsserv);
@@ -317,7 +316,7 @@ TEST_F(AuthSrvTest, TSIGBadSig) {
     createRequestPacket(request_message, IPPROTO_UDP, &context);
 
     // Process the message, but use a different key there
-    shared_ptr<TSIGKeyRing> keyring(new TSIGKeyRing);
+    boost::shared_ptr<TSIGKeyRing> keyring(new TSIGKeyRing);
     keyring->add(TSIGKey("key:QkFECg==:hmac-sha1"));
     server.setTSIGKeyRing(&keyring);
     server.processMessage(*io_message, parse_message, response_obuffer,
@@ -353,7 +352,7 @@ TEST_F(AuthSrvTest, TSIGCheckFirst) {
     createRequestPacket(request_message, IPPROTO_UDP, &context);
 
     // Process the message, but use a different key there
-    shared_ptr<TSIGKeyRing> keyring(new TSIGKeyRing);
+    boost::shared_ptr<TSIGKeyRing> keyring(new TSIGKeyRing);
     keyring->add(TSIGKey("key:QkFECg==:hmac-sha1"));
     server.setTSIGKeyRing(&keyring);
     server.processMessage(*io_message, parse_message, response_obuffer,
@@ -779,7 +778,7 @@ TEST_F(AuthSrvTest, cacheSlots) {
 // Submit UDP normal query and check query counter
 TEST_F(AuthSrvTest, queryCounterUDPNormal) {
     // The counter should be initialized to 0.
-    EXPECT_EQ(0, server.getCounter(AuthCounters::COUNTER_UDP_QUERY));
+    EXPECT_EQ(0, server.getCounter(AuthCounters::SERVER_UDP_QUERY));
     // Create UDP message and process.
     UnitTestUtil::createRequestMessage(request_message, Opcode::QUERY(),
                                        default_qid, Name("example.com"),
@@ -788,13 +787,13 @@ TEST_F(AuthSrvTest, queryCounterUDPNormal) {
     server.processMessage(*io_message, parse_message, response_obuffer,
                           &dnsserv);
     // After processing UDP query, the counter should be 1.
-    EXPECT_EQ(1, server.getCounter(AuthCounters::COUNTER_UDP_QUERY));
+    EXPECT_EQ(1, server.getCounter(AuthCounters::SERVER_UDP_QUERY));
 }
 
 // Submit TCP normal query and check query counter
 TEST_F(AuthSrvTest, queryCounterTCPNormal) {
     // The counter should be initialized to 0.
-    EXPECT_EQ(0, server.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+    EXPECT_EQ(0, server.getCounter(AuthCounters::SERVER_TCP_QUERY));
     // Create TCP message and process.
     UnitTestUtil::createRequestMessage(request_message, Opcode::QUERY(),
                                        default_qid, Name("example.com"),
@@ -803,13 +802,13 @@ TEST_F(AuthSrvTest, queryCounterTCPNormal) {
     server.processMessage(*io_message, parse_message, response_obuffer,
                           &dnsserv);
     // After processing TCP query, the counter should be 1.
-    EXPECT_EQ(1, server.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+    EXPECT_EQ(1, server.getCounter(AuthCounters::SERVER_TCP_QUERY));
 }
 
 // Submit TCP AXFR query and check query counter
 TEST_F(AuthSrvTest, queryCounterTCPAXFR) {
     // The counter should be initialized to 0.
-    EXPECT_EQ(0, server.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+    EXPECT_EQ(0, server.getCounter(AuthCounters::SERVER_TCP_QUERY));
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
                          Name("example.com"), RRClass::IN(), RRType::AXFR());
     createRequestPacket(request_message, IPPROTO_TCP);
@@ -818,13 +817,13 @@ TEST_F(AuthSrvTest, queryCounterTCPAXFR) {
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
     EXPECT_FALSE(dnsserv.hasAnswer());
     // After processing TCP AXFR query, the counter should be 1.
-    EXPECT_EQ(1, server.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+    EXPECT_EQ(1, server.getCounter(AuthCounters::SERVER_TCP_QUERY));
 }
 
 // Submit TCP IXFR query and check query counter
 TEST_F(AuthSrvTest, queryCounterTCPIXFR) {
     // The counter should be initialized to 0.
-    EXPECT_EQ(0, server.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+    EXPECT_EQ(0, server.getCounter(AuthCounters::SERVER_TCP_QUERY));
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
                          Name("example.com"), RRClass::IN(), RRType::IXFR());
     createRequestPacket(request_message, IPPROTO_TCP);
@@ -833,7 +832,7 @@ TEST_F(AuthSrvTest, queryCounterTCPIXFR) {
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
     EXPECT_FALSE(dnsserv.hasAnswer());
     // After processing TCP IXFR query, the counter should be 1.
-    EXPECT_EQ(1, server.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+    EXPECT_EQ(1, server.getCounter(AuthCounters::SERVER_TCP_QUERY));
 }
 
 // class for queryCounterUnexpected test

+ 115 - 11
src/bin/auth/tests/query_unittest.cc

@@ -100,6 +100,22 @@ const char* const cnamewild_txt =
     "*.cnamewild.example.com. 3600 IN CNAME www.example.org.\n";
 const char* const nsec_cnamewild_txt = "*.cnamewild.example.com. "
     "3600 IN NSEC delegation.example.com. CNAME NSEC RRSIG\n";
+// Wildcard_nxrrset
+const char* const wild_txt_nxrrset =
+    "*.uwild.example.com. 3600 IN A 192.0.2.9\n";
+const char* const nsec_wild_txt_nxrrset =
+    "*.uwild.example.com. 3600 IN NSEC www.uwild.example.com. A NSEC RRSIG\n";
+const char* const wild_txt_next =
+    "www.uwild.example.com. 3600 IN A 192.0.2.11\n";
+const char* const nsec_wild_txt_next =
+    "www.uwild.example.com. 3600 IN NSEC *.wild.example.com. A NSEC RRSIG\n";
+// Wildcard empty
+const char* const empty_txt = "b.*.t.example.com. 3600 IN A 192.0.2.13\n";
+const char* const nsec_empty_txt =
+    "b.*.t.example.com. 3600 IN NSEC *.uwild.example.com. A NSEC RRSIG\n";
+const char* const empty_prev_txt = "t.example.com. 3600 IN A 192.0.2.15\n";
+const char* const nsec_empty_prev_txt =
+    "t.example.com. 3600 IN NSEC b.*.t.example.com. A NSEC RRSIG\n";
 // Used in NXDOMAIN proof test.  We are going to test some unusual case where
 // the best possible wildcard is below the "next domain" of the NSEC RR that
 // proves the NXDOMAIN, i.e.,
@@ -116,7 +132,6 @@ const char* const nsec_mx_txt =
     "mx.example.com. 3600 IN NSEC ).no.example.com. MX NSEC RRSIG\n";
 const char* const nsec_no_txt =
     ").no.example.com. 3600 IN NSEC nz.no.example.com. AAAA NSEC RRSIG\n";
-
 // We'll also test the case where a single NSEC proves both NXDOMAIN and the
 // non existence of wildcard.  The following records will be used for that
 // test.
@@ -179,7 +194,10 @@ public:
             other_zone_rrs << no_txt << nz_txt <<
             nsec_apex_txt << nsec_mx_txt << nsec_no_txt << nsec_nz_txt <<
             nsec_nxdomain_txt << nsec_www_txt << nonsec_a_txt <<
-            wild_txt << nsec_wild_txt << cnamewild_txt << nsec_cnamewild_txt;
+            wild_txt << nsec_wild_txt << cnamewild_txt << nsec_cnamewild_txt <<
+            wild_txt_nxrrset << nsec_wild_txt_nxrrset << wild_txt_next <<
+            nsec_wild_txt_next << empty_txt << nsec_empty_txt <<
+            empty_prev_txt << nsec_empty_prev_txt;
 
         masterLoad(zone_stream, origin_, rrclass_,
                    boost::bind(&MockZoneFinder::loadRRset, this, _1));
@@ -396,15 +414,47 @@ MockZoneFinder::find(const Name& name, const RRType& type,
     // hardcoded specific cases, ignoring other details such as canceling
     // due to the existence of closer name.
     if ((options & NO_WILDCARD) == 0) {
-        const Name wild_suffix("wild.example.com");
-        if (name.compare(wild_suffix).getRelation() ==
-            NameComparisonResult::SUBDOMAIN) {
-            domain = domains_.find(Name("*").concatenate(wild_suffix));
-            assert(domain != domains_.end());
-            RRsetStore::const_iterator found_rrset = domain->second.find(type);
-            assert(found_rrset != domain->second.end());
-            return (FindResult(WILDCARD,
-                               substituteWild(*found_rrset->second, name)));
+        const Name wild_suffix(name.split(1));
+        // Unit tests use these domains for the wildcard tests.
+        if (name.equals(Name("www.wild.example.com"))||
+           name.equals(Name("www1.uwild.example.com"))||
+           name.equals(Name("a.t.example.com"))) {
+            if (name.compare(wild_suffix).getRelation() ==
+                NameComparisonResult::SUBDOMAIN) {
+                domain = domains_.find(Name("*").concatenate(wild_suffix));
+                // Matched the QNAME
+                if (domain != domains_.end()) {
+                   RRsetStore::const_iterator found_rrset =
+                       domain->second.find(type);
+                   // Matched the QTYPE
+                   if(found_rrset != domain->second.end()) {
+                    return (FindResult(WILDCARD,
+                            substituteWild(*found_rrset->second, name)));
+                   } else {
+                   // No match for the QTYPE; this is the WILDCARD_NXRRSET case
+                     found_rrset = domain->second.find(RRType::NSEC());
+                     assert(found_rrset != domain->second.end());
+                     Name newName = Name("*").concatenate(wild_suffix);
+                     return (FindResult(WILDCARD_NXRRSET,
+                           substituteWild(*found_rrset->second,newName)));
+                   }
+                 } else {
+                    // This is empty non terminal name case on wildcard.
+                    Name emptyName = Name("*").concatenate(wild_suffix);
+                    for (Domains::reverse_iterator it = domains_.rbegin();
+                        it != domains_.rend();
+                        ++it) {
+                            RRsetStore::const_iterator nsec_it;
+                            if ((*it).first < emptyName &&
+                            (nsec_it = (*it).second.find(RRType::NSEC()))
+                            != (*it).second.end()) {
+                                return (FindResult(WILDCARD_NXRRSET,
+                                                   (*nsec_it).second));
+                            }
+                        }
+                }
+                return (FindResult(WILDCARD_NXRRSET,RRsetPtr()));
+             }
         }
         const Name cnamewild_suffix("cnamewild.example.com");
         if (name.compare(cnamewild_suffix).getRelation() ==
@@ -924,6 +974,60 @@ TEST_F(QueryTest, badWildcardProof3) {
                  Query::BadNSEC);
 }
 
+TEST_F(QueryTest, wildcardNxrrsetWithDuplicateNSEC) {
+    // WILDCARD_NXRRSET with DNSSEC proof.  We should have SOA, NSEC that proves the
+    // NXRRSET and their RRSIGs. In this case we only need one NSEC,
+    // which proves both NXDOMAIN and the non-existence of the wildcard RRsets.
+    Query(memory_client, Name("www.wild.example.com"), RRType::TXT(), response,
+          true).process();
+
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
+                  (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("SOA") + "\n" +
+                   string(nsec_wild_txt) +
+                   string("*.wild.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC")+"\n").c_str(),
+                  NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, wildcardNxrrsetWithNSEC) {
+    // WILDCARD_NXRRSET with DNSSEC proof.  We should have SOA, NSEC that proves the
+    // NXRRSET and their RRSIGs. In this case we need two NSEC RRs,
+    // one proves NXDOMAIN and the other proves the non-existence of the wildcard RRsets.
+    Query(memory_client, Name("www1.uwild.example.com"), RRType::TXT(), response,
+          true).process();
+
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 6, 0, NULL,
+                  (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("SOA") + "\n" +
+                   string(nsec_wild_txt_nxrrset) +
+                   string("*.uwild.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC")+"\n" +
+                   string(nsec_wild_txt_next) +
+                   string("www.uwild.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC") + "\n").c_str(),
+                  NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, wildcardEmptyWithNSEC) {
+    // WILDCARD_EMPTY with DNSSEC proof.  We should have SOA, NSEC that proves the
+    // NXDOMAIN and their RRSIGs. In this case we need two NSEC RRs,
+    // one proves NXDOMAIN and the other proves the non-existence of the wildcard.
+    Query(memory_client, Name("a.t.example.com"), RRType::A(), response,
+          true).process();
+
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 6, 0, NULL,
+                  (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("SOA") + "\n" +
+                   string(nsec_empty_prev_txt) +
+                   string("t.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC")+"\n" +
+                   string(nsec_empty_txt) +
+                   string("b.*.t.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC")+"\n").c_str(),
+                  NULL, mock_finder->getOrigin());
+}
+
 /*
  * This tests that when there's no SOA and we need a negative answer. It should
  * throw in that case.

+ 19 - 20
src/bin/auth/tests/statistics_unittest.cc

@@ -150,25 +150,24 @@ AuthCountersTest::MockSession::setThrowSessionTimeout(bool flag) {
 
 TEST_F(AuthCountersTest, incrementUDPCounter) {
     // The counter should be initialized to 0.
-    EXPECT_EQ(0, counters.getCounter(AuthCounters::COUNTER_UDP_QUERY));
-    EXPECT_NO_THROW(counters.inc(AuthCounters::COUNTER_UDP_QUERY));
+    EXPECT_EQ(0, counters.getCounter(AuthCounters::SERVER_UDP_QUERY));
+    EXPECT_NO_THROW(counters.inc(AuthCounters::SERVER_UDP_QUERY));
     // After increment, the counter should be 1.
-    EXPECT_EQ(1, counters.getCounter(AuthCounters::COUNTER_UDP_QUERY));
+    EXPECT_EQ(1, counters.getCounter(AuthCounters::SERVER_UDP_QUERY));
 }
 
 TEST_F(AuthCountersTest, incrementTCPCounter) {
     // The counter should be initialized to 0.
-    EXPECT_EQ(0, counters.getCounter(AuthCounters::COUNTER_TCP_QUERY));
-    EXPECT_NO_THROW(counters.inc(AuthCounters::COUNTER_TCP_QUERY));
+    EXPECT_EQ(0, counters.getCounter(AuthCounters::SERVER_TCP_QUERY));
+    EXPECT_NO_THROW(counters.inc(AuthCounters::SERVER_TCP_QUERY));
     // After increment, the counter should be 1.
-    EXPECT_EQ(1, counters.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+    EXPECT_EQ(1, counters.getCounter(AuthCounters::SERVER_TCP_QUERY));
 }
 
 TEST_F(AuthCountersTest, incrementInvalidCounter) {
-    // Expect to throw isc::InvalidParameter if the type of the counter is
-    // invalid.
-    EXPECT_THROW(counters.inc(AuthCounters::COUNTER_TYPES),
-                 std::out_of_range);
+    // Expect to throw an isc::OutOfRange exception if the counter type is invalid.
+    EXPECT_THROW(counters.inc(AuthCounters::SERVER_COUNTER_TYPES),
+                 isc::OutOfRange);
 }
 
 TEST_F(AuthCountersTest, submitStatisticsWithoutSession) {
@@ -195,14 +194,14 @@ TEST_F(AuthCountersTest, submitStatisticsWithoutValidator) {
     // Validate if it submits correct data.
 
     // Counters should be initialized to 0.
-    EXPECT_EQ(0, counters.getCounter(AuthCounters::COUNTER_UDP_QUERY));
-    EXPECT_EQ(0, counters.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+    EXPECT_EQ(0, counters.getCounter(AuthCounters::SERVER_UDP_QUERY));
+    EXPECT_EQ(0, counters.getCounter(AuthCounters::SERVER_TCP_QUERY));
 
     // UDP query counter is set to 2.
-    counters.inc(AuthCounters::COUNTER_UDP_QUERY);
-    counters.inc(AuthCounters::COUNTER_UDP_QUERY);
+    counters.inc(AuthCounters::SERVER_UDP_QUERY);
+    counters.inc(AuthCounters::SERVER_UDP_QUERY);
     // TCP query counter is set to 1.
-    counters.inc(AuthCounters::COUNTER_TCP_QUERY);
+    counters.inc(AuthCounters::SERVER_TCP_QUERY);
     counters.submitStatistics();
 
     // Destination is "Stats".
@@ -237,14 +236,14 @@ TEST_F(AuthCountersTest, submitStatisticsWithValidator) {
     counters.registerStatisticsValidator(validator);
 
     // Counters should be initialized to 0.
-    EXPECT_EQ(0, counters.getCounter(AuthCounters::COUNTER_UDP_QUERY));
-    EXPECT_EQ(0, counters.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+    EXPECT_EQ(0, counters.getCounter(AuthCounters::SERVER_UDP_QUERY));
+    EXPECT_EQ(0, counters.getCounter(AuthCounters::SERVER_TCP_QUERY));
 
     // UDP query counter is set to 2.
-    counters.inc(AuthCounters::COUNTER_UDP_QUERY);
-    counters.inc(AuthCounters::COUNTER_UDP_QUERY);
+    counters.inc(AuthCounters::SERVER_UDP_QUERY);
+    counters.inc(AuthCounters::SERVER_UDP_QUERY);
     // TCP query counter is set to 1.
-    counters.inc(AuthCounters::COUNTER_TCP_QUERY);
+    counters.inc(AuthCounters::SERVER_TCP_QUERY);
 
     // checks the value returned by submitStatistics
     EXPECT_TRUE(counters.submitStatistics());

Diff file omitted because it is too large
+ 201 - 19
src/bin/bind10/bind10.8


+ 199 - 1
src/bin/bind10/bind10.xml

@@ -20,7 +20,7 @@
 <refentry>
 
   <refentryinfo>
-    <date>August 11, 2011</date>
+    <date>November 23, 2011</date>
   </refentryinfo>
 
   <refmeta>
@@ -218,6 +218,204 @@ TODO: configuration section
 -->
 
   <refsect1>
+    <title>CONFIGURATION AND COMMANDS</title>
+
+    <para>
+      The configuration provides settings for the components that
+      <command>bind10</command> manages under
+      <varname>/Boss/components/</varname>.
+      The default elements are:
+    </para>
+
+    <itemizedlist>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-auth</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-cmdctl</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/setuid</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-stats</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-stats-httpd</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-xfrin</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-xfrout</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-zonemgr</varname> </para>
+      </listitem>
+
+    </itemizedlist>
+
+    <para>
+      (Note that the startup of <command>b10-sockcreator</command>,
+      <command>b10-cfgmgr</command>, and <command>b10-msgq</command>
+      is not configurable. It is hardcoded and <command>bind10</command>
+      will not run without them.)
+    </para>
+
+    <para>
+      These named sets (listed above) contain the following settings:
+    </para>
+
+    <variablelist>
+
+      <varlistentry>
+        <term><varname>address</varname></term>
+        <listitem>
+	  <para>The name used for communicating with it on the message
+	  bus.</para>
+<!-- NOTE: vorner said:
+These can be null, because the components are special ones, and
+the special class there already knows the address. It is (I hope)
+explained in the guide. I'd like to get rid of the special components
+sometime and I'd like it to teach to guess the address.
+-->
+        </listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><varname>kind</varname></term>
+        <listitem>
+          <para>
+            This defines how required a component is.
+            The possible settings for <varname>kind</varname> are:
+            <varname>core</varname> (the system won't start if the
+            component fails to start, and <command>bind10</command> will
+            shut down if a <quote>core</quote> component crashes),
+            <varname>dispensable</varname> (<command>bind10</command>
+            will restart a failing component),
+            and
+            <varname>needed</varname> (<command>bind10</command>
+            will shut down if the component fails to start initially, but
+            if it crashes later, it will attempt to restart it).
+            This setting is required.
+<!-- TODO: formatting -->
+          </para>
+        </listitem>
+      </varlistentry>
+
+<!--
+TODO: currently not used
+      <varlistentry>
+        <term> <varname>params</varname> </term>
+        <listitem>
+          <para>
+list
+</para>
+        </listitem>
+      </varlistentry>
+-->
+
+      <varlistentry>
+        <term> <varname>priority</varname> </term>
+        <listitem>
+          <para>This is an integer. <command>bind10</command>
+            will start the components with the largest priority numbers first.
+          </para>
+        </listitem>
+      </varlistentry>
+
+      <varlistentry>
+          <term> <varname>process</varname> </term>
+        <listitem>
+          <para>This is the filename of the executable to be started.
+            If not defined, then <command>bind10</command> will
+            use the component name instead.
+          </para>
+        </listitem>
+      </varlistentry>
+
+      <varlistentry>
+          <term> <varname>special</varname> </term>
+        <listitem>
+          <para>
+            This defines whether the component is started in a special
+            way.
+<!--
+TODO: document this ... but maybe some of these will be removed
+once we get rid of some using switches for components?
+
+auth
+cfgmgr
+cmdctl
+msgq
+resolver
+setuid
+sockcreator
+xfrin
+-->
+
+</para>
+        </listitem>
+      </varlistentry>
+
+    </variablelist>
+
+<!-- TODO: formating -->
+    <para>
+      The <varname>Boss</varname> configuration commands are:
+    </para>
+<!-- TODO: let's just let bind10 be known as bind10 and not Boss -->
+
+    <para>
+      <command>getstats</command> tells <command>bind10</command>
+      to send its statistics data to the <command>b10-stats</command>
+      daemon.
+      This is an internal command and not exposed to the administrator.
+<!-- not defined in spec -->
+<!-- TODO: explain difference with sendstat -->
+    </para>
+
+    <para>
+      <command>ping</command> is used to check the connection with the
+      <command>bind10</command> daemon.
+      It returns the text <quote>pong</quote>.
+    </para>
+
+    <para>
+      <command>sendstats</command> tells <command>bind10</command>
+      to send its statistics data to the <command>b10-stats</command>
+      daemon immediately.
+<!-- TODO: compare with internal command getstats? -->
+    </para>
+
+    <para>
+      <command>show_processes</command> lists the current processes
+      managed by <command>bind10</command>.
+      The output is an array in JSON format containing the process
+      ID and the name for each.
+<!-- TODO: what is name? -->
+<!-- TODO: change to JSON object format? -->
+<!-- TODO: ticket #1406 -->
+    </para>
+
+    <para>
+      <command>shutdown</command> tells <command>bind10</command>
+      to shut down the BIND 10 servers.
+      It will tell each process it manages to shut down and, when
+      complete, <command>bind10</command> will exit.
+    </para>
+
+  </refsect1>
+
+  <refsect1>
     <title>STATISTICS DATA</title>
 
     <para>

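For reference, the component settings documented in the section above map directly onto the JSON entries kept under /Boss/components/ (compare the bob.spec change further below). The following is a minimal sketch, expressed here as a Python dict mirroring that JSON structure; the concrete values are illustrative only.

    # Sketch of a single /Boss/components/ entry (illustrative values only).
    component_config = {
        "b10-zonemgr": {
            "address": "Zonemgr",      # name used on the message bus
            "kind": "dispensable",     # required: core, dispensable or needed
            "priority": 5,             # components with larger numbers start first
            "process": "b10-zonemgr",  # optional; defaults to the component name
        },
    }
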
+ 11 - 0
src/bin/bind10/bind10_messages.mes

@@ -99,6 +99,12 @@ The boss module is sending a kill signal to process with the given name,
 as part of the process of killing all started processes during a failed
 startup, as described for BIND10_KILLING_ALL_PROCESSES
 
+% BIND10_LOST_SOCKET_CONSUMER consumer %1 of sockets disconnected, considering all its sockets closed
+A connection from one of the applications which requested a socket was
+closed. This means the application has terminated, so all the sockets it was
+using are now closed and the bind10 process can release them as well, unless
+the same sockets are used by another application.
+
 % BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start
 There already appears to be a message bus daemon running. Either an
 old process was not shut down correctly, and needs to be killed, or
@@ -110,6 +116,11 @@ While listening on the message bus channel for messages, it suddenly
 disappeared. The msgq daemon may have died. This might lead to an
 inconsistent state of the system, and BIND 10 will now shut down.
 
+% BIND10_NO_SOCKET couldn't send a socket for token %1 because of error: %2
+An error occurred when the bind10 process was asked to send a socket file
+descriptor. The error is mentioned; the most common reason is that the request
+is invalid and may not have come from a BIND 10 process at all.
+
 % BIND10_PROCESS_ENDED process %2 of %1 ended with status %3
 This indicates a process started previously terminated. The process id
 and component owning the process are indicated, as well as the exit code.

+ 255 - 98
src/bin/bind10/bind10_src.py.in

@@ -72,6 +72,9 @@ import isc.log
 from isc.log_messages.bind10_messages import *
 import isc.bind10.component
 import isc.bind10.special_component
+import isc.bind10.socket_cache
+import libutil_io_python
+import tempfile
 
 isc.log.init("b10-boss")
 logger = isc.log.Logger("boss")
@@ -81,6 +84,10 @@ logger = isc.log.Logger("boss")
 DBG_PROCESS = logger.DBGLVL_TRACE_BASIC
 DBG_COMMANDS = logger.DBGLVL_TRACE_DETAIL
 
+# Messages sent over the unix domain socket to indicate whether a real socket follows
+CREATOR_SOCKET_OK = "1\n"
+CREATOR_SOCKET_UNAVAILABLE = "0\n"
+
 # Assign this process some longer name
 isc.util.process.rename(sys.argv[0])
 
@@ -241,6 +248,12 @@ class BoB:
         # If -v was set, enable full debug logging.
         if self.verbose:
             logger.set_severity("DEBUG", 99)
+        # This is set in init_socket_srv
+        self._socket_path = None
+        self._socket_cache = None
+        self._tmpdir = None
+        self._srv_socket = None
+        self._unix_sockets = {}
 
     def __propagate_component_config(self, config):
         comps = dict(config)
@@ -315,6 +328,18 @@ class BoB:
             elif command == "show_processes":
                 answer = isc.config.ccsession. \
                     create_answer(0, self.get_processes())
+            elif command == "get_socket":
+                answer = self._get_socket(args)
+            elif command == "drop_socket":
+                if "token" not in args:
+                    answer = isc.config.ccsession. \
+                        create_answer(1, "Missing token parameter")
+                else:
+                    try:
+                        self._socket_cache.drop_socket(args["token"])
+                        answer = isc.config.ccsession.create_answer(0)
+                    except Exception as e:
+                        answer = isc.config.ccsession.create_answer(1, str(e))
             else:
                 answer = isc.config.ccsession.create_answer(1,
                                                             "Unknown command")
@@ -574,33 +599,6 @@ class BoB:
         # ... and start
         return self.start_process("b10-resolver", resargs, self.c_channel_env)
 
-    def __ld_path_hack(self):
-        # XXX: a quick-hack workaround.  xfrin/out will implicitly use
-        # dynamically loadable data source modules, which will be installed in
-        # $(libdir).
-        # On some OSes (including MacOS X and *BSDs) the main process (python)
-        # cannot find the modules unless they are located in a common shared
-        # object path or a path in the (DY)LD_LIBRARY_PATH.  We should seek
-        # a cleaner solution, but for a short term workaround we specify the
-        # path here, unconditionally, and without even bothering which
-        # environment variable should be used.
-        #
-        # We reuse the ADD_LIBEXEC_PATH variable to see whether we need to
-        # do this, as the conditions that make this workaround needed are
-        # the same as for the libexec path addition
-        # TODO: Once #1292 is finished, remove this method and the special
-        # component, use it as normal component.
-        env = dict(self.c_channel_env)
-        if ADD_LIBEXEC_PATH:
-            cur_path = os.getenv('DYLD_LIBRARY_PATH')
-            cur_path = '' if cur_path is None else ':' + cur_path
-            env['DYLD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
-
-            cur_path = os.getenv('LD_LIBRARY_PATH')
-            cur_path = '' if cur_path is None else ':' + cur_path
-            env['LD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
-        return env
-
     def start_cmdctl(self):
         """
             Starts the command control process
@@ -613,22 +611,6 @@ class BoB:
         return self.start_process("b10-cmdctl", args, self.c_channel_env,
                                   self.cmdctl_port)
 
-    def start_xfrin(self):
-        # Set up the command arguments.
-        args = ['b10-xfrin']
-        if self.verbose:
-            args += ['-v']
-
-        return self.start_process("b10-xfrin", args, self.__ld_path_hack())
-
-    def start_xfrout(self):
-        # Set up the command arguments.
-        args = ['b10-xfrout']
-        if self.verbose:
-            args += ['-v']
-
-        return self.start_process("b10-xfrout", args, self.__ld_path_hack())
-
     def start_all_components(self):
         """
             Starts up all the components.  Any exception generated during the
@@ -700,7 +682,7 @@ class BoB:
 
         If we did not start yet, it raises an exception, which is meant
         to propagate through the component and configurator to the startup
-        routine and abort the startup imediatelly. If it is started up already,
+        routine and abort the startup immediately. If it is started up already,
         we just mark it so we terminate soon.
 
         It does set the exit code in both cases.
@@ -812,6 +794,209 @@ class BoB:
 
         return next_restart_time
 
+    def _get_socket(self, args):
+        """
+        Implementation of the get_socket CC command. It asks the cache
+        to provide the token and sends the information back.
+        """
+        try:
+            try:
+                addr = isc.net.parse.addr_parse(args['address'])
+                port = isc.net.parse.port_parse(args['port'])
+                protocol = args['protocol']
+                if protocol not in ['UDP', 'TCP']:
+                    raise ValueError("Protocol must be either UDP or TCP")
+                share_mode = args['share_mode']
+                if share_mode not in ['ANY', 'SAMEAPP', 'NO']:
+                    raise ValueError("Share mode must be one of ANY, SAMEAPP" +
+                                     " or NO")
+                share_name = args['share_name']
+            except KeyError as ke:
+                return \
+                    isc.config.ccsession.create_answer(1,
+                                                       "Missing parameter " +
+                                                       str(ke))
+
+            # FIXME: This call contains blocking IPC. It is expected to be
+            # short, but if it turns out to be a problem, we'll need to do
+            # something about it.
+            token = self._socket_cache.get_token(protocol, addr, port,
+                                                 share_mode, share_name)
+            return isc.config.ccsession.create_answer(0, {
+                'token': token,
+                'path': self._socket_path
+            })
+        except Exception as e:
+            return isc.config.ccsession.create_answer(1, str(e))
+
+    def socket_request_handler(self, token, unix_socket):
+        """
+        This function handles a token that comes over a unix_domain socket.
+        The function looks into the _socket_cache and sends the socket
+        identified by the token back over the unix_socket.
+        """
+        try:
+            fd = self._socket_cache.get_socket(token, unix_socket.fileno())
+            # FIXME: These two calls are blocking in their nature. An OS-level
+            # buffer is likely to be large enough to hold all this data, but
+            # if it weren't and the remote application got stuck, we would have
+            # a problem. If such problems appear, we should do something
+            # about it.
+            unix_socket.sendall(CREATOR_SOCKET_OK)
+            libutil_io_python.send_fd(unix_socket.fileno(), fd)
+        except Exception as e:
+            logger.info(BIND10_NO_SOCKET, token, e)
+            unix_socket.sendall(CREATOR_SOCKET_UNAVAILABLE)
+
+    def socket_consumer_dead(self, unix_socket):
+        """
+        This function handles when a unix_socket closes. This means all
+        sockets sent to it are to be considered closed. This function signals
+        so to the _socket_cache.
+        """
+        logger.info(BIND10_LOST_SOCKET_CONSUMER, unix_socket.fileno())
+        try:
+            self._socket_cache.drop_application(unix_socket.fileno())
+        except ValueError:
+            # This means the application holds no sockets. It's harmless, as it
+            # can happen in real life - for example, it requests a socket, but
+            # get_socket doesn't find it, so the application dies. It should be
+            # rare, though.
+            pass
+
+    def set_creator(self, creator):
+        """
+        Registers a socket creator with the boss. The socket creator is not
+        used directly, but through a cache. The cache is created in this
+        method.
+
+        If called more than once, it raises a ValueError.
+        """
+        if self._socket_cache is not None:
+            raise ValueError("A creator was inserted previously")
+        self._socket_cache = isc.bind10.socket_cache.Cache(creator)
+
+    def init_socket_srv(self):
+        """
+        Creates and listens on a unix-domain socket to be able to send out
+        the sockets.
+
+        This method should be called after switching user, or the switched
+        applications won't be able to access the socket.
+        """
+        self._srv_socket = socket.socket(socket.AF_UNIX)
+        # We create a temporary directory somewhere safe and unique, to avoid
+        # the need to find the place ourselves or bother users. Also, this
+        # secures the socket on some platforms, as it creates a private
+        # directory.
+        self._tmpdir = tempfile.mkdtemp()
+        # Get the name
+        self._socket_path = os.path.join(self._tmpdir, "sockcreator")
+        # And bind the socket to the name
+        self._srv_socket.bind(self._socket_path)
+        self._srv_socket.listen(5)
+
+    def remove_socket_srv(self):
+        """
+        Closes and removes the listening socket and the directory where it
+        lives, as we created both.
+
+        It does nothing if the _srv_socket is not set (e.g. it was not yet
+        initialized).
+        """
+        if self._srv_socket is not None:
+            self._srv_socket.close()
+            os.remove(self._socket_path)
+            os.rmdir(self._tmpdir)
+
+    def _srv_accept(self):
+        """
+        Accept a connection on the listening unix domain socket and add it to
+        the others we care about.
+        """
+        socket = self._srv_socket.accept()
+        self._unix_sockets[socket.fileno()] = (socket, b'')
+
+    def _socket_data(self, socket_fileno):
+        """
+        This is called when a socket identified by the socket_fileno needs
+        attention. We try to read data from there. If it is closed, we remove
+        it.
+        """
+        (sock, previous) = self._unix_sockets[socket_fileno]
+        while True:
+            try:
+                data = sock.recv(1, socket.MSG_DONTWAIT)
+            except socket.error as se:
+                # These two might be different on some systems
+                if se.errno == errno.EAGAIN or se.errno == errno.EWOULDBLOCK:
+                    # No more data now. Oh, well, just store what we have.
+                    self._unix_sockets[socket_fileno] = (sock, previous)
+                    return
+                else:
+                    data = b'' # Pretend it got closed
+            if len(data) == 0: # The socket got to its end
+                del self._unix_sockets[socket_fileno]
+                self.socket_consumer_dead(sock)
+                sock.close()
+                return
+            else:
+                if data == b"\n":
+                    # Handle this token and clear it
+                    self.socket_request_handler(previous, sock)
+                    previous = b''
+                else:
+                    previous += data
+
+    def run(self, wakeup_fd):
+        """
+        The main loop, waiting for sockets, commands and dead processes.
+        Runs as long as the runnable is true.
+
+        The wakeup_fd descriptor is the read end of pipe where CHLD signal
+        handler writes.
+        """
+        ccs_fd = self.ccs.get_socket().fileno()
+        while self.runnable:
+            # clean up any processes that exited
+            self.reap_children()
+            next_restart = self.restart_processes()
+            if next_restart is None:
+                wait_time = None
+            else:
+                wait_time = max(next_restart - time.time(), 0)
+
+            # select() can raise EINTR when a signal arrives,
+            # even if they are resumable, so we have to catch
+            # the exception
+            try:
+                (rlist, wlist, xlist) = \
+                    select.select([wakeup_fd, ccs_fd,
+                                   self._srv_socket.fileno()] +
+                                   list(self._unix_sockets.keys()), [], [],
+                                  wait_time)
+            except select.error as err:
+                if err.args[0] == errno.EINTR:
+                    (rlist, wlist, xlist) = ([], [], [])
+                else:
+                    logger.fatal(BIND10_SELECT_ERROR, err)
+                    break
+
+            for fd in rlist + xlist:
+                if fd == ccs_fd:
+                    try:
+                        self.ccs.check_command()
+                    except isc.cc.session.ProtocolError:
+                        logger.fatal(BIND10_MSGQ_DISAPPEARED)
+                        self.runnable = False
+                        break
+                elif fd == wakeup_fd:
+                    os.read(wakeup_fd, 32)
+                elif fd == self._srv_socket.fileno():
+                    self._srv_accept()
+                elif fd in self._unix_sockets:
+                    self._socket_data(fd)
+
 # global variables, needed for signal handlers
 options = None
 boss_of_bind = None
@@ -974,60 +1159,32 @@ def main():
     # Block SIGPIPE, as we don't want it to end this process
     signal.signal(signal.SIGPIPE, signal.SIG_IGN)
 
-    # Go bob!
-    boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
-                       options.config_file, options.nocache, options.verbose,
-                       setuid, username, options.cmdctl_port,
-                       options.wait_time)
-    startup_result = boss_of_bind.startup()
-    if startup_result:
-        logger.fatal(BIND10_STARTUP_ERROR, startup_result)
-        sys.exit(1)
-    logger.info(BIND10_STARTUP_COMPLETE)
-    dump_pid(options.pid_file)
-
-    # In our main loop, we check for dead processes or messages 
-    # on the c-channel.
-    wakeup_fd = wakeup_pipe[0]
-    ccs_fd = boss_of_bind.ccs.get_socket().fileno()
-    while boss_of_bind.runnable:
-        # clean up any processes that exited
-        boss_of_bind.reap_children()
-        next_restart = boss_of_bind.restart_processes()
-        if next_restart is None:
-            wait_time = None
-        else:
-            wait_time = max(next_restart - time.time(), 0)
-
-        # select() can raise EINTR when a signal arrives, 
-        # even if they are resumable, so we have to catch
-        # the exception
-        try:
-            (rlist, wlist, xlist) = select.select([wakeup_fd, ccs_fd], [], [], 
-                                                  wait_time)
-        except select.error as err:
-            if err.args[0] == errno.EINTR:
-                (rlist, wlist, xlist) = ([], [], [])
-            else:
-                logger.fatal(BIND10_SELECT_ERROR, err)
-                break
-
-        for fd in rlist + xlist:
-            if fd == ccs_fd:
-                try:
-                    boss_of_bind.ccs.check_command()
-                except isc.cc.session.ProtocolError:
-                    logger.fatal(BIND10_MSGQ_DISAPPEARED)
-                    self.runnable = False
-                    break
-            elif fd == wakeup_fd:
-                os.read(wakeup_fd, 32)
-
-    # shutdown
-    signal.signal(signal.SIGCHLD, signal.SIG_DFL)
-    boss_of_bind.shutdown()
-    unlink_pid_file(options.pid_file)
-    sys.exit(0)
+    try:
+        # Go bob!
+        boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
+                           options.config_file, options.nocache,
+                           options.verbose, setuid, username,
+                           options.cmdctl_port, options.wait_time)
+        startup_result = boss_of_bind.startup()
+        if startup_result:
+            logger.fatal(BIND10_STARTUP_ERROR, startup_result)
+            sys.exit(1)
+        boss_of_bind.init_socket_srv()
+        logger.info(BIND10_STARTUP_COMPLETE)
+        dump_pid(options.pid_file)
+
+        # Let it run
+        boss_of_bind.run(wakeup_pipe[0])
+
+        # shutdown
+        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+        boss_of_bind.shutdown()
+    finally:
+        # Clean up the filesystem
+        unlink_pid_file(options.pid_file)
+        if boss_of_bind is not None:
+            boss_of_bind.remove_socket_srv()
+    sys.exit(boss_of_bind.exitcode)
 
 if __name__ == "__main__":
     main()
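
The code added above defines a small protocol for handing sockets to other processes: an application obtains a token and a path from the get_socket command, connects to the unix domain socket at that path, writes the token terminated by a newline, reads a one-line status (CREATOR_SOCKET_OK, i.e. "1\n", or CREATOR_SOCKET_UNAVAILABLE, "0\n"), and then receives the file descriptor as ancillary data. The sketch below shows a hypothetical client side using only the Python standard library; receiving the descriptor via socket.recvmsg() is an assumption here (the in-tree sender uses libutil_io_python.send_fd()), and the path/token values are assumed to come from a prior get_socket answer.

    import array
    import socket

    def fetch_socket(path, token):
        """Minimal client sketch: fetch a socket from the boss.

        `path` and `token` are assumed to come from a previous "get_socket"
        command answer.  Returns the received file descriptor.
        """
        client = socket.socket(socket.AF_UNIX)
        client.connect(path)
        # The boss reads single bytes until a newline, which terminates the token.
        client.sendall(token.encode() + b"\n")
        # The boss answers "1\n" (a socket follows) or "0\n" (failure).
        status = client.recv(2)
        if status != b"1\n":
            raise RuntimeError("boss could not provide the socket")
        # The descriptor itself arrives as SCM_RIGHTS ancillary data (assumption:
        # read it with recvmsg; the in-tree code has its own helper for this).
        fds = array.array("i")
        msg, ancdata, flags, addr = client.recvmsg(1, socket.CMSG_LEN(fds.itemsize))
        for level, ctype, data in ancdata:
            if level == socket.SOL_SOCKET and ctype == socket.SCM_RIGHTS:
                fds.frombytes(data[:fds.itemsize])
                return fds[0]
        raise RuntimeError("no file descriptor received")
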

+ 2 - 2
src/bin/bind10/bob.spec

@@ -14,8 +14,8 @@
             "priority": 5,
             "kind": "dispensable"
           },
-          "b10-xfrin": { "special": "xfrin", "kind": "dispensable" },
-          "b10-xfrout": { "special": "xfrout", "kind": "dispensable" },
+          "b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
+          "b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
           "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
           "b10-stats": { "address": "Stats", "kind": "dispensable" },
           "b10-stats-httpd": {

+ 1 - 1
src/bin/bind10/run_bind10.sh.in

@@ -20,7 +20,7 @@ export PYTHON_EXEC
 
 BIND10_PATH=@abs_top_builddir@/src/bin/bind10
 
-PATH=@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/dhcp6:@abs_top_builddir@/src/bin/sockcreator:$PATH
+PATH=@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/ddns:@abs_top_builddir@/src/bin/dhcp6:@abs_top_builddir@/src/bin/sockcreator:$PATH
 export PATH
 
 PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs

+ 472 - 6
src/bin/bind10/tests/bind10_test.py.in

@@ -13,7 +13,11 @@
 # NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 # WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
+# Most of the time, we omit the "bind10_src" for brevity. Sometimes,
+# we want to be explicit about what we do, like when hijacking a library
+# call used by the bind10_src.
 from bind10_src import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
+import bind10_src
 
 # XXX: environment tests are currently disabled, due to the preprocessor
 #      setup that we have now complicating the environment
@@ -28,6 +32,8 @@ from isc.net.addr import IPAddr
 import time
 import isc
 import isc.log
+import isc.bind10.socket_cache
+import errno
 
 from isc.testutils.parse_args import TestOptParser, OptsError
 
@@ -97,6 +103,232 @@ class TestProcessInfo(unittest.TestCase):
         self.assertTrue(type(pi.pid) is int)
         self.assertNotEqual(pi.pid, old_pid)
 
+class TestCacheCommands(unittest.TestCase):
+    """
+    Test methods of boss related to the socket cache and socket handling.
+    """
+    def setUp(self):
+        """
+        Prepare the boss for some tests.
+
+        Also prepare some variables we need.
+        """
+        self.__boss = BoB()
+        # Fake the cache here so we can pretend it is us and hijack the
+        # calls to its methods.
+        self.__boss._socket_cache = self
+        self.__boss._socket_path = '/socket/path'
+        self.__raise_exception = None
+        self.__socket_args = {
+            "port": 53,
+            "address": "::",
+            "protocol": "UDP",
+            "share_mode": "ANY",
+            "share_name": "app"
+        }
+        # What was and wasn't called.
+        self.__drop_app_called = None
+        self.__get_socket_called = None
+        self.__send_fd_called = None
+        self.__get_token_called = None
+        self.__drop_socket_called = None
+        bind10_src.libutil_io_python.send_fd = self.__send_fd
+
+    def __send_fd(self, to, socket):
+        """
+        A function to hook the send_fd in the bind10_src.
+        """
+        self.__send_fd_called = (to, socket)
+
+    class FalseSocket:
+        """
+        A socket where we can fake methods we need instead of having a real
+        socket.
+        """
+        def __init__(self):
+            self.send = ""
+        def fileno(self):
+            """
+            The file number. Used for identifying the remote application.
+            """
+            return 42
+
+        def sendall(self, data):
+            """
+            Adds data to the self.send.
+            """
+            self.send += data
+
+    def drop_application(self, application):
+        """
+        Part of pretending to be the cache. Logs the parameter to
+        self.__drop_app_called.
+
+        In the case self.__raise_exception is set, the exception there
+        is raised instead.
+        """
+        if self.__raise_exception is not None:
+            raise self.__raise_exception
+        self.__drop_app_called = application
+
+    def test_consumer_dead(self):
+        """
+        Test that it calls the drop_application method of the cache.
+        """
+        self.__boss.socket_consumer_dead(self.FalseSocket())
+        self.assertEqual(42, self.__drop_app_called)
+
+    def test_consumer_dead_invalid(self):
+        """
+        Test that the boss doesn't crash in case the application is not known
+        to the cache, as this actually can happen in practice.
+        """
+        self.__raise_exception = ValueError("This application is unknown")
+        # This doesn't crash
+        self.__boss.socket_consumer_dead(self.FalseSocket())
+
+    def get_socket(self, token, application):
+        """
+        Part of pretending to be the cache. If there's anything in
+        __raise_exception, it is raised. Otherwise, the call is logged
+        into __get_socket_called and a number is returned.
+        """
+        if self.__raise_exception is not None:
+            raise self.__raise_exception
+        self.__get_socket_called = (token, application)
+        return 13
+
+    def test_request_handler(self):
+        """
+        Test that a request for socket is forwarded and the socket is sent
+        back, if it returns a socket.
+        """
+        socket = self.FalseSocket()
+        # An exception from the cache
+        self.__raise_exception = ValueError("Test value error")
+        self.__boss.socket_request_handler("token", socket)
+        # It was called, but it threw, so it is not noted here
+        self.assertIsNone(self.__get_socket_called)
+        self.assertEqual("0\n", socket.send)
+        # It should not have sent any socket.
+        self.assertIsNone(self.__send_fd_called)
+        # Now prepare a valid scenario
+        self.__raise_exception = None
+        socket.send = ""
+        self.__boss.socket_request_handler("token", socket)
+        self.assertEqual("1\n", socket.send)
+        self.assertEqual((42, 13), self.__send_fd_called)
+        self.assertEqual(("token", 42), self.__get_socket_called)
+
+    def get_token(self, protocol, address, port, share_mode, share_name):
+        """
+        Part of pretending to be the cache. If there's anything in
+        __raise_exception, it is raised. Otherwise, the parameters are
+        logged into __get_token_called and a token is returned.
+        """
+        if self.__raise_exception is not None:
+            raise self.__raise_exception
+        self.__get_token_called = (protocol, address, port, share_mode,
+                                   share_name)
+        return "token"
+
+    def test_get_socket_ok(self):
+        """
+        Test the successful scenario of getting a socket.
+        """
+        result = self.__boss._get_socket(self.__socket_args)
+        [code, answer] = result['result']
+        self.assertEqual(0, code)
+        self.assertEqual({
+            'token': 'token',
+            'path': '/socket/path'
+        }, answer)
+        addr = self.__get_token_called[1]
+        self.assertTrue(isinstance(addr, IPAddr))
+        self.assertEqual("::", str(addr))
+        self.assertEqual(("UDP", addr, 53, "ANY", "app"),
+                         self.__get_token_called)
+
+    def test_get_socket_error(self):
+        """
+        Test that bad inputs are handled correctly, etc.
+        """
+        def check_code(code, args):
+            """
+            Pass the args there and check if it returns success or not.
+
+            The rest is not tested, as it is already checked in the
+            test_get_socket_ok.
+            """
+            [rcode, ranswer] = self.__boss._get_socket(args)['result']
+            self.assertEqual(code, rcode)
+            if code == 1:
+                # This should be an error message. The exact formatting
+                # is unknown, but we check it is string at least
+                self.assertTrue(isinstance(ranswer, str))
+        def mod_args(name, value):
+            """
+            Override a parameter in the args.
+            """
+            result = dict(self.__socket_args)
+            result[name] = value
+            return result
+
+        # Port too large
+        check_code(1, mod_args('port', 65536))
+        # Not numeric address
+        check_code(1, mod_args('address', 'example.org.'))
+        # Some bad values of enum-like params
+        check_code(1, mod_args('protocol', 'BAD PROTO'))
+        check_code(1, mod_args('share_mode', 'BAD SHARE'))
+        # Check missing parameters
+        for param in self.__socket_args.keys():
+            args = dict(self.__socket_args)
+            del args[param]
+            check_code(1, args)
+        # These are OK values for the enum-like parameters
+        # The ones from test_get_socket_ok are not tested here
+        check_code(0, mod_args('protocol', 'TCP'))
+        check_code(0, mod_args('share_mode', 'SAMEAPP'))
+        check_code(0, mod_args('share_mode', 'NO'))
+        # If an exception is raised from within the cache, it is converted
+        # to an error, not propagated
+        self.__raise_exception = Exception("Test exception")
+        check_code(1, self.__socket_args)
+
+    def drop_socket(self, token):
+        """
+        Part of pretending to be the cache. If there's anything in
+        __raise_exception, it is raised. Otherwise, the parameter is stored
+        in __drop_socket_called.
+        """
+        if self.__raise_exception is not None:
+            raise self.__raise_exception
+        self.__drop_socket_called = token
+
+    def test_drop_socket(self):
+        """
+        Check the drop_socket command. It should directly call the method
+        on the cache. Exceptions should be translated to error messages.
+        """
+        # This should be OK and just propagated to the call.
+        self.assertEqual({"result": [0]},
+                         self.__boss.command_handler("drop_socket",
+                                                     {"token": "token"}))
+        self.assertEqual("token", self.__drop_socket_called)
+        self.__drop_socket_called = None
+        # Missing parameter
+        self.assertEqual({"result": [1, "Missing token parameter"]},
+                         self.__boss.command_handler("drop_socket", {}))
+        self.assertIsNone(self.__drop_socket_called)
+        # An exception is raised from within the cache
+        self.__raise_exception = ValueError("Test error")
+        self.assertEqual({"result": [1, "Test error"]},
+                         self.__boss.command_handler("drop_socket",
+                         {"token": "token"}))
+
+
 class TestBoB(unittest.TestCase):
     def test_init(self):
         bob = BoB()
@@ -109,6 +341,22 @@ class TestBoB(unittest.TestCase):
         self.assertEqual(bob.uid, None)
         self.assertEqual(bob.username, None)
         self.assertEqual(bob.nocache, False)
+        self.assertIsNone(bob._socket_cache)
+
+    def test_set_creator(self):
+        """
+        Test the call to set_creator. First time, the cache is created
+        with the passed creator. The next time, it throws an exception.
+        """
+        bob = BoB()
+        # The cache doesn't use it at start, so just create an empty class
+        class Creator: pass
+        creator = Creator()
+        bob.set_creator(creator)
+        self.assertTrue(isinstance(bob._socket_cache,
+                        isc.bind10.socket_cache.Cache))
+        self.assertEqual(creator, bob._socket_cache._creator)
+        self.assertRaises(ValueError, bob.set_creator, creator)
 
     def test_init_alternate_socket(self):
         bob = BoB("alt_socket_file")
@@ -183,6 +431,26 @@ class TestBoB(unittest.TestCase):
         self.assertEqual(bob.command_handler("__UNKNOWN__", None),
                          isc.config.ccsession.create_answer(1, "Unknown command"))
 
+        # Fake the get_token of cache and test the command works
+        bob._socket_path = '/socket/path'
+        class cache:
+            def get_token(self, protocol, addr, port, share_mode, share_name):
+                return str(addr) + ':' + str(port)
+        bob._socket_cache = cache()
+        args = {
+            "port": 53,
+            "address": "0.0.0.0",
+            "protocol": "UDP",
+            "share_mode": "ANY",
+            "share_name": "app"
+        }
+        # at all and this is the easiest way to check.
+        self.assertEqual({'result': [0, {'token': '0.0.0.0:53',
+                                         'path': '/socket/path'}]},
+                         bob.command_handler("get_socket", args))
+        # The drop_socket is not tested here, but in TestCacheCommands.
+        # It needs the cache mocks to be in place and they are there.
+
 # Class for testing the BoB without actually starting processes.
 # This is used for testing the start/stop components routines and
 # the BoB commands.
@@ -268,7 +536,9 @@ class MockBob(BoB):
                     'b10-stats-httpd': self.start_stats_httpd,
                     'b10-cmdctl': self.start_cmdctl,
                     'b10-dhcp6': self.start_dhcp6,
-                    'b10-dhcp4': self.start_dhcp4 }
+                    'b10-dhcp4': self.start_dhcp4,
+                    'b10-xfrin': self.start_xfrin,
+                    'b10-xfrout': self.start_xfrout }
         return procmap[name]()
 
     def start_xfrout(self):
@@ -463,8 +733,9 @@ class TestStartStopProcessesBob(unittest.TestCase):
         if start_auth:
             config['b10-auth'] = { 'kind': 'needed', 'special': 'auth' }
             config['b10-xfrout'] = { 'kind': 'dispensable',
-                                     'special': 'xfrout' }
-            config['b10-xfrin'] = { 'kind': 'dispensable', 'special': 'xfrin' }
+                                     'address': 'Xfrout' }
+            config['b10-xfrin'] = { 'kind': 'dispensable',
+                                    'address': 'Xfrin' }
             config['b10-zonemgr'] = { 'kind': 'dispensable',
                                       'address': 'Zonemgr' }
         if start_resolver:
@@ -876,9 +1147,9 @@ class TestBossComponents(unittest.TestCase):
             (anyway it is not told so). It does not die if it is killed
             the first time. It dies only when killed forcefully.
             """
-            def kill(self, forcefull=False):
-                killed.append(forcefull)
-                if forcefull:
+            def kill(self, forceful=False):
+                killed.append(forceful)
+                if forceful:
                     bob.components = {}
             def pid(self):
                 return 1
@@ -928,6 +1199,201 @@ class TestBossComponents(unittest.TestCase):
         bob.start_all_components()
         self.__check_extended(self.__param)
 
+class SocketSrvTest(unittest.TestCase):
+    """
+    This tests some methods of boss related to the unix domain sockets used
+    to transfer other sockets to applications.
+    """
+    def setUp(self):
+        """
+        Create the boss to test, testdata and backup some functions.
+        """
+        self.__boss = BoB()
+        self.__select_backup = bind10_src.select.select
+        self.__select_called = None
+        self.__socket_data_called = None
+        self.__consumer_dead_called = None
+        self.__socket_request_handler_called = None
+
+    def tearDown(self):
+        """
+        Restore functions.
+        """
+        bind10_src.select.select = self.__select_backup
+
+    class __FalseSocket:
+        """
+        A mock socket for the select and accept and stuff like that.
+        """
+        def __init__(self, owner, fileno=42):
+            self.__owner = owner
+            self.__fileno = fileno
+            self.data = None
+            self.closed = False
+
+        def fileno(self):
+            return self.__fileno
+
+        def accept(self):
+            return self.__class__(self.__owner, 13)
+
+        def recv(self, bufsize, flags=0):
+            self.__owner.assertEqual(1, bufsize)
+            self.__owner.assertEqual(socket.MSG_DONTWAIT, flags)
+            if isinstance(self.data, socket.error):
+                raise self.data
+            elif self.data is not None:
+                if len(self.data):
+                    result = self.data[0:1]
+                    self.data = self.data[1:]
+                    return result
+                else:
+                    raise socket.error(errno.EAGAIN, "Would block")
+            else:
+                return b''
+
+        def close(self):
+            self.closed = True
+
+    class __CCS:
+        """
+        A mock CCS, just to provide the socket file number.
+        """
+        class __Socket:
+            def fileno(self):
+                return 1
+        def get_socket(self):
+            return self.__Socket()
+
+    def __select_accept(self, r, w, x, t):
+        self.__select_called = (r, w, x, t)
+        return ([42], [], [])
+
+    def __select_data(self, r, w, x, t):
+        self.__select_called = (r, w, x, t)
+        return ([13], [], [])
+
+    def __accept(self):
+        """
+        Hijack the accept method of the boss.
+
+        Notes down that it was called and stops the boss.
+        """
+        self.__accept_called = True
+        self.__boss.runnable = False
+
+    def test_srv_accept_called(self):
+        """
+        Test that the _srv_accept method of boss is called when the listening
+        socket is readable.
+        """
+        self.__boss.runnable = True
+        self.__boss._srv_socket = self.__FalseSocket(self)
+        self.__boss._srv_accept = self.__accept
+        self.__boss.ccs = self.__CCS()
+        bind10_src.select.select = self.__select_accept
+        self.__boss.run(2)
+        # It called the accept
+        self.assertTrue(self.__accept_called)
+        # And the select had the right parameters
+        self.assertEqual(([2, 1, 42], [], [], None), self.__select_called)
+
+    def test_srv_accept(self):
+        """
+        Test how the _srv_accept method works.
+        """
+        self.__boss._srv_socket = self.__FalseSocket(self)
+        self.__boss._srv_accept()
+        # After we accepted, a new socket is added there
+        socket = self.__boss._unix_sockets[13][0]
+        # The socket is properly stored there
+        self.assertTrue(isinstance(socket, self.__FalseSocket))
+        # And the buffer (yet empty) is there
+        self.assertEqual({13: (socket, b'')}, self.__boss._unix_sockets)
+
+    def __socket_data(self, socket):
+        self.__boss.runnable = False
+        self.__socket_data_called = socket
+
+    def test_socket_data(self):
+        """
+        Test that a socket that wants attention gets it.
+        """
+        self.__boss._srv_socket = self.__FalseSocket(self)
+        self.__boss._socket_data = self.__socket_data
+        self.__boss.ccs = self.__CCS()
+        self.__boss._unix_sockets = {13: (self.__FalseSocket(self, 13), b'')}
+        self.__boss.runnable = True
+        bind10_src.select.select = self.__select_data
+        self.__boss.run(2)
+        self.assertEqual(13, self.__socket_data_called)
+        self.assertEqual(([2, 1, 42, 13], [], [], None), self.__select_called)
+
+    def __prepare_data(self, data):
+        socket = self.__FalseSocket(self, 13)
+        self.__boss._unix_sockets = {13: (socket, b'')}
+        socket.data = data
+        self.__boss.socket_consumer_dead = self.__consumer_dead
+        self.__boss.socket_request_handler = self.__socket_request_handler
+        return socket
+
+    def __consumer_dead(self, socket):
+        self.__consumer_dead_called = socket
+
+    def __socket_request_handler(self, token, socket):
+        self.__socket_request_handler_called = (token, socket)
+
+    def test_socket_closed(self):
+        """
+        Test that a socket is removed and the socket_consumer_dead is called
+        when it is closed.
+        """
+        socket = self.__prepare_data(None)
+        self.__boss._socket_data(13)
+        self.assertEqual(socket, self.__consumer_dead_called)
+        self.assertEqual({}, self.__boss._unix_sockets)
+        self.assertTrue(socket.closed)
+
+    def test_socket_short(self):
+        """
+        Test that if there's not enough data to get the whole socket, it is
+        kept there, but nothing is called.
+        """
+        socket = self.__prepare_data(b'tok')
+        self.__boss._socket_data(13)
+        self.assertEqual({13: (socket, b'tok')}, self.__boss._unix_sockets)
+        self.assertFalse(socket.closed)
+        self.assertIsNone(self.__consumer_dead_called)
+        self.assertIsNone(self.__socket_request_handler_called)
+
+    def test_socket_continue(self):
+        """
+        Test that we call the token handling function when the whole token
+        comes. This test pretends to continue reading where the previous one
+        stopped.
+        """
+        socket = self.__prepare_data(b"en\nanothe")
+        # The data to finish
+        self.__boss._unix_sockets[13] = (socket, b'tok')
+        self.__boss._socket_data(13)
+        self.assertEqual({13: (socket, b'anothe')}, self.__boss._unix_sockets)
+        self.assertFalse(socket.closed)
+        self.assertIsNone(self.__consumer_dead_called)
+        self.assertEqual((b'token', socket),
+                         self.__socket_request_handler_called)
+
+    def test_broken_socket(self):
+        """
+        If the socket raises an exception during the read other than EAGAIN,
+        it is broken and we remove it.
+        """
+        sock = self.__prepare_data(socket.error(errno.ENOMEM,
+            "There's more memory available, but not for you"))
+        self.__boss._socket_data(13)
+        self.assertEqual(sock, self.__consumer_dead_called)
+        self.assertEqual({}, self.__boss._unix_sockets)
+        self.assertTrue(sock.closed)
+
 if __name__ == '__main__':
     # store os.environ for test_unchanged_environment
     original_os_environ = copy.deepcopy(os.environ)
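
The assertions in the tests above also document the answer format produced by the boss command handler: isc.config.ccsession.create_answer() yields a dict whose "result" list starts with a return code (0 on success) followed by an optional payload or error string. A brief sketch of the shapes checked above, with the same illustrative values used in the tests:

    # Answer shapes asserted in the tests above.
    get_socket_ok  = {"result": [0, {"token": "0.0.0.0:53", "path": "/socket/path"}]}
    drop_socket_ok = {"result": [0]}
    missing_token  = {"result": [1, "Missing token parameter"]}
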

+ 1 - 1
src/bin/cfgmgr/plugins/logging.spec

@@ -57,7 +57,7 @@
                       { "item_name": "flush",
                         "item_type": "boolean",
                         "item_optional": false,
-                        "item_default": false
+                        "item_default": true
                       },
                       { "item_name": "maxsize",
                         "item_type": "integer",

+ 42 - 0
src/bin/ddns/Makefile.am

@@ -0,0 +1,42 @@
+SUBDIRS = . tests
+
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+
+pkglibexec_SCRIPTS = b10-ddns
+
+b10_ddnsdir = $(pkgdatadir)
+b10_ddns_DATA = ddns.spec
+
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/ddns_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = b10-ddns ddns.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/ddns_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/ddns_messages.pyc
+
+EXTRA_DIST =  ddns_messages.mes ddns.spec
+man_MANS = b10-ddns.8
+EXTRA_DIST += $(man_MANS) b10-ddns.xml
+
+if ENABLE_MAN
+
+b10-ddns.8: b10-ddns.xml
+	xsltproc --novalid --xinclude --nonet -o $@ http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl $(srcdir)/b10-ddns.xml
+
+endif
+
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/ddns_messages.py : ddns_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message \
+	-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/ddns_messages.mes
+
+# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
+b10-ddns: ddns.py $(PYTHON_LOGMSGPKG_DIR)/work/ddns_messages.py
+	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
+	       -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" ddns.py >$@
+	chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)

+ 97 - 0
src/bin/ddns/b10-ddns.8

@@ -0,0 +1,97 @@
+'\" t
+.\"     Title: b10-ddns
+.\"    Author: [FIXME: author] [see http://docbook.sf.net/el/author]
+.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
+.\"      Date: December 9, 2011
+.\"    Manual: BIND10
+.\"    Source: BIND10
+.\"  Language: English
+.\"
+.TH "B10\-DDNS" "8" "December 9, 2011" "BIND10" "BIND10"
+.\" -----------------------------------------------------------------
+.\" * Define some portability stuff
+.\" -----------------------------------------------------------------
+.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.\" http://bugs.debian.org/507673
+.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
+.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.ie \n(.g .ds Aq \(aq
+.el       .ds Aq '
+.\" -----------------------------------------------------------------
+.\" * set default formatting
+.\" -----------------------------------------------------------------
+.\" disable hyphenation
+.nh
+.\" disable justification (adjust text to left margin only)
+.ad l
+.\" -----------------------------------------------------------------
+.\" * MAIN CONTENT STARTS HERE *
+.\" -----------------------------------------------------------------
+.SH "NAME"
+b10-ddns \- Dynamic DNS update service
+.SH "SYNOPSIS"
+.HP \w'\fBb10\-ddns\fR\ 'u
+\fBb10\-ddns\fR [\fB\-v\fR | \fB\-\-verbose\fR]
+.SH "DESCRIPTION"
+.PP
+The
+\fBb10\-ddns\fR
+daemon provides the BIND 10 Dynamic Update (DDNS) service, as specified in RFC 2136\&. Normally it is started by the
+\fBbind10\fR(8)
+boss process\&. When the
+\fBb10\-auth\fR
+DNS server receives a DDNS update,
+\fBb10\-ddns\fR
+updates the zone in the BIND 10 zone data store\&.
+.PP
+This daemon communicates with BIND 10 over a
+\fBb10-msgq\fR(8)
+C\-Channel connection\&. If this connection is not established,
+\fBb10\-ddns\fR
+will exit\&.
+.PP
+
+\fBb10\-ddns\fR
+receives its configurations from
+\fBb10-cfgmgr\fR(8)\&.
+.SH "ARGUMENTS"
+.PP
+The arguments are as follows:
+.PP
+\fB\-v\fR, \fB\-\-verbose\fR
+.RS 4
+This value is ignored at this moment, but is provided for compatibility with the bind10 Boss process
+.RE
+.SH "CONFIGURATION AND COMMANDS"
+.PP
+The configurable settings are:
+.PP
+
+\fIzones\fR
+The zones option is a named set of zones that can be updated with DDNS\&. Each entry has one element called update_acl, which is a list of access control rules that define update permissions\&. By default this is empty; DDNS must be explicitly enabled per zone\&.
+.PP
+The module commands are:
+.PP
+
+\fBshutdown\fR
+Exits
+\fBb10\-ddns\fR\&. (Note that the BIND 10 boss process will restart this service\&.)
+.SH "SEE ALSO"
+.PP
+
+\fBb10-auth\fR(8),
+\fBb10-cfgmgr\fR(8),
+\fBb10-msgq\fR(8),
+\fBb10-xfrin\fR(8),
+\fBb10-xfrout\fR(8),
+\fBbind10\fR(8),
+BIND 10 Guide\&.
+.SH "HISTORY"
+.PP
+The
+\fBb10\-ddns\fR
+daemon was first implemented in December 2011 for the ISC BIND 10 project\&.
+.SH "COPYRIGHT"
+.br
+Copyright \(co 2011 Internet Systems Consortium, Inc. ("ISC")
+.br

+ 161 - 0
src/bin/ddns/b10-ddns.xml

@@ -0,0 +1,161 @@
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+               "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
+	       [<!ENTITY mdash "&#8212;">]>
+<!--
+ - Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<refentry>
+
+  <refentryinfo>
+    <date>December 9, 2011</date>
+  </refentryinfo>
+
+  <refmeta>
+    <refentrytitle>b10-ddns</refentrytitle>
+    <manvolnum>8</manvolnum>
+    <refmiscinfo>BIND10</refmiscinfo>
+  </refmeta>
+
+  <refnamediv>
+    <refname>b10-ddns</refname>
+    <refpurpose>Dynamic DNS update service</refpurpose>
+  </refnamediv>
+
+  <docinfo>
+    <copyright>
+      <year>2011</year>
+      <holder>Internet Systems Consortium, Inc. ("ISC")</holder>
+    </copyright>
+  </docinfo>
+
+  <refsynopsisdiv>
+    <cmdsynopsis>
+      <command>b10-ddns</command>
+        <group choice="opt">
+          <arg choice="plain"><option>-v</option></arg>
+          <arg choice="plain"><option>--verbose</option></arg>
+        </group>
+    </cmdsynopsis>
+  </refsynopsisdiv>
+
+  <refsect1>
+    <title>DESCRIPTION</title>
+    <para>The <command>b10-ddns</command> daemon provides the BIND 10
+      Dynamic Update (DDNS) service, as specified in RFC 2136.
+      Normally it is started by the
+      <citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+      boss process.
+      When the <command>b10-auth</command> DNS server receives
+      a DDNS update, <command>b10-ddns</command> updates the zone
+      in the BIND 10 zone data store.
+    </para>
+
+    <para>
+      This daemon communicates with BIND 10 over a
+      <citerefentry><refentrytitle>b10-msgq</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+      C-Channel connection.  If this connection is not established,
+      <command>b10-ddns</command> will exit.
+    </para>
+
+    <para>
+     <command>b10-ddns</command> receives its configurations from
+<citerefentry><refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum></citerefentry>.
+    </para>
+  </refsect1>
+
+  <refsect1>
+    <title>ARGUMENTS</title>
+
+    <para>The arguments are as follows:</para>
+
+    <variablelist>
+
+      <varlistentry>
+        <term>
+          <option>-v</option>,
+          <option>--verbose</option>
+        </term>
+        <listitem>
+          <para>
+            This option is currently ignored, but is provided for
+            compatibility with the bind10 Boss process.
+          </para>
+        </listitem>
+      </varlistentry>
+    </variablelist>
+  </refsect1>
+
+  <refsect1>
+    <title>CONFIGURATION AND COMMANDS</title>
+    <para>
+      The configurable settings are:
+    </para>
+    <para>
+      <varname>zones</varname>
+      The zones option is a named set of zones that can be updated with
+      DDNS. Each entry has one element called update_acl, which is
+      a list of access control rules that define update permissions.
+      By default this is empty; DDNS must be explicitly enabled per zone.
+    </para>
+
+    <para>
+      The module commands are:
+    </para>
+    <para>
+      <command>shutdown</command> Exits <command>b10-ddns</command>.
+      (Note that the BIND 10 boss process will restart this service.)
+    </para>
+
+  </refsect1>
+
+
+  <refsect1>
+    <title>SEE ALSO</title>
+    <para>
+      <citerefentry>
+        <refentrytitle>b10-auth</refentrytitle><manvolnum>8</manvolnum>
+      </citerefentry>,
+      <citerefentry>
+        <refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum>
+      </citerefentry>,
+      <citerefentry>
+        <refentrytitle>b10-msgq</refentrytitle><manvolnum>8</manvolnum>
+      </citerefentry>,
+      <citerefentry>
+        <refentrytitle>b10-xfrin</refentrytitle><manvolnum>8</manvolnum>
+      </citerefentry>,
+      <citerefentry>
+        <refentrytitle>b10-xfrout</refentrytitle><manvolnum>8</manvolnum>
+      </citerefentry>,
+      <citerefentry>
+        <refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum>
+      </citerefentry>,
+      <citetitle>BIND 10 Guide</citetitle>.
+    </para>
+  </refsect1>
+
+  <refsect1>
+    <title>HISTORY</title>
+    <para>
+      The <command>b10-ddns</command> daemon was first implemented
+      in December 2011 for the ISC BIND 10 project.
+    </para>
+  </refsect1>
+</refentry><!--
+ - Local variables:
+ - mode: sgml
+ - End:
+-->

+ 209 - 0
src/bin/ddns/ddns.py.in

@@ -0,0 +1,209 @@
+#!@PYTHON@
+
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+
+import sys; sys.path.append ('@@PYTHONPATH@@')
+import isc
+import bind10_config
+from isc.dns import *
+from isc.config.ccsession import *
+from isc.cc import SessionError, SessionTimeout
+import isc.util.process
+
+from isc.log_messages.ddns_messages import *
+
+from optparse import OptionParser, OptionValueError
+import os
+import signal
+
+isc.log.init("b10-ddns")
+logger = isc.log.Logger("ddns")
+
+DATA_PATH = bind10_config.DATA_PATH
+if "B10_FROM_SOURCE" in os.environ:
+    DATA_PATH = os.environ['B10_FROM_SOURCE'] + "/src/bin/ddns"
+SPECFILE_LOCATION = DATA_PATH + "/ddns.spec"
+
+
+isc.util.process.rename()
+
+class DDNSConfigError(Exception):
+    '''An exception indicating an error in updating ddns configuration.
+
+    This exception is raised when the ddns process encounters an error in
+    handling configuration updates.  Not all syntax errors can be caught
+    at the module-CC layer, so ddns needs to (explicitly or implicitly)
+    validate the given configuration data itself.  When it finds an error
+    it raises this exception (either directly or by converting an exception
+    from other modules) as a unified error in configuration.
+    '''
+    pass
+
+class DDNSSessionError(Exception):
+    '''An exception raised for some unexpected events during a ddns session.
+    '''
+    pass
+
+class DDNSSession:
+    '''Class to handle one DDNS update'''
+
+    def __init__(self):
+        '''Initialize a DDNS Session'''
+        pass
+
+class DDNSServer:
+    def __init__(self, cc_session=None):
+        '''
+        Initialize the DDNS Server.
+        This sets up a ModuleCCSession for the BIND 10 system.
+        Parameters:
+        cc_session: If None (default), a new ModuleCCSession will be set up.
+                    If specified, the given session will be used. This is
+                    mainly used for testing.
+        '''
+        if cc_session is not None:
+            self._cc = cc_session
+        else:
+            self._cc = isc.config.ModuleCCSession(SPECFILE_LOCATION,
+                                                  self.config_handler,
+                                                  self.command_handler)
+
+        self._config_data = self._cc.get_full_config()
+        self._cc.start()
+        self._shutdown = False
+
+    def config_handler(self, new_config):
+        '''Update config data.'''
+        answer = create_answer(0)
+        return answer
+
+    def command_handler(self, cmd, args):
+        '''
+        Handle a CC session command, as sent from bindctl or other
+        BIND 10 modules.
+        '''
+        if cmd == "shutdown":
+            logger.info(DDNS_RECEIVED_SHUTDOWN_COMMAND)
+            self.trigger_shutdown()
+            answer = create_answer(0)
+        else:
+            answer = create_answer(1, "Unknown command: " + str(cmd))
+        return answer
+
+    def trigger_shutdown(self):
+        '''Initiate a shutdown sequence.
+
+        This method is expected to be called in various ways including
+        in the middle of a signal handler, and is designed to be as simple
+        as possible to minimize side effects.  Actual shutdown will take
+        place in a normal control flow.
+
+        '''
+        logger.info(DDNS_SHUTDOWN)
+        self._shutdown = True
+
+    def shutdown_cleanup(self):
+        '''
+        Perform any cleanup that is necessary when shutting down the server.
+        Do NOT call this to initiate shutdown; use trigger_shutdown().
+
+        Currently, it does nothing, but cleanup routines are expected.
+        '''
+        pass
+
+    def run(self):
+        '''
+        Get and process all commands sent from cfgmgr or other modules.
+        This loops waiting for events until trigger_shutdown() has been called.
+        '''
+        logger.info(DDNS_RUNNING)
+        while not self._shutdown:
+            # We do not catch any exceptions here right now, but this would
+            # be a good place to catch any exceptions that b10-ddns can
+            # recover from. We currently have no exception hierarchy to
+            # make such a distinction easily, but once we do, this would
+            # be the place to catch.
+            self._cc.check_command(False)
+        self.shutdown_cleanup()
+        logger.info(DDNS_STOPPED)
+
+def create_signal_handler(ddns_server):
+    '''
+    This creates a signal_handler for use in set_signal_handler, which
+    shuts down the given DDNSServer (or any object that has a
+    trigger_shutdown() method).
+    '''
+    def signal_handler(signal, frame):
+        '''
+        Handler for process signals. Since only signals to shut down are sent
+        here, the actual signal is not checked and the server is simply shut
+        down.
+        '''
+        ddns_server.trigger_shutdown()
+    return signal_handler
+
+def set_signal_handler(signal_handler):
+    '''
+    Sets the signal handler(s).
+    '''
+    signal.signal(signal.SIGTERM, signal_handler)
+    signal.signal(signal.SIGINT, signal_handler)
+
+def set_cmd_options(parser):
+    '''
+    Helper function to set command-line options
+    '''
+    parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
+            help="display more about what is going on")
+
+def main(ddns_server=None):
+    '''
+    The main function.
+    Parameters:
+    ddns_server: If None (default), a DDNSServer object is initialized.
+                 If specified, the given DDNSServer will be used. This is
+                 mainly used for testing.
+    '''
+    try:
+        parser = OptionParser()
+        set_cmd_options(parser)
+        (options, args) = parser.parse_args()
+        if options.verbose:
+            print("[b10-ddns] Warning: -v verbose option is ignored at this point.")
+
+        if ddns_server is None:
+            ddns_server = DDNSServer()
+        set_signal_handler(create_signal_handler(ddns_server))
+        ddns_server.run()
+    except KeyboardInterrupt:
+        logger.info(DDNS_STOPPED_BY_KEYBOARD)
+    except SessionError as e:
+        logger.error(DDNS_CC_SESSION_ERROR, str(e))
+    except ModuleCCSessionError as e:
+        logger.error(DDNS_MODULECC_SESSION_ERROR, str(e))
+    except DDNSConfigError as e:
+        logger.error(DDNS_CONFIG_ERROR, str(e))
+    except SessionTimeout as e:
+        logger.error(DDNS_CC_SESSION_TIMEOUT_ERROR)
+    except Exception as e:
+        logger.error(DDNS_UNCAUGHT_EXCEPTION, type(e).__name__, str(e))
+
+if '__main__' == __name__:
+    main()
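
Note: the shutdown handling above follows a small, self-contained pattern: create_signal_handler() builds a closure over the server object, set_signal_handler() registers it for SIGTERM and SIGINT, trigger_shutdown() only flips a flag, and the run() loop notices that flag on its next iteration. A minimal sketch of the same pattern in plain Python (no BIND 10 modules; the FakeServer class and the 0.1 second poll interval are illustrative stand-ins for DDNSServer and check_command()):

    import signal
    import time

    class FakeServer:
        '''Stand-in for DDNSServer: a polling run() loop guarded by a flag.'''
        def __init__(self):
            self._shutdown = False

        def trigger_shutdown(self):
            # Keep signal-handler work minimal; the loop sees the flag later.
            self._shutdown = True

        def run(self):
            while not self._shutdown:
                time.sleep(0.1)  # stands in for self._cc.check_command(False)

    def create_signal_handler(server):
        def signal_handler(signum, frame):
            server.trigger_shutdown()
        return signal_handler

    server = FakeServer()
    handler = create_signal_handler(server)
    signal.signal(signal.SIGTERM, handler)
    signal.signal(signal.SIGINT, handler)
    server.run()  # returns once SIGTERM or SIGINT (including Ctrl-C) arrives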

+ 42 - 0
src/bin/ddns/ddns.spec

@@ -0,0 +1,42 @@
+{
+  "module_spec": {
+    "module_name": "DDNS",
+    "config_data": [
+      {
+        "item_name": "zones",
+        "item_type": "named_set",
+        "item_optional": false,
+        "item_default": {},
+        "named_set_item_spec": {
+          "item_name": "entry",
+          "item_type": "map",
+          "item_optional": true,
+          "item_default": {
+            "update_acl": [{"action": "ACCEPT", "from": "127.0.0.1"},
+                           {"action": "ACCEPT", "from": "::1"}]
+          },
+          "map_item_spec": [
+            {
+              "item_name": "update_acl",
+              "item_type": "list",
+              "item_optional": false,
+              "list_item_spec": {
+                "item_name": "acl_element",
+                "item_type": "any",
+                "item_optional": true
+              }
+            }
+          ]
+        }
+      }
+    ],
+    "commands": [
+      {
+        "command_name": "shutdown",
+        "command_description": "Shut down DDNS",
+        "command_args": []
+      }
+    ]
+  }
+}
+
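
Note: a populated configuration matching this spec is a named_set keyed by zone name, where each entry carries an update_acl list. A hedged sketch of such data as a Python literal (the zone name example.org and the 192.0.2.0/24 network are made-up values; the ACL element form follows the defaults in the spec above):

    ddns_zones_config = {
        "zones": {
            "example.org.": {
                "update_acl": [
                    # accept updates from localhost and one example network;
                    # anything not matched is not allowed to update the zone
                    {"action": "ACCEPT", "from": "127.0.0.1"},
                    {"action": "ACCEPT", "from": "::1"},
                    {"action": "ACCEPT", "from": "192.0.2.0/24"}
                ]
            }
        }
    }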

+ 66 - 0
src/bin/ddns/ddns_messages.mes

@@ -0,0 +1,66 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the ddns messages python module.
+
+# When you add a message to this file, it is a good idea to run
+# <topsrcdir>/tools/reorder_message_file.py to make sure the
+# messages are in the correct order.
+
+% DDNS_CC_SESSION_ERROR error reading from cc channel: %1
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq process is not running.
+
+% DDNS_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+
+% DDNS_CONFIG_ERROR error found in configuration data: %1
+The ddns process encountered an error when installing the configuration at
+startup time.  Details of the error are included in the log message.
+
+% DDNS_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1
+There was a problem in the lower level module handling configuration and
+control commands.  This could happen for various reasons, but the most likely
+cause is that the configuration database contains a syntax error and ddns
+failed to start at initialization.  A detailed error message from the module
+will also be displayed.
+
+% DDNS_RECEIVED_SHUTDOWN_COMMAND shutdown command received
+The ddns process received a shutdown command from the command channel
+and will now shut down.
+
+% DDNS_RUNNING ddns server is running and listening for updates
+The ddns process has successfully started and is now ready to receive commands
+and updates.
+
+% DDNS_SHUTDOWN ddns server shutting down
+The ddns process is shutting down. It will no longer listen for new commands
+or updates. Any command or update that is being addressed at this moment will
+be completed, after which the process will exit.
+
+% DDNS_STOPPED ddns server has stopped
+The ddns process has successfully stopped and is no longer listening for or
+handling commands or updates, and will now exit.
+
+% DDNS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the ddns process. The
+process will now shut down.
+
+% DDNS_UNCAUGHT_EXCEPTION uncaught exception of type %1: %2
+The b10-ddns process encountered an uncaught exception and will now shut
+down. This is indicative of a programming error and should not happen under
+normal circumstances. The exception type and message are printed.

File diff suppressed because it is too large
+ 28 - 0
src/bin/ddns/tests/Makefile.am


+ 142 - 0
src/bin/ddns/tests/ddns_test.py

@@ -0,0 +1,142 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''Tests for the DDNS module'''
+
+import unittest
+import isc
+import ddns
+import isc.config
+
+class MyCCSession(isc.config.ConfigData):
+    '''Fake session with minimal interface compliance'''
+    def __init__(self):
+        module_spec = isc.config.module_spec_from_file(
+            ddns.SPECFILE_LOCATION)
+        isc.config.ConfigData.__init__(self, module_spec)
+        self._started = False
+
+    def start(self):
+        '''Called by DDNSServer initialization, but not used in tests'''
+        self._started = True
+
+class MyDDNSServer():
+    '''Fake DDNS server used to test the main() function'''
+    def __init__(self):
+        self.reset()
+
+    def run(self):
+        '''
+        Fake the run() method of the DDNS server. This will set
+        self.run_called to True.
+        If self._exception is not None, it is raised as an exception.
+        '''
+        self.run_called = True
+        if self._exception is not None:
+            self.exception_raised = True
+            raise self._exception
+
+    def set_exception(self, exception):
+        '''Set an exception to be raised when run() is called'''
+        self._exception = exception
+
+    def reset(self):
+        '''(Re)set to initial values'''
+        self.run_called = False
+        self.exception_raised = False
+        self._exception = None
+
+class TestDDNSServer(unittest.TestCase):
+    def setUp(self):
+        cc_session = MyCCSession()
+        self.assertFalse(cc_session._started)
+        self.ddns_server = ddns.DDNSServer(cc_session)
+        self.assertTrue(cc_session._started)
+
+    def test_config_handler(self):
+        # Config handler does not do anything yet, but should at least
+        # return 'ok' for now.
+        new_config = {}
+        answer = self.ddns_server.config_handler(new_config)
+        self.assertEqual((0, None), isc.config.parse_answer(answer))
+
+    def test_shutdown_command(self):
+        '''Test whether the shutdown command works'''
+        self.assertFalse(self.ddns_server._shutdown)
+        answer = self.ddns_server.command_handler('shutdown', None)
+        self.assertEqual((0, None), isc.config.parse_answer(answer))
+        self.assertTrue(self.ddns_server._shutdown)
+
+    def test_command_handler(self):
+        '''Test some commands.'''
+        # this command should not exist
+        answer = self.ddns_server.command_handler('bad_command', None)
+        self.assertEqual((1, 'Unknown command: bad_command'),
+                         isc.config.parse_answer(answer))
+
+    def test_signal_handler(self):
+        '''Test whether signal_handler calls shutdown()'''
+        signal_handler = ddns.create_signal_handler(self.ddns_server)
+        self.assertFalse(self.ddns_server._shutdown)
+        signal_handler(None, None)
+        self.assertTrue(self.ddns_server._shutdown)
+
+class TestMain(unittest.TestCase):
+    def setUp(self):
+        self._server = MyDDNSServer()
+
+    def test_main(self):
+        self.assertFalse(self._server.run_called)
+        ddns.main(self._server)
+        self.assertTrue(self._server.run_called)
+
+    def check_exception(self, ex):
+        '''Common test sequence to check that the given exception is raised and handled in main().
+        '''
+        # Should technically not be necessary, but reset server to be sure
+        self._server.reset()
+        self.assertFalse(self._server.exception_raised)
+        self._server.set_exception(ex)
+        ddns.main(self._server)
+        self.assertTrue(self._server.exception_raised)
+
+    def test_exceptions(self):
+        '''
+        Test whether exceptions are caught in main()
+        These exceptions should not bubble up.
+        '''
+        self._server.set_exception(KeyboardInterrupt())
+        self.assertFalse(self._server.exception_raised)
+        ddns.main(self._server)
+        self.assertTrue(self._server.exception_raised)
+
+        self.check_exception(isc.cc.SessionError("error"))
+        self.check_exception(isc.config.ModuleCCSessionError("error"))
+        self.check_exception(ddns.DDNSConfigError("error"))
+        self.check_exception(isc.cc.SessionTimeout("error"))
+        self.check_exception(Exception("error"))
+
+        # Add one that is not a subclass of Exception, and hence not
+        # caught. Misuse BaseException for that.
+        self._server.reset()
+        self.assertFalse(self._server.exception_raised)
+        self._server.set_exception(BaseException("error"))
+        self.assertRaises(BaseException, ddns.main, self._server)
+        self.assertTrue(self._server.exception_raised)
+        
+
+if __name__== "__main__":
+    isc.log.resetUnitTestRootLogger()
+    unittest.main()

+ 1 - 2
src/bin/dhcp4/Makefile.am

@@ -32,7 +32,7 @@ pkglibexec_PROGRAMS = b10-dhcp4
 
 b10_dhcp4_SOURCES = main.cc dhcp4_srv.cc dhcp4_srv.h
 
-b10_dhcp4_LDADD = $(top_builddir)/src/lib/dhcp/libdhcp.la
+b10_dhcp4_LDADD = $(top_builddir)/src/lib/dhcp/libdhcp++.la
 b10_dhcp4_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 b10_dhcp4_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
 b10_dhcp4_LDADD += $(top_builddir)/src/lib/log/liblog.la
@@ -41,4 +41,3 @@ b10_dhcp4_LDADD += $(top_builddir)/src/lib/log/liblog.la
 # and can't use @datadir@ because doesn't expand default ${prefix}
 b10_dhcp4dir = $(pkgdatadir)
 b10_dhcp4_DATA = dhcp4.spec
-

+ 7 - 7
src/bin/dhcp4/dhcp4_srv.cc

@@ -39,7 +39,7 @@ Dhcpv4Srv::Dhcpv4Srv(uint16_t port) {
 
     setServerID();
 
-    shutdown = false;
+    shutdown_ = false;
 }
 
 Dhcpv4Srv::~Dhcpv4Srv() {
@@ -49,7 +49,7 @@ Dhcpv4Srv::~Dhcpv4Srv() {
 
 bool
 Dhcpv4Srv::run() {
-    while (!shutdown) {
+    while (!shutdown_) {
         boost::shared_ptr<Pkt4> query; // client's message
         boost::shared_ptr<Pkt4> rsp;   // server's response
 
@@ -140,28 +140,28 @@ Dhcpv4Srv::setServerID() {
 }
 
 boost::shared_ptr<Pkt4>
-Dhcpv4Srv::processDiscover(boost::shared_ptr<Pkt4> discover) {
+Dhcpv4Srv::processDiscover(boost::shared_ptr<Pkt4>& discover) {
     /// TODO: Currently implemented echo mode. Implement this for real
     return (discover);
 }
 
 boost::shared_ptr<Pkt4>
-Dhcpv4Srv::processRequest(boost::shared_ptr<Pkt4> request) {
+Dhcpv4Srv::processRequest(boost::shared_ptr<Pkt4>& request) {
     /// TODO: Currently implemented echo mode. Implement this for real
     return (request);
 }
 
-void Dhcpv4Srv::processRelease(boost::shared_ptr<Pkt4> release) {
+void Dhcpv4Srv::processRelease(boost::shared_ptr<Pkt4>& release) {
     /// TODO: Implement this.
     cout << "Received RELEASE on " << release->getIface() << " interface." << endl;
 }
 
-void Dhcpv4Srv::processDecline(boost::shared_ptr<Pkt4> decline) {
+void Dhcpv4Srv::processDecline(boost::shared_ptr<Pkt4>& decline) {
     /// TODO: Implement this.
     cout << "Received DECLINE on " << decline->getIface() << " interface." << endl;
 }
 
-boost::shared_ptr<Pkt4> Dhcpv4Srv::processInform(boost::shared_ptr<Pkt4> inform) {
+boost::shared_ptr<Pkt4> Dhcpv4Srv::processInform(boost::shared_ptr<Pkt4>& inform) {
     /// TODO: Currently implemented echo mode. Implement this for real
     return (inform);
 }

+ 13 - 9
src/bin/dhcp4/dhcp4_srv.h

@@ -35,13 +35,17 @@ namespace dhcp {
 /// appropriate responses.
 class Dhcpv4Srv : public boost::noncopyable {
 
-public:
+    public:
     /// @brief Default constructor.
     ///
     /// Instantiates necessary services, required to run DHCPv6 server.
     /// In particular, creates IfaceMgr that will be responsible for
     /// network interaction. Will instantiate lease manager, and load
-    /// old or create new DUID.
+    /// old or create new DUID. It is possible to specify an alternate
+    /// port on which the DHCPv4 server will listen. That is mostly useful
+    /// for testing purposes.
+    ///
+    /// @param port specifies port number to listen on
     Dhcpv4Srv(uint16_t port = DHCP4_SERVER_PORT);
 
     /// @brief Destructor. Used during DHCPv6 service shutdown.
@@ -68,13 +72,13 @@ protected:
     ///
     /// @return OFFER message or NULL
     boost::shared_ptr<Pkt4>
-    processDiscover(boost::shared_ptr<Pkt4> discover);
+    processDiscover(boost::shared_ptr<Pkt4>& discover);
 
     /// @brief Processes incoming REQUEST and returns REPLY response.
     ///
     /// Processes incoming REQUEST message and verifies that its sender
     /// should be served. In particular, verifies that requested lease
-    /// is valid, not expired, not reserved, not used by other client and 
+    /// is valid, not expired, not reserved, not used by other client and
     /// that requesting client is allowed to use it.
     ///
     /// Returns ACK message, NACK message, or NULL
@@ -82,7 +86,7 @@ protected:
     /// @param request a message received from client
     ///
     /// @return ACK or NACK message
-    boost::shared_ptr<Pkt4> processRequest(boost::shared_ptr<Pkt4> request);
+    boost::shared_ptr<Pkt4> processRequest(boost::shared_ptr<Pkt4>& request);
 
     /// @brief Stub function that will handle incoming RELEASE messages.
     ///
@@ -90,17 +94,17 @@ protected:
     /// this function does not return anything.
     ///
     /// @param release message received from client
-    void processRelease(boost::shared_ptr<Pkt4> release);
+    void processRelease(boost::shared_ptr<Pkt4>& release);
 
     /// @brief Stub function that will handle incoming DHCPDECLINE messages.
     ///
     /// @param decline message received from client
-    void processDecline(boost::shared_ptr<Pkt4> decline);
+    void processDecline(boost::shared_ptr<Pkt4>& decline);
 
     /// @brief Stub function that will handle incoming INFORM messages.
     ///
     /// @param infRequest message received from client
-    boost::shared_ptr<Pkt4> processInform(boost::shared_ptr<Pkt4> inform);
+    boost::shared_ptr<Pkt4> processInform(boost::shared_ptr<Pkt4>& inform);
 
     /// @brief Returns server-identifier option
     ///
@@ -124,7 +128,7 @@ protected:
 
     /// indicates if shutdown is in progress. Setting it to true will
     /// initiate server shutdown procedure.
-    volatile bool shutdown;
+    volatile bool shutdown_;
 };
 
 }; // namespace isc::dhcp

+ 3 - 2
src/bin/dhcp4/tests/Makefile.am

@@ -30,14 +30,15 @@ if HAVE_GTEST
 
 TESTS += dhcp4_unittests
 
-dhcp4_unittests_SOURCES == ../dhcp4_srv.h ../dhcp4_srv.cc
+dhcp4_unittests_SOURCES = ../dhcp4_srv.h ../dhcp4_srv.cc
 dhcp4_unittests_SOURCES += dhcp4_unittests.cc
 dhcp4_unittests_SOURCES += dhcp4_srv_unittest.cc
 
 dhcp4_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 dhcp4_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
 dhcp4_unittests_LDADD = $(GTEST_LDADD)
-dhcp4_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libdhcp.la
+dhcp4_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
+dhcp4_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libdhcp++.la
 dhcp4_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 dhcp4_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
 dhcp4_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la

+ 2 - 9
src/bin/dhcp4/tests/dhcp4_srv_unittest.cc

@@ -32,7 +32,7 @@ namespace {
 const char* const INTERFACE_FILE = "interfaces.txt";
 
 class NakedDhcpv4Srv: public Dhcpv4Srv {
-    // "naked" Interface Manager, exposes internal fields
+    // "naked" DHCPv4 server, exposes internal fields
 public:
     NakedDhcpv4Srv():Dhcpv4Srv(DHCP4_SERVER_PORT + 10000) { }
 
@@ -80,11 +80,7 @@ TEST_F(Dhcpv4SrvTest, basic) {
         srv = new Dhcpv4Srv(DHCP4_SERVER_PORT + 10000);
     });
 
-    if (srv) {
-        ASSERT_NO_THROW({
-            delete srv;
-        });
-    }
+    delete srv;
 }
 
 TEST_F(Dhcpv4SrvTest, processDiscover) {
@@ -102,7 +98,6 @@ TEST_F(Dhcpv4SrvTest, processDiscover) {
 
     // TODO: Implement more reasonable tests before starting
     // work on processSomething() method.
-
     delete srv;
 }
 
@@ -121,7 +116,6 @@ TEST_F(Dhcpv4SrvTest, processRequest) {
 
     // TODO: Implement more reasonable tests before starting
     // work on processSomething() method.
-
     delete srv;
 }
 
@@ -153,7 +147,6 @@ TEST_F(Dhcpv4SrvTest, processDecline) {
 
     // TODO: Implement more reasonable tests before starting
     // work on processSomething() method.
-
     delete srv;
 }
 

+ 1 - 1
src/bin/dhcp4/tests/dhcp4_unittests.cc

@@ -24,5 +24,5 @@ main(int argc, char* argv[]) {
 
     int result = RUN_ALL_TESTS();
 
-    return result;
+    return (result);
 }

+ 1 - 1
src/bin/dhcp6/Makefile.am

@@ -34,7 +34,7 @@ pkglibexec_PROGRAMS = b10-dhcp6
 
 b10_dhcp6_SOURCES = main.cc dhcp6_srv.cc dhcp6_srv.h
 
-b10_dhcp6_LDADD = $(top_builddir)/src/lib/dhcp/libdhcp.la
+b10_dhcp6_LDADD = $(top_builddir)/src/lib/dhcp/libdhcp++.la
 b10_dhcp6_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 b10_dhcp6_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
 b10_dhcp6_LDADD += $(top_builddir)/src/lib/log/liblog.la

+ 1 - 1
src/bin/dhcp6/tests/Makefile.am

@@ -52,7 +52,7 @@ dhcp6_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
 dhcp6_unittests_LDADD = $(GTEST_LDADD)
 dhcp6_unittests_LDADD += $(SQLITE_LIBS)
 dhcp6_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
-dhcp6_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libdhcp.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libdhcp++.la
 dhcp6_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
 dhcp6_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 endif

+ 14 - 10
src/bin/resolver/resolver.cc

@@ -57,8 +57,6 @@
 #include "resolver_log.h"
 
 using namespace std;
-using boost::shared_ptr;
-
 using namespace isc;
 using namespace isc::util;
 using namespace isc::acl;
@@ -167,7 +165,7 @@ public:
         return (*query_acl_);
     }
 
-    void setQueryACL(shared_ptr<const RequestACL> new_acl) {
+    void setQueryACL(boost::shared_ptr<const RequestACL> new_acl) {
         query_acl_ = new_acl;
     }
 
@@ -195,7 +193,7 @@ public:
 
 private:
     /// ACL on incoming queries
-    shared_ptr<const RequestACL> query_acl_;
+    boost::shared_ptr<const RequestACL> query_acl_;
 
     /// Object to handle upstream queries
     RecursiveQuery* rec_query_;
@@ -354,13 +352,19 @@ private:
 Resolver::Resolver() :
     impl_(new ResolverImpl()),
     dnss_(NULL),
-    checkin_(new ConfigCheck(this)),
-    dns_lookup_(new MessageLookup(this)),
+    checkin_(NULL),
+    dns_lookup_(NULL),
     dns_answer_(new MessageAnswer),
     nsas_(NULL),
     cache_(NULL),
     configured_(false)
-{}
+{
+    // Operations referring to "this" must be done in the constructor body
+    // (some compilers will issue warnings if "this" is referred to in the
+    // initialization list).
+    checkin_ = new ConfigCheck(this);
+    dns_lookup_ = new MessageLookup(this);
+}
 
 Resolver::~Resolver() {
     delete impl_;
@@ -597,9 +601,9 @@ Resolver::updateConfig(ConstElementPtr config) {
         AddressList listenAddresses(parseAddresses(listenAddressesE,
                                                       "listen_on"));
         const ConstElementPtr query_acl_cfg(config->get("query_acl"));
-        const shared_ptr<const RequestACL> query_acl =
+        const boost::shared_ptr<const RequestACL> query_acl =
             query_acl_cfg ? acl::dns::getRequestLoader().load(query_acl_cfg) :
-            shared_ptr<RequestACL>();
+            boost::shared_ptr<RequestACL>();
         bool set_timeouts(false);
         int qtimeout = impl_->query_timeout_;
         int ctimeout = impl_->client_timeout_;
@@ -765,7 +769,7 @@ Resolver::getQueryACL() const {
 }
 
 void
-Resolver::setQueryACL(shared_ptr<const RequestACL> new_acl) {
+Resolver::setQueryACL(boost::shared_ptr<const RequestACL> new_acl) {
     if (!new_acl) {
         isc_throw(InvalidParameter, "NULL pointer is passed to setQueryACL");
     }

+ 1 - 1
src/bin/resolver/tests/Makefile.am

@@ -45,9 +45,9 @@ run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
 run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
 run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
+run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
 run_unittests_LDADD += $(top_builddir)/src/lib/cache/libcache.la
 run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
-run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
 run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la

+ 1 - 3
src/bin/resolver/tests/response_scrubber_unittest.cc

@@ -12,15 +12,13 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
-// $Id$
+#include <config.h>
 
 #include <string>
 #include <iostream>
 
 #include <gtest/gtest.h>
 
-#include <config.h>
-
 #include <asiolink/io_endpoint.h>
 #include <asiolink/io_address.h>
 #include <netinet/in.h>

+ 1 - 0
src/bin/xfrin/tests/Makefile.am

@@ -27,5 +27,6 @@ endif
 	PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(COMMON_PYTHON_PATH) \
 	TESTDATASRCDIR=$(abs_top_srcdir)/src/bin/xfrin/tests/testdata/ \
 	TESTDATAOBJDIR=$(abs_top_builddir)/src/bin/xfrin/tests/testdata/ \
+	B10_FROM_BUILD=$(abs_top_builddir) \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done

+ 280 - 52
src/bin/xfrin/tests/xfrin_test.py

@@ -17,14 +17,21 @@ import unittest
 import re
 import shutil
 import socket
-import sqlite3
 import sys
 import io
 from isc.testutils.tsigctx_mock import MockTSIGContext
+from isc.testutils.rrset_utils import *
 from xfrin import *
 import xfrin
 from isc.xfrin.diff import Diff
 import isc.log
+# If we use any python library that is basically a wrapper for
+# a library we use as well (like sqlite3 in our datasources),
+# we must make sure we import ours first; if we have special
+# rpath or libtool rules to pick the correct version, python might
+# otherwise choose the wrong one before those rules are hit.
+# This would result in missing symbols later.
+import sqlite3
 
 #
 # Commonly used (mostly constant) test parameters
@@ -36,11 +43,9 @@ TEST_RRCLASS_STR = 'IN'
 TEST_DB_FILE = 'db_file'
 TEST_MASTER_IPV4_ADDRESS = '127.0.0.1'
 TEST_MASTER_IPV4_ADDRINFO = (socket.AF_INET, socket.SOCK_STREAM,
-                             socket.IPPROTO_TCP, '',
                              (TEST_MASTER_IPV4_ADDRESS, 53))
 TEST_MASTER_IPV6_ADDRESS = '::1'
 TEST_MASTER_IPV6_ADDRINFO = (socket.AF_INET6, socket.SOCK_STREAM,
-                             socket.IPPROTO_TCP, '',
                              (TEST_MASTER_IPV6_ADDRESS, 53))
 
 TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
@@ -153,7 +158,7 @@ class MockDataSourceClient():
             return (DataSourceClient.PARTIALMATCH, self)
         raise ValueError('Unexpected input to mock client: bug in test case?')
 
-    def find(self, name, rrtype, target, options):
+    def find(self, name, rrtype, target=None, options=ZoneFinder.FIND_DEFAULT):
         '''Mock ZoneFinder.find().
 
         It returns the predefined SOA RRset to queries for SOA of the common
@@ -224,7 +229,7 @@ class MockXfrinConnection(XfrinConnection):
     def __init__(self, sock_map, zone_name, rrclass, datasrc_client,
                  shutdown_event, master_addr, tsig_key=None):
         super().__init__(sock_map, zone_name, rrclass, MockDataSourceClient(),
-                         shutdown_event, master_addr)
+                         shutdown_event, master_addr, TEST_DB_FILE)
         self.query_data = b''
         self.reply_data = b''
         self.force_time_out = False
@@ -274,10 +279,11 @@ class MockXfrinConnection(XfrinConnection):
                 self.response_generator()
         return len(data)
 
-    def create_response_data(self, response=True, bad_qid=False,
+    def create_response_data(self, response=True, auth=True, bad_qid=False,
                              rcode=Rcode.NOERROR(),
                              questions=default_questions,
                              answers=default_answers,
+                             authorities=[],
                              tsig_ctx=None):
         resp = Message(Message.RENDER)
         qid = self.qid
@@ -288,8 +294,11 @@ class MockXfrinConnection(XfrinConnection):
         resp.set_rcode(rcode)
         if response:
             resp.set_header_flag(Message.HEADERFLAG_QR)
+        if auth:
+            resp.set_header_flag(Message.HEADERFLAG_AA)
         [resp.add_question(q) for q in questions]
         [resp.add_rrset(Message.SECTION_ANSWER, a) for a in answers]
+        [resp.add_rrset(Message.SECTION_AUTHORITY, a) for a in authorities]
 
         renderer = MessageRenderer()
         if tsig_ctx is not None:
@@ -342,13 +351,44 @@ class TestXfrinInitialSOA(TestXfrinState):
         self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
         self.assertEqual(type(XfrinFirstData()),
                          type(self.conn.get_xfrstate()))
-        self.assertEqual(1234, self.conn._end_serial)
+        self.assertEqual(1234, self.conn._end_serial.get_value())
 
     def test_handle_not_soa(self):
         # The given RR is not of SOA
         self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
                           self.ns_rrset)
 
+    def test_handle_ixfr_uptodate(self):
+        self.conn._request_type = RRType.IXFR()
+        self.conn._request_serial = isc.dns.Serial(1234) # same as soa_rrset
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinIXFRUptodate()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_handle_ixfr_uptodate2(self):
+        self.conn._request_type = RRType.IXFR()
+        self.conn._request_serial = isc.dns.Serial(1235) # > soa_rrset
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinIXFRUptodate()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_handle_ixfr_uptodate3(self):
+        # Similar to the previous case, but checking serial number arithmetic
+        # comparison
+        self.conn._request_type = RRType.IXFR()
+        self.conn._request_serial = isc.dns.Serial(0xffffffff)
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinFirstData()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_handle_axfr_uptodate(self):
+        # "request serial" should matter only for IXFR
+        self.conn._request_type = RRType.AXFR()
+        self.conn._request_serial = isc.dns.Serial(1234) # same as soa_rrset
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinFirstData()),
+                         type(self.conn.get_xfrstate()))
+
     def test_finish_message(self):
         self.assertTrue(self.state.finish_message(self.conn))
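
Note: the uptodate tests above (and the soacheck_uptodate cases further below) depend on DNS serial number arithmetic (RFC 1982): 32-bit serials are compared modulo 2^32, so 0xffffffff counts as older than 1234. A standalone sketch of that comparison rule in plain Python, independent of the isc.dns.Serial class the tests use (the helper name serial_gt is made up for illustration):

    def serial_gt(a, b):
        '''RFC 1982 "greater than" for 32-bit DNS serials.

        a > b iff a != b and the forward distance from b to a, taken
        modulo 2**32, is less than 2**31.  A distance of exactly 2**31
        is undefined by the RFC; this sketch treats it as "not greater".
        '''
        return a != b and ((a - b) % 2**32) < 2**31

    # Matches the expectations encoded in the tests:
    assert serial_gt(1234, 1230)        # ordinary ordering
    assert serial_gt(1234, 0xffffffff)  # wrap-around: 0xffffffff is "older"
    assert not serial_gt(1234, 1234)    # equal serials: zone is up to date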
 
@@ -357,7 +397,8 @@ class TestXfrinFirstData(TestXfrinState):
         super().setUp()
         self.state = XfrinFirstData()
         self.conn._request_type = RRType.IXFR()
-        self.conn._request_serial = 1230 # arbitrary chosen serial < 1234
+        # arbitrary chosen serial < 1234:
+        self.conn._request_serial = isc.dns.Serial(1230)
         self.conn._diff = None           # should be replaced in the AXFR case
 
     def test_handle_ixfr_begin_soa(self):
@@ -437,7 +478,7 @@ class TestXfrinIXFRDelete(TestXfrinState):
         # false.
         self.assertFalse(self.state.handle_rr(self.conn, soa_rrset))
         self.assertEqual([], self.conn._diff.get_buffer())
-        self.assertEqual(1234, self.conn._current_serial)
+        self.assertEqual(1234, self.conn._current_serial.get_value())
         self.assertEqual(type(XfrinIXFRAddSOA()),
                          type(self.conn.get_xfrstate()))
 
@@ -468,7 +509,7 @@ class TestXfrinIXFRAdd(TestXfrinState):
         # We need record the state in 'conn' to check the case where the
         # state doesn't change.
         XfrinIXFRAdd().set_xfrstate(self.conn, XfrinIXFRAdd())
-        self.conn._current_serial = 1230
+        self.conn._current_serial = isc.dns.Serial(1230)
         self.state = self.conn.get_xfrstate()
 
     def test_handle_add_rr(self):
@@ -480,7 +521,7 @@ class TestXfrinIXFRAdd(TestXfrinState):
         self.assertEqual(type(XfrinIXFRAdd()), type(self.conn.get_xfrstate()))
 
     def test_handle_end_soa(self):
-        self.conn._end_serial = 1234
+        self.conn._end_serial = isc.dns.Serial(1234)
         self.conn._diff.add_data(self.ns_rrset) # put some dummy change
         self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
         self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
@@ -489,7 +530,7 @@ class TestXfrinIXFRAdd(TestXfrinState):
         self.assertEqual([], self.conn._diff.get_buffer())
 
     def test_handle_new_delete(self):
-        self.conn._end_serial = 1234
+        self.conn._end_serial = isc.dns.Serial(1234)
         # SOA RR whose serial is the current one means we are going to a new
         # difference, starting with removing that SOA.
         self.conn._diff.add_data(self.ns_rrset) # put some dummy change
@@ -500,7 +541,7 @@ class TestXfrinIXFRAdd(TestXfrinState):
 
     def test_handle_out_of_sync(self):
         # getting SOA with an inconsistent serial.  This is an error.
-        self.conn._end_serial = 1235
+        self.conn._end_serial = isc.dns.Serial(1235)
         self.assertRaises(XfrinProtocolError, self.state.handle_rr,
                           self.conn, soa_rrset)
 
@@ -519,11 +560,24 @@ class TestXfrinIXFREnd(TestXfrinState):
     def test_finish_message(self):
         self.assertFalse(self.state.finish_message(self.conn))
 
+class TestXfrinIXFRUptodate(TestXfrinState):
+    def setUp(self):
+        super().setUp()
+        self.state = XfrinIXFRUptodate()
+
+    def test_handle_rr(self):
+        self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+                          self.ns_rrset)
+
+    def test_finish_message(self):
+        self.assertRaises(XfrinZoneUptodate, self.state.finish_message,
+                          self.conn)
+
 class TestXfrinAXFR(TestXfrinState):
     def setUp(self):
         super().setUp()
         self.state = XfrinAXFR()
-        self.conn._end_serial = 1234
+        self.conn._end_serial = isc.dns.Serial(1234)
 
     def test_handle_rr(self):
         """
@@ -598,7 +652,10 @@ class TestXfrinConnection(unittest.TestCase):
             'questions': [example_soa_question],
             'bad_qid': False,
             'response': True,
+            'auth': True,
             'rcode': Rcode.NOERROR(),
+            'answers': default_answers,
+            'authorities': [],
             'tsig': False,
             'axfr_after_soa': self._create_normal_response_data
             }
@@ -655,8 +712,11 @@ class TestXfrinConnection(unittest.TestCase):
         self.conn.reply_data = self.conn.create_response_data(
             bad_qid=self.soa_response_params['bad_qid'],
             response=self.soa_response_params['response'],
+            auth=self.soa_response_params['auth'],
             rcode=self.soa_response_params['rcode'],
             questions=self.soa_response_params['questions'],
+            answers=self.soa_response_params['answers'],
+            authorities=self.soa_response_params['authorities'],
             tsig_ctx=verify_ctx)
         if self.soa_response_params['axfr_after_soa'] != None:
             self.conn.response_generator = \
@@ -687,6 +747,15 @@ class TestXfrinConnection(unittest.TestCase):
         rrset.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS, nsname))
         return rrset
 
+    def _set_test_zone(self, zone_name):
+        '''Set the zone name for transfer to the specified one.
+
+        It also makes sure that the SOA RR (if it exists) is correctly (re)set.
+
+        '''
+        self.conn._zone_name = zone_name
+        self.conn._zone_soa = self.conn._get_zone_soa()
+
 class TestAXFR(TestXfrinConnection):
     def setUp(self):
         super().setUp()
@@ -781,25 +850,26 @@ class TestAXFR(TestXfrinConnection):
         # IXFR query
         msg = self.conn._create_query(RRType.IXFR())
         check_query(RRType.IXFR(), begin_soa_rrset)
-        self.assertEqual(1230, self.conn._request_serial)
+        self.assertEqual(1230, self.conn._request_serial.get_value())
 
     def test_create_ixfr_query_fail(self):
         # In these cases _create_query() will fail to find a valid SOA RR to
         # insert in the IXFR query, and should raise an exception.
 
-        self.conn._zone_name = Name('no-such-zone.example')
+        self._set_test_zone(Name('no-such-zone.example'))
         self.assertRaises(XfrinException, self.conn._create_query,
                           RRType.IXFR())
 
-        self.conn._zone_name = Name('partial-match-zone.example')
+        self._set_test_zone(Name('partial-match-zone.example'))
         self.assertRaises(XfrinException, self.conn._create_query,
                           RRType.IXFR())
 
-        self.conn._zone_name = Name('no-soa.example')
+        self._set_test_zone(Name('no-soa.example'))
         self.assertRaises(XfrinException, self.conn._create_query,
                           RRType.IXFR())
 
-        self.conn._zone_name = Name('dup-soa.example')
+        self._set_test_zone(Name('dup-soa.example'))
+        self.conn._zone_soa = self.conn._get_zone_soa()
         self.assertRaises(XfrinException, self.conn._create_query,
                           RRType.IXFR())
 
@@ -830,8 +900,10 @@ class TestAXFR(TestXfrinConnection):
         self.conn._tsig_key = TSIG_KEY
         # server tsig check fail, return with RCODE 9 (NOTAUTH)
         self.conn._send_query(RRType.SOA())
-        self.conn.reply_data = self.conn.create_response_data(rcode=Rcode.NOTAUTH())
-        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+        self.conn.reply_data = \
+            self.conn.create_response_data(rcode=Rcode.NOTAUTH())
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_without_end_soa(self):
         self.conn._send_query(RRType.AXFR())
@@ -844,7 +916,8 @@ class TestAXFR(TestXfrinConnection):
     def test_response_bad_qid(self):
         self.conn._send_query(RRType.AXFR())
         self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
-        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_error_code_bad_sig(self):
         self.conn._tsig_key = TSIG_KEY
@@ -855,7 +928,7 @@ class TestAXFR(TestXfrinConnection):
                 rcode=Rcode.SERVFAIL())
         # xfrin should check TSIG before other part of incoming message
         # validate log message for XfrinException
-        self.__match_exception(XfrinException,
+        self.__match_exception(XfrinProtocolError,
                                "TSIG verify fail: BADSIG",
                                self.conn._handle_xfrin_responses)
 
@@ -867,7 +940,7 @@ class TestAXFR(TestXfrinConnection):
         self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
         # xfrin should check TSIG before other part of incoming message
         # validate log message for XfrinException
-        self.__match_exception(XfrinException,
+        self.__match_exception(XfrinProtocolError,
                                "TSIG verify fail: BADKEY",
                                self.conn._handle_xfrin_responses)
 
@@ -880,18 +953,21 @@ class TestAXFR(TestXfrinConnection):
         self.conn._send_query(RRType.AXFR())
         self.conn.reply_data = self.conn.create_response_data(
             rcode=Rcode.SERVFAIL())
-        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_multi_question(self):
         self.conn._send_query(RRType.AXFR())
         self.conn.reply_data = self.conn.create_response_data(
             questions=[example_axfr_question, example_axfr_question])
-        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_non_response(self):
         self.conn._send_query(RRType.AXFR())
         self.conn.reply_data = self.conn.create_response_data(response = False)
-        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_soacheck(self):
         # we need to defer the creation until we know the QID, which is
@@ -906,7 +982,7 @@ class TestAXFR(TestXfrinConnection):
     def test_soacheck_badqid(self):
         self.soa_response_params['bad_qid'] = True
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_bad_qid_bad_sig(self):
         self.conn._tsig_key = TSIG_KEY
@@ -916,19 +992,123 @@ class TestAXFR(TestXfrinConnection):
         self.conn.response_generator = self._create_soa_response_data
         # xfrin should check TSIG before other part of incoming message
         # validate log message for XfrinException
-        self.__match_exception(XfrinException,
+        self.__match_exception(XfrinProtocolError,
                                "TSIG verify fail: BADSIG",
                                self.conn._check_soa_serial)
 
     def test_soacheck_non_response(self):
         self.soa_response_params['response'] = False
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_error_code(self):
         self.soa_response_params['rcode'] = Rcode.SERVFAIL()
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_notauth(self):
+        self.soa_response_params['auth'] = False
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_uptodate(self):
+        # Primary's SOA serial is identical to the local serial
+        self.soa_response_params['answers'] = [begin_soa_rrset]
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertRaises(XfrinZoneUptodate, self.conn._check_soa_serial)
+
+    def test_soacheck_uptodate2(self):
+        # Primary's SOA serial is "smaller" than the local serial
+        self.soa_response_params['answers'] = [create_soa(1229)]
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertRaises(XfrinZoneUptodate, self.conn._check_soa_serial)
+
+    def test_soacheck_uptodate3(self):
+        # Similar to the previous case, but checking that the comparison is
+        # based on serial number arithmetic.
+        self.soa_response_params['answers'] = [create_soa(0xffffffff)]
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertRaises(XfrinZoneUptodate, self.conn._check_soa_serial)
+
+    def test_soacheck_newzone(self):
+        # Primary's SOA is 'old', but this secondary doesn't know anything
+        # about the zone yet, so it should accept it.
+        def response_generator():
+            # _request_serial is set in _check_soa_serial().  Reset it here.
+            self.conn._request_serial = None
+            self._create_soa_response_data()
+        self.soa_response_params['answers'] = [begin_soa_rrset]
+        self.conn.response_generator = response_generator
+        self.assertEqual(XFRIN_OK, self.conn._check_soa_serial())
+
+    def test_soacheck_question_empty(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['questions'] = []
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_question_name_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['questions'] = [Question(Name('example.org'),
+                                                          TEST_RRCLASS,
+                                                          RRType.SOA())]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_question_class_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['questions'] = [Question(TEST_ZONE_NAME,
+                                                          RRClass.CH(),
+                                                          RRType.SOA())]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_question_type_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['questions'] = [Question(TEST_ZONE_NAME,
+                                                          TEST_RRCLASS,
+                                                          RRType.AAAA())]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_no_soa(self):
+        # The response just doesn't contain SOA without any other indication
+        # of errors.
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = []
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_soa_name_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = [create_soa(1234,
+                                                          Name('example.org'))]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_soa_class_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        soa = RRset(TEST_ZONE_NAME, RRClass.CH(), RRType.SOA(), RRTTL(0))
+        soa.add_rdata(Rdata(RRType.SOA(), RRClass.CH(), 'm. r. 1234 0 0 0 0'))
+        self.soa_response_params['answers'] = [soa]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_multiple_soa(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = [soa_rrset, soa_rrset]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_cname_response(self):
+        self.conn.response_generator = self._create_soa_response_data
+        # Add SOA to the answer, too, to make sure it doesn't deceive the parser
+        self.soa_response_params['answers'] = [soa_rrset, create_cname()]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_referral_response(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = []
+        self.soa_response_params['authorities'] = [create_ns('ns.example.com')]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_nodata_response(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = []
+        self.soa_response_params['authorities'] = [soa_rrset]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_with_tsig(self):
         # Use a mock tsig context emulating a validly signed response
@@ -947,7 +1127,7 @@ class TestAXFR(TestXfrinConnection):
         self.soa_response_params['rcode'] = Rcode.NOTAUTH()
         self.conn.response_generator = self._create_soa_response_data
 
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_with_tsig_noerror_badsig(self):
         self.conn._tsig_key = TSIG_KEY
@@ -960,7 +1140,7 @@ class TestAXFR(TestXfrinConnection):
         # treat this as a final failure (just as BIND 9 does).
         self.conn.response_generator = self._create_soa_response_data
 
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_with_tsig_unsigned_response(self):
         # we can use a real TSIGContext for this.  the response doesn't
@@ -969,14 +1149,14 @@ class TestAXFR(TestXfrinConnection):
         # it as a fatal transaction failure, too.
         self.conn._tsig_key = TSIG_KEY
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_with_unexpected_tsig_response(self):
         # we reject unexpected TSIG in responses (following BIND 9's
         # behavior)
         self.soa_response_params['tsig'] = True
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_response_shutdown(self):
         self.conn.response_generator = self._create_normal_response_data
@@ -1238,6 +1418,18 @@ class TestAXFR(TestXfrinConnection):
         self.conn.response_generator = self._create_soa_response_data
         self.assertEqual(self.conn.do_xfrin(True), XFRIN_OK)
 
+    def test_do_soacheck_uptodate(self):
+        self.soa_response_params['answers'] = [begin_soa_rrset]
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertEqual(self.conn.do_xfrin(True), XFRIN_OK)
+
+    def test_do_soacheck_protocol_error(self):
+        # There are several cases, but at this level it's sufficient to check
+        # only one.  We use the case where there's no SOA in the response.
+        self.soa_response_params['answers'] = []
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
+
     def test_do_soacheck_and_xfrin_with_tsig(self):
         # We are going to have a SOA query/response transaction, followed by
         # AXFR, all TSIG signed.  xfrin should use a new TSIG context for
@@ -1270,9 +1462,8 @@ class TestIXFRResponse(TestXfrinConnection):
     def setUp(self):
         super().setUp()
         self.conn._query_id = self.conn.qid = 1035
-        self.conn._request_serial = 1230
+        self.conn._request_serial = isc.dns.Serial(1230)
         self.conn._request_type = RRType.IXFR()
-        self._zone_name = TEST_ZONE_NAME
         self.conn._datasrc_client = MockDataSourceClient()
         XfrinInitialSOA().set_xfrstate(self.conn, XfrinInitialSOA())
 
@@ -1347,6 +1538,16 @@ class TestIXFRResponse(TestXfrinConnection):
                     [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
                     self.conn._datasrc_client.committed_diffs)
 
+    def test_ixfr_response_uptodate(self):
+        '''IXFR response indicates the zone is new enough'''
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[begin_soa_rrset])
+        self.assertRaises(XfrinZoneUptodate, self.conn._handle_xfrin_responses)
+        # no diffs should have been committed
+        check_diffs(self.assertEqual,
+                    [], self.conn._datasrc_client.committed_diffs)
+
     def test_ixfr_response_broken(self):
         '''Test with a broken response.
 
@@ -1379,6 +1580,22 @@ class TestIXFRResponse(TestXfrinConnection):
                     [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
                     self.conn._datasrc_client.committed_diffs)
 
+    def test_ixfr_response_uptodate_extra(self):
+        '''Similar to 'uptodate' test, but with extra bogus data.
+
+        In either case an exception will be raised, but in this case it's
+        considered an error.
+
+        '''
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[begin_soa_rrset, soa_rrset])
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
+        # no diffs should have been committed
+        check_diffs(self.assertEqual,
+                    [], self.conn._datasrc_client.committed_diffs)
+
     def test_ixfr_to_axfr_response(self):
         '''AXFR-style IXFR response.
 
@@ -1482,13 +1699,25 @@ class TestIXFRSession(TestXfrinConnection):
         self.conn.response_generator = create_ixfr_response
         self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
 
-    def test_do_xfrin_fail(self):
+    def test_do_xfrin_fail2(self):
         '''IXFR fails due to a bogus DNS message.
 
         '''
         self._create_broken_response_data()
         self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
 
+    def test_do_xfrin_uptodate(self):
+        '''IXFR is (gracefully) aborted because serial is not new
+
+        '''
+        def create_response():
+            self.conn.reply_data = self.conn.create_response_data(
+                questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+                                    RRType.IXFR())],
+                answers=[begin_soa_rrset])
+        self.conn.response_generator = create_response
+        self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+
 class TestXFRSessionWithSQLite3(TestXfrinConnection):
     '''Tests for XFR sessions using an SQLite3 DB.
 
@@ -1522,8 +1751,7 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
     def get_zone_serial(self):
         result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
         self.assertEqual(DataSourceClient.SUCCESS, result)
-        result, soa = finder.find(TEST_ZONE_NAME, RRType.SOA(),
-                                  None, ZoneFinder.FIND_DEFAULT)
+        result, soa = finder.find(TEST_ZONE_NAME, RRType.SOA())
         self.assertEqual(ZoneFinder.SUCCESS, result)
         self.assertEqual(1, soa.get_rdata_count())
         return get_soa_serial(soa.get_rdata()[0])
@@ -1531,7 +1759,7 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
     def record_exist(self, name, type):
         result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
         self.assertEqual(DataSourceClient.SUCCESS, result)
-        result, soa = finder.find(name, type, None, ZoneFinder.FIND_DEFAULT)
+        result, soa = finder.find(name, type)
         return result == ZoneFinder.SUCCESS
 
     def test_do_ixfrin_sqlite3(self):
@@ -1543,9 +1771,9 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
         self.conn.response_generator = create_ixfr_response
 
         # Confirm xfrin succeeds and SOA is updated
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
         self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
-        self.assertEqual(1234, self.get_zone_serial())
+        self.assertEqual(1234, self.get_zone_serial().get_value())
 
         # Also confirm the corresponding diffs are stored in the diffs table
         conn = sqlite3.connect(self.sqlite3db_obj)
@@ -1574,12 +1802,12 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
                          self._create_soa('1235')])
         self.conn.response_generator = create_ixfr_response
 
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
         self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
 
     def test_do_ixfrin_nozone_sqlite3(self):
-        self.conn._zone_name = Name('nosuchzone.example')
+        self._set_test_zone(Name('nosuchzone.example'))
         self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
         # This should fail even before starting state transition
         self.assertEqual(None, self.conn.get_xfrstate())
@@ -1595,11 +1823,11 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
         self.conn.response_generator = create_response
 
         # Confirm xfrin succeeds and SOA is updated, A RR is deleted.
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
         self.assertTrue(self.record_exist(Name('dns01.example.com'),
                                           RRType.A()))
         self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, type))
-        self.assertEqual(1234, self.get_zone_serial())
+        self.assertEqual(1234, self.get_zone_serial().get_value())
         self.assertFalse(self.record_exist(Name('dns01.example.com'),
                                            RRType.A()))
 
@@ -1627,11 +1855,11 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
                 answers=[soa_rrset, self._create_ns(), soa_rrset, soa_rrset])
         self.conn.response_generator = create_response
 
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
         self.assertTrue(self.record_exist(Name('dns01.example.com'),
                                           RRType.A()))
         self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, type))
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
         self.assertTrue(self.record_exist(Name('dns01.example.com'),
                                           RRType.A()))
 
@@ -1665,11 +1893,11 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
                                     RRType.AXFR())],
                 answers=[soa_rrset, self._create_ns(), soa_rrset])
         self.conn.response_generator = create_response
-        self.conn._zone_name = Name('example.com')
+        self._set_test_zone(Name('example.com'))
         self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.AXFR()))
         self.assertEqual(type(XfrinAXFREnd()),
                          type(self.conn.get_xfrstate()))
-        self.assertEqual(1234, self.get_zone_serial())
+        self.assertEqual(1234, self.get_zone_serial().get_value())
         self.assertFalse(self.record_exist(Name('dns01.example.com'),
                                            RRType.A()))
 

+ 228 - 109
src/bin/xfrin/xfrin.py.in

@@ -24,6 +24,7 @@ import struct
 import threading
 import socket
 import random
+from functools import reduce
 from optparse import OptionParser, OptionValueError
 from isc.config.ccsession import *
 from isc.notify import notify_out
@@ -75,9 +76,10 @@ DEFAULT_MASTER_PORT = 53
 DEFAULT_ZONE_CLASS = RRClass.IN()
 
 __version__ = 'BIND10'
-# define xfrin rcode
-XFRIN_OK = 0
-XFRIN_FAIL = 1
+
+# Internal result codes of an xfr session
+XFRIN_OK = 0                    # normal success
+XFRIN_FAIL = 1                  # general failure (internal/external)
 
 class XfrinException(Exception):
     pass
@@ -87,6 +89,11 @@ class XfrinProtocolError(Exception):
     '''
     pass
 
+class XfrinZoneUptodate(Exception):
+    '''Raised when the remote zone's serial is not newer than ours.
+
+    do_xfrin() catches this and treats the session as a (no-op) success.
+    '''
+    pass
+
 class XfrinZoneInfoException(Exception):
     """This exception is raised if there is an error in the given
        configuration (part), or when a command does not have a required
@@ -153,7 +160,7 @@ def format_addrinfo(addrinfo):
                         "appear to be consisting of (family, socktype, (addr, port))")
 
 def get_soa_serial(soa_rdata):
-    '''Extract the serial field of an SOA RDATA and returns it as an intger.
+    '''Extract the serial field of SOA RDATA and return it as a Serial object.
 
     We don't have to be very efficient here, so we first dump the entire RDATA
     as a string and convert the first corresponding field.  This should be
@@ -162,7 +169,7 @@ def get_soa_serial(soa_rdata):
     should be a more direct and convenient way to get access to the SOA
     fields.
     '''
-    return int(soa_rdata.to_text().split()[2])
+    return Serial(int(soa_rdata.to_text().split()[2]))
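A small usage sketch of the change above, assuming the isc.dns classes referenced elsewhere in this patch (Rdata, RRType, RRClass, Serial); the SOA text follows the standard MNAME RNAME SERIAL REFRESH RETRY EXPIRE MINIMUM field order and the names are placeholders:

    from isc.dns import Rdata, RRType, RRClass, Serial

    # same extraction as get_soa_serial() above: the third whitespace-separated
    # field of the SOA RDATA text is the serial, now wrapped in a Serial object
    rdata = Rdata(RRType.SOA(), RRClass.IN(),
                  'ns.example.com. admin.example.com. 1234 3600 900 604800 3600')
    serial = Serial(int(rdata.to_text().split()[2]))
    print(serial.get_value())    # 1234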
 
 class XfrinState:
     '''
@@ -181,12 +188,12 @@ class XfrinState:
                              (AXFR or
             (recv SOA)        AXFR-style IXFR)  (SOA, add)
     InitialSOA------->FirstData------------->AXFR--------->AXFREnd
-                          |                  |  ^         (post xfr
-                          |                  |  |        checks, then
-                          |                  +--+        commit)
-                          |            (non SOA, add)
-                          |
-                          |                     (non SOA, delete)
+         |                |                  |  ^         (post xfr
+         |(IXFR &&        |                  |  |        checks, then
+         | recv SOA       |                  +--+        commit)
+         | not new)       |            (non SOA, add)
+         V                |
+    IXFRUptodate          |                     (non SOA, delete)
                (pure IXFR,|                           +-------+
             keep handling)|             (Delete SOA)  V       |
                           + ->IXFRDeleteSOA------>IXFRDelete--+
@@ -300,13 +307,14 @@ class XfrinInitialSOA(XfrinState):
                                      + rr.get_type().to_text() + ' received)')
         conn._end_serial = get_soa_serial(rr.get_rdata()[0])
 
-        # FIXME: we need to check the serial is actually greater than ours.
-        # To do so, however, we need to implement serial number arithmetic.
-        # Although it wouldn't be a big task, we'll leave it for a separate
-        # task for now.  (Always performing xfr could be inefficient, but
-        # shouldn't do any harm otherwise)
+        if conn._request_type == RRType.IXFR() and \
+                conn._end_serial <= conn._request_serial:
+            logger.info(XFRIN_IXFR_UPTODATE, conn.zone_str(),
+                        conn._request_serial, conn._end_serial)
+            self.set_xfrstate(conn, XfrinIXFRUptodate())
+        else:
+            self.set_xfrstate(conn, XfrinFirstData())
 
-        self.set_xfrstate(conn, XfrinFirstData())
         return True
 
 class XfrinFirstData(XfrinState):
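The up-to-date branch above depends on isc.dns.Serial comparisons following serial number arithmetic (RFC 1982) rather than plain integer comparison; the exact semantics live in the Serial class, but a minimal sketch of the property this check relies on (assuming Serial provides the full set of rich comparisons, as the <= and >= uses in this patch suggest):

    from isc.dns import Serial

    # ordinary case: the remote serial is newer, so the transfer proceeds
    assert Serial(1234) > Serial(1230)

    # near the 32-bit wrap point a numerically smaller value is still "newer",
    # so the check must not fall back to comparing raw integers
    assert Serial(5) > Serial(0xfffffff0)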
@@ -430,6 +438,14 @@ class XfrinIXFREnd(XfrinState):
         '''
         return False
 
+class XfrinIXFRUptodate(XfrinState):
+    def handle_rr(self, conn, rr):
+        raise XfrinProtocolError('Extra data after single IXFR response ' +
+                                 rr.to_text())
+
+    def finish_message(self, conn):
+        raise XfrinZoneUptodate
+
 class XfrinAXFR(XfrinState):
     def handle_rr(self, conn, rr):
         """
@@ -473,10 +489,13 @@ class XfrinConnection(asyncore.dispatcher):
 
     def __init__(self,
                  sock_map, zone_name, rrclass, datasrc_client,
-                 shutdown_event, master_addrinfo, tsig_key=None,
+                 shutdown_event, master_addrinfo, db_file, tsig_key=None,
                  idle_timeout=60):
         '''Constructor of the XfrinConnection class.
 
+        db_file: SQLite3 DB file.  Unfortunately we still need this for a
+                 temporary workaround in _get_zone_soa().  This should be
+                 removed when we eliminate the need for the workaround.
         idle_timeout: max idle time for read data from socket.
         datasrc_client: the data source client object used for the XFR session.
                         This will eventually replace db_file completely.
@@ -500,7 +519,9 @@ class XfrinConnection(asyncore.dispatcher):
         self._rrclass = rrclass
 
         # Data source handler
+        self._db_file = db_file
         self._datasrc_client = datasrc_client
+        self._zone_soa = self._get_zone_soa()
 
         self._sock_map = sock_map
         self._soa_rr_count = 0
@@ -524,6 +545,55 @@ class XfrinConnection(asyncore.dispatcher):
         self.create_socket(self._master_addrinfo[0], self._master_addrinfo[1])
         self.setblocking(1)
 
+    def _get_zone_soa(self):
+        '''Retrieve the current SOA RR of the zone to be transferred.
+
+        It will be used for various purposes in subsequent xfr protocol
+        processing.   It is validly possible that the zone is currently
+        empty and therefore doesn't have an SOA, so this method doesn't
+        consider it an error and returns None in such a case.  It may or
+        may not result in failure in the actual processing depending on
+        how the SOA is used.
+
+        When the zone has an SOA RR, this method makes sure that it's
+        valid, i.e., it has exactly one RDATA; if it is not the case
+        this method returns None.
+
+        If the underlying data source doesn't even know the zone, this method
+        tries to provide backward compatible behavior where xfrin is
+        responsible for creating the zone in the corresponding DB table.
+        In the longer term we should deprecate this behavior by introducing
+        a more generic zone management framework, but at the moment we try
+        not to surprise existing users.  (Note also that the part providing
+        the compatible behavior uses the old data source API.  We'll
+        deprecate this API in the near future, too).
+
+        '''
+        # get the zone finder.  this must be SUCCESS (not even
+        # PARTIALMATCH) because we are specifying the zone origin name.
+        result, finder = self._datasrc_client.find_zone(self._zone_name)
+        if result != DataSourceClient.SUCCESS:
+            # The data source doesn't know the zone.  For now, we provide
+            # backward compatibility and create a new one ourselves.
+            isc.datasrc.sqlite3_ds.load(self._db_file,
+                                        self._zone_name.to_text(),
+                                        lambda : [])
+            logger.warn(XFRIN_ZONE_CREATED, self.zone_str())
+            # try again
+            result, finder = self._datasrc_client.find_zone(self._zone_name)
+        if result != DataSourceClient.SUCCESS:
+            return None
+        result, soa_rrset = finder.find(self._zone_name, RRType.SOA(),
+                                        None, ZoneFinder.FIND_DEFAULT)
+        if result != ZoneFinder.SUCCESS:
+            logger.info(XFRIN_ZONE_NO_SOA, self.zone_str())
+            return None
+        if soa_rrset.get_rdata_count() != 1:
+            logger.warn(XFRIN_ZONE_MULTIPLE_SOA, self.zone_str(),
+                        soa_rrset.get_rdata_count())
+            return None
+        return soa_rrset
+
     def __set_xfrstate(self, new_state):
         self.__state = new_state
 
@@ -545,37 +615,16 @@ class XfrinConnection(asyncore.dispatcher):
                          str(e))
             return False
 
-    def _get_zone_soa(self):
-        result, finder = self._datasrc_client.find_zone(self._zone_name)
-        if result != DataSourceClient.SUCCESS:
-            raise XfrinException('Zone not found in the given data ' +
-                                 'source: ' + self.zone_str())
-        result, soa_rrset = finder.find(self._zone_name, RRType.SOA(),
-                                        None, ZoneFinder.FIND_DEFAULT)
-        if result != ZoneFinder.SUCCESS:
-            raise XfrinException('SOA RR not found in zone: ' +
-                                 self.zone_str())
-        # Especially for database-based zones, a working zone may be in
-        # a broken state where it has more than one SOA RR.  We proactively
-        # check the condition and abort the xfr attempt if we identify it.
-        if soa_rrset.get_rdata_count() != 1:
-            raise XfrinException('Invalid number of SOA RRs for ' +
-                                 self.zone_str() + ': ' +
-                                 str(soa_rrset.get_rdata_count()))
-        return soa_rrset
-
     def _create_query(self, query_type):
         '''Create an XFR-related query message.
 
-        query_type is either SOA, AXFR or IXFR.  For type IXFR, it searches
-        the associated data source for the current SOA record to include
-        it in the query.  If the corresponding zone or the SOA record
-        cannot be found, it raises an XfrinException exception.  Note that
-        this may not necessarily a broken configuration; for the first attempt
-        of transfer the secondary may not have any boot-strap zone
-        information, in which case IXFR simply won't work.  The xfrin
-        should then fall back to AXFR.  _request_serial is recorded for
-        later use.
+        query_type is either SOA, AXFR or IXFR.  An IXFR query needs the
+        zone's current SOA record.  If it's not known, it raises an
+        XfrinException exception.  Note that this is not necessarily a
+        broken configuration; for the first attempt of transfer the secondary
+        may not have any boot-strap zone information, in which case IXFR
+        simply won't work.  The xfrin should then fall back to AXFR.
+        _request_serial is recorded for later use.
 
         '''
         msg = Message(Message.RENDER)
@@ -585,27 +634,19 @@ class XfrinConnection(asyncore.dispatcher):
         msg.set_opcode(Opcode.QUERY())
         msg.set_rcode(Rcode.NOERROR())
         msg.add_question(Question(self._zone_name, self._rrclass, query_type))
+
+        # Remember our serial, if known
+        self._request_serial = get_soa_serial(self._zone_soa.get_rdata()[0]) \
+            if self._zone_soa is not None else None
+
+        # Set the authority section with our SOA for IXFR
         if query_type == RRType.IXFR():
-            # get the zone finder.  this must be SUCCESS (not even
-            # PARTIALMATCH) because we are specifying the zone origin name.
-            zone_soa_rr = self._get_zone_soa()
-            msg.add_rrset(Message.SECTION_AUTHORITY, zone_soa_rr)
-            self._request_serial = get_soa_serial(zone_soa_rr.get_rdata()[0])
-        else:
-            # For AXFR, we temporarily provide backward compatible behavior
-            # where xfrin is responsible for creating zone in the corresponding
-            # DB table.  Note that the code below uses the old data source
-            # API and assumes SQLite3 in an ugly manner.  We'll have to
-            # develop a better way of managing zones in a generic way and
-            # eliminate the code like the one here.
-            try:
-                self._get_zone_soa()
-            except XfrinException:
-                def empty_rr_generator():
-                    return []
-                isc.datasrc.sqlite3_ds.load(self._db_file,
-                                            self._zone_name.to_text(),
-                                            empty_rr_generator)
+            if self._zone_soa is None:
+                # (incremental) IXFR doesn't work without known SOA
+                raise XfrinException('Failed to create IXFR query due to no ' +
+                                     'SOA for ' + self.zone_str())
+            msg.add_rrset(Message.SECTION_AUTHORITY, self._zone_soa)
+
         return msg
 
     def _send_data(self, data):
@@ -659,7 +700,8 @@ class XfrinConnection(asyncore.dispatcher):
         if self._tsig_ctx is not None:
             tsig_error = self._tsig_ctx.verify(tsig_record, response_data)
             if tsig_error != TSIGError.NOERROR:
-                raise XfrinException('TSIG verify fail: %s' % str(tsig_error))
+                raise XfrinProtocolError('TSIG verify fail: %s' %
+                                         str(tsig_error))
         elif tsig_record is not None:
             # If the response includes a TSIG while we didn't sign the query,
             # we treat it as an error.  RFC doesn't say anything about this
@@ -668,13 +710,78 @@ class XfrinConnection(asyncore.dispatcher):
             # implementation would return such a response, and since this is
             # part of security mechanism, it's probably better to be more
             # strict.
-            raise XfrinException('Unexpected TSIG in response')
+            raise XfrinProtocolError('Unexpected TSIG in response')
+
+    def __parse_soa_response(self, msg, response_data):
+        '''Parse a response to SOA query and extract the SOA from answer.
+
+        This is a subroutine of _check_soa_serial().  This method also
+        validates message, and rejects bogus responses with XfrinProtocolError.
+
+        If everything is okay, it returns the SOA RR from the answer section
+        of the response.
+
+        '''
+        # Check TSIG integrity and validate the header.  Unlike AXFR/IXFR,
+        # we should be more strict for SOA queries and check the AA flag, too.
+        self._check_response_tsig(msg, response_data)
+        self._check_response_header(msg)
+        if not msg.get_header_flag(Message.HEADERFLAG_AA):
+            raise XfrinProtocolError('non-authoritative answer to SOA query')
+
+        # Validate the question section
+        n_question = msg.get_rr_count(Message.SECTION_QUESTION)
+        if n_question != 1:
+            raise XfrinProtocolError('Invalid response to SOA query: ' +
+                                     '(' + str(n_question) + ' questions, 1 ' +
+                                     'expected)')
+        resp_question = msg.get_question()[0]
+        if resp_question.get_name() != self._zone_name or \
+                resp_question.get_class() != self._rrclass or \
+                resp_question.get_type() != RRType.SOA():
+            raise XfrinProtocolError('Invalid response to SOA query: '
+                                     'question mismatch: ' +
+                                     str(resp_question))
+
+        # Look into the answer section for SOA
+        soa = None
+        for rr in msg.get_section(Message.SECTION_ANSWER):
+            if rr.get_type() == RRType.SOA():
+                if soa is not None:
+                    raise XfrinProtocolError('SOA response had multiple SOAs')
+                soa = rr
+            # There should not be a CNAME record at top of zone.
+            if rr.get_type() == RRType.CNAME():
+                raise XfrinProtocolError('SOA query resulted in CNAME')
+
+        # If SOA is not found, try to figure out the reason then report it.
+        if soa is None:
+            # See if we have any SOA records in the authority section.
+            for rr in msg.get_section(Message.SECTION_AUTHORITY):
+                if rr.get_type() == RRType.NS():
+                    raise XfrinProtocolError('SOA query resulted in referral')
+                if rr.get_type() == RRType.SOA():
+                    raise XfrinProtocolError('SOA query resulted in NODATA')
+            raise XfrinProtocolError('No SOA record found in response to ' +
+                                     'SOA query')
+
+        # Check if the SOA is really what we asked for
+        if soa.get_name() != self._zone_name or \
+                soa.get_class() != self._rrclass:
+            raise XfrinProtocolError("SOA response doesn't match query: " +
+                                     str(soa))
+
+        # All okay, return it
+        return soa
+
 
     def _check_soa_serial(self):
-        ''' Compare the soa serial, if soa serial in master is less than
-        the soa serial in local, Finish xfrin.
-        False: soa serial in master is less or equal to the local one.
-        True: soa serial in master is bigger
+        '''Send SOA query and compare the local and remote serials.
+
+        If we know our local serial and the remote serial isn't newer
+        than ours, we abort the session with XfrinZoneUptodate.
+        On success it returns XFRIN_OK for testing.  The caller won't use it.
+
         '''
 
         self._send_query(RRType.SOA())
@@ -682,18 +789,23 @@ class XfrinConnection(asyncore.dispatcher):
         msg_len = socket.htons(struct.unpack('H', data_len)[0])
         soa_response = self._get_request_response(msg_len)
         msg = Message(Message.PARSE)
-        msg.from_wire(soa_response)
+        msg.from_wire(soa_response, Message.PRESERVE_ORDER)
+
+        # Validate/parse the rest of the response, and extract the SOA
+        # from the answer section
+        soa = self.__parse_soa_response(msg, soa_response)
+
+        # Compare the two serials.  If ours is 'new', abort with ZoneUptodate.
+        primary_serial = get_soa_serial(soa.get_rdata()[0])
+        if self._request_serial is not None and \
+                self._request_serial >= primary_serial:
+            if self._request_serial != primary_serial:
+                logger.info(XFRIN_ZONE_SERIAL_AHEAD, primary_serial,
+                            self.zone_str(),
+                            format_addrinfo(self._master_addrinfo),
+                            self._request_serial)
+            raise XfrinZoneUptodate
 
-        # TSIG related checks, including an unexpected signed response
-        self._check_response_tsig(msg, soa_response)
-
-        # perform some minimal level validation.  It's an open issue how
-        # strict we should be (see the comment in _check_response_header())
-        self._check_response_header(msg)
-
-        # TODO, need select soa record from data source then compare the two
-        # serial, current just return OK, since this function hasn't been used
-        # now.
         return XFRIN_OK
 
     def do_xfrin(self, check_soa, request_type=RRType.AXFR()):
@@ -704,22 +816,30 @@ class XfrinConnection(asyncore.dispatcher):
             self._request_type = request_type
             # Right now RRType.[IA]XFR().to_text() is 'TYPExxx', so we need
             # to hardcode here.
-            request_str = 'IXFR' if request_type == RRType.IXFR() else 'AXFR'
+            req_str = 'IXFR' if request_type == RRType.IXFR() else 'AXFR'
             if check_soa:
-                ret =  self._check_soa_serial()
-
-            if ret == XFRIN_OK:
-                logger.info(XFRIN_XFR_TRANSFER_STARTED, request_str,
-                            self.zone_str())
-                self._send_query(self._request_type)
-                self.__state = XfrinInitialSOA()
-                self._handle_xfrin_responses()
-                logger.info(XFRIN_XFR_TRANSFER_SUCCESS, request_str,
-                            self.zone_str())
-
-        except (XfrinException, XfrinProtocolError) as e:
-            logger.error(XFRIN_XFR_TRANSFER_FAILURE, request_str,
-                         self.zone_str(), str(e))
+                self._check_soa_serial()
+
+            logger.info(XFRIN_XFR_TRANSFER_STARTED, req_str, self.zone_str())
+            self._send_query(self._request_type)
+            self.__state = XfrinInitialSOA()
+            self._handle_xfrin_responses()
+            logger.info(XFRIN_XFR_TRANSFER_SUCCESS, req_str, self.zone_str())
+
+        except XfrinZoneUptodate:
+            # Eventually we'll probably have to treat this case as a trigger
+            # of trying another primary server, etc, but for now we treat it
+            # as "success".
+            pass
+        except XfrinProtocolError as e:
+            logger.info(XFRIN_XFR_TRANSFER_PROTOCOL_ERROR, req_str,
+                        self.zone_str(),
+                        format_addrinfo(self._master_addrinfo), str(e))
+            ret = XFRIN_FAIL
+        except XfrinException as e:
+            logger.error(XFRIN_XFR_TRANSFER_FAILURE, req_str,
+                         self.zone_str(),
+                         format_addrinfo(self._master_addrinfo), str(e))
             ret = XFRIN_FAIL
         except Exception as e:
             # Catching all possible exceptions like this is generally not a
@@ -730,7 +850,7 @@ class XfrinConnection(asyncore.dispatcher):
             # catch it here, but until then we need broadest coverage so that
             # we won't miss anything.
 
-            logger.error(XFRIN_XFR_OTHER_FAILURE, request_str,
+            logger.error(XFRIN_XFR_OTHER_FAILURE, req_str,
                          self.zone_str(), str(e))
             ret = XFRIN_FAIL
         finally:
@@ -754,13 +874,14 @@ class XfrinConnection(asyncore.dispatcher):
 
         msg_rcode = msg.get_rcode()
         if msg_rcode != Rcode.NOERROR():
-            raise XfrinException('error response: %s' % msg_rcode.to_text())
+            raise XfrinProtocolError('error response: %s' %
+                                     msg_rcode.to_text())
 
         if not msg.get_header_flag(Message.HEADERFLAG_QR):
-            raise XfrinException('response is not a response')
+            raise XfrinProtocolError('response is not a response')
 
         if msg.get_qid() != self._query_id:
-            raise XfrinException('bad query id')
+            raise XfrinProtocolError('bad query id')
 
     def _check_response_status(self, msg):
         '''Check validation of xfr response. '''
@@ -768,7 +889,7 @@ class XfrinConnection(asyncore.dispatcher):
         self._check_response_header(msg)
 
         if msg.get_rr_count(Message.SECTION_QUESTION) > 1:
-            raise XfrinException('query section count greater than 1')
+            raise XfrinProtocolError('query section count greater than 1')
 
     def _handle_xfrin_responses(self):
         read_next_msg = True
@@ -808,8 +929,8 @@ class XfrinConnection(asyncore.dispatcher):
         return False
 
 def __process_xfrin(server, zone_name, rrclass, db_file,
-                  shutdown_event, master_addrinfo, check_soa, tsig_key,
-                  request_type, conn_class):
+                    shutdown_event, master_addrinfo, check_soa, tsig_key,
+                    request_type, conn_class):
     conn = None
     exception = None
     ret = XFRIN_FAIL
@@ -840,11 +961,9 @@ def __process_xfrin(server, zone_name, rrclass, db_file,
         while retry:
             retry = False
             conn = conn_class(sock_map, zone_name, rrclass, datasrc_client,
-                              shutdown_event, master_addrinfo, tsig_key)
+                              shutdown_event, master_addrinfo, db_file,
+                              tsig_key)
             conn.init_socket()
-            # XXX: We still need _db_file for temporary workaround in _create_query().
-            # This should be removed when we eliminate the need for the workaround.
-            conn._db_file = db_file
             ret = XFRIN_FAIL
             if conn.connect_to_master():
                 ret = conn.do_xfrin(check_soa, request_type)

+ 62 - 7
src/bin/xfrin/xfrin_messages.mes

@@ -15,18 +15,63 @@
 # No namespace declaration - these constants go in the global namespace
 # of the xfrin messages python module.
 
+% XFRIN_ZONE_CREATED Zone %1 not found in the given data source, newly created
+On starting an xfrin session, it is identified that the zone to be
+transferred is not found in the data source.  This can happen if a
+secondary DNS server first tries to perform AXFR from a primary server
+without creating the zone image beforehand (e.g. by b10-loadzone).  As
+of this writing the xfrin process provides behavior backward compatible
+with previous versions: it creates a new zone in the data source so as
+not to surprise existing users too much.  This is probably not a good
+idea, however, in terms of who should be responsible for managing
+zones at a higher level.  In the future it is more likely that a
+separate zone management framework will be provided, and the situation
+where the given zone isn't found will be treated as an error in xfrin.
+
+% XFRIN_ZONE_NO_SOA Zone %1 does not have SOA
+On starting an xfrin session, it is identified that the zone to be
+transferred does not have an SOA RR in the data source.  This is not
+necessarily an error; if a secondary DNS server first tries to perform
+a transfer from a primary server, the zone can be empty, and therefore
+doesn't have an SOA.  Subsequent AXFR will fill in the zone; if the
+attempt is IXFR it will fail in query creation.
+
+% XFRIN_ZONE_MULTIPLE_SOA Zone %1 has %2 SOA RRs
+On starting an xfrin session, it is identified that the zone to be
+transferred has multiple SOA RRs.  Such a zone is broken, but could be
+accidentally configured, especially in a data source using a "non
+captive" backend database.  The implementation ignores the SOA RRs
+entirely and tries to continue processing as if the zone were empty.  This
+means subsequent AXFR can succeed and possibly replace the zone with
+valid content, but an IXFR attempt will fail.
+
+% XFRIN_ZONE_SERIAL_AHEAD Serial number (%1) for %2 received from master %3 < ours (%4)
+The response to an SOA query prior to xfr indicated that the zone's
+SOA serial at the primary server is smaller than that of the xfrin
+client.  This is not necessarily an error especially if that
+particular primary server is another secondary server which hasn't got
+the latest version of the zone.  But if the primary server is known to
+be the real source of the zone, some unexpected inconsistency may have
+happened, and you may want to take a closer look.  In this case xfrin
+doesn't perform the subsequent zone transfer.
+
 % XFRIN_XFR_OTHER_FAILURE %1 transfer of zone %2 failed: %3
 The XFR transfer for the given zone has failed due to a problem outside
 of the xfrin module.  Possible reasons are a broken DNS message or failure
 in database connection.  The error is shown in the log message.
 
-% XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2
-The AXFR transfer for the given zone has failed due to a database problem.
-The error is shown in the log message.  Note: due to the code structure
-this can only happen for AXFR.
-
-% XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3
-The XFR transfer for the given zone has failed due to a protocol error.
+% XFRIN_XFR_TRANSFER_PROTOCOL_ERROR %1 transfer of zone %2 with %3 failed: %4
+The XFR transfer for the given zone has failed due to a protocol
+error, such as an unexpected response from the primary server.  The
+error is shown in the log message.  It may be because the primary
+server implementation is broken or (although less likely) there was
+an attack attempt, but it can also happen due to a configuration
+mismatch, such as when the remote server no longer has authority for
+the zone but the local configuration hasn't been updated.  So it is
+recommended to check the primary server configuration.
+
+% XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 with %3 failed: %4
+The XFR transfer for the given zone has failed due to an internal error.
 The error is shown in the log message.
 
 % XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to AXFR for %1
@@ -118,6 +163,16 @@ daemon will now shut down.
 An uncaught exception was raised while running the xfrin daemon. The
 exception message is printed in the log message.
 
+% XFRIN_IXFR_UPTODATE IXFR requested serial for %1 is %2, master has %3, not updating
+The first SOA record in an IXFR response indicates the zone's serial
+at the primary server is not newer than the client's.  This is
+basically an unexpected event because normally the client first checks
+the SOA serial with an SOA query, but it can still happen if the transfer
+is manually invoked or (although unlikely) there is a rapid change at
+the primary server between the SOA and IXFR queries.  The client
+implementation confirms the whole response consists of this single SOA,
+and aborts the transfer just like a successful case.
+
 % XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1
 In an attempt of IXFR processing, the beginning SOA of the first difference
 (following the initial SOA that specified the final SOA for all the

+ 13 - 0
src/bin/xfrout/b10-xfrout.8

@@ -71,6 +71,19 @@ The configurable settings are:
 defines the maximum number of outgoing zone transfers that can run concurrently\&. The default is 10\&.
 .PP
 
+\fItsig_key_ring\fR
+A list of TSIG keys (each of which is in the form of name:base64\-key[:algorithm]) used for access control on transfer requests\&. The default is an empty list\&.
+.PP
+
+\fItransfer_acl\fR
+A list of ACL elements that apply to all transfer requests by default (unless overridden in zone_config)\&. See the BIND 10 guide for configuration examples\&. The default is an element that allows any transfer requests\&.
+.PP
+
+\fIzone_config\fR
+A list of JSON objects (i\&.e\&. maps) that define per zone configuration concerning
+\fBb10\-xfrout\fR\&. The supported names of each object are "origin" (the origin name of the zone), "class" (the RR class of the zone, optional, defaults to "IN"), and "transfer_acl" (an ACL only applicable to transfer requests for that zone)\&. See the BIND 10 guide for configuration examples\&. The default is an empty list, that is, no zone specific configuration\&.
+.PP
+
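For illustration, a possible combination of these settings as it might appear in the Xfrout part of the configuration (the zone name, network, key name and key material are made up, and the exact ACL element keys should be checked against the BIND 10 guide):

    "tsig_key_ring": [ "tsig-key.example:SFuWd/q99SzF8Yzd1QbB9g==" ],
    "transfer_acl":  [ { "action": "ACCEPT", "from": "192.0.2.0/24" } ],
    "zone_config":   [ { "origin": "example.com",
                         "class": "IN",
                         "transfer_acl": [ { "action": "ACCEPT",
                                             "key": "tsig-key.example" } ] } ]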
 \fIlog_name\fR
 .PP
 

+ 11 - 11
src/bin/xfrout/b10-xfrout.xml

@@ -20,7 +20,7 @@
 <refentry>
 
   <refentryinfo>
-    <date>December 1, 2010</date>
+    <date>December 15, 2011</date>
   </refentryinfo>
 
   <refmeta>
@@ -52,7 +52,7 @@
   <refsect1>
     <title>DESCRIPTION</title>
     <para>The <command>b10-xfrout</command> daemon provides the BIND 10
-      outgoing DNS zone transfer service.
+      outgoing DNS zone transfer service using AXFR or IXFR.
       It is also used to send outgoing NOTIFY messages.
       Normally it is started by the
       <citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
@@ -67,13 +67,13 @@
  process?, and then the socket and xfr request is sent to xfrout.
 -->
 
+<!-- TODO: IXFR from differences, DDNS, UDP socket passing -->
     <note><simpara>
-      This development prototype release only supports AXFR.
-      IXFR is not implemented.
+      Currently IXFR only works if it gets the zone via
+      <command>b10-xfrin</command> and only on TCP.
     </simpara></note>
 
     <para>
-<!-- TODO: does it really use msgq? what for? -->
       This daemon communicates with BIND 10 over a
       <citerefentry><refentrytitle>b10-msgq</refentrytitle><manvolnum>8</manvolnum></citerefentry>
       C-Channel connection.  If this connection is not established,
@@ -100,15 +100,15 @@
     <para>
       <varname>tsig_key_ring</varname>
       A list of TSIG keys (each of which is in the form of
-      name:base64-key[:algorithm]) used for access control on transfer
-      requests.
+      <replaceable>name:base64-key[:algorithm]</replaceable>)
+      used for access control on transfer requests.
       The default is an empty list.
     </para>
     <para>
       <varname>transfer_acl</varname>
       A list of ACL elements that apply to all transfer requests by
-      default (unless overridden in zone_config).  See the BIND 10
-      guide for configuration examples.
+      default (unless overridden in <varname>zone_config</varname>).
+      See the <citetitle>BIND 10 Guide</citetitle> for configuration examples.
       The default is an element that allows any transfer requests.
     </para>
     <para>
@@ -117,9 +117,9 @@
       configuration concerning <command>b10-xfrout</command>.
       The supported names of each object are "origin" (the origin
       name of the zone), "class" (the RR class of the zone, optional,
-      default to "IN"), and "acl_element" (ACL only applicable to
+      default to "IN"), and "transfer_acl" (ACL only applicable to
       transfer requests for that zone).
-      See the BIND 10 guide for configuration examples.
+      See the <citetitle>BIND 10 Guide</citetitle> for configuration examples.
       The default is an empty list, that is, no zone specific configuration.
     </para>
     <para>

+ 170 - 14
src/bin/xfrout/tests/xfrout_test.py.in

@@ -67,10 +67,12 @@ class MySocket():
         self.sendqueue = self.sendqueue[size:]
         return result
 
-    def read_msg(self, parse_options=Message.PARSE_DEFAULT):
+    def read_msg(self, parse_options=Message.PARSE_DEFAULT, need_len=False):
         sent_data = self.readsent()
         get_msg = Message(Message.PARSE)
         get_msg.from_wire(bytes(sent_data[2:]), parse_options)
+        if need_len:
+            return (get_msg, len(sent_data) - 2)
         return get_msg
 
     def clear_send(self):
@@ -93,7 +95,7 @@ class MockDataSrcClient:
             return (isc.datasrc.DataSourceClient.NOTFOUND, None)
         return (isc.datasrc.DataSourceClient.SUCCESS, self)
 
-    def find(self, name, rrtype, target, options):
+    def find(self, name, rrtype, target=None, options=ZoneFinder.FIND_DEFAULT):
         '''Mock ZoneFinder.find().
 
         (At the moment) this method only handles query for type SOA.
@@ -863,7 +865,150 @@ class TestXfroutSession(TestXfroutSessionBase):
 
         self.assertEqual(len(expected_records), len(actual_records))
         for (expected_rr, actual_rr) in zip(expected_records, actual_records):
-            self.assertTrue(expected_rr, actual_rr)
+            self.assertTrue(rrsets_equal(expected_rr, actual_rr))
+
+    def test_reply_xfrout_query_axfr_maxlen(self):
+        # The test RR(set) has the length of 65535 - 12 (size of hdr) bytes:
+        # owner name = 1 (root), fixed fields (type,class,TTL,RDLEN) = 10
+        # RDATA = 65512 (= 65535 - 12 - 1 - 10)
+        self.xfrsess._soa = self.soa_rrset
+        test_rr = create_generic(Name('.'), 65512)
+        self.xfrsess._iterator = [self.soa_rrset, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        # The first message should contain the beginning SOA, and only that RR
+        r = self.sock.read_msg()
+        self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+        self.assertTrue(rrsets_equal(self.soa_rrset,
+                                     r.get_section(Message.SECTION_ANSWER)[0]))
+        # The second message should contain the (large) test RR, and only that RR
+        # The wire format data should have the possible maximum size.
+        r, rlen = self.sock.read_msg(need_len=True)
+        self.assertEqual(65535, rlen)
+        self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+        self.assertTrue(rrsets_equal(test_rr,
+                                     r.get_section(Message.SECTION_ANSWER)[0]))
+        # The third message should contain the ending SOA, and only that RR
+        r = self.sock.read_msg()
+        self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+        self.assertTrue(rrsets_equal(self.soa_rrset,
+                                     r.get_section(Message.SECTION_ANSWER)[0]))
+
+        # there should be no more message
+        self.assertEqual(0, len(self.sock.sendqueue))
+
+    def maxlen_test_common_setup(self, tsig=False):
+        '''Common initialization for some of the tests below
+
+        For those tests we use '.' for all owner names and names in RDATA
+        to avoid having unexpected results due to compression.  It returns
+        the created SOA for convenience.
+
+        If tsig is True, also set up a TSIG (mock) context.  In our test cases
+        the size of the TSIG RR is 81 bytes (key name = example.com,
+        algorithm = hmac-md5)
+
+        '''
+        soa = RRset(Name('.'), RRClass.IN(), RRType.SOA(), RRTTL(3600))
+        soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(), '. . 0 0 0 0 0'))
+        self.mdata = self.create_request_data(zone_name=Name('.'))
+        self.xfrsess._soa = soa
+        if tsig:
+            self.xfrsess._tsig_ctx = \
+                self.create_mock_tsig_ctx(TSIGError.NOERROR)
+            self.xfrsess._tsig_len = 81
+        return soa
+
+    def maxlen_test_common_checks(self, soa_rr, test_rr, expected_n_rr):
+        '''A set of common assertion checks for some tests below.
+
+        In all cases two AXFR response messages should have been created.
+        expected_n_rr is a list of two elements, each specifies the expected
+        number of answer RRs for each message: expected_n_rr[0] is the
+        expected count for the first message and expected_n_rr[1] for the
+        second.  The message that contains two RRs should
+        have the maximum possible wire length (65535 bytes).  And, in all
+        cases, the resulting RRs should be in the order of SOA, another RR,
+        SOA.
+
+        '''
+        # Check the first message
+        r, rlen = self.sock.read_msg(need_len=True)
+        if expected_n_rr[0] == 2:
+            self.assertEqual(65535, rlen)
+        self.assertEqual(expected_n_rr[0],
+                         r.get_rr_count(Message.SECTION_ANSWER))
+        actual_rrs = r.get_section(Message.SECTION_ANSWER)[:]
+
+        # Check the second message
+        r, rlen = self.sock.read_msg(need_len=True)
+        if expected_n_rr[1] == 2:
+            self.assertEqual(65535, rlen)
+        self.assertEqual(expected_n_rr[1],
+                         r.get_rr_count(Message.SECTION_ANSWER))
+        actual_rrs.extend(r.get_section(Message.SECTION_ANSWER))
+        for (expected_rr, actual_rr) in zip([soa_rr, test_rr, soa_rr],
+                                            actual_rrs):
+            self.assertTrue(rrsets_equal(expected_rr, actual_rr))
+
+        # there should be no more message
+        self.assertEqual(0, len(self.sock.sendqueue))
+
+    def test_reply_xfrout_query_axfr_maxlen_with_soa(self):
+        # Similar to the 'maxlen' test, but the first message should be
+        # able to contain both SOA and the large RR.
+        soa = self.maxlen_test_common_setup()
+
+        # The first message will contain the question (5 bytes), so the
+        # test RDATA should allow a room for that.
+        test_rr = create_generic(Name('.'), 65512 - 5 - get_rrset_len(soa))
+        self.xfrsess._iterator = [soa, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        self.maxlen_test_common_checks(soa, test_rr, [2, 1])
+
+    def test_reply_xfrout_query_axfr_maxlen_with_soa_with_tsig(self):
+        # Similar to the previous case, but with TSIG (whose size is 81 bytes).
+        soa = self.maxlen_test_common_setup(True)
+        test_rr = create_generic(Name('.'), 65512 - 5 - 81 -
+                                 get_rrset_len(soa))
+        self.xfrsess._iterator = [soa, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        self.maxlen_test_common_checks(soa, test_rr, [2, 1])
+
+    def test_reply_xfrout_query_axfr_maxlen_with_endsoa(self):
+        # Similar to the max w/ soa test, but the first message cannot contain
+        # both SOA and the long RR due to the question section.  The second
+        # message should be able to contain both.
+        soa = self.maxlen_test_common_setup()
+        test_rr = create_generic(Name('.'), 65512 - get_rrset_len(soa))
+        self.xfrsess._iterator = [soa, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        self.maxlen_test_common_checks(soa, test_rr, [1, 2])
+
+    def test_reply_xfrout_query_axfr_maxlen_with_endsoa_with_tsig(self):
+        # Similar to the previous case, but with TSIG.
+        soa = self.maxlen_test_common_setup(True)
+        test_rr = create_generic(Name('.'), 65512 - 81 - get_rrset_len(soa))
+        self.xfrsess._iterator = [soa, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        self.maxlen_test_common_checks(soa, test_rr, [1, 2])
+
+    def test_reply_xfrout_query_axfr_toobigdata(self):
+        # Similar to the 'maxlen' test, but the RR doesn't even fit in a
+        # single message.
+        self.xfrsess._soa = self.soa_rrset
+        test_rr = create_generic(Name('.'), 65513) # 1 byte larger than 'max'
+        self.xfrsess._iterator = [self.soa_rrset, test_rr]
+        # the reply method should fail with exception
+        self.assertRaises(XfroutSessionError, self.xfrsess._reply_xfrout_query,
+                          self.getmsg(), self.sock)
+        # The first message should still have been sent and contain the
+        # beginning SOA, and only that RR
+        r = self.sock.read_msg()
+        self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+        self.assertTrue(rrsets_equal(self.soa_rrset,
+                                     r.get_section(Message.SECTION_ANSWER)[0]))
+        # And there should have been no other messages sent
+        self.assertEqual(0, len(self.sock.sendqueue))
 
     def test_reply_xfrout_query_ixfr_soa_only(self):
         # Creating an IXFR response that contains only one RR, which is the
@@ -875,7 +1020,8 @@ class TestXfroutSession(TestXfroutSessionBase):
         reply_msg = self.sock.read_msg(Message.PRESERVE_ORDER)
         answer = reply_msg.get_section(Message.SECTION_ANSWER)
         self.assertEqual(1, len(answer))
-        self.assertTrue(create_soa(SOA_CURRENT_VERSION), answer[0])
+        self.assertTrue(rrsets_equal(create_soa(SOA_CURRENT_VERSION),
+                                     answer[0]))
 
 class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
     '''Tests for XFR-out sessions using an SQLite3 DB.
@@ -899,14 +1045,23 @@ class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
         # This zone contains two A RRs for the same name with different TTLs.
         # These TTLs should be preseved in the AXFR stream.
         actual_records = response.get_section(Message.SECTION_ANSWER)
-        expected_records = [create_soa(2011112001),
-                            create_ns(self.ns_name),
-                            create_a(Name(self.ns_name), '192.0.2.1', 3600),
-                            create_a(Name(self.ns_name), '192.0.2.2', 7200),
-                            create_soa(2011112001)]
-        self.assertEqual(len(expected_records), len(actual_records))
-        for (expected_rr, actual_rr) in zip(expected_records, actual_records):
-            self.assertTrue(expected_rr, actual_rr)
+        self.assertEqual(5, len(actual_records))
+        # The first and last RR should be the expected SOA
+        expected_soa = create_soa(2011112001)
+        self.assertTrue(rrsets_equal(expected_soa, actual_records[0]))
+        self.assertTrue(rrsets_equal(expected_soa, actual_records[-1]))
+
+        # The ordering of the intermediate RRs can differ depending on the
+        # internal details of the SQLite3 library, so we sort them by a simple
+        # rule sufficient for the purpose here, and then compare them.
+        expected_others = [create_ns(self.ns_name),
+                           create_a(Name(self.ns_name), '192.0.2.1', 3600),
+                           create_a(Name(self.ns_name), '192.0.2.2', 7200)]
+        keyfn = lambda x: (x.get_type(), x.get_ttl())
+        for (expected_rr, actual_rr) in zip(sorted(expected_others, key=keyfn),
+                                            sorted(actual_records[1:4],
+                                                   key=keyfn)):
+            self.assertTrue(rrsets_equal(expected_rr, actual_rr))
 
     def test_axfr_normal_session(self):
         XfroutSession._handle(self.xfrsess)
@@ -945,7 +1100,7 @@ class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
                             create_soa(2011112001)]
         self.assertEqual(len(expected_records), len(actual_records))
         for (expected_rr, actual_rr) in zip(expected_records, actual_records):
-            self.assertTrue(expected_rr, actual_rr)
+            self.assertTrue(rrsets_equal(expected_rr, actual_rr))
 
     def test_ixfr_soa_only(self):
         # The requested SOA serial is the latest one.  The response should
@@ -956,7 +1111,8 @@ class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
         response = self.sock.read_msg(Message.PRESERVE_ORDER);
         answers = response.get_section(Message.SECTION_ANSWER)
         self.assertEqual(1, len(answers))
-        self.assertTrue(create_soa(SOA_CURRENT_VERSION), answers[0])
+        self.assertTrue(rrsets_equal(create_soa(SOA_CURRENT_VERSION),
+                                     answers[0]))
 
 class MyUnixSockServer(UnixSockServer):
     def __init__(self):

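Note on the assertion fixes above: unittest's assertTrue(x, msg) treats its second argument as a failure message, so the old assertTrue(expected_rrset, actual_rrset) calls always passed without comparing anything; the replacements go through the rrsets_equal() helper instead. A minimal sketch of such a helper, assuming the pydnspp-style accessors already used in these tests (the real helper in the test support code may differ):

    def rrsets_equal(a, b):
        # Field-by-field comparison; RDATA is compared as text so the
        # check does not depend on object identity or ordering.
        return (a.get_name() == b.get_name() and
                a.get_type() == b.get_type() and
                a.get_class() == b.get_class() and
                a.get_ttl() == b.get_ttl() and
                sorted(r.to_text() for r in a.get_rdata()) ==
                sorted(r.to_text() for r in b.get_rdata()))

Usage then mirrors the fixed assertions, e.g. self.assertTrue(rrsets_equal(expected_soa, answer[0])).
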
+ 44 - 14
src/bin/xfrout/xfrout.py.in

@@ -66,6 +66,11 @@ class XfroutConfigError(Exception):
     """
     pass
 
+class XfroutSessionError(Exception):
+    '''An exception raised for some unexpected events during an xfrout session.
+    '''
+    pass
+
 def init_paths():
     global SPECFILE_PATH
     global AUTH_SPECFILE_PATH
@@ -93,7 +98,8 @@ init_paths()
 SPECFILE_LOCATION = SPECFILE_PATH + "/xfrout.spec"
 AUTH_SPECFILE_LOCATION = AUTH_SPECFILE_PATH + os.sep + "auth.spec"
 VERBOSE_MODE = False
-XFROUT_MAX_MESSAGE_SIZE = 65535
+XFROUT_DNS_HEADER_SIZE = 12     # protocol constant
+XFROUT_MAX_MESSAGE_SIZE = 65535 # ditto
 
 # borrowed from xfrin.py @ #1298.  We should eventually unify it.
 def format_zone_str(zone_name, zone_class):
@@ -534,32 +540,44 @@ class XfroutSession():
 
     def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa,
                                     message_upper_len):
-        '''Add the SOA record to the end of message. If it can't be
-        added, a new message should be created to send out the last soa .
+        '''Add the SOA record to the end of the message.
+
+        If it would exceed the maximum allowable size of a message, a new
+        message will be created to send out the last SOA.
+
+        We assume a message with a single SOA can always fit the buffer
+        with or without TSIG.  In theory this could be wrong if TSIG is
+        stupidly large, but in practice this assumption should be reasonable.
         '''
-        if (message_upper_len + self._tsig_len + get_rrset_len(rrset_soa) >=
-            XFROUT_MAX_MESSAGE_SIZE):
+        if message_upper_len + get_rrset_len(rrset_soa) > \
+                XFROUT_MAX_MESSAGE_SIZE:
             self._send_message(sock_fd, msg, self._tsig_ctx)
             msg = self._clear_message(msg)
 
-        # If tsig context exist, sign the last packet
         msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
         self._send_message(sock_fd, msg, self._tsig_ctx)
 
     def _reply_xfrout_query(self, msg, sock_fd):
-        #TODO, there should be a better way to insert rrset.
         msg.make_response()
         msg.set_header_flag(Message.HEADERFLAG_AA)
+        # Reserve space for the fixed header, the size of the question
+        # section, and TSIG size (when included).  The size of the question
+        # section is the sum of the qname length and the size of the
+        # fixed-length fields (type and class, 2 bytes each).
+        message_upper_len = XFROUT_DNS_HEADER_SIZE + \
+            msg.get_question()[0].get_name().get_length() + 4 + \
+            self._tsig_len
 
         # If the iterator is None, we are responding to IXFR with a single
         # SOA RR.
         if self._iterator is None:
-            self._send_message_with_last_soa(msg, sock_fd, self._soa, 0)
+            self._send_message_with_last_soa(msg, sock_fd, self._soa,
+                                             message_upper_len)
             return
 
         # Add the beginning SOA
         msg.add_rrset(Message.SECTION_ANSWER, self._soa)
-        message_upper_len = get_rrset_len(self._soa) + self._tsig_len
+        message_upper_len += get_rrset_len(self._soa)
 
         # Add the rest of the zone/diff contents
         for rrset in self._iterator:
@@ -577,20 +595,33 @@ class XfroutSession():
             # size without compression) and use that to see if we
             # may have reached the limit
             rrset_len = get_rrset_len(rrset)
-            if message_upper_len + rrset_len < XFROUT_MAX_MESSAGE_SIZE:
+
+            if message_upper_len + rrset_len <= XFROUT_MAX_MESSAGE_SIZE:
                 msg.add_rrset(Message.SECTION_ANSWER, rrset)
                 message_upper_len += rrset_len
                 continue
 
+            # RR would not fit.  If there are other RRs in the buffer, send
+            # them now and leave this RR to the next message.
             self._send_message(sock_fd, msg, self._tsig_ctx)
 
+            # Create a new message and reserve space for the carried-over
+            # RR (and TSIG space in case it's to be TSIG signed)
             msg = self._clear_message(msg)
+            message_upper_len = XFROUT_DNS_HEADER_SIZE + rrset_len + \
+                self._tsig_len
+
+            # If this RR overflows the buffer all by itself, fail.  In theory
+            # some RRs might fit in a TCP message when compressed even if they
+            # do not fit when uncompressed, but surely we don't want to send
+            # such monstrosities to an unsuspecting slave.
+            if message_upper_len > XFROUT_MAX_MESSAGE_SIZE:
+                raise XfroutSessionError('RR too large for zone transfer (' +
+                                         str(rrset_len) + ' bytes)')
+
             # Add the RRset to the new message
             msg.add_rrset(Message.SECTION_ANSWER, rrset)
 
-            # Reserve tsig space for signed packet
-            message_upper_len = rrset_len + self._tsig_len
-
         # Add and send the trailing SOA
         self._send_message_with_last_soa(msg, sock_fd, self._soa,
                                          message_upper_len)
@@ -782,7 +813,6 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn,
             os.unlink(self._sock_file)
         except Exception as e:
             logger.error(XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR, self._sock_file, str(e))
-            pass
 
     def update_config_data(self, new_config):
         '''Apply the new config setting of xfrout module.

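Note on the size accounting above: the per-message budget is the 65535-byte limit on a DNS message over TCP, from which the 12-byte header, the question section (qname length plus 4 bytes of fixed type/class fields) and the TSIG length are reserved before any answer RRset is counted; an RRset is admitted only while the running total stays within the limit, and if the header, a single RRset and the TSIG reservation already exceed the limit, XfroutSessionError is raised. A standalone sketch of the same bookkeeping, reusing the constant names from the diff (illustration only, not the module itself):

    XFROUT_DNS_HEADER_SIZE = 12
    XFROUT_MAX_MESSAGE_SIZE = 65535

    def reserved_length(qname_len, tsig_len):
        # header + question (qname + 2 bytes type + 2 bytes class) + TSIG
        return XFROUT_DNS_HEADER_SIZE + qname_len + 4 + tsig_len

    def fits(message_upper_len, rrset_len):
        # an RRset may be added while the running total stays in budget
        return message_upper_len + rrset_len <= XFROUT_MAX_MESSAGE_SIZE

    # With a 12-byte qname and no TSIG a fresh response already uses
    # 12 + 12 + 4 = 28 bytes, leaving 65507 bytes for answer RRsets.
    assert reserved_length(12, 0) == 28
    assert fits(28, 65507) and not fits(28, 65508)
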
+ 10 - 16
src/bin/zonemgr/b10-zonemgr.xml

@@ -20,7 +20,7 @@
 <refentry>
 
   <refentryinfo>
-    <date>May 19, 2011</date>
+    <date>December 8, 2011</date>
   </refentryinfo>
 
   <refmeta>
@@ -107,15 +107,20 @@
 
     <para>
       <varname>refresh_jitter</varname>
+      is used to provide a time range for randomizing the refresh
+      and retry timers, so that many zones do not all need to refresh
+      or retry at the same time.
       This value is a real number.
-      The maximum amount is 0.5.
-      The default is 0.25.
+      The maximum amount is 0.5 (the new timer will be within
+      half the original time).
+      The default is 0.25 (up to a quarter sooner).
+      Set to 0 to disable this jitter.
     </para>
-<!-- TODO: needs to be documented -->
-<!-- TODO:      Set to 0 to disable the jitter.   -->
 
     <para>
       <varname>reload_jitter</varname>
+<!--      is used to provide a slight random variation -->
+<!-- TODO: ask what the purpose of this is and why 0.75. -->
       This value is a real number.
       The default is 0.75.
     </para>
@@ -224,14 +229,6 @@
 
   </refsect1>
 -->
-<!--
-  <refsect1>
-    <title>FILES</title>
-    <para>
-    <filename>/tmp/auth_xfrout_conn</filename>
-    </para>
-  </refsect1>
--->
 
   <refsect1>
     <title>SEE ALSO</title>
@@ -249,9 +246,6 @@
         <refentrytitle>b10-xfrin</refentrytitle><manvolnum>8</manvolnum>
       </citerefentry>,
       <citerefentry>
-        <refentrytitle>b10-xfrout</refentrytitle><manvolnum>8</manvolnum>
-      </citerefentry>,
-      <citerefentry>
         <refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum>
       </citerefentry>,
       <citetitle>BIND 10 Guide</citetitle>.

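Note on refresh_jitter: the wording above describes the effect ("within half the original time", "up to a quarter sooner") but not the formula, which is not part of this diff. One plausible reading, purely for illustration (the function name and the lack of a lower bound are assumptions, not zonemgr's actual code):

    import random

    def jittered(interval, jitter):
        # jitter is a fraction of the interval; 0 disables randomization,
        # 0.25 fires up to a quarter sooner, 0.5 (the maximum) up to half.
        if jitter <= 0:
            return interval
        return interval - random.uniform(0, interval * jitter)

    # jittered(3600, 0.25) falls somewhere in [2700, 3600]
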
+ 1 - 0
src/bin/zonemgr/tests/Makefile.am

@@ -20,6 +20,7 @@ endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
 	$(LIBRARY_PATH_PLACEHOLDER) \
+	B10_FROM_BUILD=$(abs_top_builddir) \
 	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/zonemgr:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/xfr/.libs \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done

+ 82 - 54
src/bin/zonemgr/tests/zonemgr_test.py

@@ -48,28 +48,16 @@ class MySession():
     def group_recvmsg(self, nonblock, seq):
         return None, None
 
-class FakeConfig:
+class FakeCCSession(isc.config.ConfigData):
     def __init__(self):
-        self.zone_list = []
-        self.set_zone_list_from_name_classes([ZONE_NAME_CLASS1_IN,
-                                              ZONE_NAME_CLASS2_CH])
-    def set_zone_list_from_name_classes(self, zones):
-        self.zone_list = map(lambda nc: {"name": nc[0], "class": nc[1]}, zones)
-    def get(self, name):
-        if name == 'lowerbound_refresh':
-            return LOWERBOUND_REFRESH
-        elif name == 'lowerbound_retry':
-            return LOWERBOUND_RETRY
-        elif name == 'max_transfer_timeout':
-            return MAX_TRANSFER_TIMEOUT
-        elif name == 'refresh_jitter':
-            return REFRESH_JITTER
-        elif name == 'reload_jitter':
-            return RELOAD_JITTER
-        elif name == 'secondary_zones':
-            return self.zone_list
+        module_spec = isc.config.module_spec_from_file(SPECFILE_LOCATION)
+        ConfigData.__init__(self, module_spec)
+
+    def get_remote_config_value(self, module_name, identifier):
+        if module_name == "Auth" and identifier == "database_file":
+            return "initdb.file", False
         else:
-            raise ValueError('Uknown config option')
+            return "unknown", False
 
 class MyZonemgrRefresh(ZonemgrRefresh):
     def __init__(self):
@@ -92,7 +80,7 @@ class MyZonemgrRefresh(ZonemgrRefresh):
         sqlite3_ds.get_zone_soa = get_zone_soa
 
         ZonemgrRefresh.__init__(self, MySession(), "initdb.file",
-            self._slave_socket, FakeConfig())
+                                self._slave_socket, FakeCCSession())
         current_time = time.time()
         self._zonemgr_refresh_info = {
          ('example.net.', 'IN'): {
@@ -112,6 +100,7 @@ class TestZonemgrRefresh(unittest.TestCase):
         self.stderr_backup = sys.stderr
         sys.stderr = open(os.devnull, 'w')
         self.zone_refresh = MyZonemgrRefresh()
+        self.cc_session = FakeCCSession()
 
     def test_random_jitter(self):
         max = 100025.120
@@ -458,7 +447,23 @@ class TestZonemgrRefresh(unittest.TestCase):
                     "secondary_zones": [ { "name": "example.net.",
                                            "class": "IN" } ]
                 }
-        self.zone_refresh.update_config_data(config_data)
+        self.zone_refresh.update_config_data(config_data, self.cc_session)
+        self.assertTrue(("example.net.", "IN") in
+                        self.zone_refresh._zonemgr_refresh_info)
+
+        # make sure it does fail if we don't provide a name
+        config_data = {
+                    "secondary_zones": [ { "class": "IN" } ]
+                }
+        self.assertRaises(ZonemgrException,
+                          self.zone_refresh.update_config_data,
+                          config_data, self.cc_session)
+
+        # But not if we don't provide a class
+        config_data = {
+                    "secondary_zones": [ { "name": "example.net." } ]
+                }
+        self.zone_refresh.update_config_data(config_data, self.cc_session)
         self.assertTrue(("example.net.", "IN") in
                         self.zone_refresh._zonemgr_refresh_info)
 
@@ -471,7 +476,7 @@ class TestZonemgrRefresh(unittest.TestCase):
                     "reload_jitter" : 0.75,
                     "secondary_zones": []
                 }
-        self.zone_refresh.update_config_data(config_data)
+        self.zone_refresh.update_config_data(config_data, self.cc_session)
         self.assertEqual(60, self.zone_refresh._lowerbound_refresh)
         self.assertEqual(30, self.zone_refresh._lowerbound_retry)
         self.assertEqual(19800, self.zone_refresh._max_transfer_timeout)
@@ -482,7 +487,7 @@ class TestZonemgrRefresh(unittest.TestCase):
         config_data = {
                     "reload_jitter" : 0.35,
                 }
-        self.zone_refresh.update_config_data(config_data)
+        self.zone_refresh.update_config_data(config_data, self.cc_session)
         self.assertEqual(60, self.zone_refresh._lowerbound_refresh)
         self.assertEqual(30, self.zone_refresh._lowerbound_retry)
         self.assertEqual(19800, self.zone_refresh._max_transfer_timeout)
@@ -500,7 +505,7 @@ class TestZonemgrRefresh(unittest.TestCase):
                     "secondary_zones": [ { "name": "doesnotexist",
                                            "class": "IN" } ]
                 }
-        self.zone_refresh.update_config_data(config_data)
+        self.zone_refresh.update_config_data(config_data, self.cc_session)
         name_class = ("doesnotexist.", "IN")
         self.assertTrue(self.zone_refresh._zonemgr_refresh_info[name_class]["zone_soa_rdata"]
                         is None)
@@ -520,7 +525,7 @@ class TestZonemgrRefresh(unittest.TestCase):
                     "reload_jitter" : 0.75,
                     "secondary_zones": []
                 }
-        self.zone_refresh.update_config_data(config_data)
+        self.zone_refresh.update_config_data(config_data, self.cc_session)
         self.assertEqual(60, self.zone_refresh._lowerbound_refresh)
         self.assertEqual(30, self.zone_refresh._lowerbound_retry)
         self.assertEqual(19800, self.zone_refresh._max_transfer_timeout)
@@ -536,45 +541,67 @@ class TestZonemgrRefresh(unittest.TestCase):
         self.assertFalse(listener.is_alive())
 
     def test_secondary_zones(self):
+        def zone_list_from_name_classes(zones):
+            return map(lambda nc: {"name": nc[0], "class": nc[1]}, zones)
+
         """Test that we can modify the list of secondary zones"""
-        config = FakeConfig()
-        config.zone_list = []
+        config = self.cc_session.get_full_config()
+        config['secondary_zones'] = []
         # First, remove everything
-        self.zone_refresh.update_config_data(config)
+        self.zone_refresh.update_config_data(config, self.cc_session)
         self.assertEqual(self.zone_refresh._zonemgr_refresh_info, {})
         # Put something in
-        config.set_zone_list_from_name_classes([ZONE_NAME_CLASS1_IN])
-        self.zone_refresh.update_config_data(config)
+        config['secondary_zones'] = \
+            zone_list_from_name_classes([ZONE_NAME_CLASS1_IN])
+        self.zone_refresh.update_config_data(config, self.cc_session)
         self.assertTrue(("example.net.", "IN") in
                         self.zone_refresh._zonemgr_refresh_info)
-        # This one does not exist
-        config.set_zone_list_from_name_classes(["example.net", "CH"])
-        self.zone_refresh.update_config_data(config)
-        self.assertFalse(("example.net.", "CH") in
-                        self.zone_refresh._zonemgr_refresh_info)
-        # Simply skip loading soa for the zone, the other configs should be updated successful
+        # Reset the data, set to use a different class, and make sure
+        # it does not get set to IN
+        config['secondary_zones'] = \
+            zone_list_from_name_classes([ZONE_NAME_CLASS1_CH])
+        self.zone_refresh.update_config_data(config, self.cc_session)
         self.assertFalse(("example.net.", "IN") in
-                        self.zone_refresh._zonemgr_refresh_info)
+                         self.zone_refresh._zonemgr_refresh_info)
         # Make sure it works even when we "accidentally" forget the final dot
-        config.set_zone_list_from_name_classes([("example.net", "IN")])
-        self.zone_refresh.update_config_data(config)
+        config['secondary_zones'] = \
+            zone_list_from_name_classes([("example.net", "IN")])
+        self.zone_refresh.update_config_data(config, self.cc_session)
         self.assertTrue(("example.net.", "IN") in
                         self.zone_refresh._zonemgr_refresh_info)
 
-    def tearDown(self):
-        sys.stderr= self.stderr_backup
-
+        # and with case-insensitive checking
+        config['secondary_zones'] = \
+            zone_list_from_name_classes([("Example.NeT.", "in")])
+        self.zone_refresh.update_config_data(config, self.cc_session)
+        self.assertTrue(("example.net.", "IN") in
+                        self.zone_refresh._zonemgr_refresh_info)
 
-class MyCCSession():
-    def __init__(self):
-        pass
-
-    def get_remote_config_value(self, module_name, identifier):
-        if module_name == "Auth" and identifier == "database_file":
-            return "initdb.file", False
-        else:
-            return "unknown", False
+        # Try some bad names
+        config['secondary_zones'] = \
+            zone_list_from_name_classes([("example..net", "IN")])
+        self.assertRaises(ZonemgrException,
+                          self.zone_refresh.update_config_data,
+                          config, self.cc_session)
+        config['secondary_zones'] = \
+            zone_list_from_name_classes([("", "IN")])
+        self.assertRaises(ZonemgrException,
+                          self.zone_refresh.update_config_data,
+                          config, self.cc_session)
+        # Try a bad class
+        config['secondary_zones'] = \
+            zone_list_from_name_classes([("example.net", "BADCLASS")])
+        self.assertRaises(ZonemgrException,
+                          self.zone_refresh.update_config_data,
+                          config, self.cc_session)
+        config['secondary_zones'] = \
+            zone_list_from_name_classes([("example.net", "")])
+        self.assertRaises(ZonemgrException,
+                          self.zone_refresh.update_config_data,
+                          config, self.cc_session)
 
+    def tearDown(self):
+        sys.stderr = self.stderr_backup
 
 class MyZonemgr(Zonemgr):
 
@@ -583,7 +610,7 @@ class MyZonemgr(Zonemgr):
         self._zone_refresh = None
         self._shutdown_event = threading.Event()
         self._cc = MySession()
-        self._module_cc = MyCCSession()
+        self._module_cc = FakeCCSession()
         self._config_data = {
                     "lowerbound_refresh" : 10,
                     "lowerbound_retry" : 5,
@@ -622,7 +649,7 @@ class TestZonemgr(unittest.TestCase):
         self.assertEqual(0.5, self.zonemgr._config_data.get("refresh_jitter"))
         # The zone doesn't exist in the database; simply skip loading its SOA and log a warning
         self.zonemgr._zone_refresh = ZonemgrRefresh(None, "initdb.file", None,
-                                                    config_data1)
+                                                    FakeCCSession())
         config_data1["secondary_zones"] = [{"name": "nonexistent.example",
                                             "class": "IN"}]
         self.assertEqual(self.zonemgr.config_handler(config_data1),
@@ -660,4 +687,5 @@ class TestZonemgr(unittest.TestCase):
         pass
 
 if __name__== "__main__":
+    isc.log.resetUnitTestRootLogger()
     unittest.main()

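Note on the FakeConfig to FakeCCSession change above: basing the fake on isc.config.ConfigData and the real zonemgr spec means option defaults (for example secondary_zones/class) come from the spec file instead of being duplicated by hand in the test. A sketch of the pattern, assuming SPECFILE_LOCATION points at zonemgr.spec as in the test module (the version in the diff hard-codes that path rather than taking it as an argument):

    import isc.config

    class FakeCCSession(isc.config.ConfigData):
        def __init__(self, specfile):
            # Build ConfigData from the real spec so defaults are available
            spec = isc.config.module_spec_from_file(specfile)
            isc.config.ConfigData.__init__(self, spec)

    # FakeCCSession(SPECFILE_LOCATION).get_default_value('secondary_zones/class')
    # then returns whatever default zonemgr.spec declares for the class.
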
+ 42 - 9
src/bin/zonemgr/zonemgr.py.in

@@ -28,6 +28,7 @@ import os
 import time
 import signal
 import isc
+import isc.dns
 import random
 import threading
 import select
@@ -98,7 +99,7 @@ class ZonemgrRefresh:
     can be stopped by calling shutdown() in another thread.
     """
 
-    def __init__(self, cc, db_file, slave_socket, config_data):
+    def __init__(self, cc, db_file, slave_socket, module_cc_session):
         self._cc = cc
         self._check_sock = slave_socket
         self._db_file = db_file
@@ -108,7 +109,8 @@ class ZonemgrRefresh:
         self._max_transfer_timeout = None
         self._refresh_jitter = None
         self._reload_jitter = None
-        self.update_config_data(config_data)
+        self.update_config_data(module_cc_session.get_full_config(),
+                                module_cc_session)
         self._running = False
 
     def _random_jitter(self, max, jitter):
@@ -424,7 +426,7 @@ class ZonemgrRefresh:
         self._read_sock = None
         self._write_sock = None
 
-    def update_config_data(self, new_config):
+    def update_config_data(self, new_config, module_cc_session):
         """ update ZonemgrRefresh config """
         # Get a new value, but only if it is defined (commonly used below)
         # We don't use "value or default", because if value would be
@@ -456,11 +458,42 @@ class ZonemgrRefresh:
             if secondary_zones is not None:
                 # Add new zones
                 for secondary_zone in new_config.get('secondary_zones'):
+                    if 'name' not in secondary_zone:
+                        raise ZonemgrException("Secondary zone specified "
+                                               "without a name")
                     name = secondary_zone['name']
-                    # Be tolerant to sclerotic users who forget the final dot
-                    if name[-1] != '.':
-                        name = name + '.'
-                    name_class = (name, secondary_zone['class'])
+
+                    # Convert to Name and back (both to check and to normalize)
+                    try:
+                        name = isc.dns.Name(name, True).to_text()
+                    # Name() can raise a number of different exceptions, just
+                    # catch 'em all.
+                    except Exception as isce:
+                        raise ZonemgrException("Bad zone name '" + name +
+                                               "': " + str(isce))
+
+                    # Currently we use an explicit get_default_value call
+                    # in case the class hasn't been set. Alternatively, we
+                    # could use
+                    # module_cc_session.get_value('secondary_zones[INDEX]/class')
+                    # to get either the value that was set, or the default if
+                    # it wasn't set.
+                    # But the real solution would be to make new_config a type
+                    # that contains default values itself
+                    # (then this entire method can be simplified a lot, and we
+                    # wouldn't need direct access to the ccsession object)
+                    if 'class' in secondary_zone:
+                        rr_class = secondary_zone['class']
+                    else:
+                        rr_class = module_cc_session.get_default_value(
+                                        'secondary_zones/class')
+                    # Convert rr_class to and from RRClass to check its value
+                    try:
+                        name_class = (name, isc.dns.RRClass(rr_class).to_text())
+                    except isc.dns.InvalidRRClass:
+                        raise ZonemgrException("Bad RR class '" +
+                                               rr_class +
+                                               "' for zone " + name)
                     required[name_class] = True
                     # Add it only if it isn't there already
                     if not name_class in self._zonemgr_refresh_info:
@@ -485,7 +518,7 @@ class Zonemgr:
         self._db_file = self.get_db_file()
         # Create socket pair for communicating between main thread and zonemgr timer thread
         self._master_socket, self._slave_socket = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
-        self._zone_refresh = ZonemgrRefresh(self._cc, self._db_file, self._slave_socket, self._config_data)
+        self._zone_refresh = ZonemgrRefresh(self._cc, self._db_file, self._slave_socket, self._module_cc)
         self._zone_refresh.run_timer()
 
         self._lock = threading.Lock()
@@ -540,7 +573,7 @@ class Zonemgr:
         self._config_data_check(complete)
         if self._zone_refresh is not None:
             try:
-                self._zone_refresh.update_config_data(complete)
+                self._zone_refresh.update_config_data(complete, self._module_cc)
             except Exception as e:
                 answer = create_answer(1, str(e))
                 ok = False

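Note on the validation added above: isc.dns is used both to check and to canonicalize the user-supplied zone, since Name(name, True) accepts a missing trailing dot and downcases, Name.to_text() restores the absolute form, and RRClass() rejects unknown classes while normalizing case. A minimal sketch of that round trip, using only calls that appear in the diff (error handling simplified):

    import isc.dns

    def normalize_zone(name, rr_class):
        # Name() validates and (with True) downcases; to_text() yields the
        # absolute form.  RRClass() rejects bad classes and upper-cases.
        return (isc.dns.Name(name, True).to_text(),
                isc.dns.RRClass(rr_class).to_text())

    # normalize_zone("Example.NeT", "in")  -> ("example.net.", "IN")
    # normalize_zone("example..net", "IN") raises an isc.dns exception
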
+ 1 - 1
src/lib/Makefile.am

@@ -1,3 +1,3 @@
 SUBDIRS = exceptions util log cryptolink dns cc config acl xfr bench \
           asiolink asiodns nsas cache resolve testutils datasrc \
-          server_common python dhcp
+          server_common python dhcp statistics

+ 9 - 9
src/lib/acl/dns.cc

@@ -32,7 +32,6 @@
 #include <acl/logic_check.h>
 
 using namespace std;
-using boost::shared_ptr;
 using namespace isc::dns;
 using namespace isc::data;
 
@@ -78,7 +77,7 @@ internal::RequestCheckCreator::names() const {
     return (supported_names);
 }
 
-shared_ptr<RequestCheck>
+boost::shared_ptr<RequestCheck>
 internal::RequestCheckCreator::create(const string& name,
                                       ConstElementPtr definition,
                                       // unused:
@@ -90,10 +89,10 @@ internal::RequestCheckCreator::create(const string& name,
     }
 
     if (name == "from") {
-        return (shared_ptr<internal::RequestIPCheck>(
+        return (boost::shared_ptr<internal::RequestIPCheck>(
                     new internal::RequestIPCheck(definition->stringValue())));
     } else if (name == "key") {
-        return (shared_ptr<internal::RequestKeyCheck>(
+        return (boost::shared_ptr<internal::RequestKeyCheck>(
                     new internal::RequestKeyCheck(
                         Name(definition->stringValue()))));
     } else {
@@ -116,16 +115,17 @@ getRequestLoader() {
             auto_ptr<RequestLoader>(new RequestLoader(REJECT));
 
         // Register default check creator(s)
-        loader_ptr->registerCreator(shared_ptr<internal::RequestCheckCreator>(
-                                        new internal::RequestCheckCreator()));
         loader_ptr->registerCreator(
-            shared_ptr<NotCreator<RequestContext> >(
+            boost::shared_ptr<internal::RequestCheckCreator>(
+                new internal::RequestCheckCreator()));
+        loader_ptr->registerCreator(
+            boost::shared_ptr<NotCreator<RequestContext> >(
                 new NotCreator<RequestContext>("NOT")));
         loader_ptr->registerCreator(
-            shared_ptr<LogicCreator<AnyOfSpec, RequestContext> >(
+            boost::shared_ptr<LogicCreator<AnyOfSpec, RequestContext> >(
                 new LogicCreator<AnyOfSpec, RequestContext>("ANY")));
         loader_ptr->registerCreator(
-            shared_ptr<LogicCreator<AllOfSpec, RequestContext> >(
+            boost::shared_ptr<LogicCreator<AllOfSpec, RequestContext> >(
                 new LogicCreator<AllOfSpec, RequestContext>("ALL")));
 
         // From this point there shouldn't be any exception thrown

+ 2 - 3
src/lib/acl/tests/acl_test.cc

@@ -18,7 +18,6 @@
 
 using namespace isc::acl;
 using namespace isc::acl::tests;
-using boost::shared_ptr;
 
 namespace {
 
@@ -46,8 +45,8 @@ public:
     Log log_;
     size_t next_check_;
     boost::shared_ptr<Check<Log> > getCheck(bool accepts) {
-        return (shared_ptr<Check<Log> >(new ConstCheck(accepts,
-                                                       next_check_++)));
+        return (boost::shared_ptr<Check<Log> >(new ConstCheck(accepts,
+                                                              next_check_++)));
     }
 };
 

+ 4 - 4
src/lib/acl/tests/ip_check_unittest.cc

@@ -162,7 +162,7 @@ TEST(IPFunctionCheck, SplitIPAddress) {
 
 TEST(IPAddress, constructIPv4) {
     IPAddress ipaddr(tests::getSockAddr("192.0.2.1"));
-    const char expected_data[4] = { 192, 0, 2, 1 };
+    const uint8_t expected_data[4] = { 192, 0, 2, 1 };
     EXPECT_EQ(AF_INET, ipaddr.getFamily());
     EXPECT_EQ(4, ipaddr.getLength());
     EXPECT_EQ(0, memcmp(expected_data, ipaddr.getData(), 4));
@@ -170,9 +170,9 @@ TEST(IPAddress, constructIPv4) {
 
 TEST(IPAddress, constructIPv6) {
     IPAddress ipaddr(tests::getSockAddr("2001:db8:1234:abcd::53"));
-    const char expected_data[16] = { 0x20, 0x01, 0x0d, 0xb8, 0x12, 0x34, 0xab,
-                                     0xcd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-                                     0x00, 0x53 };
+    const uint8_t expected_data[16] = { 0x20, 0x01, 0x0d, 0xb8, 0x12, 0x34, 0xab,
+                                        0xcd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                                        0x00, 0x53 };
     EXPECT_EQ(AF_INET6, ipaddr.getFamily());
     EXPECT_EQ(16, ipaddr.getLength());
     EXPECT_EQ(0, memcmp(expected_data, ipaddr.getData(), 16));

+ 26 - 25
src/lib/acl/tests/loader_test.cc

@@ -70,31 +70,30 @@ public:
     // Some convenience functions to set up
 
     // Create a NamedCreator, convert to shared pointer
-    shared_ptr<NamedCreator> namedCreator(const string& name,
+    boost::shared_ptr<NamedCreator> namedCreator(const string& name,
                                           bool abbreviatedList = true)
     {
-        return (shared_ptr<NamedCreator>(new NamedCreator(name,
-                                                          abbreviatedList)));
+        return (boost::shared_ptr<NamedCreator>(new NamedCreator(name,
+                                                    abbreviatedList)));
     }
     // Create and add a NamedCreator
     void addNamed(const string& name, bool abbreviatedList = true) {
         EXPECT_NO_THROW(loader_.registerCreator(
             namedCreator(name, abbreviatedList)));
     }
-    template<class Result> shared_ptr<Result> loadCheckAny(const string&
-                                                               definition)
+    template<class Result> boost::shared_ptr<Result> loadCheckAny(
+        const string& definition)
     {
         SCOPED_TRACE("Loading check " + definition);
-        shared_ptr<Check<Log> > loaded;
+        boost::shared_ptr<Check<Log> > loaded;
         EXPECT_NO_THROW(loaded = loader_.loadCheck(
                             Element::fromJSON(definition)));
-        shared_ptr<Result> result(dynamic_pointer_cast<Result>(
-            loaded));
+        boost::shared_ptr<Result> result(dynamic_pointer_cast<Result>(loaded));
         EXPECT_TRUE(result);
         return (result);
     }
     // Load a check and convert it to named check to examine it
-    shared_ptr<NamedCheck> loadCheck(const string& definition) {
+    boost::shared_ptr<NamedCheck> loadCheck(const string& definition) {
         return (loadCheckAny<NamedCheck>(definition));
     }
     // The loadCheck throws an exception
@@ -114,11 +113,12 @@ public:
     // Insert the throw, throwcheck and logcheck checks into the loader
     void aclSetup() {
         try {
-            loader_.registerCreator(shared_ptr<ThrowCreator>(new
-                                                             ThrowCreator()));
-            loader_.registerCreator(shared_ptr<ThrowCheckCreator>(
+            loader_.registerCreator(boost::shared_ptr<ThrowCreator>(
+                new ThrowCreator()));
+            loader_.registerCreator(boost::shared_ptr<ThrowCheckCreator>(
                 new ThrowCheckCreator()));
-            loader_.registerCreator(shared_ptr<LogCreator>(new LogCreator()));
+            loader_.registerCreator(boost::shared_ptr<LogCreator>(
+                new LogCreator()));
         }
         // We ignore this exception here, because it happens when we try to
         // insert the creators multiple times. This is harmless.
@@ -133,7 +133,7 @@ public:
     {
         SCOPED_TRACE("Running ACL for " + JSON);
         aclSetup();
-        shared_ptr<ACL<Log> > acl;
+        boost::shared_ptr<ACL<Log> > acl;
         EXPECT_NO_THROW(acl = loader_.load(Element::fromJSON(JSON)));
         EXPECT_EQ(expectedResult, acl->execute(log_));
         log_.checkFirst(logged);
@@ -174,7 +174,7 @@ TEST_F(LoaderTest, CreatorDuplicateUnchanged) {
     names.push_back("name1");
     names.push_back("name3");
     EXPECT_THROW(loader_.registerCreator(
-        shared_ptr<NamedCreator>(new NamedCreator(names))), LoaderError);
+        boost::shared_ptr<NamedCreator>(new NamedCreator(names))), LoaderError);
     // It should now reject both name2 and name3 as not known
     checkException("{\"name2\": null}");
     checkException("{\"name3\": null}");
@@ -183,7 +183,7 @@ TEST_F(LoaderTest, CreatorDuplicateUnchanged) {
 // Test that we can register a creator and load a check with the name
 TEST_F(LoaderTest, SimpleCheckLoad) {
     addNamed("name");
-    shared_ptr<NamedCheck> check(loadCheck("{\"name\": 42}"));
+    boost::shared_ptr<NamedCheck> check(loadCheck("{\"name\": 42}"));
     EXPECT_EQ("name", check->name_);
     EXPECT_TRUE(check->data_->equals(*Element::fromJSON("42")));
 }
@@ -192,7 +192,7 @@ TEST_F(LoaderTest, SimpleCheckLoad) {
 TEST_F(LoaderTest, MultiCreatorCheckLoad) {
     addNamed("name1");
     addNamed("name2");
-    shared_ptr<NamedCheck> check(loadCheck("{\"name2\": 42}"));
+    boost::shared_ptr<NamedCheck> check(loadCheck("{\"name2\": 42}"));
     EXPECT_EQ("name2", check->name_);
     EXPECT_TRUE(check->data_->equals(*Element::fromJSON("42")));
 }
@@ -203,9 +203,9 @@ TEST_F(LoaderTest, MultiNameCheckLoad) {
     vector<string> names;
     names.push_back("name2");
     names.push_back("name3");
-    EXPECT_NO_THROW(loader_.registerCreator(shared_ptr<NamedCreator>(
+    EXPECT_NO_THROW(loader_.registerCreator(boost::shared_ptr<NamedCreator>(
         new NamedCreator(names))));
-    shared_ptr<NamedCheck> check(loadCheck("{\"name3\": 42}"));
+    boost::shared_ptr<NamedCheck> check(loadCheck("{\"name3\": 42}"));
     EXPECT_EQ("name3", check->name_);
     EXPECT_TRUE(check->data_->equals(*Element::fromJSON("42")));
 }
@@ -230,7 +230,8 @@ TEST_F(LoaderTest, UnkownName) {
 
 // Exception from the creator is propagated
 TEST_F(LoaderTest, CheckPropagate) {
-    loader_.registerCreator(shared_ptr<ThrowCreator>(new ThrowCreator()));
+    loader_.registerCreator(boost::shared_ptr<ThrowCreator>(
+                                new ThrowCreator()));
     EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"throw\": null}")),
                  TestCreatorError);
 }
@@ -239,7 +240,7 @@ TEST_F(LoaderTest, CheckPropagate) {
 TEST_F(LoaderTest, AndAbbrev) {
     addNamed("name1");
     addNamed("name2");
-    shared_ptr<LogicOperator<AllOfSpec, Log> > oper(
+    boost::shared_ptr<LogicOperator<AllOfSpec, Log> > oper(
         loadCheckAny<LogicOperator<AllOfSpec, Log> >("{\"name1\": 1, \"name2\": 2}"));
     // If we don't have anything loaded, the rest would crash. It is already
     // reported from within loadCheckAny if it isn't loaded.
@@ -258,7 +259,7 @@ TEST_F(LoaderTest, AndAbbrev) {
 // The abbreviated form of parameters
 TEST_F(LoaderTest, OrAbbrev) {
     addNamed("name1");
-    shared_ptr<LogicOperator<AnyOfSpec, Log> > oper(
+    boost::shared_ptr<LogicOperator<AnyOfSpec, Log> > oper(
         loadCheckAny<LogicOperator<AnyOfSpec, Log> >("{\"name1\": [1, 2]}"));
     // If we don't have anything loaded, the rest would crash. It is already
     // reported from within loadCheckAny if it isn't loaded.
@@ -276,7 +277,7 @@ TEST_F(LoaderTest, OrAbbrev) {
 TEST_F(LoaderTest, BothAbbrev) {
     addNamed("name1");
     addNamed("name2");
-    shared_ptr<LogicOperator<AllOfSpec, Log> > oper(
+    boost::shared_ptr<LogicOperator<AllOfSpec, Log> > oper(
         loadCheckAny<LogicOperator<AllOfSpec, Log> >("{\"name1\": 1, \"name2\": [3, 4]}"));
     // If we don't have anything loaded, the rest would crash. It is already
     // reported from within loadCheckAny if it isn't loaded.
@@ -302,7 +303,7 @@ TEST_F(LoaderTest, BothAbbrev) {
 // creator
 TEST_F(LoaderTest, ListCheck) {
     addNamed("name1", false);
-    shared_ptr<NamedCheck> check(loadCheck("{\"name1\": [1, 2]}"));
+    boost::shared_ptr<NamedCheck> check(loadCheck("{\"name1\": [1, 2]}"));
     EXPECT_EQ("name1", check->name_);
     EXPECT_TRUE(check->data_->equals(*Element::fromJSON("[1, 2]")));
 }
@@ -310,7 +311,7 @@ TEST_F(LoaderTest, ListCheck) {
 // Check the action key is ignored as it should be
 TEST_F(LoaderTest, CheckNoAction) {
     addNamed("name1");
-    shared_ptr<NamedCheck> check(loadCheck("{\"name1\": 1, \"action\": 2}"));
+    boost::shared_ptr<NamedCheck> check(loadCheck("{\"name1\": 1, \"action\": 2}"));
     EXPECT_EQ("name1", check->name_);
     EXPECT_TRUE(check->data_->equals(*Element::fromJSON("1")));
 }

+ 11 - 10
src/lib/acl/tests/logic_check_test.cc

@@ -52,7 +52,7 @@ testCheck(bool emptyResult) {
     EXPECT_EQ(emptyResult, oper.matches(log));
     log.checkFirst(0);
     // Fill it with some subexpressions
-    typedef shared_ptr<ConstCheck> CheckPtr;
+    typedef boost::shared_ptr<ConstCheck> CheckPtr;
     oper.addSubexpression(CheckPtr(new ConstCheck(emptyResult, 0)));
     oper.addSubexpression(CheckPtr(new ConstCheck(emptyResult, 1)));
     // Check what happens when only the default-valued are there
@@ -80,7 +80,7 @@ TEST(LogicOperators, AnyOf) {
 // Fixture for the tests of the creators
 class LogicCreatorTest : public ::testing::Test {
 private:
-    typedef shared_ptr<Loader<Log>::CheckCreator> CreatorPtr;
+    typedef boost::shared_ptr<Loader<Log>::CheckCreator> CreatorPtr;
 public:
     // Register some creators, both tested ones and some auxiliary ones for
     // help
@@ -102,12 +102,12 @@ public:
     // Some convenience shortcut names
     typedef LogicOperator<AnyOfSpec, Log> AnyOf;
     typedef LogicOperator<AllOfSpec, Log> AllOf;
-    typedef shared_ptr<AnyOf> AnyOfPtr;
-    typedef shared_ptr<AllOf> AllOfPtr;
+    typedef boost::shared_ptr<AnyOf> AnyOfPtr;
+    typedef boost::shared_ptr<AllOf> AllOfPtr;
     // Loads the JSON as a check and tries to convert it to the given check
     // subclass
-    template<typename Result> shared_ptr<Result> load(const string& JSON) {
-        shared_ptr<Check<Log> > result;
+    template<typename Result> boost::shared_ptr<Result> load(const string& JSON) {
+        boost::shared_ptr<Check<Log> > result;
         EXPECT_NO_THROW(result = loader_.loadCheck(Element::fromJSON(JSON)));
         /*
          * Optimally, we would use a dynamic_pointer_cast here to both
@@ -122,9 +122,9 @@ public:
          * multiple inheritance.
          */
         EXPECT_STREQ(typeid(Result).name(), typeid(*result.get()).name());
-        shared_ptr<Result>
+        boost::shared_ptr<Result>
             resultConverted(static_pointer_cast<Result>(result));
-        EXPECT_NE(shared_ptr<Result>(), resultConverted);
+        EXPECT_NE(boost::shared_ptr<Result>(), resultConverted);
         return (resultConverted);
     }
 };
@@ -244,7 +244,8 @@ TEST_F(LogicCreatorTest, nested) {
 }
 
 void notTest(bool value) {
-    NotOperator<Log> notOp(shared_ptr<Check<Log> >(new ConstCheck(value, 0)));
+    NotOperator<Log> notOp(boost::shared_ptr<Check<Log> >(
+                                new ConstCheck(value, 0)));
     Log log;
     // It returns negated value
     EXPECT_EQ(!value, notOp.matches(log));
@@ -281,7 +282,7 @@ TEST_F(LogicCreatorTest, notInvalid) {
 }
 
 TEST_F(LogicCreatorTest, notValid) {
-    shared_ptr<NotOperator<Log> > notOp(load<NotOperator<Log> >("{\"NOT\":"
+    boost::shared_ptr<NotOperator<Log> > notOp(load<NotOperator<Log> >("{\"NOT\":"
                                                                 "  {\"logcheck\":"
                                                                 "     [0, true]}}"));
     EXPECT_FALSE(notOp->matches(log_));

+ 3 - 1
src/lib/asiodns/dns_lookup.h

@@ -51,7 +51,9 @@ protected:
     ///
     /// This is intentionally defined as \c protected as this base class
     /// should never be instantiated (except as part of a derived class).
-    DNSLookup() : self_(this) {}
+    DNSLookup() {
+        self_ = this;
+    }
 public:
     /// \brief The destructor
     virtual ~DNSLookup() {}

+ 3 - 1
src/lib/asiodns/dns_server.h

@@ -53,7 +53,9 @@ protected:
     /// This is intentionally defined as \c protected, as this base class
     /// should never be instantiated except as part of a derived class.
     //@{
-    DNSServer() : self_(this) {}
+    DNSServer() {
+        self_ = this;
+    }
 public:
     /// \brief The destructor
     virtual ~DNSServer() {}

+ 2 - 2
src/lib/asiodns/dns_service.cc

@@ -12,14 +12,14 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#include <config.h>
+
 #include <netinet/in.h>
 #include <sys/socket.h>
 #include <unistd.h>             // for some IPC/network system calls
 
 #include <boost/lexical_cast.hpp>
 
-#include <config.h>
-
 #include <log/dummylog.h>
 
 #include <asio.hpp>

+ 11 - 11
src/lib/asiodns/io_fetch.cc

@@ -175,12 +175,12 @@ struct IOFetchData {
 /// IOFetch Constructor - just initialize the private data
 
 IOFetch::IOFetch(Protocol protocol, IOService& service,
-    const isc::dns::Question& question, const IOAddress& address, uint16_t port,
-    OutputBufferPtr& buff, Callback* cb, int wait)
+    const isc::dns::Question& question, const IOAddress& address,
+    uint16_t port, OutputBufferPtr& buff, Callback* cb, int wait, bool edns)
 {
     MessagePtr query_msg(new Message(Message::RENDER));
     initIOFetch(query_msg, protocol, service, question, address, port, buff,
-                cb, wait);
+                cb, wait, edns);
 }
 
 IOFetch::IOFetch(Protocol protocol, IOService& service,
@@ -214,7 +214,7 @@ void
 IOFetch::initIOFetch(MessagePtr& query_msg, Protocol protocol, IOService& service,
                      const isc::dns::Question& question,
                      const IOAddress& address, uint16_t port,
-                     OutputBufferPtr& buff, Callback* cb, int wait)
+                     OutputBufferPtr& buff, Callback* cb, int wait, bool edns)
 {
     data_ = boost::shared_ptr<IOFetchData>(new IOFetchData(
         protocol, service, address, port, buff, cb, wait));
@@ -224,9 +224,13 @@ IOFetch::initIOFetch(MessagePtr& query_msg, Protocol protocol, IOService& servic
     query_msg->setRcode(Rcode::NOERROR());
     query_msg->setHeaderFlag(Message::HEADERFLAG_RD);
     query_msg->addQuestion(question);
-    EDNSPtr edns_query(new EDNS());
-    edns_query->setUDPSize(Message::DEFAULT_MAX_EDNS0_UDPSIZE);
-    query_msg->setEDNS(edns_query);
+
+    if (edns) {
+        EDNSPtr edns_query(new EDNS());
+        edns_query->setUDPSize(Message::DEFAULT_MAX_EDNS0_UDPSIZE);
+        query_msg->setEDNS(edns_query);
+    }
+
     MessageRenderer renderer(*data_->msgbuf);
     query_msg->toWire(renderer);
 }
@@ -355,10 +359,6 @@ IOFetch::stop(Result result) {
         // variable should be done inside a mutex (and the stopped_ variable
         // declared as "volatile").
         //
-        // The numeric arguments indicate the debug level, with the lower
-        // numbers indicating the most important information.  The relative
-        // values are somewhat arbitrary.
-        //
         // TODO: Update testing of stopped_ if threads are used.
         data_->stopped = true;
         switch (result) {

+ 7 - 3
src/lib/asiodns/io_fetch.h

@@ -35,7 +35,7 @@ namespace isc {
 namespace asiodns {
 
 // Forward declarations
-class IOFetchData;
+struct IOFetchData;
 
 /// \brief Upstream Fetch Processing
 ///
@@ -131,11 +131,14 @@ public:
     ///        and deleting it if necessary.
     /// \param wait Timeout for the fetch (in ms).  The default value of
     ///        -1 indicates no timeout.
+    /// \param edns true if the query should use EDNS. The default value is
+    ///        true.
     IOFetch(Protocol protocol, isc::asiolink::IOService& service,
         const isc::dns::Question& question,
         const isc::asiolink::IOAddress& address,
         uint16_t port, isc::util::OutputBufferPtr& buff, Callback* cb,
-        int wait = -1);
+        int wait = -1,
+        bool edns = true);
 
     /// \brief Constructor
     ///  This constructor has one parameter "query_message", which
@@ -206,7 +209,8 @@ private:
     void initIOFetch(isc::dns::MessagePtr& query_message, Protocol protocol,
             isc::asiolink::IOService& service, const isc::dns::Question& question,
             const isc::asiolink::IOAddress& address, uint16_t port,
-            isc::util::OutputBufferPtr& buff, Callback* cb, int wait);
+            isc::util::OutputBufferPtr& buff, Callback* cb, int wait,
+            bool edns = true);
 
     /// \brief Log I/O Failure
     ///

+ 1 - 1
src/lib/asiodns/tcp_server.cc

@@ -70,7 +70,7 @@ TCPServer::TCPServer(io_service& io_service,
 }
 
 void
-TCPServer::operator()(error_code ec, size_t length) {
+TCPServer::operator()(asio::error_code ec, size_t length) {
     /// Because the coroutine reentry block is implemented as
     /// a switch statement, inline variable declarations are not
     /// permitted.  Certain variables used below can be declared here.

+ 1 - 1
src/lib/asiodns/tests/dns_server_unittest.cc

@@ -430,7 +430,7 @@ TEST_F(DNSServerTest, stopUDPServerDuringPrepareAnswer) {
 }
 
 static void stopServerManyTimes(DNSServer *server, unsigned int times) {
-    for (int i = 0; i < times; ++i) {
+    for (unsigned int i = 0; i < times; ++i) {
         server->stop();
     }
 }

+ 10 - 5
src/lib/asiodns/tests/io_fetch_unittest.cc

@@ -169,8 +169,10 @@ public:
     ///        sent with the correct QID.
     /// \param length Amount of data received.
     void udpReceiveHandler(udp::endpoint* remote, udp::socket* socket,
-                    error_code ec = error_code(), size_t length = 0,
-                    bool bad_qid = false, bool second_send = false) {
+                           asio::error_code ec = asio::error_code(),
+                           size_t length = 0, bool bad_qid = false,
+                           bool second_send = false)
+    {
         if (debug_) {
             cout << "udpReceiveHandler(): error = " << ec.value() <<
                     ", length = " << length << endl;
@@ -218,7 +220,8 @@ public:
     ///
     /// \param socket Socket on which data will be received
     /// \param ec Boost error code, value should be zero.
-    void tcpAcceptHandler(tcp::socket* socket, error_code ec = error_code())
+    void tcpAcceptHandler(tcp::socket* socket,
+                          asio::error_code ec = asio::error_code())
     {
         if (debug_) {
             cout << "tcpAcceptHandler(): error = " << ec.value() << endl;
@@ -257,7 +260,8 @@ public:
     /// \param ec ASIO error code, completion code of asynchronous I/O issued
     ///        by the "server" to receive data.
     /// \param length Amount of data received.
-    void tcpReceiveHandler(tcp::socket* socket, error_code ec = error_code(),
+    void tcpReceiveHandler(tcp::socket* socket,
+                           asio::error_code ec = asio::error_code(),
                            size_t length = 0)
     {
         if (debug_) {
@@ -386,7 +390,8 @@ public:
     /// \param ec Boost error code, value should be zero.
     /// \param length Number of bytes sent.
     void tcpSendHandler(size_t expected, tcp::socket* socket,
-                        error_code ec = error_code(), size_t length = 0)
+                        asio::error_code ec = asio::error_code(),
+                        size_t length = 0)
     {
         if (debug_) {
             cout << "tcpSendHandler(): error = " << ec.value() <<

+ 1 - 1
src/lib/asiodns/udp_server.cc

@@ -170,7 +170,7 @@ UDPServer::UDPServer(io_service& io_service, const ip::address& addr,
 /// The function operator is implemented with the "stackless coroutine"
 /// pattern; see internal/coroutine.h for details.
 void
-UDPServer::operator()(error_code ec, size_t length) {
+UDPServer::operator()(asio::error_code ec, size_t length) {
     /// Because the coroutine reentry block is implemented as
     /// a switch statement, inline variable declarations are not
     /// permitted.  Certain variables used below can be declared here.

+ 1 - 1
src/lib/asiodns/udp_server.h

@@ -99,7 +99,7 @@ private:
      * This way the overhead of copying is lower, we copy only one shared
      * pointer instead of about 10 of them.
      */
-    class Data;
+    struct Data;
     boost::shared_ptr<Data> data_;
 };
 

+ 3 - 0
src/lib/asiolink/Makefile.am

@@ -14,6 +14,9 @@ CLEANFILES = *.gcno *.gcda
 # with -Werror (our default setting).
 
 lib_LTLIBRARIES = libasiolink.la
+
+libasiolink_la_LDFLAGS = -no-undefined -version-info 1:0:1
+
 libasiolink_la_SOURCES  = asiolink.h
 libasiolink_la_SOURCES += dummy_io_cb.h
 libasiolink_la_SOURCES += interval_timer.cc interval_timer.h

+ 1 - 1
src/lib/asiolink/io_address.cc

@@ -38,7 +38,7 @@ namespace asiolink {
 // XXX: we cannot simply construct the address in the initialization list,
 // because we'd like to throw our own exception on failure.
 IOAddress::IOAddress(const string& address_str) {
-    error_code err;
+    asio::error_code err;
     asio_address_ = ip::address::from_string(address_str, err);
     if (err) {
         isc_throw(IOError, "Failed to convert string to address '"

+ 2 - 2
src/lib/asiolink/io_service.cc

@@ -12,12 +12,12 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#include <config.h>
+
 #include <netinet/in.h>
 #include <sys/socket.h>
 #include <unistd.h>             // for some IPC/network system calls
 
-#include <config.h>
-
 #include <asio.hpp>
 #include <asiolink/io_service.h>
 

+ 3 - 1
src/lib/asiolink/simple_callback.h

@@ -49,7 +49,9 @@ protected:
     ///
     /// This is intentionally defined as \c protected as this base class
     /// should never be instantiated (except as part of a derived class).
-    SimpleCallback() : self_(this) {}
+    SimpleCallback() {
+        self_ = this;
+    }
 public:
     /// \brief The destructor
     virtual ~SimpleCallback() {}

+ 1 - 1
src/lib/asiolink/tcp_socket.h

@@ -276,7 +276,7 @@ TCPSocket<C>::asyncSend(const void* data, size_t length,
             // ... and send it
             socket_.async_send(asio::buffer(send_buffer_->getData(),
                                send_buffer_->getLength()), callback);
-        } catch (boost::numeric::bad_numeric_cast& e) {
+        } catch (boost::numeric::bad_numeric_cast&) {
             isc_throw(BufferTooLarge,
                       "attempt to send buffer larger than 64kB");
         }

+ 1 - 2
src/lib/asiolink/tests/io_endpoint_unittest.cc

@@ -25,11 +25,10 @@
 #include <asiolink/io_endpoint.h>
 #include <asiolink/io_error.h>
 
-using boost::shared_ptr;
 using namespace isc::asiolink;
 
 namespace {
-typedef shared_ptr<const IOEndpoint> ConstIOEndpointPtr;
+typedef boost::shared_ptr<const IOEndpoint> ConstIOEndpointPtr;
 
 TEST(IOEndpointTest, createUDPv4) {
     ConstIOEndpointPtr ep(IOEndpoint::create(IPPROTO_UDP,

+ 1 - 1
src/lib/bench/benchmark.h

@@ -261,7 +261,7 @@ public:
 
         struct timeval beg, end;
         gettimeofday(&beg, NULL);
-        for (int i = 0; i < iterations_; ++i) {
+        for (unsigned int i = 0; i < iterations_; ++i) {
             sub_iterations_ += target_.run();
         }
         gettimeofday(&end, NULL);

+ 1 - 1
src/lib/bench/benchmark_util.cc

@@ -103,7 +103,7 @@ loadQueryData(istream& input, BenchQueries& queries, const RRClass& qclass,
                 static_cast<const unsigned char*>(buffer.getData()) +
                 buffer.getLength());
             queries.push_back(query_data);
-        } catch (const Exception& error) {
+        } catch (const Exception&) {
             if (strict) {
                 isc_throw(BenchMarkError,
                           "failed to parse/create query around line " <<

+ 6 - 3
src/lib/cache/resolver_cache.cc

@@ -164,14 +164,16 @@ ResolverCache::ResolverCache()
 
 ResolverCache::ResolverCache(std::vector<CacheSizeInfo> caches_info)
 {
-    for (int i = 0; i < caches_info.size(); ++i) {
+    for (std::vector<CacheSizeInfo>::size_type i = 0;
+         i < caches_info.size(); ++i) {
         class_caches_.push_back(new ResolverClassCache(caches_info[i]));
     }
 }
 
 ResolverCache::~ResolverCache()
 {
-    for (int i = 0; i < class_caches_.size(); ++i) {
+    for (std::vector<ResolverClassCache*>::size_type i = 0;
+         i < class_caches_.size(); ++i) {
         delete class_caches_[i];
     }
 }
@@ -261,7 +263,8 @@ ResolverCache::update(const isc::dns::ConstRRsetPtr& rrset_ptr) {
 
 ResolverClassCache*
 ResolverCache::getClassCache(const isc::dns::RRClass& cache_class) const {
-    for (int i = 0; i < class_caches_.size(); ++i) {
+    for (std::vector<ResolverClassCache*>::size_type i = 0;
+         i < class_caches_.size(); ++i) {
         if (class_caches_[i]->getClass() == cache_class) {
             return (class_caches_[i]);
         }

+ 9 - 5
src/lib/cc/data.cc

@@ -742,7 +742,7 @@ MapElement::find(const std::string& id, ConstElementPtr t) const {
             t = p;
             return (true);
         }
-    } catch (const TypeError& e) {
+    } catch (const TypeError&) {
         // ignore
     }
     return (false);
@@ -780,11 +780,11 @@ StringElement::equals(const Element& other) const {
 bool
 ListElement::equals(const Element& other) const {
     if (other.getType() == Element::list) {
-        const int s = size();
+        const size_t s = size();
         if (s != other.size()) {
             return (false);
         }
-        for (int i = 0; i < s; ++i) {
+        for (size_t i = 0; i < s; ++i) {
             if (!get(i)->equals(*other.get(i))) {
                 return (false);
             }
@@ -843,10 +843,14 @@ removeIdentical(ElementPtr a, ConstElementPtr b) {
         isc_throw(TypeError, "Non-map Elements passed to removeIdentical");
     }
 
-    const std::map<std::string, ConstElementPtr>& m = a->mapValue();
+    // As maps do not allow duplicate keys, we can either iterate over a,
+    // checking for identical entries in b, or vice versa.  As elements
+    // are removed from a if a match is found, we choose to iterate over b to
+    // avoid problems with element removal affecting the iterator.
+    const std::map<std::string, ConstElementPtr>& m = b->mapValue();
     for (std::map<std::string, ConstElementPtr>::const_iterator it = m.begin();
          it != m.end() ; ++it) {
-        if (b->contains((*it).first)) {
+        if (a->contains((*it).first)) {
             if (a->get((*it).first)->equals(*b->get((*it).first))) {
                 a->remove((*it).first);
             }

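Note on the removeIdentical change above: the point is iterator safety: entries are erased from a while the loop runs, so the loop must walk the map that is not being modified. The same pitfall exists in Python, where deleting keys from a dict while iterating over it raises RuntimeError; a small illustration of the pattern adopted here, mirroring the new test case added in data_unittests.cc:

    def remove_identical(a, b):
        # Remove from a every entry whose key and value also appear in b.
        # Iterating over b (never modified) keeps the loop valid while
        # entries are deleted from a.
        for key, value in b.items():
            if key in a and a[key] == value:
                del a[key]

    a = {"a": 1, "b": 2, "c": 3}
    remove_identical(a, {"c": 3, "b": 2})
    assert a == {"a": 1}
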
+ 1 - 1
src/lib/cc/session.cc

@@ -367,7 +367,7 @@ Session::recvmsg(ConstElementPtr& env, ConstElementPtr& msg,
     size_t length = impl_->readDataLength();
     if (hasQueuedMsgs()) {
         ConstElementPtr q_el;
-        for (int i = 0; i < impl_->queue_->size(); i++) {
+        for (size_t i = 0; i < impl_->queue_->size(); i++) {
             q_el = impl_->queue_->get(i);
             if (( seq == -1 &&
                   !q_el->get(0)->contains("reply")

+ 11 - 0
src/lib/cc/tests/data_unittests.cc

@@ -523,6 +523,12 @@ TEST(Element, removeIdentical) {
     removeIdentical(a, b);
     EXPECT_EQ(*a, *c);
 
+    a = Element::fromJSON("{ \"a\": 1, \"b\": 2, \"c\": 3 }");
+    b = Element::fromJSON("{ \"c\": 3, \"b\": 2 }");
+    c = Element::fromJSON("{ \"a\": 1 }");
+    removeIdentical(a, b);
+    EXPECT_EQ(*a, *c);
+
     EXPECT_THROW(removeIdentical(Element::create(1), Element::create(2)), TypeError);
 }
 
@@ -567,6 +573,11 @@ TEST(Element, constRemoveIdentical) {
     c = Element::fromJSON("{ \"a\": { \"b\": \"c\" } }");
     EXPECT_EQ(*removeIdentical(a, b), *c);
 
+    a = Element::fromJSON("{ \"a\": 1, \"b\": 2, \"c\": 3 }");
+    b = Element::fromJSON("{ \"c\": 3, \"b\": 2 }");
+    c = Element::fromJSON("{ \"a\": 1 }");
+    EXPECT_EQ(*removeIdentical(a, b), *c);
+
     EXPECT_THROW(removeIdentical(Element::create(1), Element::create(2)),
                  TypeError);
 }

+ 2 - 1
src/lib/cryptolink/Makefile.am

@@ -11,4 +11,5 @@ lib_LTLIBRARIES = libcryptolink.la
 libcryptolink_la_SOURCES = cryptolink.h cryptolink.cc
 libcryptolink_la_SOURCES += crypto_hmac.h crypto_hmac.cc
 
-libcryptolink_la_LIBADD = ${BOTAN_LDFLAGS} ${BOTAN_RPATH}
+libcryptolink_la_LDFLAGS = ${BOTAN_LDFLAGS}
+libcryptolink_la_LIBADD = ${BOTAN_LIBS} ${BOTAN_RPATH}

+ 2 - 2
src/lib/cryptolink/tests/Makefile.am

@@ -16,8 +16,8 @@ TESTS += run_unittests
 run_unittests_SOURCES = run_unittests.cc
 run_unittests_SOURCES += crypto_unittests.cc
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = ${BOTAN_LDFLAGS} $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDFLAGS =  $(BOTAN_LDFLAGS) $(GTEST_LDFLAGS) $(AM_LDFLAGS)
+run_unittests_LDADD = $(GTEST_LDADD) $(BOTAN_LIBS)
 run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/cryptolink/libcryptolink.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la

+ 2 - 1
src/lib/cryptolink/tests/crypto_unittests.cc

@@ -392,7 +392,8 @@ doRFC4231Tests(HashAlgorithm hash_algorithm,
     ASSERT_EQ(secret_list.size(), data_list.size());
     ASSERT_EQ(secret_list.size(), hmac_list.size());
 
-    for (int i = 0; i < data_list.size(); ++i) {
+    for (std::vector<std::string>::size_type i = 0;
+         i < data_list.size(); ++i) {
         SCOPED_TRACE("RFC4231 HMAC test for algorithm ID: " +
                      lexical_cast<std::string>(hash_algorithm) +
                      ", data ID: " + lexical_cast<std::string>(i));

+ 11 - 2
src/lib/datasrc/Makefile.am

@@ -7,9 +7,15 @@ AM_CPPFLAGS += $(SQLITE_CFLAGS)
 
 AM_CXXFLAGS = $(B10_CXXFLAGS)
 
+pkglibexecdir = $(libexecdir)/@PACKAGE@/backends
+
+datasrc_config.h: datasrc_config.h.pre
+	$(SED) -e "s|@@PKGLIBEXECDIR@@|$(pkglibexecdir)|" datasrc_config.h.pre >$@
+
 CLEANFILES = *.gcno *.gcda datasrc_messages.h datasrc_messages.cc
+CLEANFILES += datasrc_config.h
 
-lib_LTLIBRARIES = libdatasrc.la sqlite3_ds.la memory_ds.la
+lib_LTLIBRARIES = libdatasrc.la
 libdatasrc_la_SOURCES = data_source.h data_source.cc
 libdatasrc_la_SOURCES += static_datasrc.h static_datasrc.cc
 libdatasrc_la_SOURCES += sqlite3_datasrc.h sqlite3_datasrc.cc
@@ -25,8 +31,11 @@ libdatasrc_la_SOURCES += database.h database.cc
 libdatasrc_la_SOURCES += factory.h factory.cc
 nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
 
+pkglibexec_LTLIBRARIES =  sqlite3_ds.la memory_ds.la
+
 sqlite3_ds_la_SOURCES = sqlite3_accessor.h sqlite3_accessor.cc
 sqlite3_ds_la_LDFLAGS = -module
+sqlite3_ds_la_LDFLAGS += -no-undefined -version-info 1:0:0
 sqlite3_ds_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
 sqlite3_ds_la_LIBADD += libdatasrc.la
 sqlite3_ds_la_LIBADD += $(SQLITE_LIBS)
@@ -42,7 +51,7 @@ libdatasrc_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
 libdatasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
 libdatasrc_la_LIBADD += $(SQLITE_LIBS)
 
-BUILT_SOURCES = datasrc_messages.h datasrc_messages.cc
+BUILT_SOURCES = datasrc_config.h datasrc_messages.h datasrc_messages.cc
 datasrc_messages.h datasrc_messages.cc: Makefile datasrc_messages.mes
 	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/datasrc_messages.mes
 

+ 427 - 259
src/lib/datasrc/database.cc

@@ -35,7 +35,6 @@
 
 using namespace isc::dns;
 using namespace std;
-using boost::shared_ptr;
 using namespace isc::dns::rdata;
 
 namespace isc {
@@ -352,7 +351,7 @@ FINAL_TYPES() {
 
 }
 
-RRsetPtr
+ConstRRsetPtr
 DatabaseClient::Finder::findNSECCover(const Name& name) {
     try {
         // Which one should contain the NSEC record?
@@ -387,69 +386,99 @@ DatabaseClient::Finder::findNSECCover(const Name& name) {
             arg(accessor_->getDBName()).arg(name);
     }
     // We didn't find it, return nothing
-    return (RRsetPtr());
+    return (ConstRRsetPtr());
 }
 
-ZoneFinder::FindResult
-DatabaseClient::Finder::find(const isc::dns::Name& name,
-                             const isc::dns::RRType& type,
-                             isc::dns::RRsetList*,
-                             const FindOptions options)
+DatabaseClient::Finder::DelegationSearchResult
+DatabaseClient::Finder::findDelegationPoint(const isc::dns::Name& name,
+                                            const FindOptions options)
 {
-    // This variable is used to determine the difference between
-    // NXDOMAIN and NXRRSET
-    bool records_found = false;
-    bool glue_ok((options & FIND_GLUE_OK) != 0);
-    const bool dnssec_data((options & FIND_DNSSEC) != 0);
-    bool get_cover(false);
-    isc::dns::RRsetPtr result_rrset;
+    // Result of search
+    isc::dns::ConstRRsetPtr result_rrset;
     ZoneFinder::Result result_status = SUCCESS;
-    FoundRRsets found;
-    logger.debug(DBG_TRACE_DETAILED, DATASRC_DATABASE_FIND_RECORDS)
-        .arg(accessor_->getDBName()).arg(name).arg(type);
-    // In case we are in GLUE_OK mode and start matching wildcards,
-    // we can't do it under NS, so we store it here to check
-    isc::dns::RRsetPtr first_ns;
-
-    // First, do we have any kind of delegation (NS/DNAME) here?
-    const Name origin(getOrigin());
-    const size_t origin_label_count(origin.getLabelCount());
-    // Number of labels in the last known non-empty domain
-    size_t last_known(origin_label_count);
-    const size_t current_label_count(name.getLabelCount());
-    // This is how many labels we remove to get origin
-    const size_t remove_labels(current_label_count - origin_label_count);
-
-    // Now go trough all superdomains from origin down
-    for (int i(remove_labels); i > 0; --i) {
-        Name superdomain(name.split(i));
-        // Look if there's NS or DNAME (but ignore the NS in origin)
-        found = getRRsets(superdomain.toText(), DELEGATION_TYPES(),
-                          i != remove_labels);
-        if (found.first) {
-            // It contains some RRs, so it exists.
-            last_known = superdomain.getLabelCount();
 
+    // Are we searching for glue?
+    const bool glue_ok = ((options & FIND_GLUE_OK) != 0);
+
+    // This next declaration is an optimisation.  When we search the database
+    // for glue records, we generally ignore delegations. (This allows for
+    // the case where e.g. the delegation to zone example.com refers to
+    // nameservers within the zone, e.g. ns1.example.com.  When conducting the
+    // search for ns1.example.com, we have to search past the NS records at
+    // example.com.)
+    //
+    // The one case where this is forbidden is when we search past the zone
+    // cut but the match we find for the glue is a wildcard match.  In that
+    // case, we return the delegation instead (see RFC 1034, section 4.3.3).
+    // To save a new search, we record the location of the delegation cut when
+    // we encounter it here.
+    isc::dns::ConstRRsetPtr first_ns;
+
+    // We want to search from the apex down.  We are given the full domain
+    // name so we have to do some manipulation to ensure that when we start
+    // checking superdomains, we start from the domain name of the zone
+    // (e.g. if the name is b.a.example.com. and we are in the example.com.
+    // zone, we check example.com., a.example.com. and b.a.example.com.  We
+    // don't need to check com. or .).
+    //
+    // Set the number of labels in the origin (i.e. apex of the zone) and in
+    // the last known non-empty domain (which, at this point, is the origin).
+    const size_t origin_label_count = getOrigin().getLabelCount();
+    size_t last_known = origin_label_count;
+
+    // Set how many labels we remove to get origin: this is the number of
+    // labels we have to process in our search.
+    const size_t remove_labels = name.getLabelCount() - origin_label_count;
+
+    // Go through all superdomains from the origin down searching for nodes
+    // that indicate a delegation (i.e. NS or DNAME).
+    for (int i = remove_labels; i > 0; --i) {
+        const Name superdomain(name.split(i));
+
+        // Note if this is the origin. (We don't count NS records at the origin
+        // as a delegation so this controls whether NS RRs are included in
+        // the results of some searches.)
+        const bool not_origin = (i != remove_labels);
+
+        // Look if there's NS or DNAME at this point of the tree, but ignore
+        // the NS RRs at the apex of the zone.
+        const FoundRRsets found = getRRsets(superdomain.toText(),
+                                            DELEGATION_TYPES(), not_origin);
+        if (found.first) {
+            // This node contains either NS or DNAME RRs so it does exist.
             const FoundIterator nsi(found.second.find(RRType::NS()));
             const FoundIterator dni(found.second.find(RRType::DNAME()));
-            // In case we are in GLUE_OK mode, we want to store the
-            // highest encountered NS (but not apex)
-            if (glue_ok && !first_ns && i != remove_labels &&
-                nsi != found.second.end()) {
+
+            // An optimisation.  We know that there is an exact match for
+            // something at this point in the tree so remember it.  If we have
+            // to do a wildcard search, as we search upwards through the tree
+            // we don't need to pass this point, which is an exact match for
+            // the domain name.
+            last_known = superdomain.getLabelCount();
+
+            if (glue_ok && !first_ns && not_origin &&
+                    nsi != found.second.end()) {
+                // If we are searching for glue ("glue OK" mode), store the
+                // highest NS record that we find that is not the apex.  This
+                // is another optimisation for later, where we need the
+                // information if the domain we are looking for matches through
+                // a wildcard.
                 first_ns = nsi->second;
-            } else if (!glue_ok && i != remove_labels &&
-                       nsi != found.second.end()) {
-                // Do a NS delegation, but ignore NS in glue_ok mode. Ignore
-                // delegation in apex
+
+            } else if (!glue_ok && not_origin && nsi != found.second.end()) {
+                // Not searching for glue and we have found an NS RRset that is
+                // not at the apex.  We have found a delegation - return that
+                // fact, there is no need to search further down the tree.
                 LOG_DEBUG(logger, DBG_TRACE_DETAILED,
                           DATASRC_DATABASE_FOUND_DELEGATION).
                     arg(accessor_->getDBName()).arg(superdomain);
                 result_rrset = nsi->second;
                 result_status = DELEGATION;
-                // No need to go lower, found
                 break;
+
             } else if (dni != found.second.end()) {
-                // Very similar with DNAME
+                // We have found a DNAME so again stop searching down the tree
+                // and return the information.
                 LOG_DEBUG(logger, DBG_TRACE_DETAILED,
                           DATASRC_DATABASE_FOUND_DNAME).
                     arg(accessor_->getDBName()).arg(superdomain);
@@ -464,202 +493,344 @@ DatabaseClient::Finder::find(const isc::dns::Name& name,
             }
         }
     }
+    return (DelegationSearchResult(result_status, result_rrset, first_ns,
+                                   last_known));
+}
 
-    if (!result_rrset) { // Only if we didn't find a redirect already
-        // Try getting the final result and extract it
-        // It is special if there's a CNAME or NS, DNAME is ignored here
-        // And we don't consider the NS in origin
-
-        WantedTypes final_types(FINAL_TYPES());
-        final_types.insert(type);
-        found = getRRsets(name.toText(), final_types, name != origin);
-        records_found = found.first;
-
-        // NS records, CNAME record and Wanted Type records
-        const FoundIterator nsi(found.second.find(RRType::NS()));
-        const FoundIterator cni(found.second.find(RRType::CNAME()));
-        const FoundIterator wti(found.second.find(type));
-        if (name != origin && !glue_ok && nsi != found.second.end()) {
-            // There's a delegation at the exact node.
-            LOG_DEBUG(logger, DBG_TRACE_DETAILED,
-                      DATASRC_DATABASE_FOUND_DELEGATION_EXACT).
-                arg(accessor_->getDBName()).arg(name);
-            result_status = DELEGATION;
-            result_rrset = nsi->second;
-        } else if (type != isc::dns::RRType::CNAME() &&
-                   cni != found.second.end()) {
-            // A CNAME here
-            result_status = CNAME;
-            result_rrset = cni->second;
-            if (result_rrset->getRdataCount() != 1) {
-                isc_throw(DataSourceError, "CNAME with " <<
-                          result_rrset->getRdataCount() <<
-                          " rdata at " << name << ", expected 1");
-            }
-        } else if (wti != found.second.end()) {
-            // Just get the answer
-            result_rrset = wti->second;
-        } else if (!records_found) {
-            // Nothing lives here.
-            // But check if something lives below this
-            // domain and if so, pretend something is here as well.
-            if (hasSubdomains(name.toText())) {
-                LOG_DEBUG(logger, DBG_TRACE_DETAILED,
-                          DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL).
-                    arg(accessor_->getDBName()).arg(name);
-                records_found = true;
-                get_cover = dnssec_data;
-            } else if ((options & NO_WILDCARD) != 0) {
-                // If wildcard check is disabled, the search will ultimately
-                // terminate with NXDOMAIN. If DNSSEC is enabled, flag that
-                // we need to get the NSEC records to prove this.
-                if (dnssec_data) {
-                    get_cover = true;
-                }
-            } else {
-                // It's not empty non-terminal. So check for wildcards.
-                // We remove labels one by one and look for the wildcard there.
-                // Go up to first non-empty domain.
-                for (size_t i(1); i <= current_label_count - last_known; ++i) {
-                    // Construct the name with *
-                    const Name superdomain(name.split(i));
-                    const string wildcard("*." + superdomain.toText());
-                    const string construct_name(name.toText());
-                    // TODO What do we do about DNAME here?
-                    // The types are the same as with original query
-                    found = getRRsets(wildcard, final_types, true,
+// This method is called when we have not found an exact match and when we
+// know that the name is not an empty non-terminal.  So the only way that
+// the name can match something in the zone is through a wildcard match.
+//
+// During an earlier stage in the search for this name, we made a record of
+// the lowest superdomain for which we know an RR exists. (Note the "we
+// know" qualification - there may be lower superdomains (ones with more
+// labels) that hold an RR, but as we weren't searching for them, we don't
+// know about them.)
+//
+// In the search for a wildcard match (which starts at the given domain
+// name and goes up the tree to successive superdomains), this is the level
+// at which we can stop - there can't be a wildcard at or beyond that
+// point.
+//
+// At each level that can stop the search, we should consider several cases:
+//
+// - If we found a wildcard match for a glue record below a
+// delegation point, we don't return the match,
+// instead we return the delegation.  (Note that if we didn't find
+// a wildcard match at all, we would return NXDOMAIN, not
+// the delegation.)
+//
+// - If we found a wildcard match and we are sure that the match
+// is not an empty non-terminal, return the result, taking into account
+// CNAME, a delegation at a zone cut, and NXRRSET.
+// (E.g. searching for a match
+// for c.b.a.example.com, we found that b.a.example.com did
+// not exist but that *.a.example.com. did. Checking
+// b.a.example.com revealed no subdomains, so we can use the
+// wildcard match we found.)
+//
+// - If we found a more specific match, the wildcard search
+// is canceled, resulting in NXDOMAIN.  (E.g. searching for a match
+// for c.b.a.example.com, we found that b.a.example.com did
+// not exist but that *.a.example.com. did. Checking
+// b.a.example.com found subdomains.  So b.a.example.com is
+// an empty non-terminal and so should not be returned in
+// the wildcard matching process.  In other words,
+// b.a.example.com does exist in the DNS space, it just doesn't
+// have any RRs associated with it.)
+//
+// - If we found a match, but it is an empty non-terminal asterisk (e.g.
+// subdomain.*.example.com. is present, but there is nothing at
+// *.example.com.), return an NXRRSET indication;
+// the wildcard exists in the DNS space, there's just nothing
+// associated with it.  If DNSSEC data is required, return the
+// covering NSEC record.
+//
+// If none of the above applies in any level, the search fails with NXDOMAIN.
+ZoneFinder::FindResult
+DatabaseClient::Finder::findWildcardMatch(
+    const isc::dns::Name& name, const isc::dns::RRType& type,
+    const FindOptions options, const DelegationSearchResult& dresult)
+{
+    // Note that during the search we are going to search not only for the
+    // requested type, but also for types that indicate a delegation -
+    // NS and DNAME.
+    WantedTypes final_types(FINAL_TYPES());
+    final_types.insert(type);
+
+    for (size_t i = 1; i <= (name.getLabelCount() - dresult.last_known); ++i) {
+
+        // Strip off the left-most label(s) in the name and replace with a "*".
+        const Name superdomain(name.split(i));
+        const string wildcard("*." + superdomain.toText());
+        const string construct_name(name.toText());
+
+        // TODO Add a check for DNAME, as DNAME wildcards are discouraged (see
+        // RFC 4592 section 4.4).
+        // Search for a match.  The types are the same as with original query.
+        FoundRRsets found = getRRsets(wildcard, final_types, true,
                                       &construct_name);
-                    if (found.first) {
-                        if (first_ns) {
-                            // In case we are under NS, we don't
-                            // wildcard-match, but return delegation
-                            result_rrset = first_ns;
-                            result_status = DELEGATION;
-                            records_found = true;
-                            // We pretend to switch to non-glue_ok mode
-                            glue_ok = false;
-                            LOG_DEBUG(logger, DBG_TRACE_DETAILED,
-                                      DATASRC_DATABASE_WILDCARD_CANCEL_NS).
-                                arg(accessor_->getDBName()).arg(wildcard).
-                                arg(first_ns->getName());
-                        } else if (!hasSubdomains(name.split(i - 1).toText()))
-                        {
-                            // Nothing we added as part of the * can exist
-                            // directly, as we go up only to first existing
-                            // domain, but it could be empty non-terminal. In
-                            // that case, we need to cancel the match.
-                            records_found = true;
-                            const FoundIterator
-                                cni(found.second.find(RRType::CNAME()));
-                            const FoundIterator
-                                nsi(found.second.find(RRType::NS()));
-                            const FoundIterator
-                                nci(found.second.find(RRType::NSEC()));
-                            const FoundIterator wti(found.second.find(type));
-                            if (cni != found.second.end() &&
-                                type != RRType::CNAME()) {
-                                result_rrset = cni->second;
-                                result_status = WILDCARD_CNAME;
-                            } else if (nsi != found.second.end()) {
-                                result_rrset = nsi->second;
-                                result_status = DELEGATION;
-                            } else if (wti != found.second.end()) {
-                                result_rrset = wti->second;
-                                result_status = WILDCARD;
-                            } else {
-                                // NXRRSET case in the wildcard
-                                result_status = WILDCARD_NXRRSET;
-                                if (dnssec_data &&
-                                    nci != found.second.end()) {
-                                    // User wants a proof the wildcard doesn't
-                                    // contain it
-                                    //
-                                    // However, we need to get the RRset in the
-                                    // name of the wildcard, not the constructed
-                                    // one, so we walk it again
-                                    found = getRRsets(wildcard, NSEC_TYPES(),
-                                                      true);
-                                    result_rrset =
-                                        found.second.find(RRType::NSEC())->
-                                        second;
-                                }
-                            }
-
-                            LOG_DEBUG(logger, DBG_TRACE_DETAILED,
-                                      DATASRC_DATABASE_WILDCARD).
-                                arg(accessor_->getDBName()).arg(wildcard).
-                                arg(name);
-                        } else {
-                            LOG_DEBUG(logger, DBG_TRACE_DETAILED,
-                                      DATASRC_DATABASE_WILDCARD_CANCEL_SUB).
-                                arg(accessor_->getDBName()).arg(wildcard).
-                                arg(name).arg(superdomain);
-                        }
-                        break;
-                    } else if (hasSubdomains(wildcard)) {
-                        // Empty non-terminal asterisk
-                        records_found = true;
-                        LOG_DEBUG(logger, DBG_TRACE_DETAILED,
-                                  DATASRC_DATABASE_WILDCARD_EMPTY).
-                            arg(accessor_->getDBName()).arg(wildcard).
-                            arg(name);
-                        if (dnssec_data) {
-                            result_rrset = findNSECCover(Name(wildcard));
-                            if (result_rrset) {
-                                result_status = WILDCARD_NXRRSET;
-                            }
-                        }
-                        break;
-                    }
-                }
-                // This is the NXDOMAIN case (nothing found anywhere). If
-                // they want DNSSEC data, try getting the NSEC record
-                if (dnssec_data && !records_found) {
-                    get_cover = true;
+        if (found.first) {
+            // Found something - but what?
+
+            if (dresult.first_ns) {
+                // About to use first_ns.  The only way this can be set is if
+                // we are searching for glue, so do a sanity check.
+                if ((options & FIND_GLUE_OK) == 0) {
+                    isc_throw(Unexpected, "Inconsistent conditions during "
+                              "cancel of wildcard search for " <<
+                              name.toText() << ": first_ns non-null when not "
+                              "processing glue request");
                 }
+
+                // Wildcard match for a glue below a delegation point
+                LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+                          DATASRC_DATABASE_WILDCARD_CANCEL_NS).
+                    arg(accessor_->getDBName()).arg(wildcard).
+                    arg(dresult.first_ns->getName());
+                return (ZoneFinder::FindResult(DELEGATION, dresult.first_ns));
+
+            } else if (!hasSubdomains(name.split(i - 1).toText())) {
+                // The wildcard match is the best one, find the final result
+                // at it.  Note that wildcard should never be the zone origin.
+                return (findOnNameResult(name, type, options, false,
+                                         found, &wildcard));
+            } else {
+
+                // more specific match found, cancel the wildcard match
+                LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+                          DATASRC_DATABASE_WILDCARD_CANCEL_SUB).
+                    arg(accessor_->getDBName()).arg(wildcard).
+                    arg(name).arg(superdomain);
+                return (ZoneFinder::FindResult(NXDOMAIN, ConstRRsetPtr()));
             }
-        } else if (dnssec_data) {
-            // This is the "usual" NXRRSET case
-            // So in case they want DNSSEC, provide the NSEC
-            // (which should be available already here)
-            result_status = NXRRSET;
-            const FoundIterator nci(found.second.find(RRType::NSEC()));
-            if (nci != found.second.end()) {
-                result_rrset = nci->second;
+
+        } else if (hasSubdomains(wildcard)) {
+            // an empty non-terminal asterisk
+            LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+                      DATASRC_DATABASE_WILDCARD_EMPTY).
+                arg(accessor_->getDBName()).arg(wildcard).arg(name);
+            if ((options & FIND_DNSSEC) != 0) {
+                ConstRRsetPtr nsec = findNSECCover(Name(wildcard));
+                if (nsec) {
+                    return (ZoneFinder::FindResult(WILDCARD_NXRRSET, nsec));
+                }
             }
+            return (ZoneFinder::FindResult(NXRRSET, ConstRRsetPtr()));
         }
     }
 
-    if (!result_rrset) {
-        if (result_status == SUCCESS) {
-            // Should we look for NSEC covering the name?
-            if (get_cover) {
-                result_rrset = findNSECCover(name);
-                if (result_rrset) {
-                    result_status = NXDOMAIN;
-                }
+    // Nothing found at any level.
+    return (ZoneFinder::FindResult(NXDOMAIN, ConstRRsetPtr()));
+}
+
+ZoneFinder::FindResult
+DatabaseClient::Finder::logAndCreateResult(
+    const Name& name, const string* wildname, const RRType& type,
+    ZoneFinder::Result code, ConstRRsetPtr rrset,
+    const isc::log::MessageID& log_id) const
+{
+    if (rrset) {
+        if (wildname == NULL) {
+            LOG_DEBUG(logger, DBG_TRACE_DETAILED, log_id).
+                arg(accessor_->getDBName()).arg(name).arg(type).
+                arg(getClass()).arg(*rrset);
+        } else {
+            LOG_DEBUG(logger, DBG_TRACE_DETAILED, log_id).
+                arg(accessor_->getDBName()).arg(name).arg(type).
+                arg(getClass()).arg(*wildname).arg(*rrset);
+        }
+    } else {
+        if (wildname == NULL) {
+            LOG_DEBUG(logger, DBG_TRACE_DETAILED, log_id).
+                arg(accessor_->getDBName()).arg(name).arg(type).
+                arg(getClass());
+        } else {
+            LOG_DEBUG(logger, DBG_TRACE_DETAILED, log_id).
+                arg(accessor_->getDBName()).arg(name).arg(type).
+                arg(getClass()).arg(*wildname);
+        }
+    }
+    return (ZoneFinder::FindResult(code, rrset));
+}
+
+ZoneFinder::FindResult
+DatabaseClient::Finder::findOnNameResult(const Name& name,
+                                         const RRType& type,
+                                         const FindOptions options,
+                                         const bool is_origin,
+                                         const FoundRRsets& found,
+                                         const string* wildname)
+{
+    const bool wild = (wildname != NULL);
+
+    // Get iterators for the different types of records we are interested in -
+    // CNAME, NS and Wanted types.
+    const FoundIterator nsi(found.second.find(RRType::NS()));
+    const FoundIterator cni(found.second.find(RRType::CNAME()));
+    const FoundIterator wti(found.second.find(type));
+
+    if (!is_origin && ((options & FIND_GLUE_OK) == 0) &&
+        nsi != found.second.end()) {
+        // A NS RRset was found at the domain we were searching for.  As it is
+        // not at the origin of the zone, it is a delegation and indicates that
+        // this zone is not authoritative for the data. Just return the
+        // delegation information.
+        return (logAndCreateResult(name, wildname, type, DELEGATION,
+                                   nsi->second,
+                                   wild ? DATASRC_DATABASE_WILDCARD_NS :
+                                   DATASRC_DATABASE_FOUND_DELEGATION_EXACT));
+
+    } else if (type != RRType::CNAME() && cni != found.second.end()) {
+        // We are not searching for a CNAME but nevertheless we have found one
+        // at the name we are searching so we return it. (The caller may
+        // want to continue the lookup by replacing the query name with the
+        // canonical name and the original RR type.) First though, do a sanity
+        // check to ensure that there is only one RR in the CNAME RRset.
+        if (cni->second->getRdataCount() != 1) {
+            isc_throw(DataSourceError, "CNAME with " <<
+                      cni->second->getRdataCount() << " rdata at " << name <<
+                      ", expected 1");
+        }
+        return (logAndCreateResult(name, wildname, type,
+                                   wild ? WILDCARD_CNAME : CNAME, cni->second,
+                                   wild ? DATASRC_DATABASE_WILDCARD_CNAME :
+                                   DATASRC_DATABASE_FOUND_CNAME));
+
+    } else if (wti != found.second.end()) {
+        // Found an RR matching the query, so return it.  (Note that this
+        // includes the case where we were explicitly querying for a CNAME and
+        // found it.  It also includes the case where we were querying for an
+        // NS RRset and found it at the apex of the zone.)
+        return (logAndCreateResult(name, wildname, type,
+                                   wild ? WILDCARD : SUCCESS, wti->second,
+                                   wild ? DATASRC_DATABASE_WILDCARD_MATCH :
+                                   DATASRC_DATABASE_FOUND_RRSET));
+    }
+
+    // If we get here, we have found something at the requested name but not
+    // one of the RR types we were interested in. This is the NXRRSET case so
+    // return the appropriate status.  If DNSSEC information was requested,
+    // provide the NSEC records.  If it's for wildcard, we need to get the
+    // NSEC records in the name of the wildcard, not the substituted one,
+    // so we need to search the tree again.
+    ConstRRsetPtr nsec_rrset;   // possibly used with DNSSEC, otherwise NULL
+    if ((options & FIND_DNSSEC) != 0) {
+        if (wild) {
+            const FoundRRsets wfound = getRRsets(*wildname, NSEC_TYPES(),
+                                                 true);
+            const FoundIterator nci = wfound.second.find(RRType::NSEC());
+            if (nci != wfound.second.end()) {
+                nsec_rrset = nci->second;
             }
-            // Something is not here and we didn't decide yet what
-            if (records_found) {
-                logger.debug(DBG_TRACE_DETAILED,
-                             DATASRC_DATABASE_FOUND_NXRRSET)
-                    .arg(accessor_->getDBName()).arg(name)
-                    .arg(getClass()).arg(type);
-                result_status = NXRRSET;
-            } else {
-                logger.debug(DBG_TRACE_DETAILED,
-                             DATASRC_DATABASE_FOUND_NXDOMAIN)
-                    .arg(accessor_->getDBName()).arg(name)
-                    .arg(getClass()).arg(type);
-                result_status = NXDOMAIN;
+        } else {
+            const FoundIterator nci = found.second.find(RRType::NSEC());
+            if (nci != found.second.end()) {
+                nsec_rrset = nci->second;
             }
         }
+    }
+    if (nsec_rrset) {
+        // This log message covers both normal and wildcard cases, so we pass
+        // NULL for 'wildname'.
+        return (logAndCreateResult(name, NULL, type,
+                                   wild ? WILDCARD_NXRRSET : NXRRSET,
+                                   nsec_rrset,
+                                   DATASRC_DATABASE_FOUND_NXRRSET_NSEC));
+    }
+    return (logAndCreateResult(name, wildname, type,
+                               wild ? WILDCARD_NXRRSET : NXRRSET, nsec_rrset,
+                               wild ? DATASRC_DATABASE_WILDCARD_NXRRSET :
+                               DATASRC_DATABASE_FOUND_NXRRSET));
+}
+
+ZoneFinder::FindResult
+DatabaseClient::Finder::findNoNameResult(const Name& name, const RRType& type,
+                                         FindOptions options,
+                                         const DelegationSearchResult& dresult)
+{
+    const bool dnssec_data = ((options & FIND_DNSSEC) != 0);
+
+    // On entry to this method, we know that the database doesn't have any
+    // entry for this name.  Before returning NXDOMAIN, we need to check
+    // for special cases.
+
+    if (hasSubdomains(name.toText())) {
+        // Does the domain have a subdomain (i.e. it is an empty non-terminal)?
+        // If so, return NXRRSET instead of NXDOMAIN (as although the name does
+        // not exist in the database, it does exist in the DNS tree).
+        LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+                  DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL).
+            arg(accessor_->getDBName()).arg(name);
+        return (FindResult(NXRRSET, dnssec_data ? findNSECCover(name) :
+                           ConstRRsetPtr()));
+
+    } else if ((options & NO_WILDCARD) == 0) {
+        // It's not an empty non-terminal and wildcard matching is not
+        // disabled, so check for wildcards. If there is a wildcard match
+        // (i.e. all results except NXDOMAIN) return it; otherwise fall
+        // through to the NXDOMAIN case below.
+        const ZoneFinder::FindResult wresult =
+            findWildcardMatch(name, type, options, dresult);
+        if (wresult.code != NXDOMAIN) {
+            return (FindResult(wresult.code, wresult.rrset));
+        }
+    }
+
+    // All avenues to find a match are now exhausted, return NXDOMAIN (plus
+    // NSEC records if requested).
+    LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_NO_MATCH).
+              arg(accessor_->getDBName()).arg(name).arg(type).arg(getClass());
+    return (FindResult(NXDOMAIN, dnssec_data ? findNSECCover(name) :
+                           ConstRRsetPtr()));
+}
+
+ZoneFinder::FindResult
+DatabaseClient::Finder::find(const isc::dns::Name& name,
+                             const isc::dns::RRType& type,
+                             isc::dns::RRsetList*,
+                             const FindOptions options)
+{
+    LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_FIND_RECORDS)
+              .arg(accessor_->getDBName()).arg(name).arg(type).arg(getClass());
+
+    // First, go through all superdomains from the origin down, searching for
+    // nodes that indicate a delegation (i.e. NS or DNAME, ignoring NS records
+    // at the apex).  If one is found, the search stops there.
+    //
+    // (In fact there could be RRs in the database corresponding to subdomains
+    // of the delegation.  The reason we do the search for the delegations
+    // first is because the delegation means that another zone is authoritative
+    // for the data and so should be consulted to retrieve it.  RRs below
+    // this delegation point can be found in a search for glue but not
+    // otherwise; in the latter case they are said to be occluded by the
+    // presence of the delegation.)
+    const DelegationSearchResult dresult = findDelegationPoint(name, options);
+    if (dresult.rrset) {
+        return (FindResult(dresult.code, dresult.rrset));
+    }
+
+    // If there is no delegation, look for the exact match to the request
+    // name/type/class.  However, there are special cases:
+    // - Requested name has a singleton CNAME record associated with it
+    // - Requested name is a delegation point (NS only but not at the zone
+    //   apex - DNAME is ignored here as it redirects DNS names subordinate to
+    //   the owner name - the owner name itself is not redirected.)
+    const bool is_origin = (name == getOrigin());
+    WantedTypes final_types(FINAL_TYPES());
+    final_types.insert(type);
+    const FoundRRsets found = getRRsets(name.toText(), final_types,
+                                        !is_origin);
+
+    if (found.first) {
+        // Something found at the domain name.  Look into it further to get
+        // the final result.
+        return (findOnNameResult(name, type, options, is_origin, found, NULL));
     } else {
-        logger.debug(DBG_TRACE_DETAILED,
-                     DATASRC_DATABASE_FOUND_RRSET)
-                    .arg(accessor_->getDBName()).arg(*result_rrset);
+        // Did not find anything at all at the domain name, so check for
+        // subdomains or wildcards.
+        return (findNoNameResult(name, type, options, dresult));
     }
-    return (FindResult(result_status, result_rrset));
 }
 
 Name
@@ -669,10 +840,9 @@ DatabaseClient::Finder::findPreviousName(const Name& name) const {
     try {
         return (Name(str));
     }
-    /*
-     * To avoid having the same code many times, we just catch all the
-     * exceptions and handle them in a common code below
-     */
+
+    // To avoid having the same code many times, we just catch all the
+    // exceptions and handle them in a common code below
     catch (const isc::dns::EmptyLabel&) {}
     catch (const isc::dns::TooLongLabel&) {}
     catch (const isc::dns::BadLabelType&) {}
@@ -695,17 +865,15 @@ DatabaseClient::Finder::getClass() const {
 
 namespace {
 
-/*
- * This needs, beside of converting all data from textual representation, group
- * together rdata of the same RRsets. To do this, we hold one row of data ahead
- * of iteration. When we get a request to provide data, we create it from this
- * data and load a new one. If it is to be put to the same rrset, we add it.
- * Otherwise we just return what we have and keep the row as the one ahead
- * for next time.
- */
+/// Besides converting all data from their textual representation, this class
+/// needs to group together rdata belonging to the same RRset. To do this, we
+/// hold one row of data ahead of the iteration. When we get a request to
+/// provide data, we create the result from this row and then load a new one.
+/// If the new row belongs to the same RRset, we add it; otherwise we just
+/// return what we have and keep the row as the one ahead for next time.
 class DatabaseIterator : public ZoneIterator {
 public:
-    DatabaseIterator(shared_ptr<DatabaseAccessor> accessor,
+    DatabaseIterator(boost::shared_ptr<DatabaseAccessor> accessor,
                      const Name& zone_name,
                      const RRClass& rrclass,
                      bool separate_rrs) :
@@ -801,7 +969,7 @@ private:
     }
 
     // The dedicated accessor
-    shared_ptr<DatabaseAccessor> accessor_;
+    boost::shared_ptr<DatabaseAccessor> accessor_;
     // The context
     DatabaseAccessor::IteratorContextPtr context_;
     // Class of the zone
@@ -837,13 +1005,13 @@ DatabaseClient::getIterator(const isc::dns::Name& name,
 //
 class DatabaseUpdater : public ZoneUpdater {
 public:
-    DatabaseUpdater(shared_ptr<DatabaseAccessor> accessor, int zone_id,
+    DatabaseUpdater(boost::shared_ptr<DatabaseAccessor> accessor, int zone_id,
             const Name& zone_name, const RRClass& zone_class,
             bool journaling) :
         committed_(false), accessor_(accessor), zone_id_(zone_id),
         db_name_(accessor->getDBName()), zone_name_(zone_name.toText()),
         zone_class_(zone_class), journaling_(journaling),
-        diff_phase_(NOT_STARTED),
+        diff_phase_(NOT_STARTED), serial_(0),
         finder_(new DatabaseClient::Finder(accessor_, zone_id_, zone_name))
     {
         logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_CREATED)
@@ -883,7 +1051,7 @@ private:
     typedef DatabaseAccessor Accessor;
 
     bool committed_;
-    shared_ptr<DatabaseAccessor> accessor_;
+    boost::shared_ptr<DatabaseAccessor> accessor_;
     const int zone_id_;
     const string db_name_;
     const string zone_name_;
@@ -896,7 +1064,7 @@ private:
         ADD
     };
     DiffPhase diff_phase_;
-    uint32_t serial_;
+    Serial serial_;
     boost::scoped_ptr<DatabaseClient::Finder> finder_;
 
     // This is a set of validation checks commonly used for addRRset() and
@@ -985,8 +1153,8 @@ DatabaseUpdater::addRRset(const RRset& rrset) {
         columns[Accessor::ADD_RDATA] = it->getCurrent().toText();
         if (journaling_) {
             journal[Accessor::DIFF_RDATA] = columns[Accessor::ADD_RDATA];
-            accessor_->addRecordDiff(zone_id_, serial_, Accessor::DIFF_ADD,
-                                     journal);
+            accessor_->addRecordDiff(zone_id_, serial_.getValue(),
+                                     Accessor::DIFF_ADD, journal);
         }
         accessor_->addRecordToZone(columns);
     }
@@ -1023,8 +1191,8 @@ DatabaseUpdater::deleteRRset(const RRset& rrset) {
         params[Accessor::DEL_RDATA] = it->getCurrent().toText();
         if (journaling_) {
             journal[Accessor::DIFF_RDATA] = params[Accessor::DEL_RDATA];
-            accessor_->addRecordDiff(zone_id_, serial_, Accessor::DIFF_DELETE,
-                                     journal);
+            accessor_->addRecordDiff(zone_id_, serial_.getValue(),
+                                     Accessor::DIFF_DELETE, journal);
         }
         accessor_->deleteRecordInZone(params);
     }
@@ -1060,7 +1228,7 @@ DatabaseClient::getUpdater(const isc::dns::Name& name, bool replace,
         isc_throw(isc::BadValue, "Can't store journal and replace the whole "
                   "zone at the same time");
     }
-    shared_ptr<DatabaseAccessor> update_accessor(accessor_->clone());
+    boost::shared_ptr<DatabaseAccessor> update_accessor(accessor_->clone());
     const std::pair<bool, int> zone(update_accessor->startUpdateZone(
                                         name.toText(), replace));
     if (!zone.first) {
@@ -1080,7 +1248,7 @@ private:
     // A shortcut typedef to keep the code concise.
     typedef DatabaseAccessor Accessor;
 public:
-    DatabaseJournalReader(shared_ptr<Accessor> accessor, const Name& zone,
+    DatabaseJournalReader(boost::shared_ptr<Accessor> accessor, const Name& zone,
                           int zone_id, const RRClass& rrclass, uint32_t begin,
                           uint32_t end) :
         accessor_(accessor), zone_(zone), rrclass_(rrclass),
@@ -1128,7 +1296,7 @@ public:
     }
 
 private:
-    shared_ptr<Accessor> accessor_;
+    boost::shared_ptr<Accessor> accessor_;
     const Name zone_;
     const RRClass rrclass_;
     Accessor::IteratorContextPtr context_;
@@ -1143,7 +1311,7 @@ DatabaseClient::getJournalReader(const isc::dns::Name& zone,
                                  uint32_t begin_serial,
                                  uint32_t end_serial) const
 {
-    shared_ptr<DatabaseAccessor> jnl_accessor(accessor_->clone());
+    boost::shared_ptr<DatabaseAccessor> jnl_accessor(accessor_->clone());
     const pair<bool, int> zoneinfo(jnl_accessor->getZone(zone.toText()));
     if (!zoneinfo.first) {
         return (pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>(

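For orientation only, a hedged sketch of how a caller might drive the refactored finder above: the find() signature, the FindOptions flag and the result codes are taken from the code in this diff, while the surrounding function, the query name and the use of a generic ZoneFinder reference are illustrative assumptions.

#include <datasrc/zone.h>
#include <dns/name.h>
#include <dns/rrtype.h>

using namespace isc::dns;
using namespace isc::datasrc;

// 'finder' is assumed to be an already-constructed finder for the zone that
// contains the query name (e.g. a DatabaseClient::Finder).
void exampleLookup(ZoneFinder& finder) {
    const Name qname("www.example.org");

    // Request DNSSEC proofs where applicable; wildcard matching stays enabled.
    const ZoneFinder::FindResult result =
        finder.find(qname, RRType::A(), NULL, ZoneFinder::FIND_DNSSEC);

    switch (result.code) {
    case ZoneFinder::SUCCESS:        // exact match; result.rrset is the answer
    case ZoneFinder::WILDCARD:       // answer synthesized from a wildcard
        break;
    case ZoneFinder::DELEGATION:     // result.rrset is the NS RRset at the cut
        break;
    case ZoneFinder::CNAME:          // singleton CNAME; the caller may restart
        break;                       // the query with the canonical name
    case ZoneFinder::NXRRSET:        // name exists but not the type (NSEC if
        break;                       // DNSSEC data was requested)
    case ZoneFinder::NXDOMAIN:       // nothing found, not even via a wildcard
    default:
        break;
    }
}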
File diff suppressed because it is too large
+ 639 - 483
src/lib/datasrc/database.h


+ 31 - 0
src/lib/datasrc/datasrc_config.h.pre.in

@@ -0,0 +1,31 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+#ifndef __DATASRC_CONFIG_H
+#define __DATASRC_CONFIG_H 1
+
+namespace isc {
+namespace datasrc {
+
+/// \brief Default directory to find the loadable data source libraries
+///
+/// This is the directory where, once installed, loadable backend libraries
+/// such as memory_ds.so and sqlite3_ds.so are found. It is used by the
+/// DataSourceClient loader if no absolute path is used and
+/// B10_FROM_BUILD is not set in the environment.
+const char* const BACKEND_LIBRARY_PATH = "@@PKGLIBEXECDIR@@/";
+
+} // end namespace datasrc
+} // end namespace isc
+
+#endif // __DATASRC_CONFIG_H

+ 87 - 20
src/lib/datasrc/datasrc_messages.mes

@@ -68,7 +68,7 @@ The datasource tried to provide an NSEC proof that the named domain does not
 exist, but the database backend doesn't support DNSSEC. No proof is included
 in the answer as a result.
 
-% DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3
+% DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3/%4
 Debug information. The database data source is looking up records with the given
 name and type in the database.
 
@@ -78,11 +78,17 @@ different TTL values. This isn't allowed on the wire and is considered
 an error, so we set it to the lowest value we found (but we don't modify the
 database). The data in database should be checked and fixed.
 
+% DATASRC_DATABASE_FOUND_CNAME search in datasource %1 for %2/%3/%4 found CNAME, resulting in %5
+When searching the domain for a name, a CNAME was found at that name.
+Even though it was not the RR type being sought, it is returned.  (The
+caller may want to continue the lookup by replacing the query name with
+the canonical name and restarting the query with the original RR type.)
+
 % DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1
 When searching for a domain, the program met a delegation to a different zone
 at the given domain name. It will return that one instead.
 
-% DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1
+% DATASRC_DATABASE_FOUND_DELEGATION_EXACT search in datasource %1 for %2/%3/%4 found delegation at %5
 The program found the domain requested, but it is a delegation point to a
 different zone, therefore it is not authoritative for this domain name.
 It will return the NS record instead.
@@ -93,19 +99,25 @@ place in the domain space at the given domain name. It will return that one
 instead.
 
 % DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL empty non-terminal %2 in %1
-The domain name doesn't have any RRs, so it doesn't exist in the database.
-However, it has a subdomain, so it exists in the DNS address space. So we
-return NXRRSET instead of NXDOMAIN.
+The domain name does not have any RRs associated with it, so it doesn't
+exist in the database.  However, it has a subdomain, so it does exist
+in the DNS address space. This type of domain is known as an "empty
+non-terminal" and so we return NXRRSET instead of NXDOMAIN.
 
 % DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4
 The data returned by the database backend did not contain any data for the given
 domain name, class and type.
 
-% DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4
+% DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 for %2/%3/%4 resulted in NXRRSET
 The data returned by the database backend contained data for the given domain
 name and class, but not for the given type.
 
-% DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2
+% DATASRC_DATABASE_FOUND_NXRRSET_NSEC search in datasource %1 for %2/%3/%4 resulted in RRset %5
+A search in the database for RRs for the specified name, type and class has
+located RRs that match the name and class but not the type.  DNSSEC information
+has been requested and returned.
+
+% DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %5
 The data returned by the database backend contained data for the given domain
 name, and it either matches the type or has a relevant type. The RRset that is
 returned is printed.
@@ -127,11 +139,46 @@ were found to be different. This isn't allowed on the wire and is considered
 an error, so we set it to the lowest value we found (but we don't modify the
 database). The data in database should be checked and fixed.
 
-% DATASRC_DATABASE_WILDCARD constructing RRset %3 from wildcard %2 in %1
-The database doesn't contain directly matching domain, but it does contain a
-wildcard one which is being used to synthesize the answer.
+% DATASRC_DATABASE_NO_MATCH no match for %2/%3/%4 in %1
+No match (not even a wildcard) was found for the given name, type and class
+in the named data source.
+
+% DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3
+Debug information.  A set of updates to a zone has been successfully
+committed to the corresponding database backend.  The zone name,
+its class and the database name are printed.
+
+% DATASRC_DATABASE_UPDATER_CREATED zone updater created for '%1/%2' on %3
+Debug information.  A zone updater object is created to make updates to
+the shown zone on the shown backend database.
+
+% DATASRC_DATABASE_UPDATER_DESTROYED zone updater destroyed for '%1/%2' on %3
+Debug information.  A zone updater object is destroyed, either successfully
+or after failure of, making updates to the shown zone on the shown backend
+database.
+
+% DATASRC_DATABASE_UPDATER_ROLLBACK zone updates roll-backed for '%1/%2' on %3
+A zone updater is being destroyed without committing the changes.
+This would typically mean the update attempt was aborted due to some
+error, but may also be a bug of the application that forgets committing
+the changes.  The intermediate changes made through the updater won't
+be applied to the underlying database.  The zone name, its class, and
+the underlying database name are shown in the log message.
 
-% DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %2 because %3 contains NS in %1
+% DATASRC_DATABASE_UPDATER_ROLLBACKFAIL failed to roll back zone updates for '%1/%2' on %3: %4
+A zone updater is being destroyed without committing the changes to
+the database, and attempts to rollback incomplete updates, but it
+unexpectedly fails.  The higher level implementation does not expect
+it to fail, so this means either a serious operational error in the
+underlying data source (such as a system failure of a database) or
+software bug in the underlying data source implementation.  In either
+case if this message is logged the administrator should carefully
+examine the underlying data source to see what exactly happens and
+whether the data is still valid.  The zone name, its class, and the
+underlying database name as well as the error message thrown from the
+database module are shown in the log message.
+
+% DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %2 because %3 contains NS (data source %1)
 The database was queried to provide glue data and it didn't find direct match.
 It could create it from given wildcard, but matching wildcards is forbidden
 under a zone cut, which was found. Therefore the delegation will be returned
@@ -143,11 +190,31 @@ exists, therefore this name is something like empty non-terminal (actually,
 from the protocol point of view, it is empty non-terminal, but the code
 discovers it differently).
 
-% DATASRC_DATABASE_WILDCARD_EMPTY implicit wildcard %2 used to construct %3 in %1
-The given wildcard exists implicitly in the domainspace, as empty nonterminal
-(eg. there's something like subdomain.*.example.org, so *.example.org exists
-implicitly, but is empty). This will produce NXRRSET, because the constructed
-domain is empty as well as the wildcard.
+% DATASRC_DATABASE_WILDCARD_CNAME search in datasource %1 for %2/%3/%4 found wildcard CNAME at %5, resulting in %6
+The database doesn't contain a directly matching name.  When searching
+for a wildcard match, a CNAME RR was found at a wildcard record
+matching the name.  This is returned as the result of the search.
+
+% DATASRC_DATABASE_WILDCARD_EMPTY found subdomains of %2 which is a wildcard match for %3 in %1
+The given wildcard matches the name being sought but it is an empty
+nonterminal (e.g. there is nothing at *.example.org but something like
+subdomain.*.example.org does exist, so *.example.org exists in the
+namespace but has no RRs associated with it). This will produce NXRRSET.
+
+% DATASRC_DATABASE_WILDCARD_MATCH search in datasource %1 resulted in wildcard match at %5 with RRset %6
+The database doesn't contain a directly matching name.  When searching
+for a wildcard match, a wildcard record matching the name and type of
+the query was found. The data at this point is returned.
+
+% DATASRC_DATABASE_WILDCARD_NS search in datasource %1 for %2/%3/%4 found wildcard delegation at %5, resulting in %6
+The database doesn't contain a directly matching name.  When searching
+for a wildcard match, an NS RR was found at a wildcard record matching
+the name.  This is returned as the result of the search.
+
+% DATASRC_DATABASE_WILDCARD_NXRRSET search in datasource %1 for %2/%3/%4 resulted in wildcard NXRRSET at %5
+The database doesn't contain a directly matching name.  When searching
+for a wildcard match, a matching wildcard entry was found but it did
+not contain RRs of the requested type.  An NXRRSET indication is returned.
 
 % DATASRC_DO_QUERY handling query for '%1/%2'
 A debug message indicating that a query for the given name and RR type is being
@@ -259,7 +326,7 @@ Debug information. The requested record was found.
 
 % DATASRC_MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty
 Debug information. The search stopped at a superdomain of the requested
-domain. The domain is a empty nonterminal, therefore it is treated  as NXRRSET
+domain. The domain is an empty nonterminal, therefore it is treated as NXRRSET
 case (eg. the domain exists, but it doesn't have the requested record type).
 
 % DATASRC_MEM_SWAP swapping contents of two zone representations ('%1' and '%2')
@@ -487,12 +554,12 @@ enough information for it.  The code is 1 for error, 2 for not implemented.
 % DATASRC_SQLITE_CLOSE closing SQLite database
 Debug information. The SQLite data source is closing the database file.
 
-% DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'
-The database file is being opened so it can start providing data.
-
 % DATASRC_SQLITE_CONNCLOSE Closing sqlite database
 The database file is no longer needed and is being closed.
 
+% DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'
+The database file is being opened so it can start providing data.
+
 % DATASRC_SQLITE_CREATE SQLite data source created
 Debug information. An instance of SQLite data source is being created.
 

+ 51 - 2
src/lib/datasrc/factory.cc

@@ -19,13 +19,59 @@
 #include "sqlite3_accessor.h"
 #include "memory_datasrc.h"
 
+#include "datasrc_config.h"
+
 #include <datasrc/logger.h>
 
 #include <dlfcn.h>
+#include <cstdlib>
 
+using namespace std;
 using namespace isc::data;
 using namespace isc::datasrc;
 
+namespace {
+// This helper function takes the 'type' string as passed to
+// the DataSourceClient container below, and, unless it
+// already specifies a specific loadable .so file, will
+// convert the short-name to the full file.
+// I.e. it will add '_ds.so' (if necessary), and prepend
+// it with an absolute path (if necessary).
+// Returns the resulting string to use with LibraryContainer.
+const std::string
+getDataSourceLibFile(const std::string& type) {
+    if (type.empty()) {
+        isc_throw(DataSourceLibraryError,
+                  "DataSourceClient container called with empty type value");
+    }
+    if (type == ".so") {
+        isc_throw(DataSourceLibraryError, "DataSourceClient container called "
+                                          "with bad type or file name");
+    }
+
+    // Type can be either a short name, in which case we need to
+    // append "_ds.so", or it can be a direct .so library.
+    std::string lib_file = type;
+    const int ext_pos = lib_file.rfind(".so");
+    if (ext_pos == std::string::npos || ext_pos + 3 != lib_file.length()) {
+        lib_file.append("_ds.so");
+    }
+    // And if it is not an absolute path, prepend it with our
+    // loadable backend library path
+    if (type[0] != '/') {
+        // When running from the build tree, we do NOT want
+        // to load the installed loadable library
+        if (getenv("B10_FROM_BUILD") != NULL) {
+            lib_file = std::string(getenv("B10_FROM_BUILD")) +
+                       "/src/lib/datasrc/.libs/" + lib_file;
+        } else {
+            lib_file = isc::datasrc::BACKEND_LIBRARY_PATH + lib_file;
+        }
+    }
+    return (lib_file);
+}
+} // end anonymous namespace
+
 namespace isc {
 namespace datasrc {
 
@@ -34,7 +80,10 @@ LibraryContainer::LibraryContainer(const std::string& name) {
     // are recognized as such
     ds_lib_ = dlopen(name.c_str(), RTLD_NOW | RTLD_GLOBAL);
     if (ds_lib_ == NULL) {
-        isc_throw(DataSourceLibraryError, dlerror());
+        // This may cause the filename to appear twice in the actual
+        // error, but the output of dlerror is implementation-dependent
+        isc_throw(DataSourceLibraryError, "dlopen failed for " << name << 
+                                          ": " << dlerror());
     }
 }
 
@@ -61,7 +110,7 @@ LibraryContainer::getSym(const char* name) {
 
 DataSourceClientContainer::DataSourceClientContainer(const std::string& type,
                                                      ConstElementPtr config)
-: ds_lib_(type + "_ds.so")
+: ds_lib_(getDataSourceLibFile(type))
 {
     // We are casting from a data to a function pointer here
     // Some compilers (rightfully) complain about that, but

+ 10 - 1
src/lib/datasrc/factory.h

@@ -68,7 +68,7 @@ public:
     ///             the library path.
     ///
     /// \exception DataSourceLibraryError If the library cannot be found or
-    ///            cannot be loaded.
+    ///            cannot be loaded, or if name is an empty string.
     LibraryContainer(const std::string& name);
 
     /// \brief Destructor
@@ -115,6 +115,15 @@ private:
 /// easy recognition and to reduce potential mistakes.
 /// For example, the sqlite3 implementation has the type 'sqlite3', and the
 /// derived filename 'sqlite3_ds.so'
+/// The value of type can be a specific loadable library; if it already ends
+/// with '.so', the loader will not add '_ds.so'.
+/// It may also be an absolute path; if it starts with '/', nothing is
+/// prepended. If it does not, the loadable library will be taken from the
+/// installation directory, see the value of
+/// isc::datasrc::BACKEND_LIBRARY_PATH in datasrc_config.h for the exact path.
+///
+/// \note When 'B10_FROM_BUILD' is set in the environment, the build
+///       directory is used instead of the install directory.
 ///
 /// There are of course some demands to an implementation, not all of which
 /// can be verified compile-time. It must provide a creator and destructor

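A hedged usage sketch of the type-to-library resolution described above. The DataSourceClientContainer constructor and the short-name/absolute-path behaviour come from the factory changes in this diff; the sqlite3 configuration key and the file locations in the comments are illustrative assumptions only, and construction will naturally fail if no such backend library is installed.

#include <datasrc/factory.h>
#include <cc/data.h>

using isc::data::ConstElementPtr;
using isc::data::Element;
using isc::datasrc::DataSourceClientContainer;

int main() {
    // Assumed sqlite3 backend configuration; the exact keys are defined by
    // the backend itself and are shown here only for illustration.
    const ConstElementPtr config =
        Element::fromJSON("{ \"database_file\": \"/tmp/example.sqlite3\" }");

    // Short name: "_ds.so" is appended and the backend directory
    // (BACKEND_LIBRARY_PATH, or the build tree when B10_FROM_BUILD is set)
    // is prepended, so this attempts to load sqlite3_ds.so from there.
    DataSourceClientContainer sqlite_ds("sqlite3", config);

    // A name already ending in ".so" is used as-is, and an absolute path
    // (leading '/') bypasses the backend directory entirely, e.g.:
    //   DataSourceClientContainer custom_ds("/usr/local/lib/my_backend.so",
    //                                       config);

    return 0;
}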
+ 0 - 0
src/lib/datasrc/rbtree.h


Some files were not shown because too many files changed in this diff