
Merge branch 'master' into work/grow

Conflicts:
	src/lib/datasrc/memory/zone_writer.cc
	src/lib/datasrc/tests/memory/Makefile.am
	src/lib/datasrc/tests/memory/zone_data_updater_unittest.cc
	src/lib/datasrc/tests/memory/zone_finder_unittest.cc
Michal 'vorner' Vaner committed 12 years ago
commit 025659c532
100 changed files with 4546 additions and 4416 deletions
  1. AUTHORS  +1 -0
  2. ChangeLog  +217 -0
  3. Makefile.am  +2 -7
  4. README  +6 -0
  5. configure.ac  +20 -39
  6. doc/design/ipc-high.txt  +382 -0
  7. doc/devel/mainpage.dox  +1 -0
  8. doc/guide/bind10-guide.xml  +86 -26
  9. examples/host/host.cc  +1 -1
  10. m4macros/Makefile.am  +2 -0
  11. m4macros/ax_sqlite3_for_bind10.m4  +25 -0
  12. src/bin/auth/auth_messages.mes  +33 -5
  13. src/bin/auth/auth_srv.cc  +42 -16
  14. src/bin/auth/datasrc_clients_mgr.h  +10 -1
  15. src/bin/auth/query.h  +2 -2
  16. src/bin/auth/tests/auth_srv_unittest.cc  +143 -124
  17. src/bin/auth/tests/config_unittest.cc  +1 -1
  18. src/bin/auth/tests/datasrc_clients_builder_unittest.cc  +47 -8
  19. src/bin/bind10/init.py.in  +4 -3
  20. src/bin/bind10/init_messages.mes  +2 -2
  21. src/bin/bind10/run_bind10.sh.in  +1 -1
  22. src/bin/bind10/tests/Makefile.am  +1 -1
  23. src/bin/bind10/tests/init_test.py.in  +11 -1
  24. src/bin/bindctl/bindcmd.py  +31 -34
  25. src/bin/bindctl/run_bindctl.sh.in  +1 -1
  26. src/bin/bindctl/tests/Makefile.am  +1 -1
  27. src/bin/bindctl/tests/bindctl_test.py  +44 -16
  28. src/bin/cfgmgr/plugins/datasrc.spec.pre.in  +14 -3
  29. src/bin/cfgmgr/plugins/tests/Makefile.am  +4 -1
  30. src/bin/cfgmgr/plugins/tests/datasrc_test.py  +96 -1
  31. src/bin/cfgmgr/plugins/tests/tsig_keys_test.py  +1 -1
  32. src/bin/cfgmgr/tests/Makefile.am  +1 -1
  33. src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in  +1 -1
  34. src/bin/cmdctl/Makefile.am  +2 -4
  35. src/bin/cmdctl/cmdctl.py.in  +72 -27
  36. src/bin/cmdctl/run_b10-cmdctl.sh.in  +1 -1
  37. src/bin/cmdctl/tests/Makefile.am  +2 -2
  38. src/bin/cmdctl/tests/b10-certgen_test.py  +1 -4
  39. src/bin/cmdctl/tests/cmdctl_test.py  +225 -19
  40. src/bin/dbutil/run_dbutil.sh.in  +1 -1
  41. src/bin/dbutil/tests/Makefile.am  +6 -0
  42. src/bin/dbutil/tests/dbutil_test.sh.in  +21 -21
  43. src/bin/ddns/ddns.py.in  +2 -2
  44. src/bin/ddns/ddns_messages.mes  +1 -1
  45. src/bin/ddns/tests/Makefile.am  +1 -1
  46. src/bin/dhcp4/config_parser.cc  +208 -1552
  47. src/bin/dhcp4/config_parser.h  +11 -13
  48. src/bin/dhcp4/ctrl_dhcp4_srv.h  +1 -1
  49. src/bin/dhcp4/dhcp4_messages.mes  +1 -1
  50. src/bin/dhcp4/dhcp4_srv.cc  +28 -6
  51. src/bin/dhcp4/dhcp4_srv.h  +4 -2
  52. src/bin/dhcp4/tests/Makefile.am  +1 -1
  53. src/bin/dhcp4/tests/config_parser_unittest.cc  +12 -10
  54. src/bin/dhcp4/tests/ctrl_dhcp4_srv_unittest.cc  +12 -14
  55. src/bin/dhcp4/tests/dhcp4_srv_unittest.cc  +191 -235
  56. src/bin/dhcp6/config_parser.cc  +252 -1581
  57. src/bin/dhcp6/config_parser.h  +10 -3
  58. src/bin/dhcp6/ctrl_dhcp6_srv.h  +1 -1
  59. src/bin/dhcp6/dhcp6.spec  +6 -0
  60. src/bin/dhcp6/dhcp6_srv.cc  +36 -10
  61. src/bin/dhcp6/dhcp6_srv.h  +2 -2
  62. src/bin/dhcp6/tests/Makefile.am  +1 -1
  63. src/bin/dhcp6/tests/config_parser_unittest.cc  +104 -6
  64. src/bin/dhcp6/tests/ctrl_dhcp6_srv_unittest.cc  +8 -10
  65. src/bin/dhcp6/tests/dhcp6_srv_unittest.cc  +343 -50
  66. src/bin/loadzone/run_loadzone.sh.in  +1 -1
  67. src/bin/loadzone/tests/Makefile.am  +1 -1
  68. src/bin/loadzone/tests/correct/Makefile.am  +1 -1
  69. src/bin/loadzone/tests/correct/correct_test.sh.in  +1 -1
  70. src/bin/loadzone/tests/correct/ttlext.db  +1 -1
  71. src/bin/msgq/Makefile.am  +1 -0
  72. src/bin/msgq/msgq.py.in  +16 -15
  73. src/bin/msgq/run_msgq.sh.in  +1 -1
  74. src/bin/msgq/tests/Makefile.am  +4 -4
  75. src/bin/msgq/tests/msgq_run_test.py  +278 -0
  76. src/bin/msgq/tests/msgq_test.py  +3 -3
  77. src/bin/resolver/Makefile.am  +1 -1
  78. src/bin/resolver/bench/Makefile.am  +25 -0
  79. src/bin/resolver/bench/dummy_work.cc  +28 -0
  80. src/bin/resolver/bench/dummy_work.h  +36 -0
  81. src/bin/resolver/bench/fake_resolution.cc  +172 -0
  82. src/bin/resolver/bench/fake_resolution.h  +228 -0
  83. src/bin/resolver/bench/main.cc  +27 -0
  84. src/bin/resolver/bench/naive_resolver.cc  +66 -0
  85. src/bin/resolver/bench/naive_resolver.h  +44 -0
  86. src/bin/sockcreator/README  +1 -1
  87. src/bin/sockcreator/sockcreator.h  +1 -1
  88. src/bin/stats/stats.py.in  +1 -1
  89. src/bin/stats/stats_httpd.py.in  +6 -5
  90. src/bin/stats/stats_httpd_messages.mes  +2 -2
  91. src/bin/stats/tests/Makefile.am  +2 -2
  92. src/bin/stats/tests/b10-stats-httpd_test.py  +137 -92
  93. src/bin/stats/tests/b10-stats_test.py  +201 -149
  94. src/bin/stats/tests/test_utils.py  +118 -247
  95. src/bin/tests/Makefile.am  +1 -1
  96. src/bin/usermgr/b10-cmdctl-usermgr.py.in  +1 -1
  97. src/bin/xfrin/b10-xfrin.xml  +126 -0
  98. src/bin/xfrin/tests/Makefile.am  +1 -1
  99. src/bin/xfrin/tests/xfrin_test.py  +212 -4
  100. src/bin/xfrin/xfrin.py.in  +0 -0

+ 1 - 0
AUTHORS

@@ -14,6 +14,7 @@ Michael Graff
 Michal Vaner
 Mukund Sivaraman
 Naoki Kambe
+Paul Selkirk
 Shane Kerr
 Shen Tingting
 Stephen Morris

+ 217 - 0
ChangeLog

@@ -1,3 +1,220 @@
+613.	[func]		jinmei
+	datasrc: Error handling in loading zones into memory is now more
+	consistent and convenient: data source configuration does not fail
+	due to zones configured to be loaded into memory but not available
+	in the data source, just like the case of missing zone file for
+	the MasterFiles type of data source.  Also, zones that aren't
+	loaded into memory due to errors can now be reloaded for b10-auth
+	using the bindctl Auth loadzone command after fixing the error,
+	without reconfiguring the entire data source.
+	(Trac #2851, git a3d4fe8a32003534150ed076ea0bbf80e1fcc43c)
+
+612.	[func]		tomek
+	b10-dhcp6: Support for relayed DHCPv6 traffic has been added.
+
+611.	[func]		naokikambe
+	Added Xfrin statistics items such as the number of successful
+	transfers.  These are per-zone type counters.  Their values can be
+	obtained with zone names by invoking "Stats show Xfrin" via bindctl
+	while Xfrin is running.
+	(Trac #2252, git e1a0ea8ef5c51b9b25afa111fbfe9347afbe5413)
+
+bind10-1.0.0beta2 released on May 10, 2013
+
+610.	[bug]		muks
+	When the sqlite3 program is not available on the system (in
+	PATH), we no longer attempt to run some tests which depend
+	on it.
+	(Trac #1909, git f85b274b85b57a094d33ca06dfbe12ae67bb47df)
+
+609.	[bug]		jinmei
+	Handled some rare error cases in DNS server classes correctly.
+	This fix specifically solves occasional crash of b10-auth due to
+	errors caused by TCP DNS clients.  Also, as a result of cleanups
+	with the fix, b10-auth should now be a little bit faster in
+	handling UDP queries: in some local experiments it ran about 5%
+	faster.
+	(Trac #2903, git 6d3e0f4b36a754248f8a03a29e2c36aef644cdcc)
+
+608.	[bug]		jinmei
+	b10-cmdctl: fixed a hangup problem on receiving the shutdown
+	command from bindctl.  Note, however, that cmdctl is defined as
+	a "needed" module by default, so shutting down cmdctl would cause
+	shutdown of the entire BIND 10 system anyway, and is therefore
+	still not very useful in practice.
+	(Trac #2712, git fa392e8eb391a17d30550d4b290c975710651d98)
+
+607.	[bug]		jinmei
+	Worked around some unit test regressions on FreeBSD 9.1 due to
+	a binary compatibility issue between standard and system
+	libraries (http://www.freebsd.org/cgi/query-pr.cgi?pr=175453).
+	While not all tests still pass, main BIND 10 programs should
+	generally work correctly.  Still, there can be odd run time
+	behavior such as abrupt crash instead of graceful shutdown
+	when some fatal event happens, so it's generally discouraged to
+	use BIND 10 on FreeBSD 9.1 RELEASE.  According to the above
+	bug report for FreeBSD, it seems upgrading or downgrading the
+	FreeBSD version will solve this problem.
+	(Trac #2887, git 69dfb4544d9ded3c10cffbbfd573ae05fdeb771f)
+
+606.	[bug]		jinmei
+	b10-xfrout now correctly stops sending notify requests once it
+	receives a valid response.  It previously behaved as if the
+	requests had timed out and resent them a few times in a short
+	period.
+	(Trac #2879, git 4c45f29f28ae766a9f7dc3142859f1d0000284e1)
+
+605.	[bug]		tmark
+	Modified perfdhcp to calculate the times displayed for packet sent
+	and received as time elapsed since perfdhcp process start time.
+	Previously these were times since the start of the epoch.
+	However the large numbers involved caused loss of precision
+	in the calculation of the test statistics.
+	(Trac #2785, git e9556924dcd1cf285dc358c47d65ed7c413e02cf)
+
+604.	[func]		marcin
+	libdhcp++: abstracted the methods which open sockets and send/receive
+	DHCPv4 packets into a separate class. Other classes will be derived
+	from it to implement OS-specific methods of DHCPv4 packet filtering.
+	The primary purpose for this change is to add support for Direct
+	DHCPv4 response to a client which doesn't have an address yet on
+	different OSes.
+	(Trac #991, git 33ffc9a750cd3fb34158ef676aab6b05df0302e2)
+
+603.	[func]		tmark
+	The directory in which the b10-dhcp4 and b10-dhcp6 server id files
+	are stored has been changed from the local state directory (set by
+	the "configure" --localstatedir switch) to the "bind10" subdirectory
+	of it. After an
+	upgrade, server id files in the former location will be orphaned and
+	should be manually removed.
+	(Trac #2770, git a622140d411b3f07a68a1451e19df36118a80650)
+
+602.	[bug]		tmark
+	Perfdhcp will now exit gracefully if the command line argument for
+	IP version (-4 or -6) does not match the command line argument
+	given for the server. Prior to this perfdhcp would core when given
+	an IP version of -6 but a valid IPv4 address for server.
+	(Trac #2784, git 96b66c0c79dccf9a0206a45916b9b23fe9b94f74)
+
+601.	[bug]*		jinmei, vorner
+	The "delete record" interface of the database based data source
+	was extended so that the parameter includes reversed name in
+	addition to the actual name.  This may help the underlying
+	accessor implementation if reversed names are more convenient
+	for the delete operation.  This was the case for the SQLite3
+	accessor implementation, and it now performs delete operations
+	much faster.  At a higher level, this means IXFR and DDNS Updates
+	to the sqlite3 database are no longer so slow on large zones as
+	they were before.
+	(Trac #2877, git 33bd949ac7288c61ed0a664b7329b50b36d180e5)
+
+600.	[bug]		tmark
+	Changed mysql_lease_mgr to set the SQL mode option to STRICT. This
+	causes MySQL to treat invalid input data as an error. Rather than
+	"successfully" inserting a too large value by truncating it, the
+	insert will fail, and the lease manager will throw an exception.
+	Also, attempts to create a HWAddr (hardware address) object with
+	too long an array of data now throw an exception.
+	(Trac #2387, git cac02e9290600407bd6f3071c6654c1216278616)
+
+599.	[func]		tomek
+	libdhcp++: Pkt6 class is now able to parse and build relayed DHCPv6
+	messages.
+	(Trac #2827, git 29c3f7f4e82d7e85f0f5fb692345fd55092796b4)
+
+bind10-1.0.0beta1 released on April 4, 2013
+
+598.	[func]*		jinmei
+	The separate "static" data source is now deprecated as it can be
+	served in the more generic "MasterFiles" type of data source.
+	This means existing configuration may not work after an update.
+	If "config show data_sources/classes/CH[0]" on bindctl contains a
+	"static" type of data source, you'll need to update it as follows:
+	> config set data_sources/classes/CH[0]/type MasterFiles
+	> config set data_sources/classes/CH[0]/params {"BIND": =>
+	  "<the value of current data_sources/classes/CH[0]/params>"}
+	> config set data_sources/classes/CH[0]/cache-enable true
+	> config commit
+	(Same for CH[1], CH[2], IN[0], etc, if applicable, although it
+	should be very unlikely in practice.  Also note: '=>' above
+	indicates the next line is actually part of the command.  Do
+	not type in this "arrow").
+	(Part of Trac #2833, git 0363b4187fe3c1a148ad424af39e12846610d2d7)
+
+597.	[func]		tmark
+	b10-dhcp6: Added unit tests for handling requests when no
+	IPv6 subnets are configured/defined. Testing these conditions
+	was overlooked during implementation of Trac #2719.
+	(Trac #2721, git ce7f53b2de60e2411483b4aa31c714763a36da64)
+
+596.	[bug]		jinmei
+	Added special handling for the case where b10-auth receives a
+	NOTIFY message, but zonemgr isn't running. Previously this was
+	logged as a communications problem at the ERROR level, resulting
+	in increasing noise when zonemgr is intentionally stopped. Other
+	than the log level there is no change in externally visible
+	behavior.
+	(Trac #2562, git 119eed9938b17cbad3a74c823aa9eddb7cd337c2)
+
+595.	[bug]		tomek
+	All DHCP components now gracefully refuse to handle too short
+	DUIDs and client-ids.
+	(Trac #2723, git a043d8ecda6aff57922fe98a33c7c3f6155d5d64)
+
+594.	[func]		muks, pselkirk
+	libdns++: the NSEC, DS, DLV, and AFSDB Rdata classes now use the
+	generic lexer in constructors from text.  This means that the name
+	fields in such RRs in a zone file can now be non-absolute (the
+	origin name in that context will be used), e.g., when loaded by
+	b10-loadzone.
+	(Trac #2386, git dc0f34afb1eccc574421a802557198e6cd2363fa)
+	(Trac #2391, git 1450d8d486cba3bee8be46e8001d66898edd370c)
+
+593.	[func]		jelte
+	The address + port format in output and logs is now consistent with our
+	coding guidelines, e.g. <address>:<port> in the case of IPv4, and
+	[<address>]:<port> in the case of IPv6, instead of <address>#<port>
+	(Trac #1086, git bcefe1e95cdd61ee4a09b20522c3c56b315a1acc)
+
+592.	[bug]		jinmei
+	b10-auth and zonemgr now handle some uncommon NOTIFY messages more
+	gracefully: auth immediately returns a NOTAUTH response if the
+	server does not have authority for the zone (the behavior
+	compatible with BIND 9) without bothering zonemgr; zonemgr now
+	simply skips retransfer if the specified zone is not in its
+	secondary zone list, instead of producing noisy error logs.
+	(Trac #1938, git 89d7de8e2f809aef2184b450e7dee1bfec98ad14)
+
+591.	[func]		vorner
+	Ported the remaining tests from the old shell/perl based system to
+	lettuce. Make target `systest' is now gone. Currently, the lettuce
+	tests are in git only, not part of the release tarball.
+	(Trac #2624, git df1c5d5232a2ab551cd98b77ae388ad568a683ad)
+
+590.	[bug]		tmark
+	Modified "include" statements in DHCP MySQL lease manager code to
+	fix build problems if MySQL is installed in a non-standard location.
+	(Trac #2825, git 4813e06cf4e0a9d9f453890557b639715e081eca)
+
+589.	[bug]		jelte
+	b10-cmdctl now automatically re-reads the user accounts file when
+	it is updated.
+	(Trac #2710, git 16e8be506f32de668699e6954f5de60ca9d14ddf)
+
+588.	[bug]*		jreed
+	b10-xfrout: Log message id XFROUT_QUERY_QUOTA_EXCCEEDED
+	changed to XFROUT_QUERY_QUOTA_EXCEEDED.
+	(git be41be890f1349ae4c870a887f7acd99ba1eaac5)
+
+587.	[bug]		jelte
+	When used from python, the dynamic datasource factory now
+	explicitly loads the logging messages dictionary, so that correct
+	logging messages do not depend on incidental earlier import
+	statements. Also, the sqlite3-specific log messages have been moved
+	from the general datasource library to the sqlite3 datasource
+	(which also explicitly loads its messages).
+	(Trac #2746, git 1c004d95a8b715500af448683e4a07e9b66ea926)
+
 586.	[func]		marcin
 	libdhcp++: Removed unnecesary calls to the function which
 	validates option definitions used to create instances of options
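
Entry 601 above mentions passing a "reversed name" alongside the actual name to the "delete record" interface. As a purely illustrative sketch (not part of the BIND 10 sources), a reversed name simply lists the labels from the root downwards, the form the entry says is more convenient for the SQLite3 accessor's delete operation:

    # Hypothetical helper, for illustration only: compute the reversed form
    # of a domain name (labels from the root downwards).
    def reversed_name(name):
        """'www.example.org.' -> 'org.example.www.'"""
        labels = [label for label in name.split(".") if label]
        return ".".join(reversed(labels)) + "."

    assert reversed_name("www.example.org.") == "org.example.www."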

+ 2 - 7
Makefile.am

@@ -2,7 +2,7 @@ ACLOCAL_AMFLAGS = -I m4macros -I examples/m4 ${ACLOCAL_FLAGS}
 # ^^^^^^^^ This has to be the first line and cannot come later in this
 # Makefile.am due to some bork in some versions of autotools.
 
-SUBDIRS = compatcheck doc . src tests
+SUBDIRS = compatcheck doc . src tests m4macros
 USE_LCOV=@USE_LCOV@
 LCOV=@LCOV@
 GENHTML=@GENHTML@
@@ -46,7 +46,7 @@ endif
 clean-cpp-coverage:
 	@if [ $(USE_LCOV) = yes ] ; then \
 		$(LCOV) --directory . --zerocounters; \
-		rm -rf coverage/; \
+		rm -rf $(abs_top_srcdir)/coverage-cpp-html/; \
 	else \
 		echo "C++ code coverage not enabled at configuration time." ; \
 		echo "Use: ./configure --with-lcov" ; \
@@ -115,11 +115,6 @@ cppcheck:
 		--template '{file}:{line}: check_fail: {message} ({severity},{id})' \
 		src
 
-# system tests
-systest:
-	cd tests/system; \
-	sh $(abs_srcdir)/tests/system/runall.sh
-
 ### include tool to generate documentation from log message specifications
 ### in the distributed tarball:
 EXTRA_DIST = tools/system_messages.py

+ 6 - 0
README

@@ -25,6 +25,12 @@ the DHCPv4 and DHCPv6 servers must be considered experimental.
 Limitations and known issues with this DHCP release can be found
 at http://bind10.isc.org/wiki/KeaKnownIssues
 
+NOTE: The API/ABI provided by libraries in BIND 10 may change in future
+point releases. So, for now, please do not assume that any code that you
+compile against a particular version of a BIND 10 library will work with
+future versions of the library. We aim to stabilize the public API/ABI
+interface of BIND 10 libraries in future releases.
+
 Documentation is included with the source. See doc/guide/bind10-guide.txt
 (or bind10-guide.html) for installation instructions.  The
 documentation is also available via the BIND 10 website at

+ 20 - 39
configure.ac

@@ -2,7 +2,7 @@
 # Process this file with autoconf to produce a configure script.
 
 AC_PREREQ([2.59])
-AC_INIT(bind10, 20130221, bind10-dev@isc.org)
+AC_INIT(bind10, 20130510, bind10-dev@isc.org)
 AC_CONFIG_SRCDIR(README)
 # serial-tests is not available in automake version before 1.13. In
 # automake 1.13 and higher, AM_PROG_INSTALL is undefined, so we'll check
@@ -323,9 +323,9 @@ if test -x ${PYTHON}-config; then
 	# so we only go through the flag if it's contained; also, protecting
 	# the output with [] seems necessary for environment to avoid getting
 	# an empty output accidentally.
-	python_config_ldflags=[`${PYTHON}-config --ldflags | sed -ne 's/\([ \t]*-L\)[ ]*\([^ \t]*[ \t]*\)/\1\2/pg'`]
+	python_config_ldflags=[`${PYTHON}-config --ldflags | ${SED} -ne 's/\([ \t]*-L\)[ ]*\([^ \t]*[ \t]*\)/\1\2/gp'`]
 	for flag in $python_config_ldflags; do
-		flag=`echo $flag | sed -ne 's/^\(\-L.*\)$/\1/p'`
+		flag=`echo $flag | ${SED} -ne 's/^\(\-L.*\)$/\1/p'`
 		if test "X${flag}" != X; then
 			PYTHON_LDFLAGS="$PYTHON_LDFLAGS ${flag}"
 		fi
@@ -351,7 +351,7 @@ fi
 if test "x$ISC_RPATH_FLAG" != "x"; then
 	python_rpath=
 	for flag in ${PYTHON_LDFLAGS}; do
-		python_rpath="${python_rpath} `echo $flag | sed -ne "s/^\(\-L\)/${ISC_RPATH_FLAG}/p"`"
+		python_rpath="${python_rpath} `echo $flag | ${SED} -ne "s/^\(\-L\)/${ISC_RPATH_FLAG}/p"`"
 	done
 	PYTHON_LDFLAGS="${PYTHON_LDFLAGS} ${python_rpath}"
 fi
@@ -388,8 +388,6 @@ In this case we will continue, but naming of python processes will not work.])
     fi
 fi
 
-# TODO: check for _sqlite3.py module
-
 # (g++ only check)
 # Python 3.2 has an unused parameter in one of its headers. This
 # has been reported, but not fixed as of yet, so we check if we need
@@ -536,7 +534,7 @@ if test "$lcov" != "no"; then
 		AC_MSG_ERROR([Cannot find lcov.])
 	fi
 	# is genhtml always in the same directory?
-	GENHTML=`echo "$LCOV" | sed s/lcov$/genhtml/`
+	GENHTML=`echo "$LCOV" | ${SED} s/lcov$/genhtml/`
 	if test ! -x $GENHTML; then
 		AC_MSG_ERROR([genhtml not found, needed for lcov])
 	fi
@@ -712,15 +710,15 @@ fi
 BOTAN_LDFLAGS=
 BOTAN_NEWLIBS=
 for flag in ${BOTAN_LIBS}; do
-    BOTAN_LDFLAGS="${BOTAN_LDFLAGS} `echo $flag | sed -ne '/^\(\-L\)/p'`"
-    BOTAN_LIBS="${BOTAN_LIBS} `echo $flag | sed -ne '/^\(\-l\)/p'`"
+    BOTAN_LDFLAGS="${BOTAN_LDFLAGS} `echo $flag | ${SED} -ne '/^\(\-L\)/p'`"
+    BOTAN_LIBS="${BOTAN_LIBS} `echo $flag | ${SED} -ne '/^\(\-l\)/p'`"
 done
 
 # See python_rpath for some info on why we do this
 if test "x$ISC_RPATH_FLAG" != "x"; then
     BOTAN_RPATH=
     for flag in ${BOTAN_LIBS}; do
-            BOTAN_RPATH="${BOTAN_RPATH} `echo $flag | sed -ne "s/^\(\-L\)/${ISC_RPATH_FLAG}/p"`"
+            BOTAN_RPATH="${BOTAN_RPATH} `echo $flag | ${SED} -ne "s/^\(\-L\)/${ISC_RPATH_FLAG}/p"`"
     done
 AC_SUBST(BOTAN_RPATH)
 
@@ -890,9 +888,12 @@ AC_ARG_WITH(shared-memory,
     [Build with Boost shared memory support; for large scale authoritative DNS servers]),
     [use_shared_memory=$withval])
 if test X$use_shared_memory = Xyes -a "$BOOST_MAPPED_FILE_WOULDFAIL" = "yes"; then
-    AC_MSG_ERROR([Boost shared memory does not compile on this system.  If you don't need it (most normal users won't) build without it; using a different compiler or a different version of Boost may also help.])
+    AC_MSG_ERROR([Boost shared memory does not compile on this system.  If you don't need it (most normal users won't) build without it by rerunning this script with --without-shared-memory; using a different compiler or a different version of Boost may also help.])
 fi
 AM_CONDITIONAL([USE_SHARED_MEMORY], [test x$use_shared_memory = xyes])
+if test "x$use_shared_memory" = "xyes"; then
+    AC_DEFINE(USE_SHARED_MEMORY, 1, [Define to 1 if shared memory support is enabled])
+fi
 AC_SUBST(BOOST_MAPPED_FILE_CXXFLAG)
 
 # Add some default CPP flags needed for Boost, identified by the AX macro.
@@ -1043,12 +1044,16 @@ AC_SUBST(GTEST_LDFLAGS)
 AC_SUBST(GTEST_LDADD)
 AC_SUBST(GTEST_SOURCE)
 
-dnl check for pkg-config itself so we don't try the m4 macro without pkg-config
+dnl check for pkg-config itself
 AC_CHECK_PROG(HAVE_PKG_CONFIG, pkg-config, yes, no)
 if test "x$HAVE_PKG_CONFIG" = "xno" ; then
   AC_MSG_ERROR(Please install pkg-config)
 fi
-PKG_CHECK_MODULES(SQLITE, sqlite3 >= 3.3.9, enable_features="$enable_features SQLite3")
+
+AX_SQLITE3_FOR_BIND10
+if test "x$have_sqlite" = "xyes" ; then
+  enable_features="$enable_features SQLite3"
+fi
 
 #
 # ASIO: we extensively use it as the C++ event management module.
@@ -1201,6 +1206,7 @@ AC_CONFIG_FILES([Makefile
                  src/bin/dhcp4/tests/Makefile
                  src/bin/resolver/Makefile
                  src/bin/resolver/tests/Makefile
+                 src/bin/resolver/bench/Makefile
                  src/bin/sysinfo/Makefile
                  src/bin/sockcreator/Makefile
                  src/bin/sockcreator/tests/Makefile
@@ -1316,13 +1322,13 @@ AC_CONFIG_FILES([Makefile
                  src/lib/statistics/Makefile
                  src/lib/statistics/tests/Makefile
                  tests/Makefile
-                 tests/system/Makefile
                  tests/tools/Makefile
                  tests/tools/badpacket/Makefile
                  tests/tools/badpacket/tests/Makefile
                  tests/tools/perfdhcp/Makefile
                  tests/tools/perfdhcp/tests/Makefile
                  tests/tools/perfdhcp/tests/testdata/Makefile
+                 m4macros/Makefile
                  dns++.pc
                ])
 AC_OUTPUT([doc/version.ent
@@ -1400,23 +1406,6 @@ AC_OUTPUT([doc/version.ent
            src/lib/util/python/gen_wiredata.py
            src/lib/server_common/tests/data_path.h
            tests/lettuce/setup_intree_bind10.sh
-           tests/system/conf.sh
-           tests/system/run.sh
-           tests/system/glue/setup.sh
-           tests/system/glue/nsx1/b10-config.db
-           tests/system/bindctl/nsx1/b10-config.db.template
-           tests/system/ixfr/db.example.n0
-           tests/system/ixfr/db.example.n2
-           tests/system/ixfr/db.example.n2.refresh
-           tests/system/ixfr/db.example.n4
-           tests/system/ixfr/db.example.n6
-           tests/system/ixfr/ixfr_init.sh
-           tests/system/ixfr/b10-config.db
-           tests/system/ixfr/common_tests.sh
-           tests/system/ixfr/in-1/setup.sh
-           tests/system/ixfr/in-2/setup.sh
-           tests/system/ixfr/in-3/setup.sh
-           tests/system/ixfr/in-4/setup.sh
           ], [
            chmod +x src/bin/cmdctl/run_b10-cmdctl.sh
            chmod +x src/bin/xfrin/run_b10-xfrin.sh
@@ -1447,14 +1436,6 @@ AC_OUTPUT([doc/version.ent
            chmod +x src/lib/util/python/mkpywrapper.py
            chmod +x src/lib/util/python/gen_wiredata.py
            chmod +x src/lib/python/isc/log/tests/log_console.py
-           chmod +x tests/system/conf.sh
-           chmod +x tests/system/run.sh
-           chmod +x tests/system/ixfr/ixfr_init.sh
-           chmod +x tests/system/ixfr/common_tests.sh
-           chmod +x tests/system/ixfr/in-1/setup.sh
-           chmod +x tests/system/ixfr/in-2/setup.sh
-           chmod +x tests/system/ixfr/in-3/setup.sh
-           chmod +x tests/system/ixfr/in-4/setup.sh
           ])
 AC_OUTPUT
 

+ 382 - 0
doc/design/ipc-high.txt

@@ -0,0 +1,382 @@
+The IPC protocol
+================
+
+While the cc-protocol.txt describes the low-level primitives, here we
+describe how the whole IPC should work and how to use it.
+
+Definitions
+-----------
+
+system::
+  The system that moves data between the users and does bookkeeping.
+  In our current implementation, it is implemented as the MsgQ daemon,
+  which the users connect to and it routes the data.
+user::
+  Usually a process; generally an entity that wants to communicate
+  with the other users.
+session::
+  Session is the interface by which the user communicates with the
+  system. A single user may have multiple sessions; a session belongs to
+  a single user.
+message::
+  A data blob sent by one user. The recipient might be the system
+  itself, another session, or a set of sessions (called a group, see
+  below; it may be empty). A message is either a response or an
+  original message (TODO: better name?).
+group::
+  A named set of sessions. Conceptually, all the possible groups
+  exist; there's no explicit creation or deletion of groups.
+session id::
+  Unique identifier of a session. It is not reused for the whole
+  lifetime of the system. Historically called `lname` in the code.
+undelivery signal::
+  While sending an original message, a client may request an
+  undelivery signal. If the recipient specification yields no
+  sessions to deliver the message to, the system informs the user about
+  the situation.
+sequence number::
+  Each message sent through the system carries a sequence number. The
+  number should be unique per sender. It can be used to pair a
+  response with the original message, since the response specifies the
+  sequence number of the message it responds to. Even responses and
+  messages not expecting an answer have a sequence number, but it is
+  generally unused.
+non-blocking operation::
+  Operation that will complete without waiting for anything.
+fast operation::
+  Operation that may wait for another process, but only for a very short
+  time. Generally, this includes communication between the user and
+  system, but not between two clients. It can be expected to be fast
+  enough to be used inside an interactive session, but may be too
+  heavy in the middle of query processing, for example. Every
+  non-blocking operation is considered fast.
+
+The session
+-----------
+
+The session interface allows for several operations interacting with
+the system. In the code, it is represented by a class.
+
+Possible operations include:
+
+Opening a session::
+  The session is created and connects to the system. This operation is
+  fast. The session receives a session id from the system.
+
+Group management::
+  A user may subscribe to (become a member of) a group, or unsubscribe from
+  a group. These are fast operations.
+
+Send::
+  A user may send a message, addressed to the system, or other
+  session(s). This operation is expected to be non-blocking
+  (current implementation is based on assumption of how OS handles the
+  sends, which may need to be revisited if it turns out to be false).
+
+Receive synchronously::
+  A user may wait for an incoming message in blocking mode. It is
+  possible to specify the kind of message to wait for: either an original
+  message or a response to a message. This interface has a timeout.
+
+Receive asynchronously::
+  Similar to previous, but non-blocking. It terminates immediately.
+  The user provides a callback that is invoked when the requested
+  message arrives.
+
+Terminate::
+  A session may be terminated. No more messages are sent or received
+  over it, the session is automatically unsubscribed from all the
+  groups. This operation is non-blocking. A session is terminated
+  automatically if the user exits.
+
+Assumptions
+-----------
+
+We assume reliability and order of delivery. Messages sent from user A
+to B are all delivered unchanged in original order as long as B
+exists.
+
+All of the above operations are expected to always succeed. If an
+error is reported, it should be considered fatal and the user should
+exit. In case a user still wants to continue, the session must be
+considered terminated and a new one must be created. Care must be
+taken not to use any information obtained from the previous session,
+since the state in other users and the system may have changed during
+the reconnect.
+
+Addressing
+----------
+
+Addressing happens in three ways:
+
+By group name::
+  The message is routed to all the sessions subscribed to this group.
+  It is legal to address an empty group; such a message is then
+  delivered to no sessions.
+By session ID::
+  The message is sent to the single session, if it is still alive.
+By an alias::
+  A session may have any number of aliases - well-known names. Only a
+  single session may hold a given alias (but this is not yet enforced by
+  the system). The message is delivered to the one session owning the
+  alias, if any. Internally, the aliases are implemented as groups
+  with single subscribed session, so it is the same as the first
+  option on the protocol level, but semantically it is different.
+
+The system
+----------
+
+The system performs these tasks:
+
+ * Maintains the open sessions and allows creating new ones.
+ * Keeps information about groups and which sessions are subscribed to
+   which group.
+ * Routes the messages between users.
+
+Also, the system itself is a user of the system. It can be reached by
+the alias `Msgq` and provides the following high-level services (see
+below):
+
+Notifications about sessions::
+  When a session is opened to the system or when a session is
+  terminated, a notification is sent to interested users. The
+  notification contains the session ID of the session in question.
+  The termination notification is probably more useful (if a user
+  communicated with a given session before, it might be interested to
+  know it is no longer available); the opening notification is provided
+  mostly for completeness.
+Notifications about group subscriptions::
+  When a session subscribes to a group or unsubscribes from a group, a
+  notification is sent to interested users. The notification contains
+  both the session ID of the session subscribing/unsubscribing and
+  name of the group. This includes notifications about aliases (since
+  aliases are groups internally).
+Commands to list sessions::
+  There's a command to list session IDs of all currently opened sessions
+  and a command to list session IDs of all sessions subscribed to a
+  given group. Note that using these lists might need some care, as
+  the information might be outdated at the time it is delivered to the
+  user.
+
+A user shows interest in notifications about sessions and group
+subscriptions by subscribing to a group with a well-known name (as with
+any notification).
+
+Note that due to implementation details, the `Msgq` alias is not yet
+available during the early stage of the bootstrap of the bind10 system. This
+means some very core services can't rely on the above services of the
+system. The alias is guaranteed to be working before the first
+non-core module is started.
+
+Higher-level services
+---------------------
+
+While the system is able to send any kind of data, the payload sent by
+users in bind10 is structured data encoded as JSON. The messages sent
+are of three general types:
+
+Command::
+  A message sent to single destination, with the undeliverable
+  signal turned on and expecting an answer. This is a request
+  to perform some operation on the recipient (it can have side effects
+  or not). The command is identified by a name and it can have
+  parameters. A command with the same name may behave differently (or
+  have different parameters) on different receiving users.
+Reply::
+  An answer to the `Command`. It is sent directly to the session where
+  the command originated from, does not expect further answer and the
+  undeliverable notification is not set. It either confirms the
+  command was run successfully and contains an optional result, or
+  notifies the sender of failure to run the command. Success and
+  failure differ only in the payload sent through the system, not in
+  the way it is sent. The undeliverable signal is failure
+  reply sent by the system on behalf of the missing recipient.
+Notification::
+  A message sent to any number of destinations (eg. sent to a group),
+  not expecting an answer. It notifies other users about an event or
+  change of state.
+
+Details of the higher-level
+---------------------------
+
+While there are libraries implementing the communication in convenient
+way, it is useful to know what happens inside.
+
+The notifications are probably the simplest. Users interested in
+receiving notifications of some family subscribe to the corresponding
+group. Then, a client sends a message to the group. For example, if
+clients `receiver-A` and `receiver-B` want to receive notifications
+about changes to zone data, they'd subscribe to the
+`Notifications/ZoneUpdates` group. Then another client (let's say
+`XfrIn`, with session ID `s12345`) would send something like:
+
+  s12345 -> Notifications/ZoneUpdates
+  {"notification": ["zone-update", {
+      "class": "IN",
+      "origin": "example.org.",
+      "serial": 123456
+  }]}
+
+Both receivers would receive the message and know that the
+`example.org` zone is now at version 123456. Note that multiple users
+may produce the same kind of notification. Also, a single group may be
+used to send multiple notification names (but they should be related;
+in our example, the `Notifications/ZoneUpdates` could be used for
+`zone-update`, `zone-available` and `zone-unavailable` notifications
+for change in zone data, configuration of new zone in the system and
+removal of a zone from configuration).
+
+Sending a command to single recipient is slightly more complex. The
+sending user sends a message to the receiving one, addressed either by
+session ID or by an alias (group to which at most one session may be
+subscribed). The message contains the name of the command and
+parameters. It is sent with the undeliverable signals turned on.
+The user also starts a timer (with reasonably long timeout). The
+sender also subscribes to notifications about terminated sessions or
+unsubscription from the alias group.
+
+The receiving user gets the message, runs the command and sends a
+response back, with the result. The response has the undeliverable
+signal turned off and it is marked as response to the message
+containing the command. The sending user receives the answer and pairs
+it with the command.
+
+There are several things that may go wrong.
+
+* There might be an error on the receiving user (bad parameters, the
+  operation failed, the recipient doesn't know a command of that name).
+  The receiving side sends the response as before; the only
+  difference is the content of the payload. The sending user is
+  notified about it, without delays.
+* The recipient user doesn't exist (either the session ID is wrong or
+  terminated already, or the alias is empty). The system sends a
+  failure response and the sending user knows immediately the command
+  failed.
+* The recipient disconnects while processing the command (possibly
+  crashes). The sender gets a notification about disconnection or
+  unsubscription from the alias group and knows the answer won't come.
+* The recipient ``blackholes'' the command. It receives it, but never
+  answers. The sender's timeout eventually expires. As this is a serious
+  programmer error in the recipient and should be rare, the sender
+  should at least log an error to notify about the case.
+
+One example would be asking the question of life, universe and
+everything (all the examples assume the sending user is already
+subscribed to the notifications):
+
+  s12345 -> DeepThought
+  {"command": ["question", {
+      "what": ["Life", "Universe", "*"]
+  }]}
+  s23456 -> s12345
+  {"reply": [0, 42]}
+
+DeepThought has an alias, but the answer is sent from its session
+ID. The `0` in the reply means ``success''.
+
+Another example might be asking for some data at a bureau and getting
+an error:
+
+  s12345 -> Burreau
+  {"command": ["provide-information", {
+      "about": "me",
+      "topic": "taxes"
+  }]}
+  s23456 -> s12345
+  {"reply": [1, "You need to fill in other form"]}
+
+And, in this example, the sender is trying to reach a non-existent
+session. The `msgq` here is not the alias `Msgq`, but a special
+``phantom'' session ID that is not listed anywhere.
+
+  s12345 -> s0
+  {"command": ["ping"]}
+  msgq -> s12345
+  {"reply": [-1, "No such recipient"]}
+
+Last, an example when the other user disconnects while processing the
+command.
+
+  s12345 -> s23456
+  {"command": ["shutdown"]}
+  msgq -> s12345
+  {"notification": ["disconnected", {
+    "lname": "s23456"
+  }]}
+
+The system does not support sending a command to multiple users
+directly. It can be accomplished like this:
+
+* The sending user calls a command on the system to get list of
+  sessions in given group. This is command to alias, so it can be done
+  by the previous way.
+* After receiving the list of session IDs, multiple copies of the
+  command are sent by the sending user, one to each of the session
+  IDs.
+* Successes and failures are handled the same as above, since these
+  are just single-recipient commands.
+
+So, this would be an example with an unhelpful war council.
+
+  s12345 -> Msgq
+  {"command": ["get-subscriptions", {
+      "group": "WarCouncil"
+  }]}
+  msgq -> s12345
+  {"reply": [0, ["s1", "s2", "s3"]]}
+  s12345 -> s1
+  {"command": ["advice", {
+      "topic": "Should we attack?"
+  }]}
+  s12345 -> s2
+  {"command": ["advice", {
+      "topic": "Should we attack?"
+  }]}
+  s12345 -> s3
+  {"command": ["advice", {
+      "topic": "Should we attack?"
+  }]}
+  s1 -> s12345
+  {"reply": [0, true]}
+  s2 -> s12345
+  {"reply": [0, false]}
+  s3 -> s12345
+  {"reply": [1, "Advice feature not implemented"]}
+
+Users
+-----
+
+While there's a lot of flexibility for the behaviour of a user, it
+usually comes to something like this (during the lifetime of the
+user):
+
+* The user starts up.
+* Then it creates one or more sessions (there may be technical reasons
+  to have more than one session, such as threads, but it is not
+  required by the system).
+* It subscribes to some groups to receive notifications in future.
+* It binds to some aliases if it wants to be reachable by others by a
+  nice name.
+* It invokes some start-up commands (to get the configuration, for
+  example).
+* During the lifetime, it listens for notifications and answers
+  commands. It also invokes remote commands and sends notifications
+  about things that are happening.
+* Eventually, the user terminates, closing all the sessions it had
+  opened.
+
+Known limitations
+-----------------
+
+It is meant mostly as a signalling protocol. Sending millions of
+messages or messages of several tens of megabytes is probably a bad
+idea. While there's no architectural limitation with regard to the
+number of transferred messages, and the maximum size of a message is 4GB,
+the code is not optimised and would probably be very slow.
+
+We currently expect the system not to be under heavy load. Therefore, we
+expect the system to keep up with users sending messages. The
+libraries write in blocking mode, which is no problem if this
+expectation holds, as the write buffers will generally be empty and
+the write won't block; but if it turns out not to be the case, we
+might need to reconsider.
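
To make the command/reply conventions above concrete, here is a minimal, self-contained Python sketch. It is not the real MsgQ daemon or session library; all names (FakeSystem, open_session and so on) are invented for illustration. It models groups and aliases, per-sender sequence numbers, pairing a reply with its command, and the failure reply the system sends on behalf of a missing recipient:

    # Illustrative model only -- not the real MsgQ daemon or session library.
    import itertools
    from collections import defaultdict

    class FakeSystem:
        """Routes messages between sessions; stands in for the system (MsgQ)."""
        def __init__(self):
            self._inboxes = {}               # session id -> list of messages
            self._groups = defaultdict(set)  # group/alias name -> session ids
            self._ids = ("s%d" % n for n in itertools.count(12345))

        def open_session(self):
            sid = next(self._ids)
            self._inboxes[sid] = []
            return sid

        def subscribe(self, sid, group):
            self._groups[group].add(sid)

        def send(self, sender, to, payload, seq, want_undelivered=False):
            # "to" is either a session id or a group/alias name.
            targets = [to] if to in self._inboxes else sorted(self._groups[to])
            if not targets and want_undelivered:
                # Failure reply sent by the system on behalf of the recipient.
                self._inboxes[sender].append(
                    ("msgq", seq, {"reply": [-1, "No such recipient"]}))
            for target in targets:
                self._inboxes[target].append((sender, seq, payload))

        def receive(self, sid):
            inbox = self._inboxes[sid]
            return inbox.pop(0) if inbox else None

    system = FakeSystem()
    asker = system.open_session()
    oracle = system.open_session()
    system.subscribe(oracle, "DeepThought")  # an alias is a one-member group

    # Command: addressed to the alias, undeliverable signal turned on.
    system.send(asker, "DeepThought",
                {"command": ["question", {"what": ["Life", "Universe", "*"]}]},
                seq=1, want_undelivered=True)
    sender, seq, msg = system.receive(oracle)
    # Reply: sent to the originating session id, paired by sequence number.
    system.send(oracle, sender, {"reply": [0, 42]}, seq)
    print(system.receive(asker))  # (oracle's id, 1, {'reply': [0, 42]})

    # Command to a phantom session id: the system answers with a failure.
    system.send(asker, "s0", {"command": ["ping"]}, seq=2,
                want_undelivered=True)
    print(system.receive(asker))  # ('msgq', 2, {'reply': [-1, ...]})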

+ 1 - 0
doc/devel/mainpage.dox

@@ -30,6 +30,7 @@
  *   - @subpage dhcpv6ConfigInherit
  * - @subpage libdhcp
  *   - @subpage libdhcpIntro
+ *   - @subpage libdhcpRelay
  *   - @subpage libdhcpIfaceMgr
  * - @subpage libdhcpsrv
  *   - @subpage leasemgr

+ 86 - 26
doc/guide/bind10-guide.xml

@@ -772,6 +772,16 @@ as a dependency earlier -->
             </listitem>
           </varlistentry>
 
+          <varlistentry>
+            <term>--without-werror</term>
+            <listitem>
+              <simpara>Disable the default use of the
+		<option>-Werror</option> compiler flag so that
+		compiler warnings are not treated as build failures.
+              </simpara>
+            </listitem>
+          </varlistentry>
+
           </variablelist>
           <note>
             <para>
@@ -2487,8 +2497,8 @@ can use various data source backends.
       <para>
         The configuration is located in data_sources/classes. Each item there
         represents one RR class and a list used to answer queries for that
-        class. The default contains two classes. The CH class contains a static
-        data source &mdash; one that serves things like
+        class. The default contains two classes. The CH class contains a
+        built-in data source &mdash; one that serves things like
         <quote>AUTHORS.BIND.</quote>. The IN class contains single SQLite3
         data source with database file located at
         <filename>/usr/local/var/bind10/zone.sqlite3</filename>.
@@ -2555,7 +2565,7 @@ can use various data source backends.
         </para>
 
         <para>
-          First, let's disable the static data source
+          First, let's disable the built-in data source
           (<quote>VERSION.BIND</quote> and friends). As it is the only
           data source in the CH class, we can remove the whole class.
 
@@ -4100,7 +4110,7 @@ Dhcp4/subnet4	[]	list	(default)
 &gt; <userinput>config commit</userinput>
 </screen>
     Even though the "container" option does not carry any data except
-    sub-options, the "data" field must be explictly set to an empty value.
+    sub-options, the "data" field must be explicitly set to an empty value.
     This is required because in the current version of BIND 10 DHCP, the
     default configuration values are not propagated to the configuration parsers:
     if the "data" is not set the parser will assume that this
@@ -4812,7 +4822,7 @@ should include options from the isc option space:
 &gt; <userinput>config commit</userinput>
 </screen>
     Even though the "container" option does not carry any data except
-    sub-options, the "data" field must be explictly set to an empty value.
+    sub-options, the "data" field must be explicitly set to an empty value.
     This is required because in the current version of BIND 10 DHCP, the
     default configuration values are not propagated to the configuration parsers:
     if the "data" is not set the parser will assume that this
@@ -4832,29 +4842,22 @@ should include options from the isc option space:
       <section id="dhcp6-config-subnets">
         <title>Subnet Selection</title>
           <para>
-          The DHCPv6 server may receive requests from local (connected
-          to the same subnet as the server) and remote (connecting via
-          relays) clients.
-          <note>
-          <para>
-          Currently relayed DHCPv6 traffic is not supported.  The server will
-          only respond to local DHCPv6 requests - see <xref linkend="dhcp6-limit"/>
-          </para>
-          </note>
-          As it may have many subnet configurations defined, it
-          must select appropriate subnet for a given request. To do this, the server first
+          The DHCPv6 server may receive requests from local (connected to the
+          same subnet as the server) and remote (connecting via relays) clients.
+          As the server may have many subnet configurations defined, it must select
+          appropriate subnet for a given request. To do this, the server first
           checks if there is only one subnet defined and source of the packet is
-          link-local. If this is the case, the server assumes that the only subnet
-          defined is local and client is indeed connected to it. This check
-          simplifies small deployments.
+          link-local. If this is the case, the server assumes that the only
+          subnet defined is local and the client is indeed connected to it. This
+          check simplifies small deployments.
           </para>
           <para>
           If there are two or more subnets defined, the server can not assume
           which of those (if any) subnets are local. Therefore an optional
-          "interface" parameter is available within a subnet definition to designate that a given subnet
-          is local, i.e. reachable directly over specified interface. For example
-          the server that is intended to serve a local subnet over eth0 may be configured
-          as follows:
+          "interface" parameter is available within a subnet definition to
+          designate that a given subnet is local, i.e. reachable directly over
+          the specified interface. For example, a server that is intended to serve
+          a local subnet over eth0 may be configured as follows:
 <screen>
 &gt; <userinput>config add Dhcp6/subnet6</userinput>
 &gt; <userinput>config set Dhcp6/subnet6[1]/subnet "2001:db8:beef::/48"</userinput>
@@ -4865,6 +4868,66 @@ should include options from the isc option space:
         </para>
       </section>
 
+      <section id="dhcp6-relays">
+        <title>DHCPv6 Relays</title>
+        <para>
+          A DHCPv6 server with multiple subnets defined must select the
+          appropriate subnet when it receives a request from a client.  For clients
+          connected via relays, two mechanisms are used:
+        </para>
+        <para>
+          The first uses the linkaddr field in the RELAY_FORW message. The name
+          of this field is somewhat misleading in that it does not contain a link-layer
+          address: instead, it holds an address (typically a global address) that is
+          used to identify a link. The DHCPv6 server checks if the address belongs
+          to a defined subnet and, if it does, that subnet is selected for the client's
+          request.
+        </para>
+        <para>
+          The second mechanism is based on interface-id options. While forwarding a client's
+          message, relays may insert an interface-id option into the message that
+          identifies the interface on the relay that received the message. (Some
+          relays allow configuration of that parameter, but it is sometimes
+          hardcoded and may range from the very simple (e.g. "vlan100") to the very cryptic:
+          one example seen on real hardware was "ISAM144|299|ipv6|nt:vp:1:110"). The
+          server can use this information to select the appropriate subnet.
+          The information is also returned to the relay which then knows the
+          interface to use to transmit the response to the client. In order for
+          this to work successfully, the relay interface IDs must be unique within
+          the network and the server configuration must match those values.
+        </para>
+        <para>
+          When configuring the DHCPv6 server, it should be noted that two
+          similarly-named parameters can be configured for a subnet:
+          <itemizedlist>
+            <listitem><simpara>
+              "interface" defines which local network interface can be used
+              to access a given subnet.
+            </simpara></listitem>
+            <listitem><simpara>
+              "interface-id" specifies the content of the interface-id option
+              used by relays to identify the interface on the relay to which
+              the response packet is sent.
+            </simpara></listitem>
+          </itemizedlist>
+          The two are mutually exclusive: a subnet cannot be both reachable locally
+          (direct traffic) and via relays (remote traffic). Specifying both is a
+          configuration error and the DHCPv6 server will refuse such a configuration.
+        </para>
+
+        <para>
+          To specify interface-id with value "vlan123", the following commands can
+          be used:
+          <screen>
+&gt; <userinput>config add Dhcp6/subnet6</userinput>
+&gt; <userinput>config set Dhcp6/subnet6[0]/subnet "2001:db8:beef::/48"</userinput>
+&gt; <userinput>config set Dhcp6/subnet6[0]/pool [ "2001:db8:beef::/48" ]</userinput>
+&gt; <userinput>config set Dhcp6/subnet6[0]/interface-id "vlan123"</userinput>
+&gt; <userinput>config commit</userinput>
+</screen>
+        </para>
+      </section>
+
    </section>
 
     <section id="dhcp6-serverid">
@@ -4931,9 +4994,6 @@ Dhcp6/renew-timer	1000	integer	(default)
           </para>
         </listitem>
         <listitem>
-          <simpara>Relayed traffic is not supported.</simpara>
-        </listitem>
-        <listitem>
           <simpara>Temporary addresses are not supported.</simpara>
         </listitem>
         <listitem>
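
The subnet selection rules described above (the single-subnet shortcut for link-local sources, linkaddr and interface-id matching for relayed traffic, and the "interface" parameter for direct traffic) can be modelled with a short sketch. This is a simplified Python illustration of the documented behaviour, not the b10-dhcp6 implementation; the subnets and interface names are invented:

    # Simplified, hypothetical model of the selection rules; not b10-dhcp6 code.
    import ipaddress

    SUBNETS = [
        {"subnet": "2001:db8:beef::/48", "interface": "eth0", "interface-id": None},
        {"subnet": "2001:db8:cafe::/48", "interface": None, "interface-id": "vlan123"},
    ]

    def select_subnet(subnets, src_addr, iface=None, relay_linkaddr=None,
                      relay_interface_id=None):
        """Pick a subnet for a request, roughly following the rules above."""
        # Single-subnet shortcut: a link-local source is assumed to be local.
        if len(subnets) == 1 and ipaddress.ip_address(src_addr).is_link_local:
            return subnets[0]
        for s in subnets:
            prefix = ipaddress.ip_network(s["subnet"])
            # Relayed traffic: match the relay's linkaddr against the prefix...
            if relay_linkaddr and ipaddress.ip_address(relay_linkaddr) in prefix:
                return s
            # ...or match the interface-id option inserted by the relay.
            if relay_interface_id and s["interface-id"] == relay_interface_id:
                return s
            # Direct traffic: match the local interface configured for the subnet.
            if iface and s["interface"] == iface:
                return s
        return None  # no matching subnet; the request cannot be served

    print(select_subnet(SUBNETS, "2001:db8:1::1",
                        relay_interface_id="vlan123")["subnet"])        # cafe subnet
    print(select_subnet(SUBNETS, "2001:db8:1::1",
                        relay_linkaddr="2001:db8:beef::100")["subnet"])  # beef subnet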

+ 1 - 1
examples/host/host.cc

@@ -95,7 +95,7 @@ host_lookup(const char* const name, const char* const dns_class,
         cout << "Name: " << server << "\n";
         // TODO: I guess I have to do a lookup to get that address and aliases
         // too
-        //cout << "Address: " << address << "\n" ; // "#" << port << "\n";
+        //cout << "Address: " << address << "\n" ;
         //cout << "Aliases: " << server << "\n";
     }
 

+ 2 - 0
m4macros/Makefile.am

@@ -0,0 +1,2 @@
+EXTRA_DIST  = ax_boost_for_bind10.m4
+EXTRA_DIST += ax_sqlite3_for_bind10.m4

+ 25 - 0
m4macros/ax_sqlite3_for_bind10.m4

@@ -0,0 +1,25 @@
+dnl @synopsis AX_SQLITE3_FOR_BIND10
+dnl
+dnl Test for the sqlite3 library and program, intended to be used within
+dnl BIND 10, and to test BIND 10.
+dnl
+dnl We use pkg-config to look for the sqlite3 library, so the sqlite3
+dnl development package with the .pc file must be installed.
+dnl
+dnl This macro sets SQLITE_CFLAGS and SQLITE_LIBS. It also sets
+dnl SQLITE3_PROGRAM to the path of the sqlite3 program, if it is found
+dnl in PATH.
+
+AC_DEFUN([AX_SQLITE3_FOR_BIND10], [
+
+PKG_CHECK_MODULES(SQLITE, sqlite3 >= 3.3.9,
+    have_sqlite="yes",
+    have_sqlite="no (sqlite3 not detected)")
+
+# Check for sqlite3 program
+AC_PATH_PROG(SQLITE3_PROGRAM, sqlite3, no)
+AM_CONDITIONAL(HAVE_SQLITE3_PROGRAM, test "x$SQLITE3_PROGRAM" != "xno")
+
+# TODO: check for _sqlite3.py module
+
+])dnl AX_SQLITE3_FOR_BIND10

+ 33 - 5
src/bin/auth/auth_messages.mes

@@ -266,9 +266,16 @@ bug ticket for this issue.
 This is a debug message issued when the authoritative server has received
 a command on the command channel.
 
-% AUTH_RECEIVED_NOTIFY received incoming NOTIFY for zone name %1, zone class %2
+% AUTH_RECEIVED_NOTIFY received incoming NOTIFY for zone %1/%2 from %3
 This is a debug message reporting that an incoming NOTIFY was received.
 
+% AUTH_RECEIVED_NOTIFY_NOTAUTH received bad NOTIFY for zone %1/%2 from %3
+The authoritative server received a NOTIFY message, but the specified zone
+doesn't match any of the zones served by the server.  The server doesn't
+process the message further, and returns a response with an Rcode of
+NOTAUTH.  Note: RFC 1996 does not specify the server behavior in this case;
+responding with Rcode of NOTAUTH follows BIND 9's behavior.
+
 % AUTH_RESPONSE_FAILURE exception while building response to query: %1
 This is a debug message, generated by the authoritative server when an
 attempt to create a response to a received DNS packet has failed. The
@@ -367,12 +374,33 @@ XFRIN (Transfer-in) process.  It is issued during server startup is an
 indication that the initialization is proceeding normally.
 
 % AUTH_ZONEMGR_COMMS error communicating with zone manager: %1
-This is a debug message output during the processing of a NOTIFY request.
+This is an internal error during the processing of a NOTIFY request.
 An error (listed in the message) has been encountered whilst communicating
 with the zone manager. The NOTIFY request will not be honored.
+This may be some temporary failure, but is generally an unexpected
+event and is quite likely a bug.  It's probably worth filing a report.
 
 % AUTH_ZONEMGR_ERROR received error response from zone manager: %1
-This is a debug message output during the processing of a NOTIFY
-request. The zone manager component has been informed of the request,
+The zone manager component has been informed of the request,
 but has returned an error response (which is included in the message). The
-NOTIFY request will not be honored.
+NOTIFY request will not be honored.  As of this writing, this can only
+happen due to a bug inside the Zonemgr implementation.  Zonemgr itself
+may log a more detailed cause of this, which is probably worth
+including in a bug report.
+
+% AUTH_ZONEMGR_NOTEXIST received NOTIFY but Zonemgr does not exist
+This is a debug message produced by the authoritative server when it
+receives a NOTIFY message but the Zonemgr component is not running at
+that time.  Not running Zonemgr is completely valid for, e.g., primary
+only servers, so this is not necessarily a problem.  If this message
+is logged even though Zonemgr is supposed to be running, it's worth
+checking other logs to identify why that happens.  It may or may not
+be a real problem (for example, if it's immediately after the system
+startup, it's possible that Auth has started up and is running but
+Zonemgr is not yet).  Even if this is indeed an unexpected case,
+Zonemgr should normally be restarted by the Init process, so unless
+this repeats too often it may be negligible in practice (still it's
+worth filing a bug report).  In any case, the authoritative server
+simply drops the NOTIFY message; if it's a temporary failure or
+delayed startup, subsequently resent messages will eventually reach
+Zonemgr.

+ 42 - 16
src/bin/auth/auth_srv.cc

@@ -22,6 +22,7 @@
 #include <config/ccsession.h>
 
 #include <cc/data.h>
+#include <cc/proto_defs.h>
 
 #include <exceptions/exceptions.h>
 
@@ -41,7 +42,7 @@
 
 #include <asiodns/dns_service.h>
 
-#include <datasrc/data_source.h>
+#include <datasrc/exceptions.h>
 #include <datasrc/client_list.h>
 
 #include <xfr/xfrout_client.h>
@@ -747,6 +748,8 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, Message& message,
                            std::auto_ptr<TSIGContext> tsig_context,
                            MessageAttributes& stats_attrs)
 {
+    const IOEndpoint& remote_ep = io_message.getRemoteEndpoint(); // for logs
+
     // The incoming notify must contain exactly one question for SOA of the
     // zone name.
     if (message.getRRCount(Message::SECTION_QUESTION) != 1) {
@@ -769,23 +772,34 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, Message& message,
     // on, but we don't check these conditions.  This behavior is compatible
     // with BIND 9.
 
-    // TODO check with the conf-mgr whether current server is the auth of the
-    // zone
-
-    // In the code that follows, we simply ignore the notify if any internal
-    // error happens rather than returning (e.g.) SERVFAIL.  RFC 1996 is
-    // silent about such cases, but there doesn't seem to be anything we can
-    // improve at the primary server side by sending an error anyway.
-    if (xfrin_session_ == NULL) {
-        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NO_XFRIN);
-        return (false);
+    // See if we have the specified zone in our data sources; if not return
+    // NOTAUTH, following BIND 9 (this is not specified in RFC 1996).
+    bool is_auth = false;
+    {
+        auth::DataSrcClientsMgr::Holder datasrc_holder(datasrc_clients_mgr_);
+        const shared_ptr<datasrc::ClientList> dsrc_clients =
+            datasrc_holder.findClientList(question->getClass());
+        is_auth = dsrc_clients &&
+            dsrc_clients->find(question->getName(), true, false).exact_match_;
+    }
+    if (!is_auth) {
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_RECEIVED_NOTIFY_NOTAUTH)
+            .arg(question->getName()).arg(question->getClass()).arg(remote_ep);
+        makeErrorMessage(renderer_, message, buffer, Rcode::NOTAUTH(),
+                         stats_attrs, tsig_context);
+        return (true);
     }
 
     LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_RECEIVED_NOTIFY)
-      .arg(question->getName()).arg(question->getClass());
+        .arg(question->getName()).arg(question->getClass()).arg(remote_ep);
 
-    const string remote_ip_address =
-        io_message.getRemoteEndpoint().getAddress().toText();
+    // xfrin_session_ should have been set and never be replaced except in
+    // tests; otherwise it's an internal bug.  assert() may be too strong,
+    // but processMessage() will catch all exceptions, so there's no better
+    // way.
+    assert(xfrin_session_);
+
+    const string remote_ip_address = remote_ep.getAddress().toText();
     static const string command_template_start =
         "{\"command\": [\"notify\", {\"zone_name\" : \"";
     static const string command_template_master = "\", \"master\" : \"";
@@ -800,12 +814,24 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, Message& message,
                 command_template_end);
         const unsigned int seq =
             xfrin_session_->group_sendmsg(notify_command, "Zonemgr",
-                                          "*", "*");
+                                          CC_INSTANCE_WILDCARD,
+                                          CC_INSTANCE_WILDCARD, true);
         ConstElementPtr env, answer, parsed_answer;
         xfrin_session_->group_recvmsg(env, answer, false, seq);
         int rcode;
         parsed_answer = parseAnswer(rcode, answer);
-        if (rcode != 0) {
+        if (rcode == CC_REPLY_NO_RECPT) {
+            // This can happen when Zonemgr is not running.  When we support
+            // notification-based membership framework, we should check if it's
+            // supposed to be running and shouldn't even send the command if
+            // not.  Until then, we log this event at the debug level as we
+            // don't know whether it's a real trouble or intentional
+            // don't know whether it indicates real trouble or an intentional
+            // propagate the exception and return SERVFAIL to suppress further
+            // NOTIFY).
+            LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_ZONEMGR_NOTEXIST);
+            return (false);
+        } else if (rcode != CC_REPLY_SUCCESS) {
             LOG_ERROR(auth_logger, AUTH_ZONEMGR_ERROR)
                       .arg(parsed_answer->str());
             return (false);
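
For readers of this hunk, the "notify" command that processNotify()
assembles from the command_template_* strings and sends to Zonemgr over
the CC session is a small JSON structure.  The following is only a rough
illustration (written as a Python literal); the field names come from the
code and the unit tests in this commit, while the zone name and address
are made-up example values:

    # Hypothetical example of the assembled command; not taken verbatim
    # from the repository.
    notify_command = {
        "command": ["notify", {
            "zone_name": "example.",   # zone from the NOTIFY question
            "master": "192.0.2.1",     # remote address of the NOTIFY sender
            "zone_class": "IN"
        }]
    }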

+ 10 - 1
src/bin/auth/datasrc_clients_mgr.h

@@ -25,7 +25,7 @@
 
 #include <cc/data.h>
 
-#include <datasrc/data_source.h>
+#include <datasrc/exceptions.h>
 #include <datasrc/client_list.h>
 #include <datasrc/memory/zone_writer.h>
 
@@ -639,12 +639,21 @@ DataSrcClientsBuilderBase<MutexType, CondVarType>::getZoneWriter(
                   AUTH_DATASRC_CLIENTS_BUILDER_LOAD_ZONE_NOCACHE)
             .arg(origin).arg(rrclass);
         break;                  // return NULL below
+    case datasrc::ConfigurableClientList::CACHE_NOT_WRITABLE:
+        // This is an internal error. Auth server should skip reloading zones
+        // on non-writable caches.
+        isc_throw(InternalCommandError, "failed to load zone " << origin
+                  << "/" << rrclass << ": internal failure, in-memory cache "
+                  "is not writable");
     case datasrc::ConfigurableClientList::CACHE_DISABLED:
         // This is an internal error. Auth server must have the cache
         // enabled.
         isc_throw(InternalCommandError, "failed to load zone " << origin
                   << "/" << rrclass << ": internal failure, in-memory cache "
                   "is somehow disabled");
+    default:                    // other cases can really never happen
+        isc_throw(Unexpected, "Impossible result in getting data source "
+                  "ZoneWriter: " << writerpair.first);
     }
 
     return (boost::shared_ptr<datasrc::memory::ZoneWriter>());

+ 2 - 2
src/bin/auth/query.h

@@ -329,8 +329,8 @@ public:
 
     /// \short Bad zone data encountered.
     ///
-    /// This is thrown when process encounteres misconfigured zone in a way
-    /// it can't continue. This throws, not sets the Rcode, because such
+    /// This is thrown when a process encounters a misconfigured zone in a
+    /// way it can't continue. It throws rather than sets the Rcode, because such
     /// misconfigured zone should not be present in the data source and
     /// should have been rejected sooner.
     struct BadZone : public isc::Exception {

+ 143 - 124
src/bin/auth/tests/auth_srv_unittest.cc

@@ -26,6 +26,8 @@
 #include <dns/rdataclass.h>
 #include <dns/tsig.h>
 
+#include <cc/proto_defs.h>
+
 #include <server_common/portconfig.h>
 #include <server_common/keyring.h>
 
@@ -76,7 +78,6 @@ using namespace isc::asiolink;
 using namespace isc::testutils;
 using namespace isc::server_common::portconfig;
 using namespace isc::auth::unittest;
-using isc::datasrc::memory::ZoneTableSegment;
 using isc::UnitTestUtil;
 using boost::scoped_ptr;
 using isc::auth::statistics::Counters;
@@ -244,6 +245,60 @@ createBuiltinVersionResponse(const qid_t qid, vector<uint8_t>& data) {
                 renderer.getLength());
 }
 
+void
+installDataSrcClientLists(AuthSrv& server, ClientListMapPtr lists) {
+    // For now, we use an explicit swap rather than reconfigure() because the latter
+    // involves a separate thread and cannot guarantee the new config is
+    // available for the subsequent test.
+    server.getDataSrcClientsMgr().setDataSrcClientLists(lists);
+}
+
+void
+updateDatabase(AuthSrv& server, const char* params) {
+    const ConstElementPtr config(Element::fromJSON("{"
+        "\"IN\": [{"
+        "    \"type\": \"sqlite3\","
+        "    \"params\": " + string(params) +
+        "}]}"));
+    installDataSrcClientLists(server, configureDataSource(config));
+}
+
+void
+updateInMemory(AuthSrv& server, const char* origin, const char* filename,
+               bool with_static = true)
+{
+    string spec_txt = "{"
+        "\"IN\": [{"
+        "   \"type\": \"MasterFiles\","
+        "   \"params\": {"
+        "       \"" + string(origin) + "\": \"" + string(filename) + "\""
+        "   },"
+        "   \"cache-enable\": true"
+        "}]";
+    if (with_static) {
+        spec_txt += ", \"CH\": [{"
+        "   \"type\": \"MasterFiles\","
+        "   \"cache-enable\": true,"
+        "   \"params\": {\"BIND\": \"" + string(STATIC_DSRC_FILE) + "\"}"
+            "}]";
+    }
+    spec_txt += "}";
+
+    const ConstElementPtr config(Element::fromJSON(spec_txt));
+    installDataSrcClientLists(server, configureDataSource(config));
+}
+
+void
+updateBuiltin(AuthSrv& server) {
+    const ConstElementPtr config(Element::fromJSON("{"
+        "\"CH\": [{"
+        "   \"type\": \"MasterFiles\","
+        "   \"cache-enable\": true,"
+        "   \"params\": {\"BIND\": \"" + string(STATIC_DSRC_FILE) + "\"}"
+        "}]}"));
+    installDataSrcClientLists(server, configureDataSource(config));
+}
+
 // We did not configure any client lists. Therefore it should be REFUSED
 TEST_F(AuthSrvTest, noClientList) {
     UnitTestUtil::createRequestMessage(request_message, Opcode::QUERY(),
@@ -647,8 +702,10 @@ TEST_F(AuthSrvTest, IXFRDisconnectFail) {
 }
 
 TEST_F(AuthSrvTest, notify) {
+    updateInMemory(server, "example.", CONFIG_INMEMORY_EXAMPLE, false);
+
     UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
-                                       default_qid, Name("example.com"),
+                                       default_qid, Name("example"),
                                        RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
@@ -664,7 +721,7 @@ TEST_F(AuthSrvTest, notify) {
                   stringValue());
     ConstElementPtr notify_args =
         notify_session.getSentMessage()->get("command")->get(1);
-    EXPECT_EQ("example.com.", notify_args->get("zone_name")->stringValue());
+    EXPECT_EQ("example.", notify_args->get("zone_name")->stringValue());
     EXPECT_EQ(DEFAULT_REMOTE_ADDRESS,
               notify_args->get("master")->stringValue());
     EXPECT_EQ("IN", notify_args->get("zone_class")->stringValue());
@@ -675,7 +732,7 @@ TEST_F(AuthSrvTest, notify) {
 
     // The question must be identical to that of the received notify
     ConstQuestionPtr question = *parse_message->beginQuestion();
-    EXPECT_EQ(Name("example.com"), question->getName());
+    EXPECT_EQ(Name("example"), question->getName());
     EXPECT_EQ(RRClass::IN(), question->getClass());
     EXPECT_EQ(RRType::SOA(), question->getType());
 
@@ -691,9 +748,12 @@ TEST_F(AuthSrvTest, notify) {
 }
 
 TEST_F(AuthSrvTest, notifyForCHClass) {
-    // Same as the previous test, but for the CH RRClass.
+    // Same as the previous test, but for the CH RRClass (so we install the
+    // builtin (static) data source).
+    updateBuiltin(server);
+
     UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
-                                       default_qid, Name("example.com"),
+                                       default_qid, Name("bind"),
                                        RRClass::CH(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
@@ -773,9 +833,11 @@ TEST_F(AuthSrvTest, notifyNonSOAQuestion) {
 }
 
 TEST_F(AuthSrvTest, notifyWithoutAA) {
+    updateInMemory(server, "example.", CONFIG_INMEMORY_EXAMPLE, false);
+
     // implicitly leave the AA bit off.  our implementation will accept it.
     UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
-                                       default_qid, Name("example.com"),
+                                       default_qid, Name("example"),
                                        RRClass::IN(), RRType::SOA());
     createRequestPacket(request_message, IPPROTO_UDP);
     server.processMessage(*io_message, *parse_message, *response_obuffer,
@@ -786,8 +848,10 @@ TEST_F(AuthSrvTest, notifyWithoutAA) {
 }
 
 TEST_F(AuthSrvTest, notifyWithErrorRcode) {
+    updateInMemory(server, "example.", CONFIG_INMEMORY_EXAMPLE, false);
+
     UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
-                                       default_qid, Name("example.com"),
+                                       default_qid, Name("example"),
                                        RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     request_message.setRcode(Rcode::SERVFAIL());
@@ -799,11 +863,15 @@ TEST_F(AuthSrvTest, notifyWithErrorRcode) {
                 Opcode::NOTIFY().getCode(), QR_FLAG | AA_FLAG, 1, 0, 0, 0);
 }
 
-TEST_F(AuthSrvTest, notifyWithoutSession) {
-    server.setXfrinSession(NULL);
+TEST_F(AuthSrvTest, notifyWithoutRecipient) {
+    updateInMemory(server, "example.", CONFIG_INMEMORY_EXAMPLE, false);
+
+    // Emulate the case where msgq tells auth there's no Zonemgr module.
+    notify_session.setMessage(isc::config::createAnswer(CC_REPLY_NO_RECPT,
+                                                        "no recipient"));
 
     UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
-                                       default_qid, Name("example.com"),
+                                       default_qid, Name("example"),
                                        RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
@@ -812,14 +880,19 @@ TEST_F(AuthSrvTest, notifyWithoutSession) {
     // happens.
     server.processMessage(*io_message, *parse_message, *response_obuffer,
                           &dnsserv);
+    // want_answer should have been set to true so auth can catch it if zonemgr
+    // is not running.
+    EXPECT_TRUE(notify_session.wasAnswerWanted());
     EXPECT_FALSE(dnsserv.hasAnswer());
 }
 
 TEST_F(AuthSrvTest, notifySendFail) {
+    updateInMemory(server, "example.", CONFIG_INMEMORY_EXAMPLE, false);
+
     notify_session.disableSend();
 
     UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
-                                       default_qid, Name("example.com"),
+                                       default_qid, Name("example"),
                                        RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
@@ -830,10 +903,12 @@ TEST_F(AuthSrvTest, notifySendFail) {
 }
 
 TEST_F(AuthSrvTest, notifyReceiveFail) {
+    updateInMemory(server, "example.", CONFIG_INMEMORY_EXAMPLE, false);
+
     notify_session.disableReceive();
 
     UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
-                                       default_qid, Name("example.com"),
+                                       default_qid, Name("example"),
                                        RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
@@ -843,10 +918,12 @@ TEST_F(AuthSrvTest, notifyReceiveFail) {
 }
 
 TEST_F(AuthSrvTest, notifyWithBogusSessionMessage) {
+    updateInMemory(server, "example.", CONFIG_INMEMORY_EXAMPLE, false);
+
     notify_session.setMessage(Element::fromJSON("{\"foo\": 1}"));
 
     UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
-                                       default_qid, Name("example.com"),
+                                       default_qid, Name("example"),
                                        RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
@@ -856,11 +933,13 @@ TEST_F(AuthSrvTest, notifyWithBogusSessionMessage) {
 }
 
 TEST_F(AuthSrvTest, notifyWithSessionMessageError) {
+    updateInMemory(server, "example.", CONFIG_INMEMORY_EXAMPLE, false);
+
     notify_session.setMessage(
         Element::fromJSON("{\"result\": [1, \"FAIL\"]}"));
 
     UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
-                                       default_qid, Name("example.com"),
+                                       default_qid, Name("example"),
                                        RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
@@ -869,58 +948,55 @@ TEST_F(AuthSrvTest, notifyWithSessionMessageError) {
     EXPECT_FALSE(dnsserv.hasAnswer());
 }
 
-void
-installDataSrcClientLists(AuthSrv& server, ClientListMapPtr lists) {
-    // For now, we use explicit swap than reconfigure() because the latter
-    // involves a separate thread and cannot guarantee the new config is
-    // available for the subsequent test.
-    server.getDataSrcClientsMgr().setDataSrcClientLists(lists);
-}
+TEST_F(AuthSrvTest, notifyNotAuth) {
+    // If the server doesn't have authority for the specified zone in NOTIFY,
+    // it will return NOTAUTH
+    updateInMemory(server, "example.", CONFIG_INMEMORY_EXAMPLE, false);
 
-void
-updateDatabase(AuthSrv& server, const char* params) {
-    const ConstElementPtr config(Element::fromJSON("{"
-        "\"IN\": [{"
-        "    \"type\": \"sqlite3\","
-        "    \"params\": " + string(params) +
-        "}]}"));
-    installDataSrcClientLists(server, configureDataSource(config));
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
+    request_message.setHeaderFlag(Message::HEADERFLAG_AA);
+    createRequestPacket(request_message, IPPROTO_UDP);
+    server.processMessage(*io_message, *parse_message, *response_obuffer,
+                          &dnsserv);
+    EXPECT_TRUE(dnsserv.hasAnswer());
+    headerCheck(*parse_message, default_qid, Rcode::NOTAUTH(),
+                Opcode::NOTIFY().getCode(), QR_FLAG /* no AA */, 1, 0, 0, 0);
 }
 
-void
-updateInMemory(AuthSrv& server, const char* origin, const char* filename) {
-    const ConstElementPtr config(Element::fromJSON("{"
-        "\"IN\": [{"
-        "   \"type\": \"MasterFiles\","
-        "   \"params\": {"
-        "       \"" + string(origin) + "\": \"" + string(filename) + "\""
-        "   },"
-        "   \"cache-enable\": true"
-        "}],"
-        "\"CH\": [{"
-        "   \"type\": \"static\","
-        "   \"params\": \"" + string(STATIC_DSRC_FILE) + "\""
-        "}]}"));
-    installDataSrcClientLists(server, configureDataSource(config));
+TEST_F(AuthSrvTest, notifyNotAuthSubDomain) {
+    // Similar to the previous case, but checks that a partial match doesn't
+    // confuse the processing.
+    updateInMemory(server, "example.", CONFIG_INMEMORY_EXAMPLE, false);
+
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("child.example"),
+                                       RRClass::IN(), RRType::SOA());
+    createRequestPacket(request_message, IPPROTO_UDP);
+    server.processMessage(*io_message, *parse_message, *response_obuffer,
+                          &dnsserv);
+    headerCheck(*parse_message, default_qid, Rcode::NOTAUTH(),
+                Opcode::NOTIFY().getCode(), QR_FLAG, 1, 0, 0, 0);
 }
 
-void
-updateBuiltin(AuthSrv& server) {
-    const ConstElementPtr config(Element::fromJSON("{"
-        "\"CH\": [{"
-        "   \"type\": \"static\","
-        "   \"params\": \"" + string(STATIC_DSRC_FILE) + "\""
-        "}]}"));
-    installDataSrcClientLists(server, configureDataSource(config));
+TEST_F(AuthSrvTest, notifyNotAuthNoClass) {
+    // Likewise, and there's not even a data source in the specified class.
+    updateInMemory(server, "example.", CONFIG_INMEMORY_EXAMPLE, false);
+
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example"),
+                                       RRClass::CH(), RRType::SOA());
+    createRequestPacket(request_message, IPPROTO_UDP);
+    server.processMessage(*io_message, *parse_message, *response_obuffer,
+                          &dnsserv);
+    headerCheck(*parse_message, default_qid, Rcode::NOTAUTH(),
+                Opcode::NOTIFY().getCode(), QR_FLAG, 1, 0, 0, 0);
 }
 
 // Try giving the server a TSIG signed request and see if it can answer signed
 // as well
-#ifdef USE_STATIC_LINK
-TEST_F(AuthSrvTest, DISABLED_TSIGSigned) { // Needs builtin
-#else
 TEST_F(AuthSrvTest, TSIGSigned) {
-#endif
     // Prepare key, the client message, etc
     updateBuiltin(server);
     const TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
@@ -978,11 +1054,7 @@ TEST_F(AuthSrvTest, TSIGSigned) {
 // authoritative only server in terms of performance, and it's quite likely
 // we need to drop it for the authoritative server implementation.
 // At that point we can drop this test, too.
-#ifdef USE_STATIC_LINK
-TEST_F(AuthSrvTest, DISABLED_builtInQueryViaDNSServer) {
-#else
 TEST_F(AuthSrvTest, builtInQueryViaDNSServer) {
-#endif
     updateBuiltin(server);
     UnitTestUtil::createRequestMessage(request_message, Opcode::QUERY(),
                                        default_qid, Name("VERSION.BIND."),
@@ -1010,11 +1082,7 @@ TEST_F(AuthSrvTest, builtInQueryViaDNSServer) {
 
 // The most primitive check: checking the result of the processMessage()
 // method
-#ifdef USE_STATIC_LINK
-TEST_F(AuthSrvTest, DISABLED_builtInQuery) {
-#else
 TEST_F(AuthSrvTest, builtInQuery) {
-#endif
     updateBuiltin(server);
     UnitTestUtil::createRequestMessage(request_message, Opcode::QUERY(),
                                        default_qid, Name("VERSION.BIND."),
@@ -1031,11 +1099,7 @@ TEST_F(AuthSrvTest, builtInQuery) {
 }
 
 // Same type of test as builtInQueryViaDNSServer but for an error response.
-#ifdef USE_STATIC_LINK
-TEST_F(AuthSrvTest, DISABLED_iqueryViaDNSServer) { // Needs builtin
-#else
-TEST_F(AuthSrvTest, iqueryViaDNSServer) { // Needs builtin
-#endif
+TEST_F(AuthSrvTest, iqueryViaDNSServer) {
     updateBuiltin(server);
     createDataFromFile("iquery_fromWire.wire");
     (*server.getDNSLookupProvider())(*io_message, parse_message,
@@ -1146,11 +1210,7 @@ TEST_F(AuthSrvTest, updateWithInMemoryClient) {
                 opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
 }
 
-#ifdef USE_STATIC_LINK
-TEST_F(AuthSrvTest, DISABLED_queryWithInMemoryClientNoDNSSEC) {
-#else
 TEST_F(AuthSrvTest, queryWithInMemoryClientNoDNSSEC) {
-#endif
     // In this example, we do simple check that query is handled from the
     // query handler class, and confirm it returns no error and a non empty
     // answer section.  Detailed examination on the response content
@@ -1166,11 +1226,7 @@ TEST_F(AuthSrvTest, queryWithInMemoryClientNoDNSSEC) {
                 opcode.getCode(), QR_FLAG | AA_FLAG, 1, 1, 2, 1);
 }
 
-#ifdef USE_STATIC_LINK
-TEST_F(AuthSrvTest, DISABLED_queryWithInMemoryClientDNSSEC) {
-#else
 TEST_F(AuthSrvTest, queryWithInMemoryClientDNSSEC) {
-#endif
     // Similar to the previous test, but the query has the DO bit on.
     // The response should contain RRSIGs, and should have more RRs than
     // the previous case.
@@ -1185,14 +1241,7 @@ TEST_F(AuthSrvTest, queryWithInMemoryClientDNSSEC) {
                 opcode.getCode(), QR_FLAG | AA_FLAG, 1, 2, 3, 3);
 }
 
-TEST_F(AuthSrvTest,
-#ifdef USE_STATIC_LINK
-       DISABLED_chQueryWithInMemoryClient
-#else
-       chQueryWithInMemoryClient
-#endif
-    )
-{
+TEST_F(AuthSrvTest, chQueryWithInMemoryClient) {
     // Set up the in-memory
     updateInMemory(server, "example.", CONFIG_INMEMORY_EXAMPLE);
 
@@ -1665,9 +1714,7 @@ public:
              real_list, ThrowWhen throw_when, bool isc_exception,
              ConstRRsetPtr fake_rrset = ConstRRsetPtr()) :
         ConfigurableClientList(RRClass::IN()),
-        real_(real_list),
-        config_(Element::fromJSON("{}")),
-        ztable_segment_(ZoneTableSegment::create(*config_, RRClass::IN()))
+        real_(real_list)
     {
         BOOST_FOREACH(const DataSourceInfo& info, real_->getDataSources()) {
              const isc::datasrc::DataSourceClientPtr
@@ -1679,13 +1726,13 @@ public:
              data_sources_.push_back(
                  DataSourceInfo(client.get(),
                                 isc::datasrc::DataSourceClientContainerPtr(),
-                                false, RRClass::IN(), ztable_segment_));
+                                boost::shared_ptr<
+                                isc::datasrc::internal::CacheConfig>(),
+                                RRClass::IN(), ""));
         }
     }
 private:
     const boost::shared_ptr<isc::datasrc::ConfigurableClientList> real_;
-    const ConstElementPtr config_;
-    boost::shared_ptr<ZoneTableSegment> ztable_segment_;
     vector<isc::datasrc::DataSourceClientPtr> clients_;
 };
 
@@ -1695,14 +1742,7 @@ private:
 //
 // Set the proxies to never throw, this should have the same result as
 // queryWithInMemoryClientNoDNSSEC, and serves to test the two proxy classes
-TEST_F(AuthSrvTest,
-#ifdef USE_STATIC_LINK
-       DISABLED_queryWithInMemoryClientProxy
-#else
-       queryWithInMemoryClientProxy
-#endif
-    )
-{
+TEST_F(AuthSrvTest, queryWithInMemoryClientProxy) {
     // Set real inmem client to proxy
     updateInMemory(server, "example.", CONFIG_INMEMORY_EXAMPLE);
     boost::shared_ptr<isc::datasrc::ConfigurableClientList> list;
@@ -1749,14 +1789,7 @@ setupThrow(AuthSrv& server, ThrowWhen throw_when, bool isc_exception,
     mgr.setDataSrcClientLists(lists);
 }
 
-TEST_F(AuthSrvTest,
-#ifdef USE_STATIC_LINK
-       DISABLED_queryWithThrowingProxyServfails
-#else
-       queryWithThrowingProxyServfails
-#endif
-    )
-{
+TEST_F(AuthSrvTest, queryWithThrowingProxyServfails) {
     // Test the common cases, all of which should simply return SERVFAIL
     // Use THROW_NEVER as end marker
     ThrowWhen throws[] = { THROW_AT_FIND_ZONE,
@@ -1780,14 +1813,7 @@ TEST_F(AuthSrvTest,
 
 // Throw isc::Exception in getClass(). (Currently?) getClass is not called
 // in the processMessage path, so this should result in a normal answer
-TEST_F(AuthSrvTest,
-#ifdef USE_STATIC_LINK
-       DISABLED_queryWithInMemoryClientProxyGetClass
-#else
-       queryWithInMemoryClientProxyGetClass
-#endif
-    )
-{
+TEST_F(AuthSrvTest, queryWithInMemoryClientProxyGetClass) {
     createDataFromFile("nsec3query_nodnssec_fromWire.wire");
     setupThrow(server, THROW_AT_GET_CLASS, true);
 
@@ -1800,14 +1826,7 @@ TEST_F(AuthSrvTest,
                 opcode.getCode(), QR_FLAG | AA_FLAG, 1, 1, 2, 1);
 }
 
-TEST_F(AuthSrvTest,
-#ifdef USE_STATIC_LINK
-       DISABLED_queryWithThrowingInToWire
-#else
-       queryWithThrowingInToWire
-#endif
-    )
-{
+TEST_F(AuthSrvTest, queryWithThrowingInToWire) {
     // Set up a faked data source.  It will return an empty RRset for the
     // query.
     ConstRRsetPtr empty_rrset(new RRset(Name("foo.example"),

+ 1 - 1
src/bin/auth/tests/config_unittest.cc

@@ -21,7 +21,7 @@
 
 #include <cc/data.h>
 
-#include <datasrc/data_source.h>
+#include <datasrc/exceptions.h>
 
 #include <xfr/xfrout_client.h>
 

+ 47 - 8
src/bin/auth/tests/datasrc_clients_builder_unittest.cc

@@ -406,6 +406,22 @@ TEST_F(DataSrcClientsBuilderTest,
     EXPECT_EQ(orig_lock_count + 1, map_mutex.lock_count);
     EXPECT_EQ(orig_unlock_count + 1, map_mutex.unlock_count);
 
+    // zone doesn't exist in the data source
+    const ConstElementPtr config_nozone(Element::fromJSON("{"
+        "\"IN\": [{"
+        "    \"type\": \"sqlite3\","
+        "    \"params\": {\"database_file\": \"" + test_db + "\"},"
+        "    \"cache-enable\": true,"
+        "    \"cache-zones\": [\"nosuchzone.example\"]"
+        "}]}"));
+    clients_map = configureDataSource(config_nozone);
+    EXPECT_THROW(
+        builder.handleCommand(
+            Command(LOADZONE, Element::fromJSON(
+                        "{\"class\": \"IN\","
+                        " \"origin\": \"nosuchzone.example\"}"))),
+        TestDataSrcClientsBuilder::InternalCommandError);
+
     // basically impossible case: in-memory cache is completely disabled.
     // In this implementation of manager-builder, this should never happen,
     // but it catches it like other configuration error and keeps going.
@@ -503,14 +519,6 @@ TEST_F(DataSrcClientsBuilderTest, loadZoneInvalidParams) {
             }, "");
     }
 
-    // zone doesn't exist in the data source
-    EXPECT_THROW(
-        builder.handleCommand(
-            Command(LOADZONE,
-                    Element::fromJSON(
-                        "{\"class\": \"IN\", \"origin\": \"xx\"}"))),
-        TestDataSrcClientsBuilder::InternalCommandError);
-
     // origin is bogus
     EXPECT_THROW(builder.handleCommand(
                      Command(LOADZONE,
@@ -524,4 +532,35 @@ TEST_F(DataSrcClientsBuilderTest, loadZoneInvalidParams) {
                  isc::data::TypeError);
 }
 
+// This works only if the mapped memory segment type is compiled in.
+// Note also that this test case may fail as we make b10-auth more aware
+// of the shared-memory cache.
+TEST_F(DataSrcClientsBuilderTest,
+#ifdef USE_SHARED_MEMORY
+       loadInNonWritableCache
+#else
+       DISABLED_loadInNonWritableCache
+#endif
+    )
+{
+    const ConstElementPtr config = Element::fromJSON(
+        "{"
+        "\"IN\": [{"
+        "   \"type\": \"MasterFiles\","
+        "   \"params\": {"
+        "       \"test1.example\": \"" +
+        std::string(TEST_DATA_BUILDDIR "/test1.zone.copied") + "\"},"
+        "   \"cache-enable\": true,"
+        "   \"cache-type\": \"mapped\""
+        "}]}");
+    clients_map = configureDataSource(config);
+
+    EXPECT_THROW(builder.handleCommand(
+                     Command(LOADZONE,
+                             Element::fromJSON(
+                                 "{\"origin\": \"test1.example\","
+                                 " \"class\": \"IN\"}"))),
+                 TestDataSrcClientsBuilder::InternalCommandError);
+}
+
 } // unnamed namespace

+ 4 - 3
src/bin/bind10/init.py.in

@@ -38,6 +38,7 @@ __main__.
 
 import sys; sys.path.append ('@@PYTHONPATH@@')
 import os
+from isc.util.address_formatter import AddressFormatter
 
 # If B10_FROM_SOURCE is set in the environment, we use data files
 # from a directory relative to that, otherwise we use the ones
@@ -151,7 +152,7 @@ class ProcessInfo:
         """Function used before running a program that needs to run as a
         different user."""
         # First, put us into a separate process group so we don't get
-        # SIGINT signals on Ctrl-C (b10-init will shut everthing down by
+        # SIGINT signals on Ctrl-C (b10-init will shut everything down by
         # other means).
         os.setpgrp()
 
@@ -222,7 +223,7 @@ class Init:
         self.component_config = {}
         # Some time in future, it may happen that a single component has
         # multple processes (like a pipeline-like component). If so happens,
-        # name "components" may be inapropriate. But as the code isn't probably
+        # name "components" may be inappropriate. But as the code isn't probably
         # completely ready for it, we leave it at components for now. We also
         # want to support multiple instances of a single component. If it turns
         # out that we'll have a single component with multiple same processes
@@ -428,7 +429,7 @@ class Init:
                         port)
         else:
             logger.info(BIND10_STARTING_PROCESS_PORT_ADDRESS,
-                        self.curproc, address, port)
+                        self.curproc, AddressFormatter((address, port)))
 
     def log_started(self, pid = None):
         """

+ 2 - 2
src/bin/bind10/init_messages.mes

@@ -285,9 +285,9 @@ The b10-init module is starting the given process.
 The b10-init module is starting the given process, which will listen on the
 given port number.
 
-% BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)
+% BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2)
 The b10-init module is starting the given process, which will listen on the
-given address and port number (written as <address>#<port>).
+given address and port number (written as <address>:<port>).
 
 % BIND10_STARTUP_COMPLETE BIND 10 started
 All modules have been successfully started, and BIND 10 is now running.

File diff suppressed because it is too large
+ 1 - 1
src/bin/bind10/run_bind10.sh.in


File diff suppressed because it is too large
+ 1 - 1
src/bin/bind10/tests/Makefile.am


+ 11 - 1
src/bin/bind10/tests/init_test.py.in

@@ -1595,7 +1595,7 @@ class TestInitComponents(unittest.TestCase):
         init.components[53] = component
 
         # case where the returned pid is unknown to us. nothing should
-        # happpen then.
+        # happen then.
         init.get_process_exit_status_called = False
         init._get_process_exit_status = init._get_process_exit_status_unknown_pid
         init.components_to_restart = []
@@ -2339,6 +2339,16 @@ class SocketSrvTest(unittest.TestCase):
         self.assertEqual({}, self.__b10_init._unix_sockets)
         self.assertTrue(sock.closed)
 
+    def test_log_starting(self):
+        """
+        Checks the log_starting call doesn't raise any errors
+        (does not check actual log output)
+        """
+        self.__b10_init.log_starting("foo")
+        self.__b10_init.log_starting("foo", 1)
+        self.__b10_init.log_starting("foo", 1, "192.0.2.1")
+
+
 class TestFunctions(unittest.TestCase):
     def setUp(self):
         self.lockfile_testpath = \

+ 31 - 34
src/bin/bindctl/bindcmd.py

@@ -33,6 +33,7 @@ import inspect
 import pprint
 import ssl, socket
 import os, time, random, re
+import os.path
 import getpass
 from hashlib import sha1
 import csv
@@ -147,9 +148,9 @@ class BindCmdInterpreter(Cmd):
         # is processed by a script that expects a specific format.
         if my_readline == sys.stdin.readline and sys.stdin.isatty():
             sys.stdout.write("""\
-WARNING: Python readline module isn't available, so the command line editor
-         (including command history management) does not work.  See BIND 10
-         guide for more details.\n\n""")
+WARNING: The Python readline module isn't available, so some command line
+         editing features (including command history management) will not
+         work.  See the BIND 10 guide for more details.\n\n""")
 
         try:
             if not self.login_to_cmdctl():
@@ -214,22 +215,16 @@ WARNING: Python readline module isn't available, so the command line editor
 
         return True
 
-    def __print_check_ssl_msg(self):
-        self._print("Please check the logs of b10-cmdctl, there may "
-                    "be a problem accepting SSL connections, such "
-                    "as a permission problem on the server "
-                    "certificate file.")
-
     def _try_login(self, username, password):
         '''
-        Attempts to log in to cmdctl by sending a POST with
-        the given username and password.
-        On success of the POST (mind, not the login, only the network
-        operation), returns a tuple (response, data).
-        On failure, raises a FailToLogin exception, and prints some
-        information on the failure.
-        This call is essentially 'private', but made 'protected' for
-        easier testing.
+        Attempts to log into cmdctl by sending a POST with the given
+        username and password. On success of the POST (not the login,
+        but the network operation), it returns a tuple (response, data).
+        We check for some failures such as SSL errors and socket errors
+        which could happen due to the environment in which BIND 10 runs.
+        On failure, it raises a FailToLogin exception and prints some
+        information on the failure.  This call is essentially 'private',
+        but made 'protected' for easier testing.
         '''
         param = {'username': username, 'password' : password}
         try:
@@ -237,16 +232,8 @@ WARNING: Python readline module isn't available, so the command line editor
             data = response.read().decode()
             # return here (will raise error after try block)
             return (response, data)
-        except ssl.SSLError as err:
-            self._print("SSL error while sending login information: ", err)
-            if err.errno == ssl.SSL_ERROR_EOF:
-                self.__print_check_ssl_msg()
-        except socket.error as err:
-            self._print("Socket error while sending login information: ", err)
-            # An SSL setup error can also bubble up as a plain CONNRESET...
-            # (on some systems it usually does)
-            if err.errno == errno.ECONNRESET:
-                self.__print_check_ssl_msg()
+        except (ssl.SSLError, socket.error) as err:
+            self._print('Error while sending login information:', err)
             pass
         raise FailToLogin()
 
@@ -270,9 +257,21 @@ WARNING: Python readline module isn't available, so the command line editor
 
         # No valid logins were found, prompt the user for a username/password
         count = 0
-        self._print('No stored password file found, please see sections '
-              '"Configuration specification for b10-cmdctl" and "bindctl '
-              'command-line options" of the BIND 10 guide.')
+        if not os.path.exists(self.csv_file_dir + CSV_FILE_NAME):
+            self._print('\nNo stored password file found.\n\n'
+                        'When the system is first set up you need to create '
+                        'at least one user account.\n'
+                        'For information on how to set up a BIND 10 system, '
+                        'please see the\n'
+                        'BIND 10 Guide: \n\n'
+                        'http://bind10.isc.org/docs/bind10-guide.html#quick-start-auth-dns\n\n'
+
+                        'If a user account has been set up, please check the '
+                        'b10-cmdctl log for other\n'
+                        'information.\n')
+        else:
+            self._print('Login failed: either the user name or password is '
+                        'invalid.\n')
         while True:
             count = count + 1
             if count > 3:
@@ -317,14 +316,14 @@ WARNING: Python readline module isn't available, so the command line editor
             return {}
 
 
-    def send_POST(self, url, post_param = None):
+    def send_POST(self, url, post_param=None):
         '''Send a POST request to cmdctl; the session id is sent with the name
         'cookie' in the header.
         Format: /module_name/command_name
         The parameters of the command are encoded as a map
         '''
         param = None
-        if (len(post_param) != 0):
+        if post_param is not None and len(post_param) != 0:
             param = json.dumps(post_param)
 
         headers = {"cookie" : self.session_id}
@@ -938,5 +937,3 @@ WARNING: Python readline module isn't available, so the command line editor
         if data != "" and data != "{}":
             self._print(json.dumps(json.loads(data), sort_keys=True,
                                    indent=4))
-
-
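
The send_POST() docstring above describes the small protocol bindctl uses
when talking to cmdctl: the URL encodes the module and command, the
command parameters are JSON-encoded, and the session id is carried in the
'cookie' header.  A hedged usage sketch follows; 'tool' stands for a
BindCmdInterpreter instance, and the module and command names are
placeholders chosen only to illustrate the /module_name/command_name URL
format:

    # Hypothetical call; 'Auth' and 'loadzone' are illustrative names, not
    # an assertion that this exact command exists.
    response = tool.send_POST('/Auth/loadzone', {'origin': 'example.org'})
    data = response.read().decode()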

+ 1 - 1
src/bin/bindctl/run_bindctl.sh.in

@@ -27,7 +27,7 @@ export PYTHONPATH
 # required by loadable python modules.
 SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
 if test $SET_ENV_LIBRARY_PATH = yes; then
-	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/threads/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
 	export @ENV_LIBRARY_PATH@
 fi
 

File diff suppressed because it is too large
+ 1 - 1
src/bin/bindctl/tests/Makefile.am


+ 44 - 16
src/bin/bindctl/tests/bindctl_test.py

@@ -26,6 +26,7 @@ import http.client
 import pwd
 import getpass
 import re
+import json
 from optparse import OptionParser
 from isc.config.config_data import ConfigData, MultiConfigData
 from isc.config.module_spec import ModuleSpec
@@ -386,7 +387,7 @@ class TestConfigCommands(unittest.TestCase):
             self.tool.send_POST = send_POST_raiseImmediately
             self.assertRaises(FailToLogin, self.tool._try_login, "foo", "bar")
             expected_printed_messages.append(
-                'Socket error while sending login information:  test error')
+                'Error while sending login information: test error')
             self.__check_printed_messages(expected_printed_messages)
 
             def create_send_POST_raiseOnRead(exception):
@@ -405,7 +406,7 @@ class TestConfigCommands(unittest.TestCase):
                 create_send_POST_raiseOnRead(socket.error("read error"))
             self.assertRaises(FailToLogin, self.tool._try_login, "foo", "bar")
             expected_printed_messages.append(
-                'Socket error while sending login information:  read error')
+                'Error while sending login information: read error')
             self.__check_printed_messages(expected_printed_messages)
 
             # connection reset
@@ -415,13 +416,7 @@ class TestConfigCommands(unittest.TestCase):
                 create_send_POST_raiseOnRead(exc)
             self.assertRaises(FailToLogin, self.tool._try_login, "foo", "bar")
             expected_printed_messages.append(
-                'Socket error while sending login information:  '
-                'connection reset')
-            expected_printed_messages.append(
-                'Please check the logs of b10-cmdctl, there may be a '
-                'problem accepting SSL connections, such as a permission '
-                'problem on the server certificate file.'
-            )
+                'Error while sending login information: connection reset')
             self.__check_printed_messages(expected_printed_messages)
 
             # 'normal' SSL error
@@ -430,7 +425,7 @@ class TestConfigCommands(unittest.TestCase):
                 create_send_POST_raiseOnRead(exc)
             self.assertRaises(FailToLogin, self.tool._try_login, "foo", "bar")
             expected_printed_messages.append(
-                'SSL error while sending login information:  .*')
+                'Error while sending login information: .*')
             self.__check_printed_messages(expected_printed_messages)
 
             # 'EOF' SSL error
@@ -440,12 +435,7 @@ class TestConfigCommands(unittest.TestCase):
                 create_send_POST_raiseOnRead(exc)
             self.assertRaises(FailToLogin, self.tool._try_login, "foo", "bar")
             expected_printed_messages.append(
-                'SSL error while sending login information: .*')
-            expected_printed_messages.append(
-                'Please check the logs of b10-cmdctl, there may be a '
-                'problem accepting SSL connections, such as a permission '
-                'problem on the server certificate file.'
-            )
+                'Error while sending login information: .*')
             self.__check_printed_messages(expected_printed_messages)
 
             # any other exception should be passed through
@@ -457,6 +447,44 @@ class TestConfigCommands(unittest.TestCase):
         finally:
             self.tool.send_POST = orig_send_POST
 
+    def test_try_login_calls_cmdctl(self):
+        # Make sure _try_login() makes the right API call to cmdctl.
+        orig_conn = self.tool.conn
+        try:
+            class MyConn:
+                def __init__(self):
+                    self.method = None
+                    self.url = None
+                    self.param = None
+                    self.headers = None
+
+                def request(self, method, url, param, headers):
+                    self.method = method
+                    self.url = url
+                    self.param = param
+                    self.headers = headers
+
+                def getresponse(self):
+                    class MyResponse:
+                        def __init__(self):
+                            self.status = http.client.OK
+                        def read(self):
+                            class MyData:
+                                def decode(self):
+                                    return json.dumps(True)
+                            return MyData()
+                    return MyResponse()
+
+            self.tool.conn = MyConn()
+            self.assertTrue(self.tool._try_login('user32', 'pass64'))
+            self.assertEqual(self.tool.conn.method, 'POST')
+            self.assertEqual(self.tool.conn.url, '/login')
+            self.assertEqual(json.loads(self.tool.conn.param),
+                             {"password": "pass64", "username": "user32"})
+            self.assertIn('cookie', self.tool.conn.headers)
+        finally:
+            self.tool.conn = orig_conn
+
     def test_run(self):
         def login_to_cmdctl():
             return True

+ 14 - 3
src/bin/cfgmgr/plugins/datasrc.spec.pre.in

@@ -18,9 +18,9 @@
                     ],
                     "CH": [
                         {
-                            "type": "static",
-                            "cache-enable": false,
-                            "params": "@@STATIC_ZONE_FILE@@"
+                            "type": "MasterFiles",
+                            "cache-enable": true,
+                            "params": {"BIND": "@@STATIC_ZONE_FILE@@"}
                         }
                     ]
                 },
@@ -63,6 +63,17 @@
                                     "item_optional": false,
                                     "item_default": ""
                                 }
+                            },
+                            {
+                                "item_name": "name",
+                                "item_type": "string",
+                                "item_optional": true
+                            },
+                            {
+                                "item_name": "cache-type",
+                                "item_type": "string",
+                                "item_optional": true,
+                                "item_default": "local"
                             }
                         ]
                     }
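
To illustrate the two new items added to the spec above ("name" and
"cache-type"), here is a hedged example of a data source entry that uses
them, written as a Python literal.  The zone name and file path are
placeholders; "local" is the default cache-type per the spec, and
"mapped" appears in the unit tests elsewhere in this commit.  Uniqueness
of "name" within a class is what the new datasrc plugin tests check:

    # Illustrative configuration only; not taken from the repository.
    datasrc_config = {
        "IN": [{
            "type": "MasterFiles",
            "cache-enable": True,
            "cache-type": "local",     # or "mapped" where shared memory is available
            "name": "in-masterfiles",  # optional; must be unique within the class
            "params": {"example.org": "/var/lib/bind10/example.org.zone"}
        }]
    }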

File diff suppressed because it is too large
+ 4 - 1
src/bin/cfgmgr/plugins/tests/Makefile.am


+ 96 - 1
src/bin/cfgmgr/plugins/tests/datasrc_test.py

@@ -142,7 +142,7 @@ class DatasrcTest(unittest.TestCase):
 
     def test_no_such_file_mem(self):
         """
-        We also check the existance of master files. Not the actual content,
+        We also check the existence of master files. Not the actual content,
         though.
         """
         self.reject({"IN": [{
@@ -153,6 +153,101 @@ class DatasrcTest(unittest.TestCase):
             }
         }]})
 
+    def test_names_present(self):
+        """
+        Test we don't choke on configuration with the "name" being present on
+        some items.
+        """
+        self.accept({"IN": [{
+            "type": "MasterFiles",
+            "cache-enable": True,
+            "params": {},
+            "name": "Whatever"
+        }]})
+
+    def test_names_default_classes(self):
+        """
+        Test we can have a client of the same type in different classes
+        without specified name. The defaults should be derived both from
+        the type and the class.
+        """
+        self.accept({
+        "IN": [{
+            "type": "MasterFiles",
+            "cache-enable": True,
+            "params": {}
+        }],
+        "CH": [{
+            "type": "MasterFiles",
+            "cache-enable": True,
+            "params": {}
+        }]})
+
+    def test_names_collision(self):
+        """
+        Reject when two names are the same.
+
+        Cases are:
+        - Explicit names.
+        - Two default names turn out to be the same (same type and class).
+        - One explicit is set to the same as the default one.
+        """
+        self.reject({"IN": [
+        {
+            "type": "MasterFiles",
+            "cache-enable": True,
+            "params": {},
+            "name": "Whatever"
+        },
+        {
+            "type": "MasterFiles",
+            "cache-enable": True,
+            "params": {},
+            "name": "Whatever"
+        }]})
+        # The same, but across different classes is allowed (we would
+        # identify the data source by class+name tuple)
+        self.accept({
+        "IN": [
+            {
+                "type": "MasterFiles",
+                "cache-enable": True,
+                "params": {},
+                "name": "Whatever"
+            }
+        ],
+        "CH": [
+            {
+                "type": "MasterFiles",
+                "cache-enable": True,
+                "params": {},
+                "name": "Whatever"
+            }
+        ]})
+        self.reject({"IN": [
+        {
+            "type": "MasterFiles",
+            "cache-enable": True,
+            "params": {}
+        },
+        {
+            "type": "MasterFiles",
+            "cache-enable": True,
+            "params": {}
+        }]})
+        self.reject({"IN": [
+        {
+            "type": "MasterFiles",
+            "cache-enable": True,
+            "params": {},
+            "name": "MasterFiles"
+        },
+        {
+            "type": "MasterFiles",
+            "cache-enable": True,
+            "params": {}
+        }]})
+
 if __name__ == '__main__':
     isc.log.init("bind10")
     isc.log.resetUnitTestRootLogger()

+ 1 - 1
src/bin/cfgmgr/plugins/tests/tsig_keys_test.py

@@ -92,7 +92,7 @@ class TSigKeysTest(unittest.TestCase):
     def test_bad_format(self):
         """
         Test we fail on bad format. We don't really care much how here, though,
-        as this should not get in trough config manager anyway.
+        as this should not get in through config manager anyway.
         """
         self.assertNotEqual(None, tsig_keys.check({'bad_name': {}}))
         self.assertNotEqual(None, tsig_keys.check({'keys': 'not_list'}))

File diff suppressed because it is too large
+ 1 - 1
src/bin/cfgmgr/tests/Makefile.am


+ 1 - 1
src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in

@@ -70,7 +70,7 @@ class TestPlugins(unittest.TestCase):
 class TestConfigManagerStartup(unittest.TestCase):
     def test_cfgmgr(self):
         # some creative module use;
-        # b10-cfgmgr has a hypen, so we use __import__
+        # b10-cfgmgr has a hyphen, so we use __import__
         # this also gives us the chance to override the imported
         # module ConfigManager in it.
         b = __import__("b10-cfgmgr")

+ 2 - 4
src/bin/cmdctl/Makefile.am

@@ -11,20 +11,18 @@ pylogmessagedir = $(pyexecdir)/isc/log_messages/
 
 b10_cmdctldir = $(pkgdatadir)
 
-USERSFILES = cmdctl-accounts.csv
 CERTFILES = cmdctl-keyfile.pem cmdctl-certfile.pem
 
 b10_cmdctl_DATA = cmdctl.spec
 
-EXTRA_DIST = $(USERSFILES)
-
 CLEANFILES= b10-cmdctl cmdctl.pyc cmdctl.spec
 CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
 CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.pyc
 
 man_MANS = b10-cmdctl.8 b10-certgen.1
 DISTCLEANFILES = $(man_MANS) cmdctl-certfile.pem cmdctl-keyfile.pem
-EXTRA_DIST += $(man_MANS) b10-certgen.xml b10-cmdctl.xml cmdctl_messages.mes
+EXTRA_DIST  = $(man_MANS) b10-certgen.xml b10-cmdctl.xml cmdctl_messages.mes
+EXTRA_DIST += cmdctl-accounts.csv
 
 if GENERATE_DOCS
 

+ 72 - 27
src/bin/cmdctl/cmdctl.py.in

@@ -42,6 +42,7 @@ import random
 import time
 import signal
 from isc.config import ccsession
+import isc.cc.proto_defs
 import isc.util.process
 import isc.net.parse
 from optparse import OptionParser, OptionValueError
@@ -97,6 +98,11 @@ def check_file(file_name):
 class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
     '''https connection request handler.
     Currently only GET and POST are supported.  '''
+    def __init__(self, request, client_address, server):
+        http.server.BaseHTTPRequestHandler.__init__(self, request,
+                                                    client_address, server)
+        self.session_id = None
+
     def do_GET(self):
         '''The client should send its session id in header with
         the name 'cookie'
@@ -121,7 +127,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
         return self.server.get_reply_data_for_GET(id, module)
 
     def _is_session_valid(self):
-        return self.session_id
+        return self.session_id is not None
 
     def _is_user_logged_in(self):
         login_time = self.server.user_sessions.get(self.session_id)
@@ -171,7 +177,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
         is_user_valid, error_info = self._check_user_name_and_pwd()
         if is_user_valid:
             self.server.save_user_session_id(self.session_id)
-            return http.client.OK, ["login success "]
+            return http.client.OK, ["login success"]
         else:
             return http.client.UNAUTHORIZED, error_info
 
@@ -241,6 +247,7 @@ class CommandControl():
         CommandControl to communicate with other modules. '''
         self._verbose = verbose
         self._httpserver = httpserver
+        self.__msg_handler_thread = None # set in _start_msg_handle_thread
         self._lock = threading.Lock()
         self._setup_session()
         self.modules_spec = self._get_modules_specification()
@@ -320,7 +327,26 @@ class CommandControl():
                     self._cmdctl_config_data[key] = new_config[key]
         return answer
 
+    def _get_current_thread(self):
+        """A simple wrapper that returns the 'current' thread object.
+
+        This is extracted as a 'protected' method so tests can override for
+        their convenience.
+
+        """
+        return threading.currentThread()
+
     def command_handler(self, command, args):
+        """Handle commands from other modules.
+
+        This method must not be called by any thread other than
+        __msg_handler_thread, which is created at the initialization of the
+        class; otherwise it could cause critical races or deadlocks.
+
+        """
+        # Check the restriction described above.
+        assert self._get_current_thread() == self.__msg_handler_thread
+
         answer = ccsession.create_answer(0)
         if command == ccsession.COMMAND_MODULE_SPECIFICATION_UPDATE:
             # The 'value' of a specification update can be either
@@ -356,6 +382,7 @@ class CommandControl():
         ''' Start one thread to handle received message from msgq.'''
         td = threading.Thread(target=self._handle_msg_from_msgq)
         td.daemon = True
+        self.__msg_handler_thread = td
         td.start()
 
     def _handle_msg_from_msgq(self):
@@ -396,7 +423,7 @@ class CommandControl():
         rcode, reply = self.send_command('ConfigManager', ccsession.COMMAND_GET_MODULE_SPEC)
         return self._parse_command_result(rcode, reply)
 
-    def send_command_with_check(self, module_name, command_name, params = None):
+    def send_command_with_check(self, module_name, command_name, params=None):
         '''Before sending the command to a module, check if the module_name and
         command_name parameters are legal according to the spec file of the module.
         Return rcode, dict. TODO, the rcode should be defined properly.
@@ -418,31 +445,34 @@ class CommandControl():
 
         return self.send_command(module_name, command_name, params)
 
-    def send_command(self, module_name, command_name, params = None):
-        '''Send the command from bindctl to proper module. '''
+    def send_command(self, module_name, command_name, params=None):
+        """Send the command from bindctl to proper module.
+
+        Note that commands sent to Cmdctl itself are also delivered via the
+        CC session.  Since this method is called from a thread handling a
+        particular HTTP session, it cannot directly call command_handler().
+
+        """
         errstr = 'unknown error'
         answer = None
         logger.debug(DBG_CMDCTL_MESSAGING, CMDCTL_SEND_COMMAND,
                      command_name, module_name)
 
-        if module_name == self._module_name:
-            # Process the command sent to cmdctl directly.
-            answer = self.command_handler(command_name, params)
-        else:
-            # FIXME: Due to the fact that we use a separate session
-            # from the module one (due to threads and blocking), and
-            # because the plain cc session does not have the high-level
-            # rpc-call method, we use the low-level way and create the
-            # command ourselves.
-            msg = ccsession.create_command(command_name, params)
-            seq = self._cc.group_sendmsg(msg, module_name, want_answer=True)
-            logger.debug(DBG_CMDCTL_MESSAGING, CMDCTL_COMMAND_SENT,
-                         command_name, module_name)
-            #TODO, it may be blocked, msqg need to add a new interface waiting in timeout.
-            try:
-                answer, env = self._cc.group_recvmsg(False, seq)
-            except isc.cc.session.SessionTimeout:
-                errstr = "Module '%s' not responding" % module_name
+        # FIXME: Due to the fact that we use a separate session
+        # from the module one (due to threads and blocking), and
+        # because the plain cc session does not have the high-level
+        # rpc-call method, we use the low-level way and create the
+        # command ourselves.
+        msg = ccsession.create_command(command_name, params)
+        seq = self._cc.group_sendmsg(msg, module_name, want_answer=True)
+        logger.debug(DBG_CMDCTL_MESSAGING, CMDCTL_COMMAND_SENT, command_name,
+                     module_name)
+        # TODO: this may block; msgq needs to add a new interface that waits
+        # with a timeout.
+        try:
+            answer, env = self._cc.group_recvmsg(False, seq)
+        except isc.cc.session.SessionTimeout:
+            errstr = "Module '%s' not responding" % module_name
 
         if answer:
             try:
@@ -454,7 +484,8 @@ class CommandControl():
                     else:
                         return rcode, {}
                 else:
-                    errstr = str(answer['result'][1])
+                    errstr = \
+                        str(answer[isc.cc.proto_defs.CC_PAYLOAD_RESULT][1])
             except ccsession.ModuleCCSessionError as mcse:
                 errstr = str("Error in ccsession answer:") + str(mcse)
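
For reference, the round trip implemented above reduces to a short helper; this is only a sketch using the calls already visible in this hunk, and the helper name is hypothetical.

    def send_one_command(cc, module_name, command_name, params=None):
        # Sketch only: build the command, send it to the module's group and
        # block on the single answer; a timeout is mapped to an error result.
        msg = ccsession.create_command(command_name, params)
        seq = cc.group_sendmsg(msg, module_name, want_answer=True)
        try:
            answer, env = cc.group_recvmsg(False, seq)
            return answer[isc.cc.proto_defs.CC_PAYLOAD_RESULT]
        except isc.cc.session.SessionTimeout:
            return [1, "Module '%s' not responding" % module_name]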
 
@@ -502,12 +533,25 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
         self._verbose = verbose
         self._lock = threading.Lock()
         self._user_infos = {}
-        self._accounts_file = None
+        self.__accounts_file = None
+        self.__accounts_file_mtime = 0
 
     def _create_user_info(self, accounts_file):
         '''Read all user's name and its' salt, hashed password
         from accounts file.'''
-        if (self._accounts_file == accounts_file) and (len(self._user_infos) > 0):
+
+        # If the file does not exist, set accounts to empty, and return
+        if not os.path.exists(accounts_file):
+            self._user_infos = {}
+            self.__accounts_file = None
+            self.__accounts_file_mtime = 0
+            return
+
+        # If neither the file name nor the file's modification time
+        # has changed, do nothing
+        accounts_file_mtime = os.stat(accounts_file).st_mtime
+        if self.__accounts_file == accounts_file and\
+           accounts_file_mtime <= self.__accounts_file_mtime:
             return
 
         with self._lock:
@@ -525,7 +569,8 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
                 if csvfile:
                     csvfile.close()
 
-        self._accounts_file = accounts_file
+        self.__accounts_file = accounts_file
+        self.__accounts_file_mtime = accounts_file_mtime
         if len(self._user_infos) == 0:
             logger.error(CMDCTL_NO_USER_ENTRIES_READ)
 

+ 1 - 1
src/bin/cmdctl/run_b10-cmdctl.sh.in

@@ -26,7 +26,7 @@ export PYTHONPATH
 # required by loadable python modules.
 SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
 if test $SET_ENV_LIBRARY_PATH = yes; then
-        @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+        @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/threads/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
         export @ENV_LIBRARY_PATH@
 fi
 

File diff suppressed because it is too large
+ 2 - 2
src/bin/cmdctl/tests/Makefile.am


+ 1 - 4
src/bin/cmdctl/tests/b10-certgen_test.py

@@ -172,10 +172,7 @@ class TestCertGenTool(unittest.TestCase):
         """
         Tests a few pre-created certificates with the -c option
         """
-        if ('CMDCTL_SRC_PATH' in os.environ):
-            path = os.environ['CMDCTL_SRC_PATH'] + "/tests/testdata/"
-        else:
-            path = "testdata/"
+        path = os.environ['CMDCTL_SRC_PATH'] + '/tests/testdata/'
         self.validate_certificate(10, path + 'expired-certfile.pem')
         self.validate_certificate(100, path + 'mangled-certfile.pem')
         self.validate_certificate(17, path + 'noca-certfile.pem')

+ 225 - 19
src/bin/cmdctl/tests/cmdctl_test.py

@@ -17,6 +17,7 @@
 import unittest
 import socket
 import tempfile
+import time
 import stat
 import sys
 from cmdctl import *
@@ -33,7 +34,7 @@ BUILD_FILE_PATH = os.environ['CMDCTL_BUILD_PATH'] + os.sep
 # Rewrite the class for unittest.
 class MySecureHTTPRequestHandler(SecureHTTPRequestHandler):
     def __init__(self):
-        pass
+        self.session_id = None
 
     def send_response(self, rcode):
         self.rcode = rcode
@@ -41,19 +42,6 @@ class MySecureHTTPRequestHandler(SecureHTTPRequestHandler):
     def end_headers(self):
         pass
 
-    def do_GET(self):
-        self.wfile = open('tmp.file', 'wb')
-        super().do_GET()
-        self.wfile.close()
-        os.remove('tmp.file')
-
-    def do_POST(self):
-        self.wfile = open("tmp.file", 'wb')
-        super().do_POST()
-        self.wfile.close()
-        os.remove('tmp.file')
-
-
 class FakeSecureHTTPServer(SecureHTTPServer):
     def __init__(self):
         self.user_sessions = {}
@@ -84,6 +72,26 @@ class UnreadableFile:
     def __exit__(self, type, value, traceback):
         os.chmod(self.file_name, self.orig_mode)
 
+class TmpTextFile:
+    """
+    Context class for temporarily creating a text file with some
+    lines of content.
+
+    The file is automatically deleted when the context is left, so
+    make sure not to use the path of an existing file!
+    """
+    def __init__(self, path, contents):
+        self.__path = path
+        self.__contents = contents
+
+    def __enter__(self):
+        with open(self.__path, 'w') as f:
+            f.write("\n".join(self.__contents) + "\n")
+
+    def __exit__(self, type, value, traceback):
+        os.unlink(self.__path)
+
+
 class TestSecureHTTPRequestHandler(unittest.TestCase):
     def setUp(self):
         self.old_stdout = sys.stdout
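
A minimal usage sketch of the TmpTextFile helper added above (path and contents are placeholders):

    with TmpTextFile('example.csv', ['root,foo,bar']):
        with open('example.csv') as f:
            print(f.read())          # prints "root,foo,bar"
    # the file no longer exists at this point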
@@ -93,13 +101,22 @@ class TestSecureHTTPRequestHandler(unittest.TestCase):
         self.handler.server.user_sessions = {}
         self.handler.server._user_infos = {}
         self.handler.headers = {}
-        self.handler.rfile = open("check.tmp", 'w+b')
+        self.handler.rfile = open('input.tmp', 'w+b')
+        self.handler.wfile = open('output.tmp', 'w+b')
 
     def tearDown(self):
         sys.stdout.close()
         sys.stdout = self.old_stdout
+        self.handler.wfile.close()
+        os.remove('output.tmp')
         self.handler.rfile.close()
-        os.remove('check.tmp')
+        os.remove('input.tmp')
+
+    def test_is_session_valid(self):
+        self.assertIsNone(self.handler.session_id)
+        self.assertFalse(self.handler._is_session_valid())
+        self.handler.session_id = 4234
+        self.assertTrue(self.handler._is_session_valid())
 
     def test_parse_request_path(self):
         self.handler.path = ''
@@ -160,7 +177,7 @@ class TestSecureHTTPRequestHandler(unittest.TestCase):
             self.handler.do_GET()
             self.assertEqual(self.handler.rcode, http.client.OK)
 
-    def test_user_logged_in(self):
+    def test_is_user_logged_in(self):
         self.handler.server.user_sessions = {}
         self.handler.session_id = 12345
         self.assertTrue(self.handler._is_user_logged_in() == False)
@@ -294,7 +311,92 @@ class TestSecureHTTPRequestHandler(unittest.TestCase):
         rcode, reply = self.handler._handle_post_request()
         self.assertEqual(http.client.BAD_REQUEST, rcode)
 
+    def test_handle_login(self):
+        orig_is_user_logged_in = self.handler._is_user_logged_in
+        orig_check_user_name_and_pwd = self.handler._check_user_name_and_pwd
+        try:
+            def create_is_user_logged_in(status):
+                '''Create a replacement _is_user_logged_in() method.'''
+                def my_is_user_logged_in():
+                    return status
+                return my_is_user_logged_in
+
+            # Check case where _is_user_logged_in() returns True
+            self.handler._is_user_logged_in = create_is_user_logged_in(True)
+            self.handler.headers['cookie'] = 12345
+            self.handler.path = '/login'
+            self.handler.do_POST()
+            self.assertEqual(self.handler.rcode, http.client.OK)
+            self.handler.wfile.seek(0, 0)
+            d = self.handler.wfile.read()
+            self.assertEqual(json.loads(d.decode()),
+                             ['user has already login'])
+
+            # Clear the output
+            self.handler.wfile.seek(0, 0)
+            self.handler.wfile.truncate()
+
+            # Check case where _is_user_logged_in() returns False
+            self.handler._is_user_logged_in = create_is_user_logged_in(False)
+
+            def create_check_user_name_and_pwd(status, error_info=None):
+                '''Create a replacement _check_user_name_and_pwd() method.'''
+                def my_check_user_name_and_pwd():
+                    return status, error_info
+                return my_check_user_name_and_pwd
+
+            # (a) Check case where _check_user_name_and_pwd() returns
+            # valid user status
+            self.handler._check_user_name_and_pwd = \
+                create_check_user_name_and_pwd(True)
+            self.handler.do_POST()
+            self.assertEqual(self.handler.rcode, http.client.OK)
+            self.handler.wfile.seek(0, 0)
+            d = self.handler.wfile.read()
+            self.assertEqual(json.loads(d.decode()), ['login success'])
+
+            # Clear the output
+            self.handler.wfile.seek(0, 0)
+            self.handler.wfile.truncate()
+
+            # (b) Check case where _check_user_name_and_pwd() returns
+            # invalid user status
+            self.handler._check_user_name_and_pwd = \
+                create_check_user_name_and_pwd(False, ['login failed'])
+            self.handler.do_POST()
+            self.assertEqual(self.handler.rcode, http.client.UNAUTHORIZED)
+            self.handler.wfile.seek(0, 0)
+            d = self.handler.wfile.read()
+            self.assertEqual(json.loads(d.decode()), ['login failed'])
+
+        finally:
+            self.handler._is_user_logged_in = orig_is_user_logged_in
+            self.handler._check_user_name_and_pwd = orig_check_user_name_and_pwd
+
+class MockSession:
+    """Act like isc.cc.Session, stealing group_sendmsg/recvmsg().
+
+    The initial simple version only records given parameters in
+    group_sendmsg() for later inspection and raise a timeout exception
+    from recvmsg().  As we see the need for more test cases these methods
+    should be extended.
+
+    """
+    def __init__(self, sent_messages):
+        self.__sent_messages = sent_messages
+
+    def group_sendmsg(self, msg, module_name, want_answer):
+        self.__sent_messages.append((msg, module_name))
+
+    def group_recvmsg(self, nonblock, seq):
+        raise isc.cc.session.SessionTimeout('dummy timeout')
+
 class MyCommandControl(CommandControl):
+    def __init__(self, httpserver, verbose):
+        super().__init__(httpserver, verbose)
+        self.sent_messages = [] # for inspection; allow tests to see it
+        self._cc = MockSession(self.sent_messages)
+
     def _get_modules_specification(self):
         return {}
 
@@ -311,6 +413,12 @@ class MyCommandControl(CommandControl):
     def _handle_msg_from_msgq(self):
         pass
 
+    def _start_msg_handle_thread(self): # don't bother with threads in tests
+        pass
+
+    def _get_current_thread(self):
+        return None
+
 class TestCommandControl(unittest.TestCase):
 
     def setUp(self):
@@ -423,8 +531,24 @@ class TestCommandControl(unittest.TestCase):
         os.remove(file_name)
 
     def test_send_command(self):
-        rcode, value = self.cmdctl.send_command('Cmdctl', 'print_settings', None)
-        self.assertEqual(rcode, 0)
+        # Send a command to another module.  We check that an expected
+        # message is sent via the session (cmdctl._cc).  Due to the behavior
+        # of our mock session object the answer will be "fail", but that is
+        # not the subject of this test, so it's okay.
+        # TODO: more detailed cases should be tested.
+        rcode, value = self.cmdctl.send_command('Init', 'shutdown', None)
+        self.assertEqual(1, len(self.cmdctl.sent_messages))
+        self.assertEqual(({'command': ['shutdown']}, 'Init'),
+                         self.cmdctl.sent_messages[-1])
+        self.assertEqual(1, rcode)
+
+        # Send a command to cmdctl itself.  Should be the same effect.
+        rcode, value = self.cmdctl.send_command('Cmdctl', 'print_settings',
+                                                None)
+        self.assertEqual(2, len(self.cmdctl.sent_messages))
+        self.assertEqual(({'command': ['print_settings']}, 'Cmdctl'),
+                         self.cmdctl.sent_messages[-1])
+        self.assertEqual(1, rcode)
 
 class MySecureHTTPServer(SecureHTTPServer):
     def server_bind(self):
@@ -470,6 +594,88 @@ class TestSecureHTTPServer(unittest.TestCase):
         self.assertEqual(1, len(self.server._user_infos))
         self.assertTrue('root' in self.server._user_infos)
 
+    def test_get_user_info(self):
+        self.assertIsNone(self.server.get_user_info('root'))
+        self.server._create_user_info(SRC_FILE_PATH + 'cmdctl-accounts.csv')
+        self.assertIn('6f0c73bd33101a5ec0294b3ca39fec90ef4717fe',
+                      self.server.get_user_info('root'))
+
+        # When the file is not changed calling _create_user_info() again
+        # should have no effect. In order to test this, we overwrite the
+        # user-infos that were just set and make sure it isn't touched by
+        # the call (so make sure it isn't set to some empty value)
+        fake_users_val = { 'notinfile': [] }
+        self.server._user_infos = fake_users_val
+        self.server._create_user_info(SRC_FILE_PATH + 'cmdctl-accounts.csv')
+        self.assertEqual(fake_users_val, self.server._user_infos)
+
+    def test_create_user_info_changing_file_time(self):
+        self.assertEqual(0, len(self.server._user_infos))
+
+        # Create a file
+        accounts_file = BUILD_FILE_PATH + 'new_file.csv'
+        with TmpTextFile(accounts_file, ['root,foo,bar']):
+            self.server._create_user_info(accounts_file)
+            self.assertEqual(1, len(self.server._user_infos))
+            self.assertTrue('root' in self.server._user_infos)
+
+            # Make sure re-reading is a noop if file was not modified
+            fake_users_val = { 'notinfile': [] }
+            self.server._user_infos = fake_users_val
+            self.server._create_user_info(accounts_file)
+            self.assertEqual(fake_users_val, self.server._user_infos)
+
+        # Create the file again; this time the read should not be a no-op
+        with TmpTextFile(accounts_file, ['otherroot,foo,bar']):
+            # Set mtime in future
+            stat = os.stat(accounts_file)
+            os.utime(accounts_file, (stat.st_atime, stat.st_mtime + 10))
+            self.server._create_user_info(accounts_file)
+            self.assertEqual(1, len(self.server._user_infos))
+            self.assertTrue('otherroot' in self.server._user_infos)
+
+    def test_create_user_info_changing_file_name(self):
+        """
+        Check that the accounts file is re-read if the file name is different
+        """
+        self.assertEqual(0, len(self.server._user_infos))
+
+        # Create two files
+        accounts_file1 = BUILD_FILE_PATH + 'new_file.csv'
+        accounts_file2 = BUILD_FILE_PATH + 'new_file2.csv'
+        with TmpTextFile(accounts_file2, ['otherroot,foo,bar']):
+            with TmpTextFile(accounts_file1, ['root,foo,bar']):
+                self.server._create_user_info(accounts_file1)
+                self.assertEqual(1, len(self.server._user_infos))
+                self.assertTrue('root' in self.server._user_infos)
+
+                # Make sure re-reading is a noop if file was not modified
+                fake_users_val = { 'notinfile': [] }
+                self.server._user_infos = fake_users_val
+                self.server._create_user_info(accounts_file1)
+                self.assertEqual(fake_users_val, self.server._user_infos)
+
+                # But a different file should be read
+                self.server._create_user_info(accounts_file2)
+                self.assertEqual(1, len(self.server._user_infos))
+                self.assertTrue('otherroot' in self.server._user_infos)
+
+    def test_create_user_info_nonexistent_file(self):
+        # Even if there was data initially, pointing at a nonexistent
+        # file should result in no users
+        accounts_file = BUILD_FILE_PATH + 'new_file.csv'
+        self.assertFalse(os.path.exists(accounts_file))
+        fake_users_val = { 'notinfile': [] }
+        self.server._user_infos = fake_users_val
+        self.server._create_user_info(accounts_file)
+        self.assertEqual({}, self.server._user_infos)
+
+        # Once the file is created it should be read
+        with TmpTextFile(accounts_file, ['root,foo,bar']):
+            self.server._create_user_info(accounts_file)
+            self.assertEqual(1, len(self.server._user_infos))
+            self.assertTrue('root' in self.server._user_infos)
+
     def test_check_file(self):
         # Just some file that we know exists
         file_name = BUILD_FILE_PATH + 'cmdctl-keyfile.pem'

+ 1 - 1
src/bin/dbutil/run_dbutil.sh.in

@@ -30,7 +30,7 @@ export PYTHONPATH
 # required by loadable python modules.
 SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
 if test $SET_ENV_LIBRARY_PATH = yes; then
-	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/threads/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
 	export @ENV_LIBRARY_PATH@
 fi
 

+ 6 - 0
src/bin/dbutil/tests/Makefile.am

@@ -5,5 +5,11 @@ SUBDIRS = . testdata
 noinst_SCRIPTS = dbutil_test.sh
 
 check-local:
+if HAVE_SQLITE3_PROGRAM
 	B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
 	$(SHELL) $(abs_builddir)/dbutil_test.sh
+else
+	@echo ""
+	@echo " **** The sqlite3 program is required to run dbutil tests **** "
+	@echo ""
+endif

+ 21 - 21
src/bin/dbutil/tests/dbutil_test.sh.in

@@ -161,7 +161,7 @@ get_schema() {
 # @param $2 Expected backup file
 upgrade_ok_test() {
     copy_file $1 $tempfile
-    ${SHELL} ../run_dbutil.sh --upgrade --noconfirm $tempfile
+    @SHELL@ ../run_dbutil.sh --upgrade --noconfirm $tempfile
     if [ $? -eq 0 ]
     then
         # Compare schema with the reference
@@ -199,7 +199,7 @@ upgrade_ok_test() {
 # @param $2 Expected backup file
 upgrade_fail_test() {
     copy_file $1 $tempfile
-    ${SHELL} ../run_dbutil.sh --upgrade --noconfirm $tempfile
+    @SHELL@ ../run_dbutil.sh --upgrade --noconfirm $tempfile
     failzero $?
     check_backup $1 $backupfile
 }
@@ -222,7 +222,7 @@ record_count_test() {
     records_count=`sqlite3 $tempfile 'select count(*) from records'`
     zones_count=`sqlite3 $tempfile 'select count(*) from zones'`
 
-    ${SHELL} ../run_dbutil.sh --upgrade --noconfirm $tempfile
+    @SHELL@ ../run_dbutil.sh --upgrade --noconfirm $tempfile
     if [ $? -ne 0 ]
     then
         # Reason for failure should already have been output
@@ -268,12 +268,12 @@ record_count_test() {
 # @param $2 Expected version string
 check_version() {
     copy_file $1 $verfile
-    ${SHELL} ../run_dbutil.sh --check $verfile
+    @SHELL@ ../run_dbutil.sh --check $verfile
     if [ $? -gt 2 ]
     then
         fail "version check failed on database $1; return code $?"
     else
-        ${SHELL} ../run_dbutil.sh --check $verfile 2>&1 | grep "$2" > /dev/null
+        @SHELL@ ../run_dbutil.sh --check $verfile 2>&1 | grep "$2" > /dev/null
         if [ $? -ne 0 ]
         then
             fail "database $1 not at expected version $2 (output: $?)"
@@ -293,7 +293,7 @@ check_version() {
 # @param $2 Backup file
 check_version_fail() {
     copy_file $1 $verfile
-    ${SHELL} ../run_dbutil.sh --check $verfile
+    @SHELL@ ../run_dbutil.sh --check $verfile
     failzero $?
     check_no_backup $tempfile $backupfile
 }
@@ -310,12 +310,12 @@ sec=0
 # Test: check that the utility fails if the database does not exist
 sec=`expr $sec + 1`
 echo $sec".1. Non-existent database - check"
-${SHELL} ../run_dbutil.sh --check $tempfile
+@SHELL@ ../run_dbutil.sh --check $tempfile
 failzero $?
 check_no_backup $tempfile $backupfile
 
 echo $sec".2. Non-existent database - upgrade"
-${SHELL} ../run_dbutil.sh --upgrade --noconfirm $tempfile
+@SHELL@ ../run_dbutil.sh --upgrade --noconfirm $tempfile
 failzero $?
 check_no_backup $tempfile $backupfile
 rm -f $tempfile $backupfile
@@ -330,7 +330,7 @@ rm -f $tempfile $backupfile
 
 echo $sec".2. Database is an empty file - upgrade"
 touch $tempfile
-${SHELL} ../run_dbutil.sh --upgrade --noconfirm $tempfile
+@SHELL@ ../run_dbutil.sh --upgrade --noconfirm $tempfile
 failzero $?
 # A backup is performed before anything else, so the backup should exist.
 check_backup $tempfile $backupfile
@@ -344,7 +344,7 @@ rm -f $tempfile $backupfile
 
 echo $sec".2. Database is not an SQLite file - upgrade"
 echo "This is not an sqlite3 database" > $tempfile
-${SHELL} ../run_dbutil.sh --upgrade --noconfirm $tempfile
+@SHELL@ ../run_dbutil.sh --upgrade --noconfirm $tempfile
 failzero $?
 # ...and as before, a backup should have been created
 check_backup $tempfile $backupfile
@@ -459,31 +459,31 @@ rm -f $tempfile $backupfile ${backupfile}-1 ${backupfile}-2
 sec=`expr $sec + 1`
 echo $sec".1 Command-line errors"
 copy_file $testdata/old_v1.sqlite3 $tempfile
-${SHELL} ../run_dbutil.sh $tempfile
+@SHELL@ ../run_dbutil.sh $tempfile
 failzero $?
-${SHELL} ../run_dbutil.sh --upgrade --check $tempfile
+@SHELL@ ../run_dbutil.sh --upgrade --check $tempfile
 failzero $?
-${SHELL} ../run_dbutil.sh --noconfirm --check $tempfile
+@SHELL@ ../run_dbutil.sh --noconfirm --check $tempfile
 failzero $?
-${SHELL} ../run_dbutil.sh --check
+@SHELL@ ../run_dbutil.sh --check
 failzero $?
-${SHELL} ../run_dbutil.sh --upgrade --noconfirm
+@SHELL@ ../run_dbutil.sh --upgrade --noconfirm
 failzero $?
-${SHELL} ../run_dbutil.sh --check $tempfile $backupfile
+@SHELL@ ../run_dbutil.sh --check $tempfile $backupfile
 failzero $?
-${SHELL} ../run_dbutil.sh --upgrade --noconfirm $tempfile $backupfile
+@SHELL@ ../run_dbutil.sh --upgrade --noconfirm $tempfile $backupfile
 failzero $?
 rm -f $tempfile $backupfile
 
 echo $sec".2 verbose flag"
 copy_file $testdata/old_v1.sqlite3 $tempfile
-${SHELL} ../run_dbutil.sh --upgrade --noconfirm --verbose $tempfile
+@SHELL@ ../run_dbutil.sh --upgrade --noconfirm --verbose $tempfile
 passzero $?
 rm -f $tempfile $backupfile
 
 echo $sec".3 Interactive prompt - yes"
 copy_file $testdata/old_v1.sqlite3 $tempfile
-${SHELL} ../run_dbutil.sh --upgrade $tempfile << .
+@SHELL@ ../run_dbutil.sh --upgrade $tempfile << .
 Yes
 .
 passzero $?
@@ -492,7 +492,7 @@ rm -f $tempfile $backupfile
 
 echo $sec".4 Interactive prompt - no"
 copy_file $testdata/old_v1.sqlite3 $tempfile
-${SHELL} ../run_dbutil.sh --upgrade $tempfile << .
+@SHELL@ ../run_dbutil.sh --upgrade $tempfile << .
 no
 .
 passzero $?
@@ -502,7 +502,7 @@ rm -f $tempfile $backupfile
 
 echo $sec".5 quiet flag"
 copy_file $testdata/old_v1.sqlite3 $tempfile
-${SHELL} ../run_dbutil.sh --check --quiet $tempfile 2>&1 | grep .
+@SHELL@ ../run_dbutil.sh --check --quiet $tempfile 2>&1 | grep .
 failzero $?
 rm -f $tempfile $backupfile
 

+ 2 - 2
src/bin/ddns/ddns.py.in

@@ -354,7 +354,7 @@ class DDNSServer:
                 zname = Name(zone_spec['name'])
                 # class has the default value in case it's unspecified.
                 # ideally this should be merged within the config module, but
-                # the current implementation doesn't esnure that, so we need to
+                # the current implementation doesn't ensure that, so we need to
                 # subsitute it ourselves.
                 if 'class' in zone_spec:
                     zclass = RRClass(zone_spec['class'])
@@ -510,7 +510,7 @@ class DDNSServer:
         '''Send DDNS response to the client.
 
         Right now, this is a straightforward subroutine of handle_request(),
-        but is intended to be extended evetually so that it can handle more
+        but is intended to be extended eventually so that it can handle more
         complicated operations for TCP (which requires asynchronous write).
         Further, when we support multiple requests over a single TCP
         connection, this method may even be shared by multiple methods.

+ 1 - 1
src/bin/ddns/ddns_messages.mes

@@ -141,7 +141,7 @@ logged.
 
 % DDNS_RESPONSE_TCP_SOCKET_SEND_FAILED failed to complete sending update response to %1 over TCP
 b10-ddns had tried to send an update response over TCP, and it hadn't
-been completed at that time, and a followup attempt to complete the
+been completed at that time, and a follow-up attempt to complete the
 send operation failed due to some network I/O error.  While a network
 error can happen any time, this event is quite unexpected for two
 reasons.  First, since the size of a response to an update request

File diff suppressed because it is too large
+ 1 - 1
src/bin/ddns/tests/Makefile.am


File diff suppressed because it is too large
+ 208 - 1552
src/bin/dhcp4/config_parser.cc


+ 11 - 13
src/bin/dhcp4/config_parser.h

@@ -12,8 +12,10 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
-#include <exceptions/exceptions.h>
 #include <cc/data.h>
+#include <exceptions/exceptions.h>
+#include <dhcpsrv/dhcp_parsers.h>
+
 #include <stdint.h>
 #include <string>
 
@@ -28,7 +30,8 @@ namespace dhcp {
 
 class Dhcpv4Srv;
 
-/// @brief Configure DHCPv4 server (@c Dhcpv4Srv) with a set of configuration values.
+/// @brief Configure DHCPv4 server (@c Dhcpv4Srv) with a set of configuration 
+/// values.
 ///
 /// This function parses configuration information stored in @c config_set
 /// and configures the @c server by applying the configuration to it.
@@ -41,9 +44,9 @@ class Dhcpv4Srv;
 /// (such as malformed configuration or invalid configuration parameter),
 /// this function returns appropriate error code.
 ///
-/// This function is called every time a new configuration is received. The extra
-/// parameter is a reference to DHCPv4 server component. It is currently not used
-/// and CfgMgr::instance() is accessed instead.
+/// This function is called every time a new configuration is received. The 
+/// extra parameter is a reference to DHCPv4 server component. It is currently
+/// not used and CfgMgr::instance() is accessed instead.
 ///
 /// This method does not throw. It catches all exceptions and returns them as
 /// reconfiguration statuses. It may return the following response codes:
@@ -58,15 +61,10 @@ isc::data::ConstElementPtr
 configureDhcp4Server(Dhcpv4Srv&,
                      isc::data::ConstElementPtr config_set);
 
-
-/// @brief Returns the global uint32_t values storage.
-///
-/// This function must be only used by unit tests that need
-/// to access uint32_t global storage to verify that the
-/// Uint32Parser works as expected.
+/// @brief Returns the global context
 ///
-/// @return a reference to a global uint32 values storage.
-const std::map<std::string, uint32_t>& getUint32Defaults();
+/// @return a reference to the global context
+ParserContextPtr& globalContext();
 
 }; // end of isc::dhcp namespace
 }; // end of isc namespace

+ 1 - 1
src/bin/dhcp4/ctrl_dhcp4_srv.h

@@ -97,7 +97,7 @@ protected:
     /// @brief A dummy configuration handler that always returns success.
     ///
     /// This configuration handler does not perform configuration
-    /// parsing and always returns success. A dummy hanlder should
+    /// parsing and always returns success. A dummy handler should
     /// be installed using \ref isc::config::ModuleCCSession ctor
     /// to get the initial configuration. This initial configuration
     /// comprises values for only those elements that were modified

+ 1 - 1
src/bin/dhcp4/dhcp4_messages.mes

@@ -74,7 +74,7 @@ many possible reasons for such a failure.
 % DHCP4_LEASE_ALLOC lease %1 has been allocated for client-id %2, hwaddr %3
 This debug message indicates that the server successfully granted a lease
 in response to client's REQUEST message. This is a normal behavior and
-incicates successful operation.
+indicates successful operation.
 
 % DHCP4_LEASE_ALLOC_FAIL failed to grant a lease for client-id %1, hwaddr %2
 This message indicates that the server failed to grant a lease to the

+ 28 - 6
src/bin/dhcp4/dhcp4_srv.cc

@@ -57,7 +57,7 @@ static const char* SERVER_ID_FILE = "b10-dhcp4-serverid";
 // These are hardcoded parameters. Currently this is a skeleton server that only
 // grants those options and a single, fixed, hardcoded lease.
 
-Dhcpv4Srv::Dhcpv4Srv(uint16_t port, const char* dbconfig) {
+Dhcpv4Srv::Dhcpv4Srv(uint16_t port, const char* dbconfig, const bool use_bcast) {
     LOG_DEBUG(dhcp4_logger, DBG_DHCP4_START, DHCP4_OPEN_SOCKET).arg(port);
     try {
         // First call to instance() will create IfaceMgr (it's a singleton)
@@ -67,7 +67,7 @@ Dhcpv4Srv::Dhcpv4Srv(uint16_t port, const char* dbconfig) {
         if (port) {
             // open sockets only if port is non-zero. Port 0 is used
             // for non-socket related testing.
-            IfaceMgr::instance().openSockets4(port);
+            IfaceMgr::instance().openSockets4(port, use_bcast);
         }
 
         string srvid_file = CfgMgr::instance().getDataDir() + "/" + string(SERVER_ID_FILE);
@@ -287,9 +287,9 @@ Dhcpv4Srv::generateServerID() {
             continue;
         }
 
-        const IfaceMgr::AddressCollection addrs = iface->getAddresses();
+        const Iface::AddressCollection addrs = iface->getAddresses();
 
-        for (IfaceMgr::AddressCollection::const_iterator addr = addrs.begin();
+        for (Iface::AddressCollection::const_iterator addr = addrs.begin();
              addr != addrs.end(); ++addr) {
             if (addr->getFamily() != AF_INET) {
                 continue;
@@ -317,7 +317,7 @@ Dhcpv4Srv::writeServerID(const std::string& file_name) {
     return (true);
 }
 
-string 
+string
 Dhcpv4Srv::srvidToString(const OptionPtr& srvid) {
     if (!srvid) {
         isc_throw(BadValue, "NULL pointer passed to srvidToString()");
@@ -343,7 +343,7 @@ Dhcpv4Srv::copyDefaultFields(const Pkt4Ptr& question, Pkt4Ptr& answer) {
     answer->setIndex(question->getIndex());
     answer->setCiaddr(question->getCiaddr());
 
-    answer->setSiaddr(IOAddress("0.0.0.0")); // explictly set this to 0
+    answer->setSiaddr(IOAddress("0.0.0.0")); // explicitly set this to 0
     answer->setHops(question->getHops());
 
     // copy MAC address
@@ -517,6 +517,28 @@ Dhcpv4Srv::assignLease(const Pkt4Ptr& question, Pkt4Ptr& answer) {
 
         answer->setYiaddr(lease->addr_);
 
+        // If the remote address is not set, we are dealing with a directly
+        // connected client requesting a new lease. We can send the response
+        // to the address assigned in the lease, but first we have to make
+        // sure that IfaceMgr supports responding directly to a client that
+        // doesn't have an address assigned to its interface yet.
+        if (answer->getRemoteAddr().toText() == "0.0.0.0") {
+            if (IfaceMgr::instance().isDirectResponseSupported()) {
+                answer->setRemoteAddr(lease->addr_);
+            } else {
+                // Since IfaceMgr does not support direct responses to
+                // clients that have no IP address yet, we have to send the
+                // response to broadcast. We don't check whether the use_bcast
+                // flag was set in the constructor, because this flag is only
+                // used by unit tests to prevent opening broadcast sockets, as
+                // that requires root privileges. If this function is invoked
+                // by unit tests, we expect it to set the broadcast address if
+                // direct response is not supported, so that a test can verify
+                // the function's behavior regardless of the use_bcast value.
+                answer->setRemoteAddr(IOAddress("255.255.255.255"));
+            }
+        }
+
         // IP Address Lease time (type 51)
         opt = OptionPtr(new Option(Option::V4, DHO_DHCP_LEASE_TIME));
         opt->setUint32(lease->valid_lft_);
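
The destination-address rule spelled out in the comments above amounts to a short decision; a sketch of just that rule in Python (the function name is illustrative, the addresses mirror the code):

    def response_destination(remote_addr, lease_addr, direct_supported):
        # Relayed or already-addressed clients keep the existing remote
        # address; otherwise use the lease address when direct responses are
        # supported, and fall back to broadcast when they are not.
        if remote_addr != '0.0.0.0':
            return remote_addr
        return lease_addr if direct_supported else '255.255.255.255'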

+ 4 - 2
src/bin/dhcp4/dhcp4_srv.h

@@ -66,8 +66,10 @@ class Dhcpv4Srv : public boost::noncopyable {
     /// @param port specifies port number to listen on
     /// @param dbconfig Lease manager configuration string.  The default
     ///        of the "memfile" manager is used for testing.
+    /// @param use_bcast configure sockets to support broadcast messages.
     Dhcpv4Srv(uint16_t port = DHCP4_SERVER_PORT,
-              const char* dbconfig = "type=memfile");
+              const char* dbconfig = "type=memfile",
+              const bool use_bcast = true);
 
     /// @brief Destructor. Used during DHCPv4 service shutdown.
     ~Dhcpv4Srv();
@@ -216,7 +218,7 @@ protected:
     /// @param msg_type specifies message type
     void appendDefaultOptions(Pkt4Ptr& msg, uint8_t msg_type);
 
-    /// @brief Returns server-intentifier option
+    /// @brief Returns server-identifier option
     ///
     /// @return server-id option
     OptionPtr getServerID() { return serverid_; }

File diff suppressed because it is too large
+ 1 - 1
src/bin/dhcp4/tests/Makefile.am


+ 12 - 10
src/bin/dhcp4/tests/config_parser_unittest.cc

@@ -25,7 +25,10 @@
 #include <dhcp/option_int.h>
 #include <dhcpsrv/subnet.h>
 #include <dhcpsrv/cfgmgr.h>
+
 #include <boost/foreach.hpp>
+#include <boost/scoped_ptr.hpp>
+
 #include <iostream>
 #include <fstream>
 #include <sstream>
@@ -47,20 +50,20 @@ public:
         // Open port 0 means to not do anything at all. We don't want to
         // deal with sockets here, just check if configuration handling
         // is sane.
-        srv_ = new Dhcpv4Srv(0);
+        srv_.reset(new Dhcpv4Srv(0));
     }
 
     // Checks if global parameter of name have expected_value
     void checkGlobalUint32(string name, uint32_t expected_value) {
-        const std::map<std::string, uint32_t>& uint32_defaults = getUint32Defaults();
-        std::map<std::string, uint32_t>::const_iterator it =
-            uint32_defaults.find(name);
-        if (it == uint32_defaults.end()) {
+        const Uint32StoragePtr uint32_defaults = 
+                                        globalContext()->uint32_values_;
+        try {
+            uint32_t actual_value = uint32_defaults->getParam(name);
+            EXPECT_EQ(expected_value, actual_value);
+        } catch (DhcpConfigError) {
             ADD_FAILURE() << "Expected uint32 with name " << name
                           << " not found";
-            return;
         }
-        EXPECT_EQ(expected_value, it->second);
     }
 
     // Checks if the result of DHCP server configuration has
@@ -74,7 +77,6 @@ public:
 
     ~Dhcp4ParserTest() {
         resetConfiguration();
-        delete srv_;
     };
 
     /// @brief Create the simple configuration with single option.
@@ -83,7 +85,7 @@ public:
     /// option value. These parameters are: "name", "code", "data",
     /// "csv-format" and "space".
     ///
-    /// @param param_value string holiding option parameter value to be
+    /// @param param_value string holding option parameter value to be
     /// injected into the configuration string.
     /// @param parameter name of the parameter to be configured with
     /// param value.
@@ -279,7 +281,7 @@ public:
         }
     }
 
-    Dhcpv4Srv* srv_;
+    boost::scoped_ptr<Dhcpv4Srv> srv_;
 
     int rcode_;
     ConstElementPtr comment_;

+ 12 - 14
src/bin/dhcp4/tests/ctrl_dhcp4_srv_unittest.cc

@@ -1,4 +1,4 @@
-// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2012-2013 Internet Systems Consortium, Inc. ("ISC")
 //
 // Permission to use, copy, modify, and/or distribute this software for any
 // purpose with or without fee is hereby granted, provided that the above
@@ -18,6 +18,7 @@
 #include <dhcp/dhcp4.h>
 #include <dhcp4/ctrl_dhcp4_srv.h>
 
+#include <boost/scoped_ptr.hpp>
 #include <gtest/gtest.h>
 
 #include <fstream>
@@ -36,7 +37,7 @@ using namespace isc::config;
 namespace {
 
 class NakedControlledDhcpv4Srv: public ControlledDhcpv4Srv {
-    // "naked" DHCPv4 server, exposes internal fields
+    // "Naked" DHCPv4 server, exposes internal fields
 public:
     NakedControlledDhcpv4Srv():ControlledDhcpv4Srv(DHCP4_SERVER_PORT + 10000) { }
 };
@@ -52,21 +53,21 @@ public:
 
 TEST_F(CtrlDhcpv4SrvTest, commands) {
 
-    ControlledDhcpv4Srv* srv = NULL;
-    ASSERT_NO_THROW({
-        srv = new ControlledDhcpv4Srv(DHCP4_SERVER_PORT + 10000);
-    });
+    boost::scoped_ptr<ControlledDhcpv4Srv> srv;
+    ASSERT_NO_THROW(
+        srv.reset(new ControlledDhcpv4Srv(DHCP4_SERVER_PORT + 10000))
+    );
 
-    // use empty parameters list
+    // Use empty parameters list
     ElementPtr params(new isc::data::MapElement());
     int rcode = -1;
 
-    // case 1: send bogus command
+    // Case 1: send bogus command
     ConstElementPtr result = ControlledDhcpv4Srv::execDhcpv4ServerCommand("blah", params);
     ConstElementPtr comment = parseAnswer(rcode, result);
     EXPECT_EQ(1, rcode); // expect failure (no such command as blah)
 
-    // case 2: send shutdown command without any parameters
+    // Case 2: send shutdown command without any parameters
     result = ControlledDhcpv4Srv::execDhcpv4ServerCommand("shutdown", params);
     comment = parseAnswer(rcode, result);
     EXPECT_EQ(0, rcode); // expect success
@@ -75,13 +76,10 @@ TEST_F(CtrlDhcpv4SrvTest, commands) {
     ConstElementPtr x(new isc::data::IntElement(pid));
     params->set("pid", x);
 
-    // case 3: send shutdown command with 1 parameter: pid
+    // Case 3: send shutdown command with 1 parameter: pid
     result = ControlledDhcpv4Srv::execDhcpv4ServerCommand("shutdown", params);
     comment = parseAnswer(rcode, result);
     EXPECT_EQ(0, rcode); // expect success
-
-
-    delete srv;
 }
 
-} // end of anonymous namespace
+} // End of anonymous namespace

+ 191 - 235
src/bin/dhcp4/tests/dhcp4_srv_unittest.cc

@@ -17,6 +17,7 @@
 
 #include <asiolink/io_address.h>
 #include <dhcp/dhcp4.h>
+#include <dhcp/iface_mgr.h>
 #include <dhcp/option.h>
 #include <dhcp/option4_addrlst.h>
 #include <dhcp/option_custom.h>
@@ -29,6 +30,8 @@
 #include <dhcpsrv/utils.h>
 #include <gtest/gtest.h>
 
+#include <boost/scoped_ptr.hpp>
+
 #include <fstream>
 #include <iostream>
 
@@ -44,7 +47,16 @@ namespace {
 class NakedDhcpv4Srv: public Dhcpv4Srv {
     // "Naked" DHCPv4 server, exposes internal fields
 public:
-    NakedDhcpv4Srv(uint16_t port = 0):Dhcpv4Srv(port) { }
+
+    /// @brief Constructor.
+    ///
+    /// It disables configuration of broadcast options on
+    /// sockets that are opened by the Dhcpv4Srv constructor.
+    /// Setting broadcast options requires root privileges,
+    /// which are not available when running unit tests.
+    NakedDhcpv4Srv(uint16_t port = 0)
+        : Dhcpv4Srv(port, "type=memfile", false) {
+    }
 
     using Dhcpv4Srv::processDiscover;
     using Dhcpv4Srv::processRequest;
@@ -116,10 +128,10 @@ public:
 
     /// @brief Configures options being requested in the PRL option.
     ///
-    /// The lpr-servers option is NOT configured here altough it is
+    /// The lpr-servers option is NOT configured here although it is
     /// added to the 'Parameter Request List' option in the
     /// \ref addPrlOption. When requested option is not configured
-    /// the server should not return it in its rensponse. The goal
+    /// the server should not return it in its response. The goal
     /// of not configuring the requested option is to verify that
     /// the server will not return it.
     void configureRequestedOptions() {
@@ -132,8 +144,7 @@ public:
 
         // domain-name
         OptionDefinition def("domain-name", DHO_DOMAIN_NAME, OPT_FQDN_TYPE);
-        boost::shared_ptr<OptionCustom>
-            option_domain_name(new OptionCustom(def, Option::V4));
+        OptionCustomPtr option_domain_name(new OptionCustom(def, Option::V4));
         option_domain_name->writeFqdn("example.com");
         subnet_->addOption(option_domain_name, false, "dhcp4");
 
@@ -152,8 +163,7 @@ public:
     /// @brief checks that the response matches request
     /// @param q query (client's message)
     /// @param a answer (server's message)
-    void messageCheck(const boost::shared_ptr<Pkt4>& q,
-                      const boost::shared_ptr<Pkt4>& a) {
+    void messageCheck(const Pkt4Ptr& q, const Pkt4Ptr& a) {
         ASSERT_TRUE(q);
         ASSERT_TRUE(a);
 
@@ -170,6 +180,8 @@ public:
         EXPECT_TRUE(a->getOption(DHO_DHCP_SERVER_IDENTIFIER));
         EXPECT_TRUE(a->getOption(DHO_DHCP_LEASE_TIME));
         EXPECT_TRUE(a->getOption(DHO_SUBNET_MASK));
+        EXPECT_TRUE(a->getOption(DHO_DOMAIN_NAME));
+        EXPECT_TRUE(a->getOption(DHO_DOMAIN_NAME_SERVERS));
 
         // Check that something is offered
         EXPECT_TRUE(a->getYiaddr().toText() != "0.0.0.0");
@@ -345,6 +357,120 @@ public:
         EXPECT_TRUE(expected_clientid->getData() == opt->getData());
     }
 
+    /// @brief Tests if Discover or Request message is processed correctly
+    ///
+    /// @param msg_type DHCPDISCOVER or DHCPREQUEST
+    /// @param client_addr client address
+    /// @param relay_addr relay address
+    void testDiscoverRequest(const uint8_t msg_type,
+                             const IOAddress& client_addr,
+                             const IOAddress& relay_addr) {
+
+        boost::scoped_ptr<NakedDhcpv4Srv> srv(new NakedDhcpv4Srv(0));
+        vector<uint8_t> mac(6);
+        for (int i = 0; i < 6; i++) {
+            mac[i] = i*10;
+        }
+
+        boost::shared_ptr<Pkt4> req(new Pkt4(msg_type, 1234));
+        boost::shared_ptr<Pkt4> rsp;
+
+        req->setIface("eth0");
+        req->setIndex(17);
+        req->setHWAddr(1, 6, mac);
+        req->setRemoteAddr(IOAddress(client_addr));
+        req->setGiaddr(relay_addr);
+
+        // We are going to test that certain options are returned
+        // in the response message when requested using 'Parameter
+        // Request List' option. Let's configure those options that
+        // are returned when requested.
+        configureRequestedOptions();
+
+        if (msg_type == DHCPDISCOVER) {
+            ASSERT_NO_THROW(
+                rsp = srv->processDiscover(req);
+            );
+
+            // Should return OFFER
+            ASSERT_TRUE(rsp);
+            EXPECT_EQ(DHCPOFFER, rsp->getType());
+
+        } else {
+            ASSERT_NO_THROW(
+                rsp = srv->processRequest(req);
+            );
+
+            // Should return ACK
+            ASSERT_TRUE(rsp);
+            EXPECT_EQ(DHCPACK, rsp->getType());
+
+        }
+
+        if (relay_addr.toText() != "0.0.0.0") {
+            // This is a relayed message. It should be sent back to the relay address.
+            EXPECT_EQ(req->getGiaddr().toText(),
+                      rsp->getRemoteAddr().toText());
+
+        } else if (client_addr.toText() != "0.0.0.0") {
+            // This is a message from a client having an IP address.
+            EXPECT_EQ(req->getRemoteAddr().toText(),
+                      rsp->getRemoteAddr().toText());
+
+        } else {
+            // This is a message from a client having no IP address yet.
+            // If IfaceMgr supports direct traffic the response should
+            // be sent to the new address assigned to the client.
+            if (IfaceMgr::instance().isDirectResponseSupported()) {
+                EXPECT_EQ(rsp->getYiaddr(),
+                          rsp->getRemoteAddr().toText());
+
+            // If direct response to the client having no IP address is
+            // not supported, response should go to broadcast.
+            } else {
+                EXPECT_EQ("255.255.255.255", rsp->getRemoteAddr().toText());
+
+            }
+
+        }
+
+        messageCheck(req, rsp);
+
+        // We did not request any options so these should not be present
+        // in the RSP.
+        EXPECT_FALSE(rsp->getOption(DHO_LOG_SERVERS));
+        EXPECT_FALSE(rsp->getOption(DHO_COOKIE_SERVERS));
+        EXPECT_FALSE(rsp->getOption(DHO_LPR_SERVERS));
+
+        // Repeat the test but request some options.
+        // Add 'Parameter Request List' option.
+        addPrlOption(req);
+
+        if (msg_type == DHCPDISCOVER) {
+            ASSERT_NO_THROW(
+                rsp = srv->processDiscover(req);
+            );
+
+            // Should return non-NULL packet.
+            ASSERT_TRUE(rsp);
+            EXPECT_EQ(DHCPOFFER, rsp->getType());
+
+        } else {
+            ASSERT_NO_THROW(
+                rsp = srv->processRequest(req);
+            );
+
+            // Should return non-NULL packet.
+            ASSERT_TRUE(rsp);
+            EXPECT_EQ(DHCPACK, rsp->getType());
+
+        }
+
+        // Check that the requested options are returned.
+        optionsCheck(rsp);
+
+    }
+
     ~Dhcpv4SrvTest() {
         CfgMgr::instance().deleteSubnets4();
 
@@ -367,29 +493,21 @@ public:
 TEST_F(Dhcpv4SrvTest, basic) {
 
     // Check that the base class can be instantiated
-    Dhcpv4Srv* srv = NULL;
-    ASSERT_NO_THROW({
-        srv = new Dhcpv4Srv(DHCP4_SERVER_PORT + 10000);
-    });
-    delete srv;
+    boost::scoped_ptr<Dhcpv4Srv> srv;
+    ASSERT_NO_THROW(srv.reset(new Dhcpv4Srv(DHCP4_SERVER_PORT + 10000)));
+    srv.reset();
 
     // Check that the derived class can be instantiated
-    NakedDhcpv4Srv* naked_srv = NULL;
-    ASSERT_NO_THROW({
-        naked_srv = new NakedDhcpv4Srv(DHCP4_SERVER_PORT + 10000);
-    });
+    boost::scoped_ptr<NakedDhcpv4Srv> naked_srv;
+    ASSERT_NO_THROW(
+            naked_srv.reset(new NakedDhcpv4Srv(DHCP4_SERVER_PORT + 10000)));
     EXPECT_TRUE(naked_srv->getServerID());
-    delete naked_srv;
 
-    ASSERT_NO_THROW({
-        naked_srv = new NakedDhcpv4Srv(0);
-    });
+    ASSERT_NO_THROW(naked_srv.reset(new NakedDhcpv4Srv(0)));
     EXPECT_TRUE(naked_srv->getServerID());
-
-    delete naked_srv;
 }
 
-// Verifies that received DISCOVER can be processed correctly,
+// Verifies that DISCOVER received via relay can be processed correctly,
 // that the OFFER message generated in response is valid and
 // contains necessary options.
 //
@@ -397,248 +515,86 @@ TEST_F(Dhcpv4SrvTest, basic) {
 // are other tests that verify correctness of the allocation
 // engine. See DiscoverBasic, DiscoverHint, DiscoverNoClientId
 // and DiscoverInvalidHint.
-TEST_F(Dhcpv4SrvTest, processDiscover) {
-    NakedDhcpv4Srv* srv = new NakedDhcpv4Srv(0);
-    vector<uint8_t> mac(6);
-    for (int i = 0; i < 6; i++) {
-        mac[i] = 255 - i;
-    }
-
-    boost::shared_ptr<Pkt4> pkt(new Pkt4(DHCPDISCOVER, 1234));
-    boost::shared_ptr<Pkt4> offer;
-
-    pkt->setIface("eth0");
-    pkt->setIndex(17);
-    pkt->setHWAddr(1, 6, mac);
-    pkt->setRemoteAddr(IOAddress("192.0.2.56"));
-    pkt->setGiaddr(IOAddress("192.0.2.67"));
-
-    // Let's make it a relayed message
-    pkt->setHops(3);
-    pkt->setRemotePort(DHCP4_SERVER_PORT);
-
-    // We are going to test that certain options are returned
-    // (or not returned) in the OFFER message when requested
-    // using 'Parameter Request List' option. Let's configure
-    // those options that are returned when requested.
-    configureRequestedOptions();
-
-    // Should not throw
-    EXPECT_NO_THROW(
-        offer = srv->processDiscover(pkt);
-    );
-
-    // Should return something
-    ASSERT_TRUE(offer);
-
-    EXPECT_EQ(DHCPOFFER, offer->getType());
-
-    // This is relayed message. It should be sent back to relay address.
-    EXPECT_EQ(pkt->getGiaddr(), offer->getRemoteAddr());
-
-    messageCheck(pkt, offer);
-
-    // There are some options that are always present in the
-    // message, even if not requested.
-    EXPECT_TRUE(offer->getOption(DHO_DOMAIN_NAME));
-    EXPECT_TRUE(offer->getOption(DHO_DOMAIN_NAME_SERVERS));
-
-    // We did not request any options so they should not be present
-    // in the OFFER.
-    EXPECT_FALSE(offer->getOption(DHO_LOG_SERVERS));
-    EXPECT_FALSE(offer->getOption(DHO_COOKIE_SERVERS));
-    EXPECT_FALSE(offer->getOption(DHO_LPR_SERVERS));
-
-    // Add 'Parameter Request List' option.
-    addPrlOption(pkt);
-
-    // Now repeat the test but request some options.
-    EXPECT_NO_THROW(
-        offer = srv->processDiscover(pkt);
-    );
-
-    // Should return something
-    ASSERT_TRUE(offer);
-
-    EXPECT_EQ(DHCPOFFER, offer->getType());
-
-    // This is relayed message. It should be sent back to relay address.
-    EXPECT_EQ(pkt->getGiaddr(), offer->getRemoteAddr());
-
-    messageCheck(pkt, offer);
-
-    // Check that the requested options are returned.
-    optionsCheck(offer);
-
-    // Now repeat the test for directly sent message
-    pkt->setHops(0);
-    pkt->setGiaddr(IOAddress("0.0.0.0"));
-    pkt->setRemotePort(DHCP4_CLIENT_PORT);
-
-    EXPECT_NO_THROW(
-        offer = srv->processDiscover(pkt);
-    );
-
-    // Should return something
-    ASSERT_TRUE(offer);
-
-    EXPECT_EQ(DHCPOFFER, offer->getType());
-
-    // This is direct message. It should be sent back to origin, not
-    // to relay.
-    EXPECT_EQ(pkt->getRemoteAddr(), offer->getRemoteAddr());
-
-    messageCheck(pkt, offer);
+TEST_F(Dhcpv4SrvTest, processDiscoverRelay) {
+    testDiscoverRequest(DHCPDISCOVER,
+                        IOAddress("192.0.2.56"),
+                        IOAddress("192.0.2.67"));
+}
 
-    // Check that the requested options are returned.
-    optionsCheck(offer);
+// Verifies that the non-relayed DISCOVER is processed correctly when
+// client source address is specified.
+TEST_F(Dhcpv4SrvTest, processDiscoverNoRelay) {
+    testDiscoverRequest(DHCPDISCOVER,
+                        IOAddress("0.0.0.0"),
+                        IOAddress("192.0.2.67"));
+}
 
-    delete srv;
+// Verifies that the non-relayed DISCOVER is processed correctly when
+// client source address is not specified.
+TEST_F(Dhcpv4SrvTest, processDiscoverNoClientAddr) {
+    testDiscoverRequest(DHCPDISCOVER,
+                        IOAddress("0.0.0.0"),
+                        IOAddress("0.0.0.0"));
 }
 
-// Verifies that received REQUEST can be processed correctly,
-// that the ACK message generated in response is valid and
+// Verifies that REQUEST received via relay can be processed correctly,
+// that the ACK message generated in response is valid and
 // contains necessary options.
 //
 // Note: this test focuses on the packet correctness. There
 // are other tests that verify correctness of the allocation
-// engine. See RequestBasic.
-TEST_F(Dhcpv4SrvTest, processRequest) {
-    NakedDhcpv4Srv* srv = new NakedDhcpv4Srv(0);
-    vector<uint8_t> mac(6);
-    for (int i = 0; i < 6; i++) {
-        mac[i] = i*10;
-    }
-
-    boost::shared_ptr<Pkt4> req(new Pkt4(DHCPREQUEST, 1234));
-    boost::shared_ptr<Pkt4> ack;
-
-    req->setIface("eth0");
-    req->setIndex(17);
-    req->setHWAddr(1, 6, mac);
-    req->setRemoteAddr(IOAddress("192.0.2.56"));
-    req->setGiaddr(IOAddress("192.0.2.67"));
-
-    // We are going to test that certain options are returned
-    // in the ACK message when requested using 'Parameter
-    // Request List' option. Let's configure those options that
-    // are returned when requested.
-    configureRequestedOptions();
-
-    // Should not throw
-    ASSERT_NO_THROW(
-        ack = srv->processRequest(req);
-    );
-
-    // Should return something
-    ASSERT_TRUE(ack);
-
-    EXPECT_EQ(DHCPACK, ack->getType());
-
-    // This is relayed message. It should be sent back to relay address.
-    EXPECT_EQ(req->getGiaddr(), ack->getRemoteAddr());
-
-    messageCheck(req, ack);
-
-    // There are some options that are always present in the
-    // message, even if not requested.
-    EXPECT_TRUE(ack->getOption(DHO_DOMAIN_NAME));
-    EXPECT_TRUE(ack->getOption(DHO_DOMAIN_NAME_SERVERS));
-
-    // We did not request any options so these should not be present
-    // in the ACK.
-    EXPECT_FALSE(ack->getOption(DHO_LOG_SERVERS));
-    EXPECT_FALSE(ack->getOption(DHO_COOKIE_SERVERS));
-    EXPECT_FALSE(ack->getOption(DHO_LPR_SERVERS));
-
-    // Add 'Parameter Request List' option.
-    addPrlOption(req);
-
-    // Repeat the test but request some options.
-    ASSERT_NO_THROW(
-        ack = srv->processRequest(req);
-    );
-
-    // Should return something
-    ASSERT_TRUE(ack);
-
-    EXPECT_EQ(DHCPACK, ack->getType());
-
-    // This is relayed message. It should be sent back to relay address.
-    EXPECT_EQ(req->getGiaddr(), ack->getRemoteAddr());
-
-    // Check that the requested options are returned.
-    optionsCheck(ack);
-
-    // Now repeat the test for directly sent message
-    req->setHops(0);
-    req->setGiaddr(IOAddress("0.0.0.0"));
-    req->setRemotePort(DHCP4_CLIENT_PORT);
-
-    EXPECT_NO_THROW(
-        ack = srv->processDiscover(req);
-    );
-
-    // Should return something
-    ASSERT_TRUE(ack);
-
-    EXPECT_EQ(DHCPOFFER, ack->getType());
-
-    // This is direct message. It should be sent back to origin, not
-    // to relay.
-    EXPECT_EQ(ack->getRemoteAddr(), req->getRemoteAddr());
-
-    messageCheck(req, ack);
+// engine. See RequestBasic.
+TEST_F(Dhcpv4SrvTest, processRequestRelay) {
+    testDiscoverRequest(DHCPREQUEST,
+                        IOAddress("192.0.2.56"),
+                        IOAddress("192.0.2.67"));
+}
 
-    // Check that the requested options are returned.
-    optionsCheck(ack);
+// Verifies that the non-relayed REQUEST is processed correctly when
+// client source address is specified.
+TEST_F(Dhcpv4SrvTest, processRequestNoRelay) {
+    testDiscoverRequest(DHCPREQUEST,
+                        IOAddress("0.0.0.0"),
+                        IOAddress("192.0.2.67"));
+}
 
-    delete srv;
+// Verifies that the non-relayed REQUEST is processed correctly when
+// client source address is not specified.
+TEST_F(Dhcpv4SrvTest, processRequestNoClientAddr) {
+    testDiscoverRequest(DHCPREQUEST,
+                        IOAddress("0.0.0.0"),
+                        IOAddress("0.0.0.0"));
 }
 
 TEST_F(Dhcpv4SrvTest, processRelease) {
-    NakedDhcpv4Srv* srv = new NakedDhcpv4Srv();
-
-    boost::shared_ptr<Pkt4> pkt(new Pkt4(DHCPRELEASE, 1234));
+    NakedDhcpv4Srv srv;
+    Pkt4Ptr pkt(new Pkt4(DHCPRELEASE, 1234));
 
     // Should not throw
-    EXPECT_NO_THROW(
-        srv->processRelease(pkt);
-    );
-
-    delete srv;
+    EXPECT_NO_THROW(srv.processRelease(pkt));
 }
 
 TEST_F(Dhcpv4SrvTest, processDecline) {
-    NakedDhcpv4Srv* srv = new NakedDhcpv4Srv();
-
-    boost::shared_ptr<Pkt4> pkt(new Pkt4(DHCPDECLINE, 1234));
+    NakedDhcpv4Srv srv;
+    Pkt4Ptr pkt(new Pkt4(DHCPDECLINE, 1234));
 
     // Should not throw
-    EXPECT_NO_THROW(
-        srv->processDecline(pkt);
-    );
-
-    delete srv;
+    EXPECT_NO_THROW(srv.processDecline(pkt));
 }
 
 TEST_F(Dhcpv4SrvTest, processInform) {
-    NakedDhcpv4Srv* srv = new NakedDhcpv4Srv();
-
-    boost::shared_ptr<Pkt4> pkt(new Pkt4(DHCPINFORM, 1234));
+    NakedDhcpv4Srv srv;
+    Pkt4Ptr pkt(new Pkt4(DHCPINFORM, 1234));
 
     // Should not throw
-    EXPECT_NO_THROW(
-        srv->processInform(pkt);
-    );
+    EXPECT_NO_THROW(srv.processInform(pkt));
 
     // Should return something
-    EXPECT_TRUE(srv->processInform(pkt));
+    EXPECT_TRUE(srv.processInform(pkt));
 
     // @todo Implement more reasonable tests before starting
     // work on processSomething() method.
-
-    delete srv;
 }
 
 TEST_F(Dhcpv4SrvTest, serverReceivedPacketName) {

File diff suppressed because it is too large
+ 252 - 1581
src/bin/dhcp6/config_parser.cc


+ 10 - 3
src/bin/dhcp6/config_parser.h

@@ -20,6 +20,8 @@
 
 #include <cc/data.h>
 #include <exceptions/exceptions.h>
+#include <dhcpsrv/dhcp_parsers.h>
+
 #include <string>
 
 namespace isc {
@@ -29,9 +31,9 @@ class Dhcpv6Srv;
 
 /// @brief Configures DHCPv6 server
 ///
-/// This function is called every time a new configuration is received. The extra
-/// parameter is a reference to DHCPv6 server component. It is currently not used
-/// and CfgMgr::instance() is accessed instead.
+/// This function is called every time a new configuration is received. The 
+/// extra parameter is a reference to DHCPv6 server component. It is currently 
+/// not used and CfgMgr::instance() is accessed instead.
 ///
 /// This method does not throw. It catches all exceptions and returns them as
 /// reconfiguration statuses. It may return the following response codes:
@@ -47,6 +49,11 @@ class Dhcpv6Srv;
 isc::data::ConstElementPtr
 configureDhcp6Server(Dhcpv6Srv& server, isc::data::ConstElementPtr config_set);
 
+/// @brief Returns the global context
+///
+/// @returns a reference to the global context
+ParserContextPtr& globalContext();
+ 
 }; // end of isc::dhcp namespace
 }; // end of isc namespace
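
For context, the calling convention documented above is exercised by the new
unit tests in this commit roughly as follows (a minimal sketch; the srv_
object and the JSON config string are assumed to come from a test fixture
such as Dhcp6ParserTest):

    // Build an Element tree from a JSON configuration string and hand it
    // to the server's configuration entry point.
    isc::data::ElementPtr json = isc::data::Element::fromJSON(config);
    isc::data::ConstElementPtr status = configureDhcp6Server(srv_, json);

    // The returned status element encodes success or failure; parseAnswer()
    // extracts the numeric code (0 on success, 1 on configuration error in
    // the tests below).
    int rcode = -1;
    isc::data::ConstElementPtr comment = isc::config::parseAnswer(rcode, status);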
 

+ 1 - 1
src/bin/dhcp6/ctrl_dhcp6_srv.h

@@ -95,7 +95,7 @@ protected:
     /// @brief A dummy configuration handler that always returns success.
     ///
     /// This configuration handler does not perform configuration
-    /// parsing and always returns success. A dummy hanlder should
+    /// parsing and always returns success. A dummy handler should
     /// be installed using \ref isc::config::ModuleCCSession ctor
     /// to get the initial configuration. This initial configuration
     /// comprises values for only those elements that were modified

+ 6 - 0
src/bin/dhcp6/dhcp6.spec

@@ -199,6 +199,12 @@
                   "item_default": ""
                 },
 
+                { "item_name": "interface-id",
+                  "item_type": "string",
+                  "item_optional": false,
+                  "item_default": ""
+                },
+
                 { "item_name": "renew-timer",
                   "item_type": "integer",
                   "item_optional": false,

+ 36 - 10
src/bin/dhcp6/dhcp6_srv.cc

@@ -329,7 +329,7 @@ Dhcpv6Srv::generateServerID() {
         // we will grow knobs to selectively turn them on or off. Also,
         // this code is used only *once* during first start on a new machine
         // and then server-id is stored. (or at least it will be once
-        // DUID storage is implemente
+        // DUID storage is implemented)
 
         // I wish there was a this_is_a_real_physical_interface flag...
 
@@ -403,8 +403,13 @@ Dhcpv6Srv::copyDefaultOptions(const Pkt6Ptr& question, Pkt6Ptr& answer) {
     if (clientid) {
         answer->addOption(clientid);
     }
+    /// @todo: Should throw if there is no client-id (except anonymous INF-REQUEST)
+
+    // If this is a relayed message, we need to copy relay information
+    if (!question->relay_info_.empty()) {
+        answer->copyRelayInfo(question);
+    }
 
-    // TODO: Should throw if there is no client-id (except anonymous INF-REQUEST)
 }
 
 void
@@ -523,16 +528,37 @@ Dhcpv6Srv::sanityCheck(const Pkt6Ptr& pkt, RequirementLevel clientid,
 Subnet6Ptr
 Dhcpv6Srv::selectSubnet(const Pkt6Ptr& question) {
 
-    /// @todo: pass interface information only if received direct (non-relayed) message
+    Subnet6Ptr subnet;
 
-    // Try to find a subnet if received packet from a directly connected client
-    Subnet6Ptr subnet = CfgMgr::instance().getSubnet6(question->getIface());
-    if (subnet) {
-        return (subnet);
-    }
+    if (question->relay_info_.empty()) {
+        // This is a direct (non-relayed) message
+
+        // Try to find a subnet if received packet from a directly connected client
+        subnet = CfgMgr::instance().getSubnet6(question->getIface());
+        if (!subnet) {
+            // If no subnet was found, try to find it based on remote address
+            subnet = CfgMgr::instance().getSubnet6(question->getRemoteAddr());
+        }
+    } else {
 
-    // If no subnet was found, try to find it based on remote address
-    subnet = CfgMgr::instance().getSubnet6(question->getRemoteAddr());
+        // This is a relayed message
+        OptionPtr interface_id = question->getAnyRelayOption(D6O_INTERFACE_ID,
+                                                             Pkt6::RELAY_GET_FIRST);
+        if (interface_id) {
+            subnet = CfgMgr::instance().getSubnet6(interface_id);
+        }
+
+        if (!subnet) {
+            // If no interface-id was specified (or not configured on server), let's
+            // try address matching
+            IOAddress link_addr = question->relay_info_.back().linkaddr_;
+
+            // if relay filled in link_addr field, then let's use it
+            if (link_addr != IOAddress("::")) {
+                subnet = CfgMgr::instance().getSubnet6(link_addr);
+            }
+        }
+    }
 
     return (subnet);
 }
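
The relayed branch above is what the new selectSubnetRelayLinkaddr and
selectSubnetRelayInterfaceId unit tests later in this diff exercise; a
minimal sketch of the linkaddr-based path, assuming the NakedDhcpv6Srv test
helper and the Subnet6/IOAddress values used in those tests:

    // Configure a single subnet and build a relayed SOLICIT whose relay
    // linkaddr falls into that subnet; selectSubnet() should match it.
    NakedDhcpv6Srv srv(0);
    Subnet6Ptr subnet(new Subnet6(IOAddress("2001:db8:2::"), 48, 1, 2, 3, 4));
    CfgMgr::instance().deleteSubnets6();
    CfgMgr::instance().addSubnet6(subnet);

    Pkt6Ptr pkt(new Pkt6(DHCPV6_SOLICIT, 1234));
    Pkt6::RelayInfo relay;
    relay.linkaddr_ = IOAddress("2001:db8:2::1234");
    relay.peeraddr_ = IOAddress("fe80::1");
    pkt->relay_info_.push_back(relay);

    Subnet6Ptr selected = srv.selectSubnet(pkt);  // expected to equal subnet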

+ 2 - 2
src/bin/dhcp6/dhcp6_srv.h

@@ -40,7 +40,7 @@ namespace dhcp {
 /// packets, processes them, manages leases assignment and generates
 /// appropriate responses.
 ///
-/// @note Only one instance of this class is instantated as it encompasses
+/// @note Only one instance of this class is instantiated as it encompasses
 ///       the whole operation of the server.  Nothing, however, enforces the
 ///       singleton status of the object.
 class Dhcpv6Srv : public boost::noncopyable {
@@ -69,7 +69,7 @@ public:
     /// @brief Destructor. Used during DHCPv6 service shutdown.
     virtual ~Dhcpv6Srv();
 
-    /// @brief Returns server-intentifier option.
+    /// @brief Returns server-identifier option.
     ///
     /// @return server-id option
     OptionPtr getServerID() { return serverid_; }

File diff suppressed because it is too large
+ 1 - 1
src/bin/dhcp6/tests/Makefile.am


+ 104 - 6
src/bin/dhcp6/tests/config_parser_unittest.cc

@@ -88,7 +88,7 @@ public:
     /// option value. These parameters are: "name", "code", "data" and
     /// "csv-format".
     ///
-    /// @param param_value string holiding option parameter value to be
+    /// @param param_value string holding option parameter value to be
     /// injected into the configuration string.
     /// @param parameter name of the parameter to be configured with
     /// param value.
@@ -277,13 +277,13 @@ public:
                             expected_data_len));
     }
 
-    int rcode_;
-    Dhcpv6Srv srv_;
+    int rcode_; ///< return code (see @ref isc::config::parseAnswer)
+    Dhcpv6Srv srv_; ///< instance of the Dhcpv6Srv used during tests
 
-    ConstElementPtr comment_;
+    ConstElementPtr comment_; ///< comment (see @ref isc::config::parseAnswer)
 
-    string valid_iface_;
-    string bogus_iface_;
+    string valid_iface_; ///< name of a valid network interface (present in system)
+    string bogus_iface_; ///< name of an invalid network interface (not present in system)
 };
 
 // Goal of this test is a verification if a very simple config update
@@ -500,6 +500,104 @@ TEST_F(Dhcp6ParserTest, interfaceGlobal) {
     EXPECT_EQ(1, rcode_);
 }
 
+
+// This test checks if it is possible to define a subnet with an
+// interface-id option defined.
+TEST_F(Dhcp6ParserTest, subnetInterfaceId) {
+
+    const string valid_interface_id = "foobar";
+    const string bogus_interface_id = "blah";
+
+    // There should be at least one interface
+
+    const string config = "{ "
+        "\"preferred-lifetime\": 3000,"
+        "\"rebind-timer\": 2000, "
+        "\"renew-timer\": 1000, "
+        "\"subnet6\": [ { "
+        "    \"pool\": [ \"2001:db8:1::1 - 2001:db8:1::ffff\" ],"
+        "    \"interface-id\": \"" + valid_interface_id + "\","
+        "    \"subnet\": \"2001:db8:1::/64\" } ],"
+        "\"valid-lifetime\": 4000 }";
+
+    ElementPtr json = Element::fromJSON(config);
+
+    ConstElementPtr status;
+    EXPECT_NO_THROW(status = configureDhcp6Server(srv_, json));
+
+    // Returned value should be 0 (configuration success)
+    ASSERT_TRUE(status);
+    comment_ = parseAnswer(rcode_, status);
+    EXPECT_EQ(0, rcode_);
+
+    // Try to get a subnet based on bogus interface-id option
+    OptionBuffer tmp(bogus_interface_id.begin(), bogus_interface_id.end());
+    OptionPtr ifaceid(new Option(Option::V6, D6O_INTERFACE_ID, tmp));
+    Subnet6Ptr subnet = CfgMgr::instance().getSubnet6(ifaceid);
+    EXPECT_FALSE(subnet);
+
+    // Now try to get subnet for valid interface-id value
+    tmp = OptionBuffer(valid_interface_id.begin(), valid_interface_id.end());
+    ifaceid.reset(new Option(Option::V6, D6O_INTERFACE_ID, tmp));
+    subnet = CfgMgr::instance().getSubnet6(ifaceid);
+    ASSERT_TRUE(subnet);
+    EXPECT_TRUE(ifaceid->equal(subnet->getInterfaceId()));
+}
+
+
+// This test checks that it is not allowed to define the interface-id
+// parameter globally.
+TEST_F(Dhcp6ParserTest, interfaceIdGlobal) {
+
+    const string config = "{ \"interface\": [ \"all\" ],"
+        "\"preferred-lifetime\": 3000,"
+        "\"rebind-timer\": 2000, "
+        "\"renew-timer\": 1000, "
+        "\"interface-id\": \"foobar\"," // Not valid. Can be defined in subnet only
+        "\"subnet6\": [ { "
+        "    \"pool\": [ \"2001:db8:1::1 - 2001:db8:1::ffff\" ],"
+        "    \"subnet\": \"2001:db8:1::/64\" } ],"
+        "\"valid-lifetime\": 4000 }";
+
+    ElementPtr json = Element::fromJSON(config);
+
+    ConstElementPtr status;
+    EXPECT_NO_THROW(status = configureDhcp6Server(srv_, json));
+
+    // Returned value should be 1 (parse error)
+    ASSERT_TRUE(status);
+    comment_ = parseAnswer(rcode_, status);
+    EXPECT_EQ(1, rcode_);
+}
+
+// This test checks if it is not possible to define a subnet with an
+// interface (i.e. local subnet) and interface-id (remote subnet) defined.
+TEST_F(Dhcp6ParserTest, subnetInterfaceAndInterfaceId) {
+
+    const string config = "{ \"preferred-lifetime\": 3000,"
+        "\"rebind-timer\": 2000, "
+        "\"renew-timer\": 1000, "
+        "\"subnet6\": [ { "
+        "    \"pool\": [ \"2001:db8:1::1 - 2001:db8:1::ffff\" ],"
+        "    \"interface\": \"" + valid_iface_ + "\","
+        "    \"interface-id\": \"foobar\","
+        "    \"subnet\": \"2001:db8:1::/64\" } ],"
+        "\"valid-lifetime\": 4000 }";
+
+    ElementPtr json = Element::fromJSON(config);
+
+    ConstElementPtr status;
+    EXPECT_NO_THROW(status = configureDhcp6Server(srv_, json));
+
+    // Returned value should be 1 (configuration error)
+    ASSERT_TRUE(status);
+    comment_ = parseAnswer(rcode_, status);
+    EXPECT_EQ(1, rcode_);
+
+}
+
+
+
 // Test verifies that a subnet with pool values that do not belong to that
 // pool are rejected.
 TEST_F(Dhcp6ParserTest, poolOutOfSubnet) {

+ 8 - 10
src/bin/dhcp6/tests/ctrl_dhcp6_srv_unittest.cc

@@ -1,4 +1,4 @@
-// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2012-2013 Internet Systems Consortium, Inc. ("ISC")
 //
 // Permission to use, copy, modify, and/or distribute this software for any
 // purpose with or without fee is hereby granted, provided that the above
@@ -18,6 +18,7 @@
 #include <dhcp6/ctrl_dhcp6_srv.h>
 #include <config/ccsession.h>
 
+#include <boost/scoped_ptr.hpp>
 #include <gtest/gtest.h>
 
 #include <iostream>
@@ -52,12 +53,12 @@ public:
 
 TEST_F(CtrlDhcpv6SrvTest, commands) {
 
-    ControlledDhcpv6Srv* srv = NULL;
-    ASSERT_NO_THROW({
-        srv = new ControlledDhcpv6Srv(DHCP6_SERVER_PORT + 10000);
-    });
+    boost::scoped_ptr<ControlledDhcpv6Srv> srv;
+    ASSERT_NO_THROW(
+        srv.reset(new ControlledDhcpv6Srv(DHCP6_SERVER_PORT + 10000))
+    );
 
-    // use empty parameters list
+    // Use empty parameters list
     ElementPtr params(new isc::data::MapElement());
     int rcode = -1;
 
@@ -78,10 +79,7 @@ TEST_F(CtrlDhcpv6SrvTest, commands) {
     // case 3: send shutdown command with 1 parameter: pid
     result = ControlledDhcpv6Srv::execDhcpv6ServerCommand("shutdown", params);
     comment = parseAnswer(rcode, result);
-    EXPECT_EQ(0, rcode); // expect success
-
-
-    delete srv;
+    EXPECT_EQ(0, rcode); // Expect success
 }
 
 } // end of anonymous namespace

+ 343 - 50
src/bin/dhcp6/tests/dhcp6_srv_unittest.cc

@@ -79,20 +79,12 @@ public:
 
 static const char* DUID_FILE = "server-id-test.txt";
 
-class Dhcpv6SrvTest : public ::testing::Test {
+// Test fixture for any tests requiring blank/empty configuration.
+// Serves as a base class for additional tests.
+class NakedDhcpv6SrvTest : public ::testing::Test {
 public:
-    /// Name of the server-id file (used in server-id tests)
-
-    // these are empty for now, but let's keep them around
-    Dhcpv6SrvTest() : rcode_(-1) {
-        subnet_ = Subnet6Ptr(new Subnet6(IOAddress("2001:db8:1::"), 48, 1000,
-                                         2000, 3000, 4000));
-        pool_ = Pool6Ptr(new Pool6(Pool6::TYPE_IA, IOAddress("2001:db8:1:1::"), 64));
-        subnet_->addPool(pool_);
-
-        CfgMgr::instance().deleteSubnets6();
-        CfgMgr::instance().addSubnet6(subnet_);
 
+    NakedDhcpv6SrvTest() : rcode_(-1) {
         // it's ok if that fails. There should not be such a file anyway
         unlink(DUID_FILE);
     }
@@ -106,6 +98,16 @@ public:
         return (ia);
     }
 
+    /// @brief generates interface-id option, based on text
+    ///
+    /// @param iface_id textual representation of the interface-id content
+    ///
+    /// @return pointer to the option object
+    OptionPtr generateInterfaceId(const string& iface_id) {
+        OptionBuffer tmp(iface_id.begin(), iface_id.end());
+        return OptionPtr(new Option(Option::V6, D6O_INTERFACE_ID, tmp));
+    }
+
     // Generate client-id option
     OptionPtr generateClientId(size_t duid_size = 32) {
 
@@ -142,25 +144,22 @@ public:
         EXPECT_TRUE(expected_clientid->getData() == tmp->getData());
     }
 
-    // Checks that server response (ADVERTISE or REPLY) contains proper IA_NA option
-    // It returns IAADDR option for each chaining with checkIAAddr method.
-    boost::shared_ptr<Option6IAAddr> checkIA_NA(const Pkt6Ptr& rsp, uint32_t expected_iaid,
-                                         uint32_t expected_t1, uint32_t expected_t2) {
-        OptionPtr tmp = rsp->getOption(D6O_IA_NA);
-        // Can't use ASSERT_TRUE() in method that returns something
-        if (!tmp) {
-            ADD_FAILURE() << "IA_NA option not present in response";
-            return (boost::shared_ptr<Option6IAAddr>());
-        }
+    // Checks if server response is a NAK
+    void checkNakResponse(const Pkt6Ptr& rsp, uint8_t expected_message_type,
+                          uint32_t expected_transid,
+                          uint16_t expected_status_code) {
+        // Check if we get response at all
+        checkResponse(rsp, expected_message_type, expected_transid);
 
-        boost::shared_ptr<Option6IA> ia = boost::dynamic_pointer_cast<Option6IA>(tmp);
-        EXPECT_EQ(expected_iaid, ia->getIAID() );
-        EXPECT_EQ(expected_t1, ia->getT1());
-        EXPECT_EQ(expected_t2, ia->getT2());
+        // Check that IA_NA was returned
+        OptionPtr option_ia_na = rsp->getOption(D6O_IA_NA);
+        ASSERT_TRUE(option_ia_na);
 
-        tmp = ia->getOption(D6O_IAADDR);
-        boost::shared_ptr<Option6IAAddr> addr = boost::dynamic_pointer_cast<Option6IAAddr>(tmp);
-        return (addr);
+        // check that the IA_NA carries the expected status code
+        boost::shared_ptr<Option6IA> ia = boost::dynamic_pointer_cast<Option6IA>(option_ia_na);
+        ASSERT_TRUE(ia);
+
+        checkIA_NAStatusCode(ia, expected_status_code);
     }
 
     // Checks that server rejected IA_NA, i.e. that it has no addresses and
@@ -180,7 +179,7 @@ public:
         EXPECT_EQ(0, ia->getT1());
         EXPECT_EQ(0, ia->getT2());
 
-        boost::shared_ptr<OptionCustom> status =
+        OptionCustomPtr status =
             boost::dynamic_pointer_cast<OptionCustom>(ia->getOption(D6O_STATUS_CODE));
 
         // It is ok to not include status success as this is the default behavior
@@ -199,9 +198,8 @@ public:
         }
     }
 
-
     void checkMsgStatusCode(const Pkt6Ptr& msg, uint16_t expected_status) {
-        boost::shared_ptr<OptionCustom> status =
+        OptionCustomPtr status =
             boost::dynamic_pointer_cast<OptionCustom>(msg->getOption(D6O_STATUS_CODE));
 
         // It is ok to not include status success as this is the default behavior
@@ -219,7 +217,71 @@ public:
         }
     }
 
-    // Check that generated IAADDR option contains expected address.
+    // Basic checks for generated response (message type and transaction-id).
+    void checkResponse(const Pkt6Ptr& rsp, uint8_t expected_message_type,
+                       uint32_t expected_transid) {
+        ASSERT_TRUE(rsp);
+        EXPECT_EQ(expected_message_type, rsp->getType());
+        EXPECT_EQ(expected_transid, rsp->getTransid());
+    }
+
+    virtual ~NakedDhcpv6SrvTest() {
+        // Let's clean up if there is such a file.
+        unlink(DUID_FILE);
+    };
+
+    // A DUID used in most tests (typically as client-id)
+    DuidPtr duid_;
+
+    int rcode_;
+    ConstElementPtr comment_;
+};
+
+// Provides support for tests against a preconfigured subnet6.
+// Extends NakedDhcpv6SrvTest.
+class Dhcpv6SrvTest : public NakedDhcpv6SrvTest {
+public:
+    /// Name of the server-id file (used in server-id tests)
+
+    // these are empty for now, but let's keep them around
+    Dhcpv6SrvTest() {
+        subnet_ = Subnet6Ptr(new Subnet6(IOAddress("2001:db8:1::"), 48, 1000,
+                                         2000, 3000, 4000));
+        pool_ = Pool6Ptr(new Pool6(Pool6::TYPE_IA, IOAddress("2001:db8:1:1::"), 64));
+        subnet_->addPool(pool_);
+
+        CfgMgr::instance().deleteSubnets6();
+        CfgMgr::instance().addSubnet6(subnet_);
+    }
+
+    // Checks that server response (ADVERTISE or REPLY) contains proper IA_NA option
+    // It returns IAADDR option for each chaining with checkIAAddr method.
+    boost::shared_ptr<Option6IAAddr> checkIA_NA(const Pkt6Ptr& rsp, uint32_t expected_iaid,
+                                            uint32_t expected_t1, uint32_t expected_t2) {
+        OptionPtr tmp = rsp->getOption(D6O_IA_NA);
+        // Can't use ASSERT_TRUE() in method that returns something
+        if (!tmp) {
+            ADD_FAILURE() << "IA_NA option not present in response";
+            return (boost::shared_ptr<Option6IAAddr>());
+        }
+
+        boost::shared_ptr<Option6IA> ia = boost::dynamic_pointer_cast<Option6IA>(tmp);
+        if (!ia) {
+            ADD_FAILURE() << "IA_NA cannot convert option ptr to Option6";
+            return (boost::shared_ptr<Option6IAAddr>());
+        }
+
+        EXPECT_EQ(expected_iaid, ia->getIAID());
+        EXPECT_EQ(expected_t1, ia->getT1());
+        EXPECT_EQ(expected_t2, ia->getT2());
+
+        tmp = ia->getOption(D6O_IAADDR);
+        boost::shared_ptr<Option6IAAddr> addr = boost::dynamic_pointer_cast<Option6IAAddr>(tmp);
+        return (addr);
+    }
+
+    // Check that generated IAADDR option contains expected address
+    // and lifetime values match the configured subnet
     void checkIAAddr(const boost::shared_ptr<Option6IAAddr>& addr,
                      const IOAddress& expected_addr,
                      uint32_t /* expected_preferred */,
@@ -235,15 +297,8 @@ public:
         EXPECT_EQ(addr->getValid(), subnet_->getValid());
     }
 
-    // Basic checks for generated response (message type and transaction-id).
-    void checkResponse(const Pkt6Ptr& rsp, uint8_t expected_message_type,
-                       uint32_t expected_transid) {
-        ASSERT_TRUE(rsp);
-        EXPECT_EQ(expected_message_type, rsp->getType());
-        EXPECT_EQ(expected_transid, rsp->getTransid());
-    }
-
     // Checks if the lease sent to client is present in the database
+    // and is valid when checked against the configured subnet
     Lease6Ptr checkLease(const DuidPtr& duid, const OptionPtr& ia_na,
                          boost::shared_ptr<Option6IAAddr> addr) {
         boost::shared_ptr<Option6IA> ia = boost::dynamic_pointer_cast<Option6IA>(ia_na);
@@ -265,9 +320,6 @@ public:
 
     ~Dhcpv6SrvTest() {
         CfgMgr::instance().deleteSubnets6();
-
-        // Let's clean up if there is such a file.
-        unlink(DUID_FILE);
     };
 
     // A subnet used in most tests
@@ -275,13 +327,132 @@ public:
 
     // A pool used in most tests
     Pool6Ptr pool_;
+};
 
-    // A DUID used in most tests (typically as client-id)
-    DuidPtr duid_;
+// This test verifies that incoming SOLICIT can be handled properly when
+// there are no subnets configured.
+//
+// This test sends a SOLICIT and the expected response
+// is an ADVERTISE with STATUS_NoAddrsAvail and no address provided in the
+// response
+TEST_F(NakedDhcpv6SrvTest, SolicitNoSubnet) {
+    NakedDhcpv6Srv srv(0);
+
+    Pkt6Ptr sol = Pkt6Ptr(new Pkt6(DHCPV6_SOLICIT, 1234));
+    sol->setRemoteAddr(IOAddress("fe80::abcd"));
+    sol->addOption(generateIA(234, 1500, 3000));
+    OptionPtr clientid = generateClientId();
+    sol->addOption(clientid);
+
+    // Pass it to the server and get an advertise
+    Pkt6Ptr reply = srv.processSolicit(sol);
+
+    // check that we get the right NAK
+    checkNakResponse (reply, DHCPV6_ADVERTISE, 1234, STATUS_NoAddrsAvail);
+}
+
+// This test verifies that incoming REQUEST can be handled properly when
+// there are no subnets configured.
+//
+// This test sends a REQUEST and the expected response
+// is a REPLY with STATUS_NoAddrsAvail and no address provided in the
+// response
+TEST_F(NakedDhcpv6SrvTest, RequestNoSubnet) {
+    NakedDhcpv6Srv srv(0);
+
+    // Let's create a REQUEST
+    Pkt6Ptr req = Pkt6Ptr(new Pkt6(DHCPV6_REQUEST, 1234));
+    req->setRemoteAddr(IOAddress("fe80::abcd"));
+    boost::shared_ptr<Option6IA> ia = generateIA(234, 1500, 3000);
+
+    // with a hint
+    IOAddress hint("2001:db8:1:1::dead:beef");
+    OptionPtr hint_opt(new Option6IAAddr(D6O_IAADDR, hint, 300, 500));
+    ia->addOption(hint_opt);
+    req->addOption(ia);
+    OptionPtr clientid = generateClientId();
+    req->addOption(clientid);
+
+    // server-id is mandatory in REQUEST
+    req->addOption(srv.getServerID());
+
+    // Pass it to the server and hope for a REPLY
+    Pkt6Ptr reply = srv.processRequest(req);
+
+    // check that we get the right NAK
+    checkNakResponse (reply, DHCPV6_REPLY, 1234, STATUS_NoAddrsAvail);
+}
+
+// This test verifies that incoming RENEW can be handled properly, even when
+// no subnets are configured.
+//
+// This test sends a RENEW and the expected response
+// is a REPLY with STATUS_NoBinding and no address provided in the
+// response
+TEST_F(NakedDhcpv6SrvTest, RenewNoSubnet) {
+    NakedDhcpv6Srv srv(0);
+
+    const IOAddress addr("2001:db8:1:1::cafe:babe");
+    const uint32_t iaid = 234;
+
+    // Generate client-id (this also generates duid_)
+    OptionPtr clientid = generateClientId();
+
+    // Let's create a RENEW
+    Pkt6Ptr req = Pkt6Ptr(new Pkt6(DHCPV6_RENEW, 1234));
+    req->setRemoteAddr(IOAddress("fe80::abcd"));
+    boost::shared_ptr<Option6IA> ia = generateIA(iaid, 1500, 3000);
+
+    OptionPtr renewed_addr_opt(new Option6IAAddr(D6O_IAADDR, addr, 300, 500));
+    ia->addOption(renewed_addr_opt);
+    req->addOption(ia);
+    req->addOption(clientid);
+
+    // Server-id is mandatory in RENEW
+    req->addOption(srv.getServerID());
+
+    // Pass it to the server and hope for a REPLY
+    Pkt6Ptr reply = srv.processRenew(req);
+
+    // check that we get the right NAK
+    checkNakResponse (reply, DHCPV6_REPLY, 1234, STATUS_NoBinding);
+}
+
+// This test verifies that incoming RELEASE can be handled properly, even when
+// no subnets are configured.
+//
+// This test sends a RELEASE and the expected response
+// is a REPLY with STATUS_NoBinding and no address provided in the
+// response
+TEST_F(NakedDhcpv6SrvTest, ReleaseNoSubnet) {
+    NakedDhcpv6Srv srv(0);
+
+    const IOAddress addr("2001:db8:1:1::cafe:babe");
+    const uint32_t iaid = 234;
+
+    // Generate client-id (this also generates duid_)
+    OptionPtr clientid = generateClientId();
+
+    // Let's create a RELEASE
+    Pkt6Ptr req = Pkt6Ptr(new Pkt6(DHCPV6_RELEASE, 1234));
+    req->setRemoteAddr(IOAddress("fe80::abcd"));
+    boost::shared_ptr<Option6IA> ia = generateIA(iaid, 1500, 3000);
+
+    OptionPtr released_addr_opt(new Option6IAAddr(D6O_IAADDR, addr, 300, 500));
+    ia->addOption(released_addr_opt);
+    req->addOption(ia);
+    req->addOption(clientid);
+
+    // Server-id is mandatory in RELEASE
+    req->addOption(srv.getServerID());
+
+    // Pass it to the server and hope for a REPLY
+    Pkt6Ptr reply = srv.processRelease(req);
+
+    // check that we get the right NAK
+    checkNakResponse (reply, DHCPV6_REPLY, 1234, STATUS_NoBinding);
+}
 
-    int rcode_;
-    ConstElementPtr comment_;
-};
 
 // Test verifies that the Dhcpv6_srv class can be instantiated. It checks a mode
 // without open sockets and with sockets opened on a high port (to not require
@@ -425,7 +596,7 @@ TEST_F(Dhcpv6SrvTest, advertiseOptions) {
     sol->addOption(clientid);
 
     // Pass it to the server and get an advertise
-    boost::shared_ptr<Pkt6> adv = srv.processSolicit(sol);
+    Pkt6Ptr adv = srv.processSolicit(sol);
 
     // check if we get response at all
     ASSERT_TRUE(adv);
@@ -517,6 +688,7 @@ TEST_F(Dhcpv6SrvTest, SolicitBasic) {
     // check that IA_NA was returned and that there's an address included
     boost::shared_ptr<Option6IAAddr> addr = checkIA_NA(reply, 234, subnet_->getT1(),
                                                 subnet_->getT2());
+    ASSERT_TRUE(addr);
 
     // Check that the assigned address is indeed from the configured pool
     checkIAAddr(addr, addr->getAddress(), subnet_->getPreferred(), subnet_->getValid());
@@ -570,6 +742,7 @@ TEST_F(Dhcpv6SrvTest, SolicitHint) {
     // check that IA_NA was returned and that there's an address included
     boost::shared_ptr<Option6IAAddr> addr = checkIA_NA(reply, 234, subnet_->getT1(),
                                                 subnet_->getT2());
+    ASSERT_TRUE(addr);
 
     // check that we've got the address we requested
     checkIAAddr(addr, hint, subnet_->getPreferred(), subnet_->getValid());
@@ -618,6 +791,7 @@ TEST_F(Dhcpv6SrvTest, SolicitInvalidHint) {
     // check that IA_NA was returned and that there's an address included
     boost::shared_ptr<Option6IAAddr> addr = checkIA_NA(reply, 234, subnet_->getT1(),
                                                 subnet_->getT2());
+    ASSERT_TRUE(addr);
 
     // Check that the assigned address is indeed from the configured pool
     checkIAAddr(addr, addr->getAddress(), subnet_->getPreferred(), subnet_->getValid());
@@ -679,6 +853,9 @@ TEST_F(Dhcpv6SrvTest, ManySolicits) {
                                                 subnet_->getT2());
     boost::shared_ptr<Option6IAAddr> addr3 = checkIA_NA(reply3, 3, subnet_->getT1(),
                                                 subnet_->getT2());
+    ASSERT_TRUE(addr1);
+    ASSERT_TRUE(addr2);
+    ASSERT_TRUE(addr3);
 
     // Check that the assigned address is indeed from the configured pool
     checkIAAddr(addr1, addr1->getAddress(), subnet_->getPreferred(), subnet_->getValid());
@@ -749,6 +926,7 @@ TEST_F(Dhcpv6SrvTest, RequestBasic) {
     // check that IA_NA was returned and that there's an address included
     boost::shared_ptr<Option6IAAddr> addr = checkIA_NA(reply, 234, subnet_->getT1(),
                                                 subnet_->getT2());
+    ASSERT_TRUE(addr);
 
     // check that we've got the address we requested
     checkIAAddr(addr, hint, subnet_->getPreferred(), subnet_->getValid());
@@ -773,6 +951,8 @@ TEST_F(Dhcpv6SrvTest, RequestBasic) {
 TEST_F(Dhcpv6SrvTest, ManyRequests) {
     NakedDhcpv6Srv srv(0);
 
+    ASSERT_TRUE(subnet_);
+
     Pkt6Ptr req1 = Pkt6Ptr(new Pkt6(DHCPV6_REQUEST, 1234));
     Pkt6Ptr req2 = Pkt6Ptr(new Pkt6(DHCPV6_REQUEST, 2345));
     Pkt6Ptr req3 = Pkt6Ptr(new Pkt6(DHCPV6_REQUEST, 3456));
@@ -817,6 +997,10 @@ TEST_F(Dhcpv6SrvTest, ManyRequests) {
     boost::shared_ptr<Option6IAAddr> addr3 = checkIA_NA(reply3, 3, subnet_->getT1(),
                                                 subnet_->getT2());
 
+    ASSERT_TRUE(addr1);
+    ASSERT_TRUE(addr2);
+    ASSERT_TRUE(addr3);
+
     // Check that the assigned address is indeed from the configured pool
     checkIAAddr(addr1, addr1->getAddress(), subnet_->getPreferred(), subnet_->getValid());
     checkIAAddr(addr2, addr2->getAddress(), subnet_->getPreferred(), subnet_->getValid());
@@ -905,6 +1089,8 @@ TEST_F(Dhcpv6SrvTest, RenewBasic) {
     boost::shared_ptr<Option6IAAddr> addr_opt = checkIA_NA(reply, 234, subnet_->getT1(),
                                                            subnet_->getT2());
 
+    ASSERT_TRUE(addr_opt);
+
     // Check that we've got the address we requested
     checkIAAddr(addr_opt, addr, subnet_->getPreferred(), subnet_->getValid());
 
@@ -1431,6 +1617,113 @@ TEST_F(Dhcpv6SrvTest, selectSubnetIface) {
     EXPECT_EQ(subnet3, srv.selectSubnet(pkt));
 }
 
+// This test verifies that selectSubnet() selects the proper subnet for a given
+// linkaddr in a RELAY-FORW message
+TEST_F(Dhcpv6SrvTest, selectSubnetRelayLinkaddr) {
+    NakedDhcpv6Srv srv(0);
+
+    Subnet6Ptr subnet1(new Subnet6(IOAddress("2001:db8:1::"), 48, 1, 2, 3, 4));
+    Subnet6Ptr subnet2(new Subnet6(IOAddress("2001:db8:2::"), 48, 1, 2, 3, 4));
+    Subnet6Ptr subnet3(new Subnet6(IOAddress("2001:db8:3::"), 48, 1, 2, 3, 4));
+
+    Pkt6::RelayInfo relay;
+    relay.linkaddr_ = IOAddress("2001:db8:2::1234");
+    relay.peeraddr_ = IOAddress("fe80::1");
+
+    // CASE 1: We have only one subnet defined and we received relayed traffic.
+    // The only available subnet should NOT be selected.
+    CfgMgr::instance().deleteSubnets6();
+    CfgMgr::instance().addSubnet6(subnet1); // just a single subnet
+
+    Pkt6Ptr pkt = Pkt6Ptr(new Pkt6(DHCPV6_SOLICIT, 1234));
+    pkt->relay_info_.push_back(relay);
+
+    Subnet6Ptr selected = srv.selectSubnet(pkt);
+    EXPECT_FALSE(selected);
+
+    // CASE 2: We have three subnets defined and we received relayed traffic
+    // whose linkaddr belongs to subnet2. Subnet2 should be selected.
+    CfgMgr::instance().deleteSubnets6();
+    CfgMgr::instance().addSubnet6(subnet1);
+    CfgMgr::instance().addSubnet6(subnet2);
+    CfgMgr::instance().addSubnet6(subnet3);
+    selected = srv.selectSubnet(pkt);
+    EXPECT_EQ(selected, subnet2);
+
+    // CASE 3: We have three subnets defined and we received relayed traffic
+    // that came out of subnet 2. We should then select subnet2.
+    CfgMgr::instance().deleteSubnets6();
+    CfgMgr::instance().addSubnet6(subnet1);
+    CfgMgr::instance().addSubnet6(subnet2);
+    CfgMgr::instance().addSubnet6(subnet3);
+
+    // Source of the packet should have no meaning. Selection is based
+    // on linkaddr field in the relay
+    pkt->setRemoteAddr(IOAddress("2001:db8:1::baca"));
+    selected = srv.selectSubnet(pkt);
+    EXPECT_EQ(selected, subnet2);
+
+    // CASE 4: We have three subnets defined and we received relayed traffic
+    // that came out of an undefined subnet. We should select nothing.
+    CfgMgr::instance().deleteSubnets6();
+    CfgMgr::instance().addSubnet6(subnet1);
+    CfgMgr::instance().addSubnet6(subnet2);
+    CfgMgr::instance().addSubnet6(subnet3);
+    pkt->relay_info_.clear();
+    relay.linkaddr_ = IOAddress("2001:db8:4::1234");
+    pkt->relay_info_.push_back(relay);
+    selected = srv.selectSubnet(pkt);
+    EXPECT_FALSE(selected);
+
+}
+
+// This test verifies that selectSubnet() selects the proper subnet for a given
+// interface-id option
+TEST_F(Dhcpv6SrvTest, selectSubnetRelayInterfaceId) {
+    NakedDhcpv6Srv srv(0);
+
+    Subnet6Ptr subnet1(new Subnet6(IOAddress("2001:db8:1::"), 48, 1, 2, 3, 4));
+    Subnet6Ptr subnet2(new Subnet6(IOAddress("2001:db8:2::"), 48, 1, 2, 3, 4));
+    Subnet6Ptr subnet3(new Subnet6(IOAddress("2001:db8:3::"), 48, 1, 2, 3, 4));
+
+    subnet1->setInterfaceId(generateInterfaceId("relay1"));
+    subnet2->setInterfaceId(generateInterfaceId("relay2"));
+
+    // CASE 1: We have only one subnet defined and it is for interface-id "relay1"
+    // Packet came with interface-id "relay2". We should not select subnet1
+    CfgMgr::instance().deleteSubnets6();
+    CfgMgr::instance().addSubnet6(subnet1); // just a single subnet
+
+    Pkt6Ptr pkt = Pkt6Ptr(new Pkt6(DHCPV6_SOLICIT, 1234));
+    Pkt6::RelayInfo relay;
+    relay.linkaddr_ = IOAddress("2001:db8:2::1234");
+    relay.peeraddr_ = IOAddress("fe80::1");
+    OptionPtr opt = generateInterfaceId("relay2");
+    relay.options_.insert(make_pair(opt->getType(), opt));
+    pkt->relay_info_.push_back(relay);
+
+    // There is only one subnet configured and we are outside of that subnet
+    Subnet6Ptr selected = srv.selectSubnet(pkt);
+    EXPECT_FALSE(selected);
+
+    // CASE 2: We have only one subnet defined and it is for interface-id "relay2"
+    // Packet came with interface-id "relay2". We should select it
+    CfgMgr::instance().deleteSubnets6();
+    CfgMgr::instance().addSubnet6(subnet2); // just a single subnet
+    selected = srv.selectSubnet(pkt);
+    EXPECT_EQ(selected, subnet2);
+
+    // CASE 3: We have 3 subnets defined: one remote for interface-id "relay1",
+    // one remote for interface-id "relay2" and a third local one. The
+    // packet comes with interface-id "relay2". We should select subnet2.
+    CfgMgr::instance().deleteSubnets6();
+    CfgMgr::instance().addSubnet6(subnet1);
+    CfgMgr::instance().addSubnet6(subnet2);
+    CfgMgr::instance().addSubnet6(subnet3);
+
+    EXPECT_EQ(subnet2, srv.selectSubnet(pkt));
+}
+
 // This test verifies if the server-id disk operations (read, write) are
 // working properly.
 TEST_F(Dhcpv6SrvTest, ServerID) {

+ 1 - 1
src/bin/loadzone/run_loadzone.sh.in

@@ -25,7 +25,7 @@ export PYTHONPATH
 # required by loadable python modules.
 SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
 if test $SET_ENV_LIBRARY_PATH = yes; then
-	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/threads/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
 	export @ENV_LIBRARY_PATH@
 fi
 

File diff suppressed because it is too large
+ 1 - 1
src/bin/loadzone/tests/Makefile.am


File diff suppressed because it is too large
+ 1 - 1
src/bin/loadzone/tests/correct/Makefile.am


+ 1 - 1
src/bin/loadzone/tests/correct/correct_test.sh.in

@@ -53,7 +53,7 @@ echo "I:test master file BIND 8 compatibility TTL and \$TTL semantics"
 echo "I:test master file RFC1035 TTL and \$TTL semantics"
 echo "I:test master file BIND8 compatibility and mixed \$INCLUDE with \$TTL semantics"
 echo "I:test master file RFC1035 TTL and mixed \$INCLUDE with \$TTL semantics"
-echo "I:test master file BIND9 extenstion of TTL"
+echo "I:test master file BIND9 extension of TTL"
 echo "I:test master file RFC1035 missing CLASS, TTL, NAME semantics"
 echo "I:test master file comments"
 

+ 1 - 1
src/bin/loadzone/tests/correct/ttlext.db

@@ -11,7 +11,7 @@ ns			A	10.53.0.1
 a			TXT	"soa minttl 3"
 b		2S	TXT	"explicit ttl 2"
 c			TXT	"soa minttl 3"
-$TTL 10M  ; bind9 extention ttl
+$TTL 10M  ; bind9 extension ttl
 d			TXT	"default ttl 600"
 e		4	TXT	"explicit ttl 4"
 f			TXT	"default ttl 600"

+ 1 - 0
src/bin/msgq/Makefile.am

@@ -10,6 +10,7 @@ b10_msgq_DATA = msgq.spec
 CLEANFILES = b10-msgq msgq.pyc
 CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/msgq_messages.py
 CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/msgq_messages.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/msgq_messages.pyo
 
 man_MANS = b10-msgq.8
 DISTCLEANFILES = $(man_MANS)

+ 16 - 15
src/bin/msgq/msgq.py.in

@@ -143,7 +143,7 @@ class SubscriptionManager:
         this group, instance pair.  This includes wildcard subscriptions."""
         target = (group, instance)
         partone = self.find_sub(group, instance)
-        parttwo = self.find_sub(group, "*")
+        parttwo = self.find_sub(group, CC_INSTANCE_WILDCARD)
         return list(set(partone + parttwo))
 
 class MsgQ:
@@ -429,19 +429,19 @@ class MsgQ:
         """Process a single command.  This will split out into one of the
            other functions."""
         logger.debug(TRACE_DETAIL, MSGQ_RECV_HDR, routing)
-        cmd = routing["type"]
-        if cmd == 'send':
+        cmd = routing[CC_HEADER_TYPE]
+        if cmd == CC_COMMAND_SEND:
             self.process_command_send(sock, routing, data)
-        elif cmd == 'subscribe':
+        elif cmd == CC_COMMAND_SUBSCRIBE:
             self.process_command_subscribe(sock, routing, data)
-        elif cmd == 'unsubscribe':
+        elif cmd == CC_COMMAND_UNSUBSCRIBE:
             self.process_command_unsubscribe(sock, routing, data)
-        elif cmd == 'getlname':
+        elif cmd == CC_COMMAND_GET_LNAME:
             self.process_command_getlname(sock, routing, data)
-        elif cmd == 'ping':
+        elif cmd == CC_COMMAND_PING:
             # Command for testing purposes
             self.process_command_ping(sock, routing, data)
-        elif cmd == 'stop':
+        elif cmd == CC_COMMAND_STOP:
             self.stop()
         else:
             logger.error(MSGQ_INVALID_CMD, cmd)
@@ -570,11 +570,12 @@ class MsgQ:
         return "%x_%x@%s" % (time.time(), self.connection_counter, self.hostname)
 
     def process_command_ping(self, sock, routing, data):
-        self.sendmsg(sock, { "type" : "pong" }, data)
+        self.sendmsg(sock, { CC_HEADER_TYPE : CC_COMMAND_PONG }, data)
 
     def process_command_getlname(self, sock, routing, data):
         lname = [ k for k, v in self.lnames.items() if v == sock ][0]
-        self.sendmsg(sock, { "type" : "getlname" }, { "lname" : lname })
+        self.sendmsg(sock, { CC_HEADER_TYPE : CC_COMMAND_GET_LNAME },
+                     { CC_PAYLOAD_LNAME : lname })
 
     def process_command_send(self, sock, routing, data):
         group = routing[CC_HEADER_GROUP]
@@ -638,15 +639,15 @@ class MsgQ:
             self.send_prepared_msg(sock, errmsg)
 
     def process_command_subscribe(self, sock, routing, data):
-        group = routing["group"]
-        instance = routing["instance"]
+        group = routing[CC_HEADER_GROUP]
+        instance = routing[CC_HEADER_INSTANCE]
         if group == None or instance == None:
             return  # ignore invalid packets entirely
         self.subs.subscribe(group, instance, sock)
 
     def process_command_unsubscribe(self, sock, routing, data):
-        group = routing["group"]
-        instance = routing["instance"]
+        group = routing[CC_HEADER_GROUP]
+        instance = routing[CC_HEADER_INSTANCE]
         if group == None or instance == None:
             return  # ignore invalid packets entirely
         self.subs.unsubscribe(group, instance, sock)
@@ -789,7 +790,7 @@ class MsgQ:
             if not self.running:
                 return
 
-            # TODO: Any config handlig goes here.
+            # TODO: Any config handling goes here.
 
             return isc.config.create_answer(0)
 

+ 1 - 1
src/bin/msgq/run_msgq.sh.in

@@ -20,7 +20,7 @@ export PYTHON_EXEC
 
 MYPATH_PATH=@abs_top_builddir@/src/bin/msgq
 
-PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python/isc/cc:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/log/.libs
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python/isc/cc:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs
 export PYTHONPATH
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries

File diff suppressed because it is too large
+ 4 - 4
src/bin/msgq/tests/Makefile.am


+ 278 - 0
src/bin/msgq/tests/msgq_run_test.py

@@ -0,0 +1,278 @@
+# Copyright (C) 2013  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+In this test file, we actually start msgq as a process and test it
+as a whole. It may be considered a system test instead of unit test,
+but apart from the terminology, we don't care much. We need to test
+the message queue works as expected, together with the libraries.
+
+In each test, we first start a timeout (because we do some waits
+for messages and if they didn't come, the test could block indefinitely).
+The timeout is long, because it only matters when the test fails.
+
+We then start the msgq and wait for the socket file to appear
+(that should indicate it is ready to receive connections). Then the
+actual test starts. After the test, we kill it and remove the socket file.
+
+We also register signal handlers for many signals. Even in the case
+the test is interrupted or crashes, we should ensure the message queue
+itself is terminated.
+"""
+
+import unittest
+import os
+import signal
+import sys
+import subprocess
+import time
+
+import isc.log
+import isc.cc.session
+from isc.cc.proto_defs import *
+
+# Due to problems with too long path on build bots, we place the socket
+# into the top-level build directory. That is ugly, but works.
+SOCKET_PATH = os.path.abspath(os.environ['B10_FROM_BUILD'] + '/msgq.sock')
+MSGQ_PATH = os.environ['B10_FROM_BUILD'] + '/src/bin/msgq/run_msgq.sh'
+TIMEOUT = 15 # Some long time (seconds), for single test.
+
+class MsgqRunTest(unittest.TestCase):
+    def setUp(self):
+        """
+        As described above - check the socket file does not exist.
+        Then register signals and timeouts. Finally, launch msgq
+        and wait for it to start.
+        """
+        self.__msgq = None
+        self.__opened_connections = []
+        # A precondition check
+        self.assertFalse(os.path.exists(SOCKET_PATH))
+        signal.alarm(TIMEOUT)
+        self.__orig_signals = {}
+        # Register handlers for many signals. Most of them probably
+        # can't happen in python, but we register them anyway just to be
+        # safe.
+        for sig in [signal.SIGHUP, signal.SIGINT, signal.SIGQUIT,
+            signal.SIGILL, signal.SIGTRAP, signal.SIGABRT, signal.SIGBUS,
+            signal.SIGFPE, signal.SIGALRM, signal.SIGTERM]:
+            self.__orig_signals[sig] = signal.signal(sig, self.__signal)
+        # Start msgq
+        self.__msgq = subprocess.Popen([MSGQ_PATH, '-s', SOCKET_PATH],
+                                       close_fds=True)
+        # Some testing data
+        self.__no_recpt = {"result": [-1, "No such recipient"]}
+        # Wait for it to become ready (up to the alarm-set timeout)
+        connection = None
+        while not connection:
+            try:
+                # If the msgq is ready, this'll succeed. If not, it'll throw
+                # session error.
+                connection = isc.cc.session.Session(SOCKET_PATH)
+            except isc.cc.session.SessionError:
+                time.sleep(0.1) # Retry after a short time
+        # We have the connection now, that means it works. Close this
+        # connection, we won't use it. Each test gets enough new connections
+        # of its own.
+        connection.close()
+
+    def __message(self, data):
+        """
+        Provide some testing message. The data will be included in it, so
+        several different messages can be created.
+        """
+        return {"Message": "Text", "Data": data}
+
+    def tearDown(self):
+        """
+        Perform cleanup after the test.
+        """
+        self.__cleanup()
+
+    def __signal(self, signal, frame):
+        """
+        Called from a signal handler. We perform some cleanup, output
+        a complaint and terminate with an error.
+        """
+        self.__cleanup()
+        sys.stderr.write("Test terminating from signal " + str(signal) +
+                         " in " + str(frame) + "\n")
+        sys.exit(1)
+
+    def __cleanup(self):
+        """
+        Kill msgq (if running) and restore original signal handlers.
+        """
+        # Remove the socket (as we kill, msgq might not clean up)
+        for conn in self.__opened_connections:
+            conn.close()
+        self.__opened_connections = []
+        if self.__msgq:
+            self.__msgq.kill()
+            self.__msgq = None
+        if os.path.exists(SOCKET_PATH):
+            os.unlink(SOCKET_PATH)
+        for sig in self.__orig_signals:
+            signal.signal(sig, self.__orig_signals[sig])
+        # Cancel timeout (so someone else is not hit by it)
+        signal.alarm(0)
+
+    def __get_connection(self):
+        """
+        Create a connection to the daemon and make sure it is properly closed
+        at the end of the test.
+        """
+        connection = isc.cc.session.Session(SOCKET_PATH)
+        self.__opened_connections.append(connection)
+        return connection
+
+    def test_send_direct(self):
+        """
+        Connect twice to msgq, send a message from one to another using direct
+        l-name and see it comes.
+        """
+        # Create the connections
+        conn1 = self.__get_connection()
+        conn2 = self.__get_connection()
+        # Send the message
+        lname1 = conn1.lname
+        conn2.group_sendmsg(self.__message(1), "*", to=lname1)
+        # Receive the message and see it contains correct data
+        (msg, env) = conn1.group_recvmsg(nonblock=False)
+        self.assertEqual(self.__message(1), msg)
+        # We don't check there are no extra headers, just that none are missing
+        # or wrong.
+        self.assertEqual(lname1, env[CC_HEADER_TO])
+        self.assertEqual(conn2.lname, env[CC_HEADER_FROM])
+        self.assertEqual("*", env[CC_HEADER_GROUP])
+        self.assertEqual(CC_INSTANCE_WILDCARD, env[CC_HEADER_INSTANCE])
+        self.assertEqual(CC_COMMAND_SEND, env[CC_HEADER_TYPE])
+        self.assertFalse(env[CC_HEADER_WANT_ANSWER])
+
+    def __barrier(self, connections):
+        """
+        Make sure all previous commands on all supplied connections are
+        processed, by sending a ping and waiting for an answer.
+        """
+        for c in connections:
+            c.sendmsg({"type": "ping"})
+        for c in connections:
+            pong = c.recvmsg(nonblock=False)
+            self.assertEqual(({"type": "pong"}, None), pong)
+
+    def test_send_group(self):
+        """
+        Create several connections. First, try to send a message to an (empty)
+        group and see an error is bounced back. Then subscribe the others
+        to the group and send it again. Send to a different group and see it
+        is bounced back. Unsubscribe and see it is bounced again.
+
+        Then the other connections answer (after unsubscribing; strange, but
+        legal). See that all the answers arrive.
+
+        Finally, check there are no more waiting messages.
+        """
+        conn_a = self.__get_connection()
+        conn_b = []
+        for i in range(0, 10):
+            conn_b.append(self.__get_connection())
+        # Send a message to empty group and get an error answer
+        seq = conn_a.group_sendmsg(self.__message(1), "group",
+                                   want_answer=True)
+        (msg, env) = conn_a.group_recvmsg(nonblock=False, seq=seq)
+        self.assertEqual(self.__no_recpt, msg)
+        self.assertEqual(conn_a.lname, env[CC_HEADER_TO])
+        # Subscribe the other connections
+        for c in conn_b:
+            c.group_subscribe("group")
+        # The subscribe doesn't wait for answer, so make sure it is
+        # all processed before continuing.
+        self.__barrier(conn_b)
+        # Send a message to the group (this time not empty)
+        seq = conn_a.group_sendmsg(self.__message(2), "group",
+                                   want_answer=True)
+        envs = []
+        for c in conn_b:
+            (msg, env) = c.group_recvmsg(nonblock=False)
+            self.assertEqual(self.__message(2), msg)
+            self.assertEqual(conn_a.lname, env[CC_HEADER_FROM])
+            # The daemon does not mangle the headers. Is it OK?
+            self.assertEqual(CC_TO_WILDCARD, env[CC_HEADER_TO])
+            self.assertEqual("group", env[CC_HEADER_GROUP])
+            self.assertEqual(CC_INSTANCE_WILDCARD, env[CC_HEADER_INSTANCE])
+            self.assertEqual(CC_COMMAND_SEND, env[CC_HEADER_TYPE])
+            self.assertTrue(env[CC_HEADER_WANT_ANSWER])
+            envs.append(env)
+        # Send to non-existing group
+        seq_ne = conn_a.group_sendmsg(self.__message(3), "no-group",
+                                      want_answer=True)
+        (msg, env) = conn_a.group_recvmsg(nonblock=False, seq=seq_ne)
+        self.assertEqual(self.__no_recpt, msg)
+        self.assertEqual(conn_a.lname, env[CC_HEADER_TO])
+        # Unsubscribe the connections
+        for c in conn_b:
+            c.group_unsubscribe("group")
+        # Synchronize the unsubscriptions
+        self.__barrier(conn_b)
+        seq_ne = conn_a.group_sendmsg(self.__message(4), "group",
+                                      want_answer=True)
+        (msg, env) = conn_a.group_recvmsg(nonblock=False, seq=seq_ne)
+        self.assertEqual(self.__no_recpt, msg)
+        self.assertEqual(conn_a.lname, env[CC_HEADER_TO])
+        # Send answers for the original message that was delivered
+        lnames = set()
+        for (c, env) in zip(conn_b, envs):
+            c.group_reply(env, self.__message("Reply"))
+            lnames.add(c.lname)
+        # Check that all the answers come
+        while lnames:
+            # While there are still connections we didn't get the answer from
+            # (the order is not guaranteed, therefore the juggling with set)
+            (msg, env) = conn_a.group_recvmsg(nonblock=False, seq=seq)
+            self.assertEqual(self.__message("Reply"), msg)
+            lname = env[CC_HEADER_FROM]
+            self.assertTrue(lname in lnames)
+            lnames.remove(lname)
+
+        # The barrier makes the msgq process everything we sent. As the
+        # processing is single-threaded in it, any stray message would have
+        # arrived before the barrier ends.
+        self.__barrier(conn_b)
+        self.__barrier([conn_a])
+        for c in conn_b:
+            self.assertEqual((None, None), c.group_recvmsg())
+        self.assertEqual((None, None), conn_a.group_recvmsg())
+
+    def test_conn_disconn(self):
+        """
+        Keep connecting and disconnecting, checking we can still send
+        and receive messages.
+        """
+        conn = self.__get_connection()
+        conn.group_subscribe("group")
+        for i in range(0, 50):
+            new = self.__get_connection()
+            new.group_subscribe("group")
+            self.__barrier([conn, new])
+            new.group_sendmsg(self.__message(i), "group")
+            (msg, env) = conn.group_recvmsg(nonblock=False)
+            self.assertEqual(self.__message(i), msg)
+            conn.close()
+            conn = new
+
+if __name__ == '__main__':
+    isc.log.init("msgq-tests")
+    isc.log.resetUnitTestRootLogger()
+    unittest.main()

+ 3 - 3
src/bin/msgq/tests/msgq_test.py

@@ -186,7 +186,7 @@ class MsgQTest(unittest.TestCase):
         The test is not exhaustive as it doesn't test all combination
         of existence of the recipient, addressing schemes, want_answer
         header and the reply header. It is not needed, these should
-        be mostly independant. That means, for example, if the message
+        be mostly independent. That means, for example, if the message
         is a reply and there's no recipient to send it to, the error
         would not be generated no matter if we addressed the recipient
         by lname or group. If we included everything, the test would
@@ -338,7 +338,7 @@ class BadSocket:
         self.send_exception = send_exception
 
     # completely wrap all calls and member access
-    # (except explicitely overridden ones)
+    # (except explicitly overridden ones)
     def __getattr__(self, name, *args):
         attr = getattr(self.socket, name)
         if isinstance(attr, collections.Callable):
@@ -834,7 +834,7 @@ class SocketTests(unittest.TestCase):
         self.assertIsNone(self.__killed_socket)
 
     def test_send_data_interrupt(self):
-        '''send() is interruptted. send_data() returns 0, sock isn't killed.'''
+        '''send() is interrupted. send_data() returns 0, sock isn't killed.'''
         expected_blockings = []
         for eno in [errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR]:
             self.__sock_error.errno = eno

+ 1 - 1
src/bin/resolver/Makefile.am

@@ -1,4 +1,4 @@
-SUBDIRS = . tests
+SUBDIRS = . tests bench
 
 AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
 AM_CPPFLAGS += -I$(top_srcdir)/src/bin -I$(top_builddir)/src/bin

+ 25 - 0
src/bin/resolver/bench/Makefile.am

@@ -0,0 +1,25 @@
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += -I$(top_builddir)/src/lib/dns -I$(top_srcdir)/src/bin
+AM_CPPFLAGS += -I$(top_builddir)/src/lib/cc
+AM_CPPFLAGS += -I$(top_builddir)/src/bin/resolver
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+if USE_STATIC_LINK
+AM_LDFLAGS = -static
+endif
+
+CLEANFILES = *.gcno *.gcda
+
+noinst_PROGRAMS = resolver-bench
+
+resolver_bench_SOURCES = main.cc
+resolver_bench_SOURCES += fake_resolution.h fake_resolution.cc
+resolver_bench_SOURCES += dummy_work.h dummy_work.cc
+resolver_bench_SOURCES += naive_resolver.h naive_resolver.cc
+
+resolver_bench_LDADD  = $(GTEST_LDADD)
+resolver_bench_LDADD += $(top_builddir)/src/lib/exceptions/libb10-exceptions.la
+resolver_bench_LDADD += $(top_builddir)/src/lib/asiolink/libb10-asiolink.la
+

+ 28 - 0
src/bin/resolver/bench/dummy_work.cc

@@ -0,0 +1,28 @@
+// Copyright (C) 2013  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <resolver/bench/dummy_work.h>
+
+namespace isc {
+namespace resolver {
+namespace bench {
+
+void
+dummy_work() {
+    // Function left intentionally blank.
+};
+
+}
+}
+}

+ 36 - 0
src/bin/resolver/bench/dummy_work.h

@@ -0,0 +1,36 @@
+// Copyright (C) 2013  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef DUMMY_WORK_H
+#define DUMMY_WORK_H
+
+namespace isc {
+namespace resolver {
+namespace bench {
+
+/// \brief An empty function.
+///
+/// An empty function, to fill the CPU with something during the benchmark.
+/// It is expected to be called many times by whatever simulates doing some
+/// real CPU-bound work.
+///
+/// It is defined in separate translation unit, so the compiler does not
+/// know it is empty and can't optimise the call out.
+void dummy_work();
+
+}
+}
+}
+
+#endif
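
The doxygen above explains why the empty function lives in its own translation unit. As a small editorial illustration (not part of this commit), a hypothetical header-defined variant could be inlined and the whole work loop removed by the optimiser, whereas a call to the real dummy_work() is opaque to the compiler and has to be emitted (barring link-time optimisation):

    // Hypothetical counter-example, for illustration only.
    inline void dummy_work_inline() {}   // body visible at the call site

    void simulate_step(size_t work_units) {
        for (size_t i = 0; i < work_units; ++i) {
            dummy_work_inline();                    // may be optimised away
            // isc::resolver::bench::dummy_work();  // opaque call, kept
        }
    }
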

+ 172 - 0
src/bin/resolver/bench/fake_resolution.cc

@@ -0,0 +1,172 @@
+// Copyright (C) 2013  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <resolver/bench/fake_resolution.h>
+#include <resolver/bench/dummy_work.h>
+
+#include <asiolink/interval_timer.h>
+
+#include <boost/bind.hpp>
+#include <boost/foreach.hpp>
+#include <algorithm>
+#include <stdlib.h> // not cstdlib, which doesn't officially have random()
+
+namespace isc {
+namespace resolver {
+namespace bench {
+
+// Parameters of the generated queries.
+// How much work is each operation?
+const size_t parse_size = 100000;
+const size_t render_size = 100000;
+const size_t send_size = 1000;
+const size_t cache_read_size = 10000;
+const size_t cache_write_size = 10000;
+// How large the chance is that a query terminates in this iteration (either
+// by getting the complete answer, or by finding it in the cache). With 0.5,
+// half the queries are found in the cache directly. Half of the rest need
+// just one upstream query. Etc.
+const float chance_complete = 0.5;
+// Number of milliseconds an upstream query can take. It picks a random number
+// in between.
+const size_t upstream_time_min = 2;
+const size_t upstream_time_max = 50;
+
+FakeQuery::FakeQuery(FakeInterface& interface) :
+    interface_(&interface),
+    outstanding_(false)
+{
+    // Schedule what tasks are needed.
+    // First, parse the query
+    steps_.push_back(Step(Compute, parse_size));
+    // Look into the cache if it is there
+    steps_.push_back(Step(CacheRead, cache_read_size));
+    while ((1.0 * random()) / RAND_MAX > chance_complete) {
+        // Needs another step of recursion. Render the upstream query.
+        steps_.push_back(Step(Compute, render_size));
+        // Send it and wait for the answer.
+        steps_.push_back(Step(Upstream, upstream_time_min +
+                              (random() *
+                               (upstream_time_max - upstream_time_min) /
+                               RAND_MAX)));
+        // After it comes, parse the answer and store it in the cache.
+        steps_.push_back(Step(Compute, parse_size));
+        steps_.push_back(Step(CacheWrite, cache_write_size));
+    }
+    // Last, render the answer and send it.
+    steps_.push_back(Step(Compute, render_size));
+    steps_.push_back(Step(Send, send_size));
+    // Reverse it, so we can pop_back the tasks as we work on them.
+    std::reverse(steps_.begin(), steps_.end());
+}
+
+void
+FakeQuery::performTask(const StepCallback& callback) {
+    // nextTask also does all the sanity checking we need.
+    if (nextTask() == Upstream) {
+        outstanding_ = true;
+        interface_->scheduleUpstreamAnswer(this, callback,
+                                           steps_.back().second);
+        steps_.pop_back();
+    } else {
+        for (size_t i = 0; i < steps_.back().second; ++i) {
+            dummy_work();
+        }
+        steps_.pop_back();
+        callback();
+    }
+}
+
+FakeInterface::FakeInterface(size_t query_count) :
+    queries_(query_count)
+{
+    BOOST_FOREACH(FakeQueryPtr& query, queries_) {
+        query = FakeQueryPtr(new FakeQuery(*this));
+    }
+}
+
+void
+FakeInterface::processEvents() {
+    service_.run_one();
+}
+
+namespace {
+
+void
+processDone(bool* flag) {
+    *flag = true;
+}
+
+}
+
+FakeQueryPtr
+FakeInterface::receiveQuery() {
+    // Handle all the events that are already scheduled.
+    // As processEvents blocks until an event happens and we want to terminate
+    // if there are no events, we do a small trick. We post an event to the end
+    // of the queue and work until it is found. This should process all the
+    // events that were there already.
+    bool processed = false;
+    service_.post(boost::bind(&processDone, &processed));
+    while (!processed) {
+        processEvents();
+    }
+
+    // Now, look if there are more queries to return.
+    if (queries_.empty()) {
+        return (FakeQueryPtr());
+    } else {
+        // Take from the back. The order doesn't matter and it's faster from
+        // there.
+        FakeQueryPtr result(queries_.back());
+        queries_.pop_back();
+        return (result);
+    }
+}
+
+class FakeInterface::UpstreamQuery {
+public:
+    UpstreamQuery(FakeQuery* query, const FakeQuery::StepCallback& callback,
+                  const boost::shared_ptr<asiolink::IntervalTimer> timer) :
+        query_(query),
+        callback_(callback),
+        timer_(timer)
+    {}
+    void trigger() {
+        query_->answerReceived();
+        callback_();
+        // We are not needed any more.
+        delete this;
+    }
+private:
+    FakeQuery* const query_;
+    const FakeQuery::StepCallback callback_;
+    // Just to hold it alive before the callback is called.
+    const boost::shared_ptr<asiolink::IntervalTimer> timer_;
+};
+
+void
+FakeInterface::scheduleUpstreamAnswer(FakeQuery* query,
+                                      const FakeQuery::StepCallback& callback,
+                                      size_t msec)
+{
+    const boost::shared_ptr<asiolink::IntervalTimer>
+        timer(new asiolink::IntervalTimer(service_));
+    UpstreamQuery* q(new UpstreamQuery(query, callback, timer));
+    timer->setup(boost::bind(&UpstreamQuery::trigger, q), msec);
+}
+
+}
+}
+}
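
Editorial note on the constants above (not part of the commit): a query always schedules parse + cache read up front and render + send at the end, and each extra recursion round, taken with probability 1 - chance_complete per iteration, adds render + upstream + parse + cache write. The number of rounds is therefore geometric with mean (1 - p)/p, i.e. 1 for p = 0.5, so an average query consists of about 2 + 4 + 2 = 8 steps, roughly 421,000 dummy_work() calls (211,000 fixed plus 210,000 per expected round), and one upstream wait of 2-50 ms per round.
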

+ 228 - 0
src/bin/resolver/bench/fake_resolution.h

@@ -0,0 +1,228 @@
+// Copyright (C) 2013  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef FAKE_RESOLUTION_H
+#define FAKE_RESOLUTION_H
+
+#include <exceptions/exceptions.h>
+#include <asiolink/io_service.h>
+
+#include <boost/function.hpp>
+#include <boost/shared_ptr.hpp>
+
+#include <utility>
+#include <vector>
+
+namespace isc {
+namespace resolver {
+namespace bench {
+
+/// \brief The kind of task a FakeQuery might want to perform.
+///
+/// The benchmark should examine which kind of task the query needs to perform
+/// to progress forward. According to the task, some resources might need to be
+/// locked, something re-scheduled, or such.
+enum Task {
+    /// \brief Some CPU-bound computation.
+    ///
+    /// The query needs to do some computation without any shared resources.
+    /// This might be parsing or rendering of the query, verification of
+    /// signatures, etc.
+    Compute,
+    /// \brief The query needs to read data from cache.
+    CacheRead,
+    /// \brief The query needs to modify the cache.
+    CacheWrite,
+    /// \brief A response is to be sent.
+    ///
+    /// This needs to access the interface/socket. If the socket is shared
+    /// between threads, it might need to lock it.
+    Send,
+    /// \brief An answer from upstream server is needed.
+    ///
+    /// The query needs to send a query to some authoritative server and wait
+    /// for the answer. Something might need to be locked (or not, depending
+    /// on the architecture of the thing that sends and receives). Also, the
+/// task will not complete immediately; the callback of performTask
+/// will be called at a later time.
+    Upstream
+};
+
+class FakeInterface;
+
+/// \brief Imitation of the work done to resolve a query.
+///
+/// An object of this class represents some fake work that should look like
+/// the work needed to perform resolution of one query. No real work is done,
+/// but several steps are scheduled, with characteristics hopefully
+/// corresponding to steps of the real query.
+///
+/// The idea is that the benchmark will repeatedly check if the query is done.
+/// If not, it examines the next task by calling nextTask(). Depending on
+/// the result, it'd lock or prepare any shared resources. After that, it'd
+/// call performTask() to do the task. Once the query calls the callback
+/// passed, it can proceed to the next step.
+///
+/// See naive_resolver.cc for example code showing how this could be done.
+class FakeQuery {
+private:
+    // The queries come only through an interface. Don't let others create.
+    friend class FakeInterface;
+    /// \brief Constructor
+    FakeQuery(FakeInterface& interface);
+public:
+    /// \brief Is work on the query completely done?
+    ///
+    /// If this returns true, do not call performTask or nextTask any more.
+    /// The resolution is done.
+    ///
+    /// \throw isc::InvalidOperation if upstream query is still in progress.
+    bool done() const {
+        if (outstanding_) {
+            isc_throw(isc::InvalidOperation, "Upstream query outstanding");
+        }
+        return (steps_.empty());
+    }
+    /// \brief Callback to signify a task has been performed.
+    typedef boost::function<void()> StepCallback;
+    /// \brief Perform next step in the resolution.
+    ///
+    /// Do whatever is needed to be done for the next step of resolution.
+    /// Once the step is done, the callback is called.
+    ///
+    /// The callback is usually called from within this call. However, in
+    /// the case when the nextTask() returned `Upstream`, the call to the
+    /// callback is delayed for some period of time after the method
+    /// returns.
+    ///
+    /// \throw isc::InvalidOperation if it is called when done() is true, or
+    ///     if an upstream query is still in progress (performTask was called
+    ///     before and the callback was not called by the query yet).
+    void performTask(const StepCallback& callback);
+    /// \brief Examine the kind of the next resolution process.
+    ///
+    /// Call this to know what kind of task performTask will do next.
+    ///
+    /// \throw isc::InvalidOperation if it is called when done() is true, or
+    ///     if an upstream query is still in progress (performTask was called
+    ///     before and the callback was not called by the query yet).
+    Task nextTask() const {
+        // Will check for outstanding_ internally too
+        if (done()) {
+            isc_throw(isc::InvalidOperation, "We are done, no more tasks");
+        }
+        return (steps_.back().first);
+    }
+    /// \brief Move network communication to different interface.
+    ///
+    /// By default, a query does all the "communication" on the interface
+    /// it was born on. This may be used to move a query from one interface
+    /// to another.
+    ///
+    /// You don't have to lock either of the interfaces to do so, this
+    /// only switches the data in the query.
+    ///
+    /// \throw isc::InvalidOperation if it is called while an upstream query
+    ///     is in progress.
+    void migrateTo(FakeInterface& dst_interface) {
+        if (outstanding_) {
+            isc_throw(isc::InvalidOperation,
+                      "Can't migrate in the middle of query");
+        }
+        interface_ = &dst_interface;
+    }
+    /// \brief The answer for upstream query was received
+    ///
+    /// This should be called from within the FakeInterface only.
+    /// It marks that the query from upstream was answered.
+    void answerReceived() {
+        outstanding_ = false;
+    }
+private:
+    // The scheduled steps for this task.
+    typedef std::pair<Task, size_t> Step;
+    // The scheduled steps. Reversed (first to be done at the end), so we can
+    // pop_back() the completed steps.
+    std::vector<Step> steps_;
+    // The interface to schedule timeouts on.
+    FakeInterface* interface_;
+    // Is an upstream query outstanding?
+    bool outstanding_;
+};
+
+typedef boost::shared_ptr<FakeQuery> FakeQueryPtr;
+
+/// \brief An imitation of interface for receiving queries.
+///
+/// This is effectively a little bit smarter factory for queries. You can
+/// request a new query from it, or let process events (incoming answers).
+///
+/// It contains its own event loop. If the benchmark has more threads, have
+/// one in each of the threads (if the thread ever handles network
+/// communication -- if it accepts queries, sends answers or does upstream
+/// queries).
+///
+/// If the model simulated would share the same interface between multiple
+/// threads, it is better to have one in each thread as well, but lock
+/// access to receiveQuery() so only one is used at once (no idea what happens
+/// if ASIO loop is accessed from multiple threads).
+///
+/// Note that the creation of the queries is not thread safe (due to
+/// the random() function inside). The interface generates all its queries
+/// in advance, at creation time. But you need to create all the needed
+/// interfaces from a single thread and then distribute them to your threads.
+class FakeInterface {
+public:
+    /// \brief Constructor
+    ///
+    /// Initialize the interface and create query_count queries for the
+    /// benchmark. They will be handed out one by one with receiveQuery().
+    FakeInterface(size_t query_count);
+    /// \brief Wait for answers from upstream servers.
+    ///
+    /// Wait until at least one "answer" comes from the remote server. This
+    /// will effectively block the calling thread until it is time to call
+    /// a callback of performTask.
+    ///
+    /// It is not legal to call it without any outstanding upstream queries
+    /// on this interface. However, the situation is not explicitly checked.
+    ///
+    /// \note Due to the internal implementation, it is possible that no
+    ///    callback, or more than one, is called from within this method.
+    void processEvents();
+    /// \brief Accept another query.
+    ///
+    /// Generate a new fake query to resolve.
+    ///
+    /// This method might call callbacks of other queries waiting for upstream
+    /// answer.
+    ///
+    /// This returns a NULL pointer when there are no more queries to answer
+    /// (the number designated for the benchmark was reached).
+    FakeQueryPtr receiveQuery();
+private:
+    class UpstreamQuery;
+    friend class FakeQuery;
+    void scheduleUpstreamAnswer(FakeQuery* query,
+                                const FakeQuery::StepCallback& callback,
+                                size_t msec);
+    asiolink::IOService service_;
+    std::vector<FakeQueryPtr> queries_;
+};
+
+}
+}
+}
+
+#endif
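
To make the driving loop described in the FakeQuery and Task documentation above concrete, here is a minimal editorial sketch (not part of this commit); the cache mutex is a hypothetical shared resource, and the real single-threaded driver added by this commit is NaiveResolver::run() in naive_resolver.cc below:

    #include <resolver/bench/fake_resolution.h>
    #include <boost/bind.hpp>
    #include <boost/thread/mutex.hpp>

    namespace {
    void stepDone(bool* flag) {
        *flag = true;
    }
    }

    // Drive a single query to completion, serialising the cache-related
    // steps with a (hypothetical) mutex as the Task descriptions suggest.
    void driveQuery(isc::resolver::bench::FakeQueryPtr query,
                    isc::resolver::bench::FakeInterface& interface,
                    boost::mutex& cache_mutex)
    {
        using namespace isc::resolver::bench;
        while (!query->done()) {
            bool done = false;
            const Task task = query->nextTask();
            if (task == CacheRead || task == CacheWrite) {
                // Cache access might need locking if the cache were shared.
                boost::mutex::scoped_lock lock(cache_mutex);
                query->performTask(boost::bind(&stepDone, &done));
            } else {
                query->performTask(boost::bind(&stepDone, &done));
            }
            // Upstream steps finish asynchronously; wait for the callback.
            while (!done) {
                interface.processEvents();
            }
        }
    }
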

+ 27 - 0
src/bin/resolver/bench/main.cc

@@ -0,0 +1,27 @@
+// Copyright (C) 2013  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <resolver/bench/naive_resolver.h>
+
+#include <bench/benchmark.h>
+
+const size_t count = 1000; // TODO: We may want to read this from argv.
+
+int main(int, const char**) {
+    // Run the naive implementation
+    isc::resolver::bench::NaiveResolver naive_resolver(count);
+    isc::bench::BenchMark<isc::resolver::bench::NaiveResolver>
+        (1, naive_resolver, true);
+    return 0;
+}

+ 66 - 0
src/bin/resolver/bench/naive_resolver.cc

@@ -0,0 +1,66 @@
+// Copyright (C) 2013  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <resolver/bench/naive_resolver.h>
+
+#include <cassert>
+#include <boost/bind.hpp>
+
+namespace isc {
+namespace resolver {
+namespace bench {
+
+NaiveResolver::NaiveResolver(size_t query_count) :
+    interface_(query_count),
+    processed_(false)
+{}
+
+namespace {
+
+void
+stepDone(bool* flag) {
+    *flag = true;
+}
+
+}
+
+size_t
+NaiveResolver::run() {
+    assert(!processed_);
+    size_t count = 0;
+    FakeQueryPtr query;
+    // Process a query at a time. As the previous is already handled, the
+    // receiveQuery may never trigger other events.
+    while ((query = interface_.receiveQuery())) {
+        // Handle each step
+        while (!query->done()) {
+            bool done = false; // This step is not yet done.
+            // If there were more queries/threads/whatever, we would examine
+            // the query->nextTask() and lock or prepare resources accordingly.
+            // But as there's just one, we simply do the task, without caring.
+            query->performTask(boost::bind(&stepDone, &done));
+            // We may need to wait for the upstream query.
+            while (!done) {
+                interface_.processEvents();
+            }
+        }
+        count ++;
+    }
+    processed_ = true;
+    return (count);
+}
+
+}
+}
+}

+ 44 - 0
src/bin/resolver/bench/naive_resolver.h

@@ -0,0 +1,44 @@
+// Copyright (C) 2013  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef RESOLVER_BENCH_NAIVE_H
+#define RESOLVER_BENCH_NAIVE_H
+
+#include <resolver/bench/fake_resolution.h>
+
+namespace isc {
+namespace resolver {
+namespace bench {
+
+/// \brief Naive implementation of resolver for the benchmark
+///
+/// This is here mostly to show how to implement the other benchmark
+/// implementations. Look at the code inside how to use the fake
+/// resolution.
+class NaiveResolver {
+public:
+    /// \brief Constructor. Initializes the data.
+    NaiveResolver(size_t query_count);
+    /// \brief Run the resolution.
+    size_t run();
+private:
+    FakeInterface interface_;
+    bool processed_;
+};
+
+}
+}
+}
+
+#endif

+ 1 - 1
src/bin/sockcreator/README

@@ -35,7 +35,7 @@ must be a socket, not pipe.
   The answer to this is either 'S' directly followed by the socket (using
   sendmsg) if it is successful. If it fails, 'E' is returned instead, followed
   by either 'S' or 'B' (either socket() or bind() call failed). Then there is
-  one int (architecture-dependent length and endianess), which is the errno
+  one int (architecture-dependent length and endianness), which is the errno
   value after the failure.
 
 The creator may also send these messages at any time (but not in the middle

+ 1 - 1
src/bin/sockcreator/sockcreator.h

@@ -85,7 +85,7 @@ typedef int (*close_t)(int);
 /// \param type The type of socket to create (SOCK_STREAM, SOCK_DGRAM, etc).
 /// \param bind_addr The address to bind.
 /// \param addr_len The actual length of bind_addr.
-/// \param close_fun The furction used to close a socket if there's an error
+/// \param close_fun The function used to close a socket if there's an error
 ///     after the creation.
 ///
 /// \return The file descriptor of the newly created socket, if everything

+ 1 - 1
src/bin/stats/stats.py.in

@@ -194,7 +194,7 @@ class Stats:
         '''Constructor
 
         module_ccsession_class is parameterized so that test can specify
-        a mocked class to test the behavior without involing network I/O.
+        a mocked class to test the behavior without involving network I/O.
         In other cases this parameter shouldn't be specified.
 
         '''

+ 6 - 5
src/bin/stats/stats_httpd.py.in

@@ -35,6 +35,7 @@ import re
 import isc.cc
 import isc.config
 import isc.util.process
+from isc.util.address_formatter import AddressFormatter
 
 import isc.log
 from isc.log_messages.stats_httpd_messages import *
@@ -325,8 +326,8 @@ class StatsHttpd:
                 server_address, HttpHandler,
                 self.xml_handler, self.xsd_handler, self.xsl_handler,
                 self.write_log)
-            logger.info(STATSHTTPD_STARTED, server_address[0],
-                        server_address[1])
+            logger.info(STATSHTTPD_STARTED,
+                        AddressFormatter(server_address, address_family))
             return httpd
         except (socket.gaierror, socket.error,
                 OverflowError, TypeError) as err:
@@ -341,8 +342,8 @@ class StatsHttpd:
         """Closes sockets for HTTP"""
         while len(self.httpd)>0:
             ht = self.httpd.pop()
-            logger.info(STATSHTTPD_CLOSING, ht.server_address[0],
-                        ht.server_address[1])
+            logger.info(STATSHTTPD_CLOSING,
+                        AddressFormatter(ht.server_address))
             ht.server_close()
 
     def start(self):
@@ -406,7 +407,7 @@ class StatsHttpd:
         old_config = self.config.copy()
         self.load_config(new_config)
         # If the http sockets aren't opened or
-        # if new_config doesn't have'listen_on', it returns
+        # if new_config doesn't have 'listen_on', it returns
         if len(self.httpd) == 0 or 'listen_on' not in new_config:
             return isc.config.ccsession.create_answer(0)
         self.close_httpd()

+ 2 - 2
src/bin/stats/stats_httpd_messages.mes

@@ -24,7 +24,7 @@ The stats-httpd module was unable to connect to the BIND 10 command
 and control bus. A likely problem is that the message bus daemon
 (b10-msgq) is not running. The stats-httpd module will now shut down.
 
-% STATSHTTPD_CLOSING closing %1#%2
+% STATSHTTPD_CLOSING closing %1
 The stats-httpd daemon will stop listening for requests on the given
 address and port number.
 
@@ -80,7 +80,7 @@ and an error is sent back.
 % STATSHTTPD_SHUTDOWN shutting down
 The stats-httpd daemon is shutting down.
 
-% STATSHTTPD_STARTED listening on %1#%2
+% STATSHTTPD_STARTED listening on %1
 The stats-httpd daemon will now start listening for requests on the
 given address and port number.
 

File diff suppressed because it is too large
+ 2 - 2
src/bin/stats/tests/Makefile.am


+ 137 - 92
src/bin/stats/tests/b10-stats-httpd_test.py

@@ -46,10 +46,10 @@ import isc
 import isc.log
 import stats_httpd
 import stats
-from test_utils import BaseModules, ThreadingServerManager, MyStats,\
-                       MyStatsHttpd, SignalHandler,\
-                       send_command, CONST_BASETIME
+from test_utils import ThreadingServerManager, SignalHandler, \
+    MyStatsHttpd, CONST_BASETIME
 from isc.testutils.ccsession_mock import MockModuleCCSession
+from isc.config import RPCRecipientMissing, RPCError
 
 # This test suite uses xml.etree.ElementTree.XMLParser via
 # xml.etree.ElementTree.parse. On the platform where expat isn't
@@ -104,6 +104,11 @@ DUMMY_DATA = {
         }
     }
 
+# Bad practice: this should be localized
+stats._BASETIME = CONST_BASETIME
+stats.get_timestamp = lambda: time.mktime(CONST_BASETIME)
+stats.get_datetime = lambda x=None: time.strftime("%Y-%m-%dT%H:%M:%SZ", CONST_BASETIME)
+
 def get_availaddr(address='127.0.0.1', port=8001):
     """returns a tuple of address and port which is available to
     listen on the platform. The first argument is a address for
@@ -230,13 +235,11 @@ class TestHttpHandler(unittest.TestCase):
     def setUp(self):
         # set the signal handler for deadlock
         self.sig_handler = SignalHandler(self.fail)
-        self.base = BaseModules()
-        self.stats_server = ThreadingServerManager(MyStats)
-        self.stats = self.stats_server.server
-        DUMMY_DATA['Stats']['lname'] = self.stats.cc_session.lname
-        self.stats_server.run()
+        DUMMY_DATA['Stats']['lname'] = 'test-lname'
         (self.address, self.port) = get_availaddr()
-        self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, (self.address, self.port))
+        self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd,
+                                                         (self.address,
+                                                          self.port))
         self.stats_httpd = self.stats_httpd_server.server
         self.stats_httpd_server.run()
         self.client = http.client.HTTPConnection(self.address, self.port)
@@ -245,13 +248,9 @@ class TestHttpHandler(unittest.TestCase):
 
     def tearDown(self):
         self.client.close()
-        self.stats_httpd_server.shutdown()
-        self.stats_server.shutdown()
-        self.base.shutdown()
         # reset the signal handler
         self.sig_handler.reset()
 
-    @unittest.skipIf(sys.version_info >= (3, 3), "Unsupported in Python 3.3 or higher")
     @unittest.skipUnless(xml_parser, "skipping the test using XMLParser")
     def test_do_GET(self):
         self.assertTrue(type(self.stats_httpd.httpd) is list)
@@ -456,15 +455,10 @@ class TestHttpHandler(unittest.TestCase):
         self.assertEqual(response.status, 404)
 
     def test_do_GET_failed1(self):
-        # checks status
-        self.assertEqual(send_command("status", "Stats"),
-                         (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
-        # failure case(Stats is down)
-        self.assertTrue(self.stats.running)
-        self.assertEqual(send_command("shutdown", "Stats"),
-                         (0, None)) # Stats is down
-        self.assertFalse(self.stats.running)
-        self.stats_httpd.cc_session.set_timeout(milliseconds=100)
+        # failure case (Stats is down, so rpc_call() results in an exception)
+        # Note: this should eventually be RPCRecipientMissing.
+        self.stats_httpd._rpc_answers.append(
+            isc.cc.session.SessionTimeout('timeout'))
 
         # request XML
         self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/')
@@ -486,10 +480,8 @@ class TestHttpHandler(unittest.TestCase):
 
     def test_do_GET_failed2(self):
         # failure case(Stats replies an error)
-        self.stats.mccs.set_command_handler(
-            lambda cmd, args: \
-                isc.config.ccsession.create_answer(1, "specified arguments are incorrect: I have an error.")
-            )
+        self.stats_httpd._rpc_answers.append(
+            RPCError(1, "specified arguments are incorrect: I have an error."))
 
         # request XML
         self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/')
@@ -498,12 +490,16 @@ class TestHttpHandler(unittest.TestCase):
         self.assertEqual(response.status, 404)
 
         # request XSD
+        self.stats_httpd._rpc_answers.append(
+            RPCError(1, "specified arguments are incorrect: I have an error."))
         self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
         self.client.endheaders()
         response = self.client.getresponse()
         self.assertEqual(response.status, 200)
 
         # request XSL
+        self.stats_httpd._rpc_answers.append(
+            RPCError(1, "specified arguments are incorrect: I have an error."))
         self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
         self.client.endheaders()
         response = self.client.getresponse()
@@ -567,12 +563,10 @@ class TestHttpServer(unittest.TestCase):
     def setUp(self):
         # set the signal handler for deadlock
         self.sig_handler = SignalHandler(self.fail)
-        self.base = BaseModules()
 
     def tearDown(self):
         if hasattr(self, "stats_httpd"):
             self.stats_httpd.stop()
-        self.base.shutdown()
         # reset the signal handler
         self.sig_handler.reset()
 
@@ -604,9 +598,6 @@ class TestStatsHttpd(unittest.TestCase):
     def setUp(self):
         # set the signal handler for deadlock
         self.sig_handler = SignalHandler(self.fail)
-        self.base = BaseModules()
-        self.stats_server = ThreadingServerManager(MyStats)
-        self.stats_server.run()
         # checking IPv6 enabled on this platform
         self.ipv6_enabled = is_ipv6_enabled()
         # instantiation of StatsHttpd indirectly calls gethostbyaddr(), which
@@ -617,71 +608,80 @@ class TestStatsHttpd(unittest.TestCase):
         self.__gethostbyaddr_orig = socket.gethostbyaddr
         socket.gethostbyaddr = lambda x: ('test.example.', [], None)
 
+        # Some tests replace this library function.  Keep the original for
+        # restoration in tearDown().
+        self.__orig_select_select = select.select
+
     def tearDown(self):
         socket.gethostbyaddr = self.__gethostbyaddr_orig
         if hasattr(self, "stats_httpd"):
             self.stats_httpd.stop()
-        self.stats_server.shutdown()
-        self.base.shutdown()
         # reset the signal handler
         self.sig_handler.reset()
 
+        # restore original of replaced library
+        select.select = self.__orig_select_select
+
     def test_init(self):
         server_address = get_availaddr()
         self.stats_httpd = MyStatsHttpd(server_address)
         self.assertEqual(self.stats_httpd.running, False)
         self.assertEqual(self.stats_httpd.poll_intval, 0.5)
         self.assertNotEqual(len(self.stats_httpd.httpd), 0)
-        self.assertEqual(type(self.stats_httpd.mccs), isc.config.ModuleCCSession)
-        self.assertEqual(type(self.stats_httpd.cc_session), isc.cc.Session)
-        self.assertEqual(len(self.stats_httpd.config), 2)
+        self.assertIsNotNone(self.stats_httpd.mccs)
+        self.assertIsNotNone(self.stats_httpd.cc_session)
+        # The real CfgMgr would return 'version', but our test mock omits it,
+        # so the len(config) should be 1
+        self.assertEqual(len(self.stats_httpd.config), 1)
         self.assertTrue('listen_on' in self.stats_httpd.config)
         self.assertEqual(len(self.stats_httpd.config['listen_on']), 1)
         self.assertTrue('address' in self.stats_httpd.config['listen_on'][0])
         self.assertTrue('port' in self.stats_httpd.config['listen_on'][0])
         self.assertTrue(server_address in set(self.stats_httpd.http_addrs))
-        ans = send_command(
-            isc.config.ccsession.COMMAND_GET_MODULE_SPEC,
-            "ConfigManager", {"module_name":"StatsHttpd"})
-        # assert StatsHttpd is added to ConfigManager
-        self.assertNotEqual(ans, (0,{}))
-        self.assertTrue(ans[1]['module_name'], 'StatsHttpd')
+        self.assertEqual('StatsHttpd', self.stats_httpd.mccs.\
+                             get_module_spec().get_module_name())
 
     def test_init_hterr(self):
-        orig_open_httpd = stats_httpd.StatsHttpd.open_httpd
-        def err_open_httpd(arg): raise stats_httpd.HttpServerError
-        stats_httpd.StatsHttpd.open_httpd = err_open_httpd
-        self.assertRaises(stats_httpd.HttpServerError, stats_httpd.StatsHttpd)
-        ans = send_command(
-            isc.config.ccsession.COMMAND_GET_MODULE_SPEC,
-            "ConfigManager", {"module_name":"StatsHttpd"})
-        # assert StatsHttpd is removed from ConfigManager
-        self.assertEqual(ans, (0,{}))
-        stats_httpd.StatsHttpd.open_httpd = orig_open_httpd
+        """Test the behavior of StatsHttpd constructor when open_httpd fails.
+
+        We specifically check the following two:
+        - close_mccs() is called (so stats-httpd tells ConfigMgr it's shutting
+          down)
+        - the constructor results in HttpServerError exception.
+
+        """
+        self.__mccs_closed = False
+        def call_checker():
+            self.__mccs_closed = True
+        class FailingStatsHttpd(MyStatsHttpd):
+            def open_httpd(self):
+                raise stats_httpd.HttpServerError
+            def close_mccs(self):
+                call_checker()
+        self.assertRaises(stats_httpd.HttpServerError, FailingStatsHttpd)
+        self.assertTrue(self.__mccs_closed)
 
     def test_openclose_mccs(self):
         self.stats_httpd = MyStatsHttpd(get_availaddr())
-        mccs = MockModuleCCSession()
-        self.stats_httpd.mccs = mccs
+        mccs = self.stats_httpd.mccs
         self.assertFalse(self.stats_httpd.mccs.stopped)
         self.assertFalse(self.stats_httpd.mccs.closed)
         self.stats_httpd.close_mccs()
         self.assertTrue(mccs.stopped)
         self.assertTrue(mccs.closed)
-        self.assertEqual(self.stats_httpd.mccs, None)
+        self.assertIsNone(self.stats_httpd.mccs)
         self.stats_httpd.open_mccs()
         self.assertIsNotNone(self.stats_httpd.mccs)
         self.stats_httpd.mccs = None
-        self.assertEqual(self.stats_httpd.mccs, None)
-        self.assertEqual(self.stats_httpd.close_mccs(), None)
+        self.assertIsNone(self.stats_httpd.mccs)
+        self.assertIsNone(self.stats_httpd.close_mccs())
 
     def test_mccs(self):
         self.stats_httpd = MyStatsHttpd(get_availaddr())
         self.assertIsNotNone(self.stats_httpd.mccs.get_socket())
         self.assertTrue(
             isinstance(self.stats_httpd.mccs.get_socket(), socket.socket))
-        self.assertTrue(
-            isinstance(self.stats_httpd.cc_session, isc.cc.session.Session))
+        self.assertIsNotNone(self.stats_httpd.cc_session)
         statistics_spec = self.stats_httpd.get_stats_spec()
         for mod in DUMMY_DATA:
             self.assertTrue(mod in statistics_spec)
@@ -699,8 +699,11 @@ class TestStatsHttpd(unittest.TestCase):
             self.stats_httpd = MyStatsHttpd(*server_addresses)
             for ht in self.stats_httpd.httpd:
                 self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
-                self.assertTrue(ht.address_family in set([socket.AF_INET, socket.AF_INET6]))
+                self.assertTrue(ht.address_family in set([socket.AF_INET,
+                                                          socket.AF_INET6]))
                 self.assertTrue(isinstance(ht.socket, socket.socket))
+                ht.socket.close() # to silence warning about resource leak
+            self.stats_httpd.close_mccs() # ditto
 
         # dual stack (address is ipv6)
         if self.ipv6_enabled:
@@ -710,6 +713,8 @@ class TestStatsHttpd(unittest.TestCase):
                 self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
                 self.assertEqual(ht.address_family, socket.AF_INET6)
                 self.assertTrue(isinstance(ht.socket, socket.socket))
+                ht.socket.close()
+            self.stats_httpd.close_mccs() # ditto
 
         # dual/single stack (address is ipv4)
         server_addresses = get_availaddr()
@@ -718,6 +723,8 @@ class TestStatsHttpd(unittest.TestCase):
             self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
             self.assertEqual(ht.address_family, socket.AF_INET)
             self.assertTrue(isinstance(ht.socket, socket.socket))
+            ht.socket.close()
+        self.stats_httpd.close_mccs()
 
     def test_httpd_anyIPv4(self):
         # any address (IPv4)
@@ -744,39 +751,69 @@ class TestStatsHttpd(unittest.TestCase):
                           get_availaddr(address='localhost'))
 
         # nonexistent hostname
-        self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('my.host.domain', 8000))
+        self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd,
+                          ('my.host.domain', 8000))
 
         # over flow of port number
-        self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', 80000))
+        self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd,
+                          ('127.0.0.1', 80000))
 
         # negative
-        self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', -8000))
+        self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd,
+                          ('127.0.0.1', -8000))
 
         # alphabet
-        self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', 'ABCDE'))
+        self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd,
+                          ('127.0.0.1', 'ABCDE'))
 
         # Address already in use
         server_addresses = get_availaddr()
-        self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, server_addresses)
-        self.stats_httpd_server.run()
-        self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, server_addresses)
-        send_command("shutdown", "StatsHttpd")
+        server = MyStatsHttpd(server_addresses)
+        self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd,
+                          server_addresses)
+
+    def __faked_select(self, ex=None):
+        """A helper subroutine for tests using faked select.select.
+
+        See test_running() for basic features.  If ex is not None,
+        it's assumed to be an exception object and will be raised on the
+        first call.
+
+        """
+        self.assertTrue(self.stats_httpd.running)
+        self.__call_count += 1
+        if ex is not None and self.__call_count == 1:
+            raise ex
+        if self.__call_count == 2:
+            self.stats_httpd.running = False
+        assert self.__call_count <= 2 # safety net to avoid infinite loop
+        return ([], [], [])
 
     def test_running(self):
-        self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, get_availaddr())
-        self.stats_httpd = self.stats_httpd_server.server
+        # Previous version of this test checks the result of "status" and
+        # "shutdown" commands; however, they are more explicitly tested
+        # in specific tests.  In this test we only have to check:
+        # - start() will set 'running' to True
+        # - as long as 'running' is True, it keeps calling select.select
+        # - when running becomes False, it exits the loop and calls
+        #   stop()
+        self.stats_httpd = MyStatsHttpd(get_availaddr())
         self.assertFalse(self.stats_httpd.running)
-        self.stats_httpd_server.run()
-        self.assertEqual(send_command("status", "StatsHttpd"),
-                         (0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")"))
-        self.assertTrue(self.stats_httpd.running)
-        self.assertEqual(send_command("shutdown", "StatsHttpd"), (0, None))
+
+        # In this test we'll call select.select() 2 times: on the first call
+        # stats_httpd.running should be True; on the second call the faked
+        # select() will set it to False.
+        self.__call_count = 0
+        select.select = lambda r, w, x, t: self.__faked_select()
+        self.stats_httpd.start()
         self.assertFalse(self.stats_httpd.running)
-        self.stats_httpd_server.shutdown()
+        self.assertIsNone(self.stats_httpd.mccs) # stop() clears .mccs
 
-        # failure case
+    def test_running_fail(self):
+        # A failure case of start(): we close the (real but dummy) socket for
+        # the CC session.  This breaks the select-loop due to exception
         self.stats_httpd = MyStatsHttpd(get_availaddr())
-        self.stats_httpd.cc_session.close()
+        self.stats_httpd.mccs.get_socket().close()
         self.assertRaises(ValueError, self.stats_httpd.start)
 
     def test_failure_with_a_select_error (self):
@@ -784,28 +821,26 @@ class TestStatsHttpd(unittest.TestCase):
         errno.EINTR is raised while it's selecting"""
         def raise_select_except(*args):
             raise select.error('dummy error')
-        orig_select = stats_httpd.select.select
-        stats_httpd.select.select = raise_select_except
+        select.select = raise_select_except
         self.stats_httpd = MyStatsHttpd(get_availaddr())
         self.assertRaises(select.error, self.stats_httpd.start)
-        stats_httpd.select.select = orig_select
 
     def test_nofailure_with_errno_EINTR(self):
         """checks no exception is raised if errno.EINTR is raised
         while it's selecting"""
-        def raise_select_except(*args):
-            raise select.error(errno.EINTR)
-        orig_select = stats_httpd.select.select
-        stats_httpd.select.select = raise_select_except
-        self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, get_availaddr())
-        self.stats_httpd_server.run()
-        self.stats_httpd_server.shutdown()
-        stats_httpd.select.select = orig_select
+        self.__call_count = 0
+        select.select = lambda r, w, x, t: self.__faked_select(
+            select.error(errno.EINTR))
+        self.stats_httpd = MyStatsHttpd(get_availaddr())
+        self.stats_httpd.start() # shouldn't leak the exception
+        self.assertFalse(self.stats_httpd.running)
+        self.assertIsNone(self.stats_httpd.mccs)
 
     def test_open_template(self):
         self.stats_httpd = MyStatsHttpd(get_availaddr())
         # successful conditions
-        tmpl = self.stats_httpd.open_template(stats_httpd.XML_TEMPLATE_LOCATION)
+        tmpl = self.stats_httpd.open_template(
+            stats_httpd.XML_TEMPLATE_LOCATION)
         self.assertTrue(isinstance(tmpl, string.Template))
         opts = dict(
             xml_string="<dummy></dummy>",
@@ -813,13 +848,15 @@ class TestStatsHttpd(unittest.TestCase):
         lines = tmpl.substitute(opts)
         for n in opts:
             self.assertGreater(lines.find(opts[n]), 0)
-        tmpl = self.stats_httpd.open_template(stats_httpd.XSD_TEMPLATE_LOCATION)
+        tmpl = self.stats_httpd.open_template(
+            stats_httpd.XSD_TEMPLATE_LOCATION)
         self.assertTrue(isinstance(tmpl, string.Template))
         opts = dict(xsd_namespace="http://host/path/to/")
         lines = tmpl.substitute(opts)
         for n in opts:
             self.assertGreater(lines.find(opts[n]), 0)
-        tmpl = self.stats_httpd.open_template(stats_httpd.XSL_TEMPLATE_LOCATION)
+        tmpl = self.stats_httpd.open_template(
+            stats_httpd.XSL_TEMPLATE_LOCATION)
         self.assertTrue(isinstance(tmpl, string.Template))
         opts = dict(xsd_namespace="http://host/path/to/")
         lines = tmpl.substitute(opts)
@@ -1067,7 +1104,15 @@ class TestStatsHttpd(unittest.TestCase):
         self.assertEqual('@description', stats_xsl[2].find('%sif' % nst).attrib['test'])
         self.assertEqual('@description', stats_xsl[2].find('%sif/%svalue-of' % ((nst,)*2)).attrib['select'])
 
+class Z_TestOSEnv(unittest.TestCase):
     def test_for_without_B10_FROM_SOURCE(self):
+        # Note: this test is sensitive due to its substantial side effect of
+        # reloading.  For example, it affects tests that tweak module
+        # attributes (such as test_init_hterr).  It also breaks the logging
+        # setting for unit tests.  To minimize these effects, we use a
+        # workaround: make it very likely to run at the end of the tests
+        # by naming the test class "Z_".
+
         # just lets it go through the code without B10_FROM_SOURCE env
         # variable
         if "B10_FROM_SOURCE" in os.environ:

+ 201 - 149
src/bin/stats/tests/b10-stats_test.py

@@ -23,7 +23,6 @@ to real environment.
 
 import unittest
 import os
-import threading
 import io
 import time
 import imp
@@ -31,10 +30,7 @@ import sys
 
 import stats
 import isc.log
-import isc.cc.session
-from test_utils import BaseModules, ThreadingServerManager, MyStats, \
-    SimpleStats, SignalHandler, MyModuleCCSession, send_command
-from isc.testutils.ccsession_mock import MockModuleCCSession
+from test_utils import MyStats
 
 class TestUtilties(unittest.TestCase):
     items = [
@@ -91,9 +87,15 @@ class TestUtilties(unittest.TestCase):
         self.const_timestamp = 1308730448.965706
         self.const_timetuple = (2011, 6, 22, 8, 14, 8, 2, 173, 0)
         self.const_datetime = '2011-06-22T08:14:08Z'
+        self.__orig_time = stats.time
+        self.__orig_gmtime = stats.gmtime
         stats.time = lambda : self.const_timestamp
         stats.gmtime = lambda : self.const_timetuple
 
+    def tearDown(self):
+        stats.time = self.__orig_time
+        stats.gmtime = self.__orig_gmtime
+
     def test_get_spec_defaults(self):
         self.assertEqual(
             stats.get_spec_defaults(self.items), {
@@ -243,8 +245,6 @@ class TestCallback(unittest.TestCase):
 class TestStats(unittest.TestCase):
     def setUp(self):
         # set the signal handler for deadlock
-        self.sig_handler = SignalHandler(self.fail)
-        self.base = BaseModules()
         self.const_timestamp = 1308730448.965706
         self.const_datetime = '2011-06-22T08:14:08Z'
         self.const_default_datetime = '1970-01-01T00:00:00Z'
@@ -253,15 +253,12 @@ class TestStats(unittest.TestCase):
         self.__orig_get_datetime = stats.get_datetime
 
     def tearDown(self):
-        self.base.shutdown()
-        # reset the signal handler
-        self.sig_handler.reset()
         # restore the stored original function in case we replaced them
         stats.get_timestamp = self.__orig_timestamp
         stats.get_datetime = self.__orig_get_datetime
 
     def test_init(self):
-        self.stats = stats.Stats()
+        self.stats = MyStats()
         self.assertEqual(self.stats.module_name, 'Stats')
         self.assertFalse(self.stats.running)
         self.assertTrue('command_show' in self.stats.callbacks)
@@ -291,7 +288,7 @@ class TestStats(unittest.TestCase):
 """
         orig_spec_location = stats.SPECFILE_LOCATION
         stats.SPECFILE_LOCATION = io.StringIO(spec_str)
-        self.assertRaises(stats.StatsError, stats.Stats)
+        self.assertRaises(stats.StatsError, MyStats)
         stats.SPECFILE_LOCATION = orig_spec_location
 
     def __send_command(self, stats, command_name, params=None):
@@ -310,13 +307,13 @@ class TestStats(unittest.TestCase):
             raise CheckException # terminate the loop
 
         # start without err
-        stats = SimpleStats()
-        self.assertFalse(stats.running)
-        stats._check_command = lambda: __check_start(stats)
+        self.stats = MyStats()
+        self.assertFalse(self.stats.running)
+        self.stats._check_command = lambda: __check_start(self.stats)
         # We are going to confirm start() will set running to True, avoiding
         # to fall into a loop with the exception trick.
-        self.assertRaises(CheckException, stats.start)
-        self.assertEqual(self.__send_command(stats, "status"),
+        self.assertRaises(CheckException, self.stats.start)
+        self.assertEqual(self.__send_command(self.stats, "status"),
                          (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
 
     def test_shutdown(self):
@@ -328,15 +325,15 @@ class TestStats(unittest.TestCase):
             # override get_interval() so it won't go poll statistics
             tested_stats.get_interval = lambda : 0
 
-        stats = SimpleStats()
-        stats._check_command = lambda: __check_shutdown(stats)
-        stats.start()
-        self.assertTrue(stats.mccs.stopped)
+        self.stats = MyStats()
+        self.stats._check_command = lambda: __check_shutdown(self.stats)
+        self.stats.start()
+        self.assertTrue(self.stats.mccs.stopped)
 
     def test_handlers(self):
         """Test command_handler"""
 
-        __stats = SimpleStats()
+        __stats = MyStats()
 
         # 'show' command.  We're going to check the expected methods are
         # called in the expected order, and check the resulting response.
@@ -376,7 +373,7 @@ class TestStats(unittest.TestCase):
                              'report_time': 42})),
                           ('update_module', ())], call_log)
 
-        # Then update faked timestamp so the intial polling will happen, and
+        # Then update faked timestamp so the initial polling will happen, and
         # confirm that.
         call_log = []
         stats.get_timestamp = lambda: 10
@@ -433,7 +430,7 @@ class TestStats(unittest.TestCase):
                         }]}
             return answer_value
 
-        self.stats = SimpleStats()
+        self.stats = MyStats()
         self.stats.cc_session.rpc_call = __check_rpc_call
 
         self.stats.update_modules()
@@ -480,7 +477,7 @@ class TestStats(unittest.TestCase):
         where we set the expected data in statistics_data.
 
         """
-        self.stats = SimpleStats()
+        self.stats = MyStats()
         def __faked_update_modules():
             self.stats.statistics_data = { \
                 'Stats': {
@@ -539,7 +536,7 @@ class TestStats(unittest.TestCase):
 
     def test_update_statistics_data(self):
         """test for list-type statistics"""
-        self.stats = SimpleStats()
+        self.stats = MyStats()
         _test_exp1 = {
               'zonename': 'test1.example',
               'queries.tcp': 5,
@@ -616,7 +613,7 @@ class TestStats(unittest.TestCase):
 
     def test_update_statistics_data_pt2(self):
         """test for named_set-type statistics"""
-        self.stats = SimpleStats()
+        self.stats = MyStats()
         _test_exp1 = \
             { 'test10.example': { 'queries.tcp': 5, 'queries.udp': 4 } }
         _test_exp2 = \
@@ -686,7 +683,7 @@ class TestStats(unittest.TestCase):
                 'Foo', 'foo1', _test_exp6), ['unknown module name: Foo'])
 
     def test_update_statistics_data_withmid(self):
-        self.stats = SimpleStats()
+        self.stats = MyStats()
 
         # This test relies on existing statistics data at the Stats object.
         # This version of test prepares the data using the do_polling() method;
@@ -701,7 +698,7 @@ class TestStats(unittest.TestCase):
         # We use the knowledge of what kind of messages are sent via
         # do_polling, and return the following faked answer directly.
         create_answer = isc.config.ccsession.create_answer # shortcut
-        self.stats._answers = [\
+        self.stats._answers = [
             # Answer for "show_processes"
             (create_answer(0, [[1034, 'b10-auth-1', 'Auth'],
                                [1035, 'b10-auth-2', 'Auth']]),  None),
@@ -754,7 +751,6 @@ class TestStats(unittest.TestCase):
         self.assertEqual(self.stats.statistics_data_bymid['Auth']['bar2@foo'],
                          {'queries.tcp': bar2_tcp})
         # kill running Auth but the statistics data doesn't change
-        self.base.auth2.server.shutdown()
         self.stats.update_statistics_data()
         self.assertTrue('Auth' in self.stats.statistics_data)
         self.assertTrue('queries.tcp' in self.stats.statistics_data['Auth'])
@@ -765,7 +761,6 @@ class TestStats(unittest.TestCase):
                          sum_qudp)
         self.assertTrue('Auth' in self.stats.statistics_data_bymid)
         # restore statistics data of killed auth
-        # self.base.b10_init.server.pid_list = [ killed ] + self.base.b10_init.server.pid_list[:]
         self.stats.update_statistics_data('Auth',
                                           "bar1@foo",
                                           {'queries.tcp': bar1_tcp})
@@ -794,7 +789,7 @@ class TestStats(unittest.TestCase):
     def test_config(self):
         orig_get_timestamp = stats.get_timestamp
         stats.get_timestamp = lambda : self.const_timestamp
-        stat = SimpleStats()
+        stat = MyStats()
 
         # test updating poll-interval
         self.assertEqual(stat.config['poll-interval'], 60)
@@ -840,7 +835,7 @@ class TestStats(unittest.TestCase):
             (0, {'Init': {'boot_time': self.const_datetime}}))
 
     def test_commands(self):
-        self.stats = stats.Stats()
+        self.stats = MyStats()
 
         # status
         self.assertEqual(self.stats.command_status(),
@@ -853,39 +848,57 @@ class TestStats(unittest.TestCase):
                          isc.config.create_answer(0))
         self.assertFalse(self.stats.running)
 
-    @unittest.skipIf(sys.version_info >= (3, 3), "Unsupported in Python 3.3 or higher")
-    def test_command_show(self):
-        # two auth instances invoked
-        list_auth = [ self.base.auth.server,
-                      self.base.auth2.server ]
-        sum_qtcp = 0
-        sum_qudp = 0
-        sum_qtcp_perzone1 = 0
-        sum_qudp_perzone1 = 0
-        sum_qtcp_perzone2 = 4 * len(list_auth)
-        sum_qudp_perzone2 = 3 * len(list_auth)
-        sum_qtcp_nds_perzone10 = 0
-        sum_qudp_nds_perzone10 = 0
-        sum_qtcp_nds_perzone20 = 4 * len(list_auth)
-        sum_qudp_nds_perzone20 = 3 * len(list_auth)
-        self.stats = stats.Stats()
+    def test_command_show_error(self):
+        self.stats = MyStats()
         self.assertEqual(self.stats.command_show(owner='Foo', name=None),
                          isc.config.create_answer(
-                1, "specified arguments are incorrect: owner: Foo, name: None"))
+                1,
+                "specified arguments are incorrect: owner: Foo, name: None"))
         self.assertEqual(self.stats.command_show(owner='Foo', name='_bar_'),
                          isc.config.create_answer(
-                1, "specified arguments are incorrect: owner: Foo, name: _bar_"))
+                1,
+                "specified arguments are incorrect: owner: Foo, name: _bar_"))
         self.assertEqual(self.stats.command_show(owner='Foo', name='bar'),
                          isc.config.create_answer(
-                1, "specified arguments are incorrect: owner: Foo, name: bar"))
+                1,
+                "specified arguments are incorrect: owner: Foo, name: bar"))
 
-        for a in list_auth:
-            sum_qtcp += a.queries_tcp
-            sum_qudp += a.queries_udp
-            sum_qtcp_perzone1 += a.queries_per_zone[0]['queries.tcp']
-            sum_qudp_perzone1 += a.queries_per_zone[0]['queries.udp']
-            sum_qtcp_nds_perzone10 += a.nds_queries_per_zone['test10.example']['queries.tcp']
-            sum_qudp_nds_perzone10 += a.nds_queries_per_zone['test10.example']['queries.udp']
+    def test_command_show_auth(self):
+        self.stats = MyStats()
+        self.stats.update_modules = lambda: None
+
+        # Test data borrowed from test_update_statistics_data_withmid
+        create_answer = isc.config.ccsession.create_answer # shortcut
+        self.stats._answers = [
+            (create_answer(0, [[1034, 'b10-auth-1', 'Auth'],
+                               [1035, 'b10-auth-2', 'Auth']]),  None),
+            (create_answer(0, self.stats._auth_sdata), {'from': 'auth1'}),
+            (create_answer(0, self.stats._auth_sdata), {'from': 'auth2'}),
+            (create_answer(0, self.stats._auth_sdata), {'from': 'auth3'})
+            ]
+
+        num_instances = 2
+        sum_qtcp = 0
+        sum_qudp = 0
+        sum_qtcp_perzone1 = 0
+        sum_qudp_perzone1 = 0
+        sum_qtcp_perzone2 = 4 * num_instances
+        sum_qudp_perzone2 = 3 * num_instances
+        sum_qtcp_nds_perzone10 = 0
+        sum_qudp_nds_perzone10 = 0
+        sum_qtcp_nds_perzone20 = 4 * num_instances
+        sum_qudp_nds_perzone20 = 3 * num_instances
+
+        self.maxDiff = None
+        for _ in range(num_instances):
+            sum_qtcp += self.stats._queries_tcp
+            sum_qudp += self.stats._queries_udp
+            sum_qtcp_perzone1 += self.stats._queries_per_zone[0]['queries.tcp']
+            sum_qudp_perzone1 += self.stats._queries_per_zone[0]['queries.udp']
+            sum_qtcp_nds_perzone10 += \
+                self.stats._nds_queries_per_zone['test10.example']['queries.tcp']
+            sum_qudp_nds_perzone10 += \
+                self.stats._nds_queries_per_zone['test10.example']['queries.udp']
 
         self.assertEqual(self.stats.command_show(owner='Auth'),
                          isc.config.create_answer(
@@ -926,26 +939,33 @@ class TestStats(unittest.TestCase):
                             'test20.example': {
                                 'queries.udp': sum_qudp_nds_perzone20,
                                 'queries.tcp': sum_qtcp_nds_perzone20 }}}}))
+
+    def test_command_show_stats(self):
+        self.stats = MyStats()
         orig_get_datetime = stats.get_datetime
         orig_get_timestamp = stats.get_timestamp
         stats.get_datetime = lambda x=None: self.const_datetime
         stats.get_timestamp = lambda : self.const_timestamp
-        self.assertEqual(self.stats.command_show(owner='Stats', name='report_time'),
+        self.assertEqual(self.stats.command_show(owner='Stats',
+                                                 name='report_time'),
                          isc.config.create_answer(
                 0, {'Stats': {'report_time':self.const_datetime}}))
-        self.assertEqual(self.stats.command_show(owner='Stats', name='timestamp'),
+        self.assertEqual(self.stats.command_show(owner='Stats',
+                                                 name='timestamp'),
                          isc.config.create_answer(
                 0, {'Stats': {'timestamp':self.const_timestamp}}))
         stats.get_datetime = orig_get_datetime
         stats.get_timestamp = orig_get_timestamp
-        self.stats.modules[self.stats.module_name] = isc.config.module_spec.ModuleSpec(
-            { "module_name": self.stats.module_name,
-              "statistics": [] } )
+        self.stats.do_polling = lambda : None
+        self.stats.modules[self.stats.module_name] = \
+            isc.config.module_spec.ModuleSpec(
+            { "module_name": self.stats.module_name, "statistics": [] } )
         self.assertRaises(
-            stats.StatsError, self.stats.command_show, owner=self.stats.module_name, name='bar')
+            stats.StatsError, self.stats.command_show,
+            owner=self.stats.module_name, name='bar')
 
     def test_command_showchema(self):
-        self.stats = stats.Stats()
+        self.stats = MyStats()
         (rcode, value) = isc.config.ccsession.parse_answer(
             self.stats.command_showschema())
         self.assertEqual(rcode, 0)
@@ -1261,98 +1281,130 @@ class TestStats(unittest.TestCase):
                          isc.config.create_answer(
                 1, "module name is not specified"))
 
-    @unittest.skipIf(sys.version_info >= (3, 3), "Unsupported in Python 3.3 or higher")
-    def test_polling(self):
-        stats_server = ThreadingServerManager(MyStats)
-        stat = stats_server.server
-        stats_server.run()
-        self.assertEqual(
-            send_command('show', 'Stats'),
-            (0, stat.statistics_data))
-        # check statistics data of 'Init'
-        b10_init = self.base.b10_init.server
-        self.assertEqual(
-            stat.statistics_data_bymid['Init'][b10_init.cc_session.lname],
-            {'boot_time': self.const_datetime})
-        self.assertEqual(
-            len(stat.statistics_data_bymid['Init']), 1)
+    def test_polling_init(self):
+        """check statistics data of 'Init'."""
+
+        stat = MyStats()
+        stat.update_modules = lambda: None
+        create_answer = isc.config.ccsession.create_answer # shortcut
+
+        stat._answers = [
+            # Answer for "show_processes"
+            (create_answer(0, []),  None),
+            # Answer for "getstats" for Init
+            (create_answer(0, {'boot_time': self.const_datetime}),
+             {'from': 'init'}),
+            ]
+
+        stat.do_polling()
         self.assertEqual(
-            stat.statistics_data['Init'],
+            stat.statistics_data_bymid['Init']['init'],
             {'boot_time': self.const_datetime})
-        # check statistics data of each 'Auth' instances
-        list_auth = ['', '2']
-        for i in list_auth:
-            auth = getattr(self.base,"auth"+i).server
-            for s in stat.statistics_data_bymid['Auth'].values():
-                self.assertEqual(
-                    s, {'queries.perzone': auth.queries_per_zone,
-                        'nds_queries.perzone': auth.nds_queries_per_zone,
-                        'queries.tcp': auth.queries_tcp,
-                        'queries.udp': auth.queries_udp})
-            n = len(stat.statistics_data_bymid['Auth'])
-            self.assertEqual(n, len(list_auth))
-            # check consolidation of statistics data of the auth
-            # instances
+
+    def test_polling_consolidate(self):
+        """check statistics data of multiple instances of same module."""
+        stat = MyStats()
+        stat.update_modules = lambda: None
+        create_answer = isc.config.ccsession.create_answer # shortcut
+
+        # Test data borrowed from test_update_statistics_data_withmid
+        stat._answers = [
+            (create_answer(0, [[1034, 'b10-auth-1', 'Auth'],
+                               [1035, 'b10-auth-2', 'Auth']]),  None),
+            (create_answer(0, stat._auth_sdata), {'from': 'auth1'}),
+            (create_answer(0, stat._auth_sdata), {'from': 'auth2'}),
+            (create_answer(0, stat._auth_sdata), {'from': 'auth3'})
+            ]
+
+        stat.do_polling()
+
+        # check statistics data of each 'Auth' instance.  expected data
+        # for 'nds_queries.perzone' is special as it needs data merge.
+        self.assertEqual(2, len(stat.statistics_data_bymid['Auth'].values()))
+        for s in stat.statistics_data_bymid['Auth'].values():
             self.assertEqual(
-                stat.statistics_data['Auth'],
-                {'queries.perzone': [
-                        {'zonename':
-                             auth.queries_per_zone[0]['zonename'],
-                         'queries.tcp':
-                             auth.queries_per_zone[0]['queries.tcp']*n,
-                         'queries.udp':
-                             auth.queries_per_zone[0]['queries.udp']*n},
-                        {'zonename': "test2.example",
-                         'queries.tcp': 4*n,
-                         'queries.udp': 3*n },
-                        ],
-                 'nds_queries.perzone': {
-                         'test10.example': {
-                             'queries.tcp':
-                                 auth.nds_queries_per_zone['test10.example']['queries.tcp']*n,
-                             'queries.udp':
-                                 auth.nds_queries_per_zone['test10.example']['queries.udp']*n},
-                         'test20.example': {
-                             'queries.tcp':
-                                 4*n,
-                             'queries.udp':
-                                 3*n},
-                         },
-                 'queries.tcp': auth.queries_tcp*n,
-                 'queries.udp': auth.queries_udp*n})
-        # check statistics data of 'Stats'
+                s, {'queries.perzone': stat._auth_sdata['queries.perzone'],
+                    'nds_queries.perzone': stat._nds_queries_per_zone,
+                    'queries.tcp': stat._auth_sdata['queries.tcp'],
+                    'queries.udp': stat._auth_sdata['queries.udp']})
+
+        # check consolidation of statistics data of the auth instances.
+        # it's the union of the reported data and the spec default.
+        n = len(stat.statistics_data_bymid['Auth'].values())
+        self.maxDiff = None
         self.assertEqual(
-            len(stat.statistics_data['Stats']), 5)
-        self.assertTrue('boot_time' in
-            stat.statistics_data['Stats'])
-        self.assertTrue('last_update_time' in
-            stat.statistics_data['Stats'])
-        self.assertTrue('report_time' in
-            stat.statistics_data['Stats'])
-        self.assertTrue('timestamp' in
-            stat.statistics_data['Stats'])
-        self.assertEqual(
-            stat.statistics_data['Stats']['lname'],
-            stat.mccs._session.lname)
-        stats_server.shutdown()
+            stat.statistics_data['Auth'],
+            {'queries.perzone': [
+                    {'zonename': 'test1.example',
+                     'queries.tcp': 5 * n,
+                     'queries.udp': 4 * n},
+                    {'zonename': 'test2.example',
+                     'queries.tcp': 4 * n,
+                     'queries.udp': 3 * n},
+                    ],
+             'nds_queries.perzone': {
+                    'test10.example': {
+                        'queries.tcp': 5 * n,
+                        'queries.udp': 4 * n
+                        },
+                    'test20.example': {
+                        'queries.tcp': 4 * n,
+                        'queries.udp': 3 * n
+                        },
+                    },
+             'queries.tcp': 3 * n,
+             'queries.udp': 2 * n})
+
+    def test_polling_stats(self):
+        """Check statistics data of 'Stats'
+
+        This is actually irrelevant to do_polling(), but provided for
+        compatibility with older tests.
+
+        """
+        stat = MyStats()
+        self.assertEqual(len(stat.statistics_data['Stats']), 5)
+        self.assertTrue('boot_time' in stat.statistics_data['Stats'])
+        self.assertTrue('last_update_time' in stat.statistics_data['Stats'])
+        self.assertTrue('report_time' in stat.statistics_data['Stats'])
+        self.assertTrue('timestamp' in stat.statistics_data['Stats'])
+        self.assertEqual(stat.statistics_data['Stats']['lname'],
+                         stat.mccs._session.lname)
 
     def test_polling2(self):
-        # set invalid statistics
-        b10_init = self.base.b10_init.server
-        b10_init.statistics_data = {'boot_time':1}
-        stats_server = ThreadingServerManager(MyStats)
-        stat = stats_server.server
-        stats_server.run()
-        self.assertEqual(
-            send_command('status', 'Stats'),
-            (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+        """Test do_polling() doesn't incorporate broken statistics data.
+
+        Actually, this is not a test for do_polling() itself.  It's bad, but
+        fixing that is the subject of a different ticket.
+
+        """
+        stat = MyStats()
         # check default statistics data of 'Init'
         self.assertEqual(
-            stat.statistics_data['Init'],
-            {'boot_time': self.const_default_datetime})
-        stats_server.shutdown()
+             stat.statistics_data['Init'],
+             {'boot_time': self.const_default_datetime})
+
+        # set invalid statistics
+        create_answer = isc.config.ccsession.create_answer # shortcut
+        stat._answers = [
+            # Answer for "show_processes"
+            (create_answer(0, []),  None),
+            # Answers for "getstats" for Init (type of boot_time is invalid)
+            (create_answer(0, {'boot_time': 1}), {'from': 'init'}),
+            ]
+        stat.update_modules = lambda: None
+
+        # do_polling() should ignore the invalid answer;
+        # default data shouldn't be replaced.
+        stat.do_polling()
+        self.assertEqual(
+             stat.statistics_data['Init'],
+             {'boot_time': self.const_default_datetime})
 
-class TestOSEnv(unittest.TestCase):
+class Z_TestOSEnv(unittest.TestCase):
+    # Running this test would break the logging settings.  To prevent it
+    # from affecting other tests we use the same workaround as
+    # Z_TestStatsHttpdError.
     def test_osenv(self):
         """
         test for the environ variable "B10_FROM_SOURCE"

+ 118 - 247
src/bin/stats/tests/test_utils.py

@@ -20,13 +20,11 @@ Utilities and mock modules for unittests of statistics modules
 import os
 import io
 import time
-import sys
 import threading
-import tempfile
 import json
 import signal
+import socket
 
-import msgq
 import isc.config.cfgmgr
 import stats
 import stats_httpd
@@ -48,22 +46,9 @@ class SignalHandler():
         signal.signal(signal.SIGALRM, self.orig_handler)
 
     def sig_handler(self, signal, frame):
-        """envokes unittest.TestCase.fail as a signal handler"""
+        """invokes unittest.TestCase.fail as a signal handler"""
         self.fail_handler("A deadlock might be detected")
 
-def send_command(command_name, module_name, params=None):
-    cc_session = isc.cc.Session()
-    command = isc.config.ccsession.create_command(command_name, params)
-    seq = cc_session.group_sendmsg(command, module_name)
-    try:
-        (answer, env) = cc_session.group_recvmsg(False, seq)
-        if answer:
-            return isc.config.ccsession.parse_answer(answer)
-    except isc.cc.SessionTimeout:
-        pass
-    finally:
-        cc_session.close()
-
 class ThreadingServerManager:
     def __init__(self, server, *args, **kwargs):
         self.server = server(*args, **kwargs)
@@ -91,45 +76,7 @@ class ThreadingServerManager:
         else:
             self.server._thread.join(0) # timeout is 0
 
-class MockMsgq:
-    def __init__(self):
-        self._started = threading.Event()
-        self.msgq = msgq.MsgQ(verbose=False)
-        result = self.msgq.setup()
-        if result:
-            sys.exit("Error on Msgq startup: %s" % result)
-
-    def run(self):
-        self._started.set()
-        try:
-            self.msgq.run()
-        finally:
-            # Make sure all the sockets, etc, are removed once it stops.
-            self.msgq.shutdown()
-
-    def shutdown(self):
-        # Ask it to terminate nicely
-        self.msgq.stop()
-
-class MockCfgmgr:
-    def __init__(self):
-        self._started = threading.Event()
-        self.cfgmgr = isc.config.cfgmgr.ConfigManager(
-            os.environ['CONFIG_TESTDATA_PATH'], "b10-config.db")
-        self.cfgmgr.read_config()
-
-    def run(self):
-        self._started.set()
-        try:
-            self.cfgmgr.run()
-        except Exception:
-            pass
-
-    def shutdown(self):
-        self.cfgmgr.running = False
-
-class MockInit:
-    spec_str = """\
+INIT_SPEC_STR = """\
 {
   "module_spec": {
     "module_name": "Init",
@@ -221,56 +168,12 @@ class MockInit:
   }
 }
 """
-    _BASETIME = CONST_BASETIME
 
-    def __init__(self):
-        self._started = threading.Event()
-        self.running = False
-        self.spec_file = io.StringIO(self.spec_str)
-        # create ModuleCCSession object
-        self.mccs = isc.config.ModuleCCSession(
-            self.spec_file,
-            self.config_handler,
-            self.command_handler)
-        self.spec_file.close()
-        self.cc_session = self.mccs._session
-        self.got_command_name = ''
-        self.pid_list = [[ 9999, "b10-auth", "Auth" ],
-                         [ 9998, "b10-auth-2", "Auth" ]]
-        self.statistics_data = {
-            'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', self._BASETIME)
-            }
-
-    def run(self):
-        self.mccs.start()
-        self.running = True
-        self._started.set()
-        try:
-            while self.running:
-                self.mccs.check_command(False)
-        except Exception:
-            pass
-
-    def shutdown(self):
-        self.running = False
-
-    def config_handler(self, new_config):
-        return isc.config.create_answer(0)
-
-    def command_handler(self, command, *args, **kwargs):
-        self._started.set()
-        self.got_command_name = command
-        sdata = self.statistics_data
-        if command == 'getstats':
-            return isc.config.create_answer(0, sdata)
-        elif command == 'show_processes':
-            # Return dummy pids
-            return isc.config.create_answer(
-                0, self.pid_list)
-        return isc.config.create_answer(1, "Unknown Command")
-
-class MockAuth:
-    spec_str = """\
+# Note: this is derived from the spec for the DNS authoritative server, but
+# for the purpose of this test, it's completely irrelevant to DNS.
+# Some statistics specs do not make practical sense, but are used just to
+# cover various types of statistics data (list, map/dict, etc).
+AUTH_SPEC_STR = """\
 {
   "module_spec": {
     "module_name": "Auth",
@@ -392,68 +295,6 @@ class MockAuth:
   }
 }
 """
-    def __init__(self):
-        self._started = threading.Event()
-        self.running = False
-        self.spec_file = io.StringIO(self.spec_str)
-        # create ModuleCCSession object
-        self.mccs = isc.config.ModuleCCSession(
-            self.spec_file,
-            self.config_handler,
-            self.command_handler)
-        self.spec_file.close()
-        self.cc_session = self.mccs._session
-        self.got_command_name = ''
-        self.queries_tcp = 3
-        self.queries_udp = 2
-        self.queries_per_zone = [{
-                'zonename': 'test1.example',
-                'queries.tcp': 5,
-                'queries.udp': 4
-                }]
-        self.nds_queries_per_zone = {
-            'test10.example': {
-                'queries.tcp': 5,
-                'queries.udp': 4
-                }
-            }
-
-    def run(self):
-        self.mccs.start()
-        self.running = True
-        self._started.set()
-        try:
-            while self.running:
-                self.mccs.check_command(False)
-        except Exception:
-            pass
-
-    def shutdown(self):
-        self.running = False
-
-    def config_handler(self, new_config):
-        return isc.config.create_answer(0)
-
-    def command_handler(self, command, *args, **kwargs):
-        self.got_command_name = command
-        sdata = { 'queries.tcp': self.queries_tcp,
-                  'queries.udp': self.queries_udp,
-                  'queries.perzone' : self.queries_per_zone,
-                  'nds_queries.perzone' : {
-                    'test10.example': {
-                    'queries.tcp': \
-                      isc.cc.data.find(
-                        self.nds_queries_per_zone,
-                        'test10.example/queries.tcp')
-                    }
-                  },
-                  'nds_queries.perzone/test10.example/queries.udp' :
-                      isc.cc.data.find(self.nds_queries_per_zone,
-                                       'test10.example/queries.udp')
-                }
-        if command == 'getstats':
-            return isc.config.create_answer(0, sdata)
-        return isc.config.create_answer(1, "Unknown Command")
 
 class MyModuleCCSession(isc.config.ConfigData):
     """Mocked ModuleCCSession class.
@@ -468,6 +309,7 @@ class MyModuleCCSession(isc.config.ConfigData):
         isc.config.ConfigData.__init__(self, module_spec)
         self._session = self
         self.stopped = False
+        self.closed = False
         self.lname = 'mock_mod_ccs'
 
     def start(self):
@@ -476,10 +318,13 @@ class MyModuleCCSession(isc.config.ConfigData):
     def send_stopping(self):
         self.stopped = True     # just record it's called to inspect it later
 
-class SimpleStats(stats.Stats):
+    def close(self):
+        self.closed = True
+
+class MyStats(stats.Stats):
     """A faked Stats class for unit tests.
 
-    This class inherits most of the real Stats class, but replace the
+    This class inherits most of the real Stats class, but replaces the
     ModuleCCSession with a fake one so we can avoid network I/O in tests,
     and can also inspect or tweak messages via the session more easily.
     This class also maintains some faked module information and statistics
@@ -500,9 +345,9 @@ class SimpleStats(stats.Stats):
         # the default answer from faked recvmsg if _answers is empty
         self.__default_answer = isc.config.ccsession.create_answer(
             0, {'Init':
-                    json.loads(MockInit.spec_str)['module_spec']['statistics'],
+                    json.loads(INIT_SPEC_STR)['module_spec']['statistics'],
                 'Auth':
-                    json.loads(MockAuth.spec_str)['module_spec']['statistics']
+                    json.loads(AUTH_SPEC_STR)['module_spec']['statistics']
                 })
         # setup faked auth statistics
         self.__init_auth_stat()
@@ -530,24 +375,24 @@ class SimpleStats(stats.Stats):
     def __init_auth_stat(self):
         self._queries_tcp = 3
         self._queries_udp = 2
-        self.__queries_per_zone = [{
+        self._queries_per_zone = [{
                 'zonename': 'test1.example', 'queries.tcp': 5, 'queries.udp': 4
                 }]
-        self.__nds_queries_per_zone = \
+        self._nds_queries_per_zone = \
             { 'test10.example': { 'queries.tcp': 5, 'queries.udp': 4 } }
         self._auth_sdata = \
             { 'queries.tcp': self._queries_tcp,
               'queries.udp': self._queries_udp,
-              'queries.perzone' : self.__queries_per_zone,
+              'queries.perzone' : self._queries_per_zone,
               'nds_queries.perzone' : {
                 'test10.example': {
                     'queries.tcp': isc.cc.data.find(
-                        self.__nds_queries_per_zone,
+                        self._nds_queries_per_zone,
                         'test10.example/queries.tcp')
                     }
                 },
               'nds_queries.perzone/test10.example/queries.udp' :
-                  isc.cc.data.find(self.__nds_queries_per_zone,
+                  isc.cc.data.find(self._nds_queries_per_zone,
                                    'test10.example/queries.udp')
               }
 
@@ -589,32 +434,62 @@ class SimpleStats(stats.Stats):
         answer, _ = self.__group_recvmsg(None, None)
         return isc.config.ccsession.parse_answer(answer)[1]
 
-class MyStats(stats.Stats):
-
-    stats._BASETIME = CONST_BASETIME
-    stats.get_timestamp = lambda: time.mktime(CONST_BASETIME)
-    stats.get_datetime = lambda x=None: time.strftime("%Y-%m-%dT%H:%M:%SZ", CONST_BASETIME)
-
-    def __init__(self):
-        self._started = threading.Event()
-        stats.Stats.__init__(self)
+class MyStatsHttpd(stats_httpd.StatsHttpd):
+    """A faked StatsHttpd class for unit tests.
 
-    def run(self):
-        self._started.set()
-        try:
-            self.start()
-        except Exception:
-            pass
+    This class inherits most of the real StatsHttpd class, but replaces the
+    ModuleCCSession with a fake one so we can avoid network I/O in tests,
+    and can also inspect or tweak messages via the session more easily.
 
-    def shutdown(self):
-        self.command_shutdown()
+    """
 
-class MyStatsHttpd(stats_httpd.StatsHttpd):
     ORIG_SPECFILE_LOCATION = stats_httpd.SPECFILE_LOCATION
     def __init__(self, *server_address):
         self._started = threading.Event()
+        self.__dummy_sock = None # see below
+
+        # Prepare commonly used statistics schema and data requested in
+        # stats-httpd tests.  For the purpose of these tests, the content of
+        # statistics data is not so important (they don't test whether the
+        # counter values are correct, etc), so hardcoding the common case
+        # should suffice.  Note also that some of the statistics values and
+        # specs don't make sense in practice (see also comments on
+        # AUTH_SPEC_STR).
+        with open(stats.SPECFILE_LOCATION) as f:
+            stat_spec_str = f.read()
+        self.__default_spec_answer = {
+            'Init': json.loads(INIT_SPEC_STR)['module_spec']['statistics'],
+            'Auth': json.loads(AUTH_SPEC_STR)['module_spec']['statistics'],
+            'Stats': json.loads(stat_spec_str)['module_spec']['statistics']
+            }
+        self.__default_data_answer = {
+            'Init': {'boot_time':
+                         time.strftime('%Y-%m-%dT%H:%M:%SZ', CONST_BASETIME)},
+            'Stats': {'last_update_time':
+                          time.strftime('%Y-%m-%dT%H:%M:%SZ', CONST_BASETIME),
+                      'report_time':
+                          time.strftime('%Y-%m-%dT%H:%M:%SZ', CONST_BASETIME),
+                      'lname': 'test-lname',
+                      'boot_time':
+                          time.strftime('%Y-%m-%dT%H:%M:%SZ', CONST_BASETIME),
+                      'timestamp': time.mktime(CONST_BASETIME)},
+            'Auth': {'queries.udp': 4, 'queries.tcp': 6,
+                     'queries.perzone': [
+                    {'queries.udp': 8, 'queries.tcp': 10,
+                     'zonename': 'test1.example'},
+                    {'queries.udp': 6, 'queries.tcp': 8,
+                     'zonename': 'test2.example'}],
+                     'nds_queries.perzone': {
+                    'test10.example': {'queries.udp': 8, 'queries.tcp': 10},
+                    'test20.example': {'queries.udp': 6, 'queries.tcp': 8}}}}
+
+        # if set, use them as faked response to rpc_call (see below).
+        # it's a list of answer data of rpc_call.
+        self._rpc_answers = []
+
         if server_address:
-            stats_httpd.SPECFILE_LOCATION = self.create_specfile(*server_address)
+            stats_httpd.SPECFILE_LOCATION = \
+                self.__create_specfile(*server_address)
             try:
                 stats_httpd.StatsHttpd.__init__(self)
             finally:
@@ -624,7 +499,51 @@ class MyStatsHttpd(stats_httpd.StatsHttpd):
         else:
             stats_httpd.StatsHttpd.__init__(self)
 
-    def create_specfile(self, *server_address):
+        # replace some (faked) ModuleCCSession methods so we can inspect/fake.
+        # in order to satisfy select.select() we need some real socket.  We
+        # use an unusable AF_UNIX socket; we won't actually use it for
+        # communication.
+        self.cc_session.rpc_call = self.__rpc_call
+        self.__dummy_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        self.mccs.get_socket = lambda: self.__dummy_sock
+
+    def open_mccs(self):
+        self.mccs = MyModuleCCSession(stats_httpd.SPECFILE_LOCATION,
+                                      self.config_handler,
+                                      self.command_handler)
+        self.cc_session = self.mccs._session
+        self.mccs.start = self.load_config # force reload
+
+    def close_mccs(self):
+        super().close_mccs()
+        if self.__dummy_sock is not None:
+            self.__dummy_sock.close()
+            self.__dummy_sock = None
+
+    def __rpc_call(self, command, group, params={}):
+        """Faked ModuleCCSession.rpc_call for tests.
+
+        The stats httpd module only issues two commands: 'showschema' and
+        'show'.  In most cases we can simply use the prepared default
+        answer.  If customization is needed, the test case can add a
+        faked answer by appending it to _rpc_answers.  If the added object
+        is of Exception type, this method raises it instead of returning it,
+        emulating the situation where rpc_call() results in an exception.
+
+        """
+        if len(self._rpc_answers) == 0:
+            if command == 'showschema':
+                return self.__default_spec_answer
+            elif command == 'show':
+                return self.__default_data_answer
+            assert False, "unexpected command for faked rpc_call: " + command
+
+        answer = self._rpc_answers.pop(0)
+        if issubclass(type(answer), Exception):
+            raise answer
+        return answer
+
+    def __create_specfile(self, *server_address):
         spec_io = open(self.ORIG_SPECFILE_LOCATION)
         try:
             spec = json.load(spec_io)
@@ -633,7 +552,8 @@ class MyStatsHttpd(stats_httpd.StatsHttpd):
             for i in range(len(config)):
                 if config[i]['item_name'] == 'listen_on':
                     config[i]['item_default'] = \
-                        [ dict(address=a[0], port=a[1]) for a in server_address ]
+                        [ dict(address=a[0], port=a[1])
+                          for a in server_address ]
                     break
             return io.StringIO(json.dumps(spec))
         finally:
@@ -641,53 +561,4 @@ class MyStatsHttpd(stats_httpd.StatsHttpd):
 
     def run(self):
         self._started.set()
-        try:
-            self.start()
-        except Exception:
-            pass
-
-    def shutdown(self):
-        self.command_handler('shutdown', None)
-
-class BaseModules:
-    def __init__(self):
-        # MockMsgq
-        self.msgq = ThreadingServerManager(MockMsgq)
-        self.msgq.run()
-        # Check whether msgq is ready. A SessionTimeout is raised here if not.
-        isc.cc.session.Session().close()
-        # MockCfgmgr
-        self.cfgmgr = ThreadingServerManager(MockCfgmgr)
-        self.cfgmgr.run()
-        # MockInit
-        self.b10_init = ThreadingServerManager(MockInit)
-        self.b10_init.run()
-        # MockAuth
-        self.auth = ThreadingServerManager(MockAuth)
-        self.auth.run()
-        self.auth2 = ThreadingServerManager(MockAuth)
-        self.auth2.run()
-
-
-    def shutdown(self):
-        # MockMsgq. We need to wait (blocking) for it, otherwise it'll wipe out
-        # a socket for another test during its shutdown.
-        self.msgq.shutdown(True)
-
-        # We also wait for the others, but these are just so we don't create
-        # too many threads in parallel.
-
-        # MockAuth
-        self.auth2.shutdown(True)
-        self.auth.shutdown(True)
-        # MockInit
-        self.b10_init.shutdown(True)
-        # MockCfgmgr
-        self.cfgmgr.shutdown(True)
-        # remove the unused socket file
-        socket_file = self.msgq.server.msgq.socket_file
-        try:
-            if os.path.exists(socket_file):
-                os.remove(socket_file)
-        except OSError:
-            pass
+        self.start()
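
As a usage note for the rewritten fakes above: instead of spinning up
msgq/cfgmgr/auth threads, a test now preloads canned answers into
MyStats._answers and drives the polling directly.  A minimal sketch of that
pattern, mirroring test_polling_init and assuming test_utils and the stats
module are importable as in these tests:

    import time
    import isc.config.ccsession
    from test_utils import MyStats, CONST_BASETIME

    stat = MyStats()
    stat.update_modules = lambda: None  # skip real module (re)discovery
    create_answer = isc.config.ccsession.create_answer  # shortcut
    stat._answers = [
        # answer to "show_processes": no other processes running
        (create_answer(0, []), None),
        # answer to "getstats" for Init
        (create_answer(0, {'boot_time': time.strftime(
            '%Y-%m-%dT%H:%M:%SZ', CONST_BASETIME)}), {'from': 'init'}),
    ]
    stat.do_polling()  # consumes the canned answers; no network I/O involved
    # the collected data is now under statistics_data_bymid['Init']['init']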

File diff suppressed because it is too large
+ 1 - 1
src/bin/tests/Makefile.am


+ 1 - 1
src/bin/usermgr/b10-cmdctl-usermgr.py.in

@@ -19,6 +19,7 @@
 This tool implements user management for b10-cmdctl. It is used to
 add and remove users from the accounts file.
 '''
+import sys; sys.path.append ('@@PYTHONPATH@@')
 from bind10_config import SYSCONFPATH
 from collections import OrderedDict
 import random
@@ -27,7 +28,6 @@ import csv
 import getpass
 from optparse import OptionParser, OptionValueError
 import os
-import sys; sys.path.append ('@@PYTHONPATH@@')
 import isc.util.process
 
 isc.util.process.rename()
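
The reordering above presumably matters because bind10_config itself is
installed under the path appended via @@PYTHONPATH@@, so the append has to
run before the import.  A two-line sketch of the idea (the path literal is
illustrative):

    import sys; sys.path.append('/usr/lib/python3/bind10')  # illustrative path
    from bind10_config import SYSCONFPATH  # would fail if this import ran first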

+ 126 - 0
src/bin/xfrin/b10-xfrin.xml

@@ -213,6 +213,132 @@ operation
 -->
 
   </refsect1>
+  <refsect1>
+    <title>STATISTICS DATA</title>
+
+    <para>
+      The statistics data collected by the <command>b10-xfrin</command>
+      daemon for <quote>Xfrin</quote> include:
+    </para>
+
+    <variablelist>
+
+      <varlistentry>
+        <term>zones</term>
+        <listitem><simpara>
+          A directory name of per-zone statistics
+          </simpara>
+          <variablelist>
+
+            <varlistentry>
+              <term><replaceable>zonename</replaceable></term>
+              <listitem><simpara>
+                An actual zone name or special zone name
+                <quote>_SERVER_</quote> representing the entire server.
+                Zone classes (e.g. IN, CH, and HS) are currently mixed
+                and counted together, but they will be distinguished in
+                a future release.
+                </simpara>
+                <variablelist>
+
+                  <varlistentry>
+                    <term>soaoutv4</term>
+                    <listitem><simpara>
+                      Number of IPv4 SOA queries sent from Xfrin
+                    </simpara></listitem>
+                  </varlistentry>
+
+                  <varlistentry>
+                    <term>soaoutv6</term>
+                    <listitem><simpara>
+                      Number of IPv6 SOA queries sent from Xfrin
+                    </simpara></listitem>
+                  </varlistentry>
+
+                  <varlistentry>
+                    <term>axfrreqv4</term>
+                    <listitem><simpara>
+                      Number of IPv4 AXFR requests sent from Xfrin
+                    </simpara></listitem>
+                  </varlistentry>
+
+                  <varlistentry>
+                    <term>axfrreqv6</term>
+                    <listitem><simpara>
+                      Number of IPv6 AXFR requests sent from Xfrin
+                    </simpara></listitem>
+                  </varlistentry>
+
+                  <varlistentry>
+                    <term>ixfrreqv4</term>
+                    <listitem><simpara>
+                      Number of IPv4 IXFR requests sent from Xfrin
+                    </simpara></listitem>
+                  </varlistentry>
+
+                  <varlistentry>
+                    <term>ixfrreqv6</term>
+                    <listitem><simpara>
+                      Number of IPv6 IXFR requests sent from Xfrin
+                    </simpara></listitem>
+                  </varlistentry>
+
+                  <varlistentry>
+                    <term>xfrsuccess</term>
+                    <listitem><simpara>
+                      Number of zone transfer requests that succeeded.
+                      These include the case where the zone turns
+                      out to be the latest as a result of an
+                      initial SOA query (and there is actually no
+                      AXFR or IXFR transaction).
+                    </simpara></listitem>
+                  </varlistentry>
+
+                  <varlistentry>
+                    <term>xfrfail</term>
+                    <listitem><simpara>
+                      Number of zone transfer requests that failed
+                    </simpara></listitem>
+                  </varlistentry>
+
+                  <varlistentry>
+                    <term>last_axfr_duration</term>
+                    <listitem><simpara>
+                      Duration in seconds of the last successful AXFR.  0.0
+                      means that either no AXFR has succeeded yet or the last
+                      successful AXFR completed in less than a microsecond.
+                      If an AXFR is aborted due to some failure, this
+                      duration won't be updated.
+                    </simpara></listitem>
+                  </varlistentry>
+
+                  <varlistentry>
+                    <term>last_ixfr_duration</term>
+                    <listitem><simpara>
+                      Duration in seconds of the last successful IXFR.  0.0
+                      means that either no IXFR has succeeded yet or the last
+                      successful IXFR completed in less than a microsecond.
+                      If an IXFR is aborted due to some failure, this
+                      duration won't be updated.
+                    </simpara></listitem>
+                  </varlistentry>
+
+                </variablelist>
+              </listitem>
+            </varlistentry><!-- end of zonename -->
+
+          </variablelist>
+        </listitem>
+      </varlistentry><!-- end of zones -->
+
+    </variablelist>
+
+    <para>
+      Among the per-zone counters there is the special zone name
+      <quote>_SERVER_</quote>.  It does not refer to a specific zone; it
+      represents the entire server, and each counter value for this
+      special zone is the total of the same counter over all zones.
+    </para>
+
+  </refsect1>
 
 <!--
   <refsect1>
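
For illustration only: the counters documented above are reported as a
nested map under "zones", keyed by zone name.  A sketch of one possible
snapshot (the zone name and values are illustrative; per the description,
the _SERVER_ entry holds the totals of each counter over all zones):

    {"zones": {
        "_SERVER_":    {"soaoutv4": 3, "soaoutv6": 0,
                        "axfrreqv4": 1, "axfrreqv6": 0,
                        "ixfrreqv4": 2, "ixfrreqv6": 0,
                        "xfrsuccess": 3, "xfrfail": 0,
                        "last_axfr_duration": 0.035,
                        "last_ixfr_duration": 0.002},
        "example.com": {"soaoutv4": 3, "soaoutv6": 0,
                        "axfrreqv4": 1, "axfrreqv6": 0,
                        "ixfrreqv4": 2, "ixfrreqv6": 0,
                        "xfrsuccess": 3, "xfrfail": 0,
                        "last_axfr_duration": 0.035,
                        "last_ixfr_duration": 0.002}}}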

File diff suppressed because it is too large
+ 1 - 1
src/bin/xfrin/tests/Makefile.am


+ 212 - 4
src/bin/xfrin/tests/xfrin_test.py

@@ -1,4 +1,4 @@
-# Copyright (C) 2009-2011  Internet Systems Consortium.
+# Copyright (C) 2009-2013  Internet Systems Consortium.
 #
 # Permission to use, copy, modify, and distribute this software for any
 # purpose with or without fee is hereby granted, provided that the above
@@ -19,6 +19,7 @@ import shutil
 import socket
 import sys
 import io
+from datetime import datetime
 from isc.testutils.tsigctx_mock import MockTSIGContext
 from isc.testutils.ccsession_mock import MockModuleCCSession
 from isc.testutils.rrset_utils import *
@@ -717,7 +718,7 @@ class TestXfrinConnection(unittest.TestCase):
         self.sock_map = {}
         self.conn = MockXfrinConnection(self.sock_map, TEST_ZONE_NAME,
                                         TEST_RRCLASS, None, threading.Event(),
-                                        TEST_MASTER_IPV4_ADDRINFO)
+                                        self._master_addrinfo)
         self.conn.init_socket()
         self.soa_response_params = {
             'questions': [example_soa_question],
@@ -749,6 +750,10 @@ class TestXfrinConnection(unittest.TestCase):
             os.remove(TEST_DB_FILE)
         xfrin.check_zone = self.__orig_check_zone
 
+    @property
+    def _master_addrinfo(self):
+        return TEST_MASTER_IPV4_ADDRINFO
+
     def __check_zone(self, name, rrclass, rrsets, callbacks):
         '''
         A mock function used instead of dns.check_zone.
@@ -1065,6 +1070,20 @@ class TestAXFR(TestXfrinConnection):
         self.assertRaises(XfrinProtocolError,
                           self.conn._handle_xfrin_responses)
 
+    def test_ipver_str(self):
+        addrs = (((socket.AF_INET, socket.SOCK_STREAM), 'v4'),
+                 ((socket.AF_INET6, socket.SOCK_STREAM), 'v6'),
+                 ((socket.AF_UNIX, socket.SOCK_STREAM), None))
+        for (info, ver) in addrs:
+            c = MockXfrinConnection({}, TEST_ZONE_NAME, RRClass.CH, None,
+                                    threading.Event(), info)
+            c.init_socket()
+            if ver is not None:
+                self.assertEqual(ver, c._get_ipver_str())
+            else:
+                self.assertRaises(ValueError, c._get_ipver_str)
+            c.close()
+
     def test_soacheck(self):
         # we need to defer the creation until we know the QID, which is
         # determined in _check_soa_serial(), so we use response_generator.
@@ -2104,6 +2123,187 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
         self.assertFalse(self.record_exist(Name('dns01.example.com'),
                                            RRType.A))
 
+class TestStatisticsXfrinConn(TestXfrinConnection):
+    '''Test class based on TestXfrinConnection and including parameters
+    and methods related to statistics tests'''
+    def setUp(self):
+        super().setUp()
+        # clear all statistics counters before each test
+        self.conn._counters.clear_all()
+        # fake datetime
+        self.__orig_datetime = isc.statistics.counters.datetime
+        self.__orig_start_timer = isc.statistics.counters._start_timer
+        time1 = datetime(2000, 1, 1, 0, 0, 0, 0)
+        time2 = datetime(2000, 1, 1, 0, 0, 0, 1)
+        class FakeDateTime:
+            @classmethod
+            def now(cls): return time2
+        isc.statistics.counters.datetime = FakeDateTime
+        isc.statistics.counters._start_timer = lambda : time1
+        delta = time2 - time1
+        self._const_sec = round(delta.days * 86400 + delta.seconds +
+                                delta.microseconds * 1E-6, 6)
+        # List of statistics counter names and expected initial values
+        self.__name_to_counter = (('axfrreqv4', 0),
+                                 ('axfrreqv6', 0),
+                                 ('ixfrreqv4', 0),
+                                 ('ixfrreqv6', 0),
+                                 ('last_axfr_duration', 0.0),
+                                 ('last_ixfr_duration', 0.0),
+                                 ('soaoutv4', 0),
+                                 ('soaoutv6', 0),
+                                 ('xfrfail', 0),
+                                 ('xfrsuccess', 0))
+        self.__zones = 'zones'
+
+    def tearDown(self):
+        super().tearDown()
+        isc.statistics.counters.datetime = self.__orig_datetime
+        isc.statistics.counters._start_timer = self.__orig_start_timer
+
+    @property
+    def _ipver(self):
+        return 'v4'
+
+    def _check_init_statistics(self):
+        '''checks that an exception is raised when getting a statistics
+        counter that has not been incremented'''
+        for (name, exp) in self.__name_to_counter:
+            self.assertRaises(isc.cc.data.DataNotFoundError,
+                              self.conn._counters.get, self.__zones,
+                              TEST_ZONE_NAME_STR, name)
+
+    def _check_updated_statistics(self, overwrite):
+        '''checks that the expected values are obtained after applying
+        the counter name/value pairs in the "overwrite" dictionary to
+        the expected defaults'''
+        name2count = dict(self.__name_to_counter)
+        name2count.update(overwrite)
+        for (name, exp) in name2count.items():
+            act = self.conn._counters.get(self.__zones,
+                                          TEST_ZONE_NAME_STR,
+                                          name)
+            msg = '%s is expected %s but actually %s' % (name, exp, act)
+            self.assertEqual(exp, act, msg=msg)
+
+class TestStatisticsXfrinAXFRv4(TestStatisticsXfrinConn):
+    '''Xfrin AXFR tests for IPv4 to check statistics counters'''
+    def test_soaout(self):
+        '''tests that an soaoutv4 or soaoutv6 counter is incremented
+        when an SOA query succeeds'''
+        self.conn.response_generator = self._create_soa_response_data
+        self._check_init_statistics()
+        self.assertEqual(self.conn._check_soa_serial(), XFRIN_OK)
+        self._check_updated_statistics({'soaout' + self._ipver: 1})
+
+    def test_axfrreq_xfrsuccess_last_axfr_duration(self):
+        '''tests that axfrreqv4 or axfrreqv6 and xfrsuccess counters
+        and last_axfr_duration timer are incremented when xfr succeeds'''
+        self.conn.response_generator = self._create_normal_response_data
+        self._check_init_statistics()
+        self.assertEqual(self.conn.do_xfrin(False), XFRIN_OK)
+        self._check_updated_statistics({'axfrreq' + self._ipver: 1,
+                                        'xfrsuccess': 1,
+                                        'last_axfr_duration': self._const_sec})
+
+    def test_axfrreq_xfrsuccess_last_axfr_duration2(self):
+        '''tests that axfrreqv4 or axfrreqv6 and xfrsuccess counters
+        and last_axfr_duration timer are incremented when raising
+        XfrinZoneUptodate. The exception is treated as success.'''
+        def exception_raiser():
+            raise XfrinZoneUptodate()
+        self.conn._handle_xfrin_responses = exception_raiser
+        self._check_init_statistics()
+        self.assertEqual(self.conn.do_xfrin(False), XFRIN_OK)
+        self._check_updated_statistics({'axfrreq' + self._ipver: 1,
+                                        'xfrsuccess': 1,
+                                        'last_axfr_duration':
+                                            self._const_sec})
+
+    def test_axfrreq_xfrfail(self):
+        '''tests that axfrreqv4 or axfrreqv6 and xfrfail counters are
+        incremented even if some failure exceptions are expected to be
+        raised inside do_xfrin(): XfrinZoneError, XfrinProtocolError,
+        XfrinException, and Exception'''
+        self._check_init_statistics()
+        count = 0
+        for ex in [XfrinZoneError, XfrinProtocolError, XfrinException,
+                   Exception]:
+            def exception_raiser():
+                raise ex()
+            self.conn._handle_xfrin_responses = exception_raiser
+            self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
+            count += 1
+            self._check_updated_statistics({'axfrreq' + self._ipver: count,
+                                            'xfrfail': count})
+
+class TestStatisticsXfrinIXFRv4(TestStatisticsXfrinConn):
+    '''Xfrin IXFR tests for IPv4 to check statistics counters'''
+    def test_ixfrreq_xfrsuccess_last_ixfr_duration(self):
+        '''tests that ixfrreqv4 or ixfrreqv6 and xfrsuccess counters
+        and last_ixfr_duration timer are incremented when xfr succeeds'''
+        def create_ixfr_response():
+            self.conn.reply_data = self.conn.create_response_data(
+                questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+                                    RRType.IXFR)],
+                answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
+        self.conn.response_generator = create_ixfr_response
+        self._check_init_statistics()
+        self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR))
+        self._check_updated_statistics({'ixfrreq' + self._ipver: 1,
+                                        'xfrsuccess': 1,
+                                        'last_ixfr_duration':
+                                            self._const_sec})
+
+    def test_ixfrreq_xfrsuccess_last_ixfr_duration2(self):
+        '''tests that ixfrreqv4 or ixfrreqv6 and xfrsuccess counters
+        and last_ixfr_duration timer are incremented when raising
+        XfrinZoneUptodate. The exception is treated as success.'''
+        def exception_raiser():
+            raise XfrinZoneUptodate()
+        self.conn._handle_xfrin_responses = exception_raiser
+        self._check_init_statistics()
+        self.assertEqual(self.conn.do_xfrin(False, RRType.IXFR), XFRIN_OK)
+        self._check_updated_statistics({'ixfrreq' + self._ipver: 1,
+                                        'xfrsuccess': 1,
+                                        'last_ixfr_duration':
+                                            self._const_sec})
+
+    def test_ixfrreq_xfrfail(self):
+        '''tests that ixfrreqv4 or ixfrreqv6 and xfrfail counters are
+        incremented even if some failure exceptions are expected to be
+        raised inside do_xfrin(): XfrinZoneError, XfrinProtocolError,
+        XfrinException, and Exception'''
+        self._check_init_statistics()
+        count = 0
+        for ex in [XfrinZoneError, XfrinProtocolError, XfrinException,
+                   Exception]:
+            def exception_raiser():
+                raise ex()
+            self.conn._handle_xfrin_responses = exception_raiser
+            self.assertEqual(self.conn.do_xfrin(False, RRType.IXFR), XFRIN_FAIL)
+            count += 1
+            self._check_updated_statistics({'ixfrreq' + self._ipver: count,
+                                            'xfrfail': count})
+
+class TestStatisticsXfrinAXFRv6(TestStatisticsXfrinAXFRv4):
+    '''Same tests as TestStatisticsXfrinAXFRv4 for IPv6'''
+    @property
+    def _master_addrinfo(self):
+        return TEST_MASTER_IPV6_ADDRINFO
+    @property
+    def _ipver(self):
+        return 'v6'
+
+class TestStatisticsIXFRv6(TestStatisticsXfrinIXFRv4):
+    '''Same tests as TestStatisticsXfrinIXFRv4 for IPv6'''
+    @property
+    def _master_addrinfo(self):
+        return TEST_MASTER_IPV6_ADDRINFO
+    @property
+    def _ipver(self):
+        return 'v6'
+
 class TestXfrinRecorder(unittest.TestCase):
     def setUp(self):
         self.recorder = XfrinRecorder()
@@ -2193,7 +2393,7 @@ class TestXfrinProcess(unittest.TestCase):
                                    master_addrinfo, tsig_key)
 
         # An awkward check that would specifically identify an old bug
-        # where initialziation of XfrinConnection._tsig_ctx_creator caused
+        # where initialization of XfrinConnection._tsig_ctx_creator caused
         # self reference and subsequently led to reference leak.
         orig_ref = sys.getrefcount(conn)
         conn._tsig_ctx_creator = None
@@ -2421,7 +2621,7 @@ class TestXfrin(unittest.TestCase):
         # there can be one more outstanding transfer.
         self.assertEqual(self.xfr.command_handler("retransfer",
                                                   self.args)['result'][0], 0)
-        # make sure the # xfrs would excceed the quota
+        # make sure the # xfrs would exceed the quota
         self.xfr.recorder.increment(Name(str(self.xfr._max_transfers_in) + TEST_ZONE_NAME_STR))
         # this one should fail
         self.assertEqual(self.xfr.command_handler("retransfer",
@@ -2512,6 +2712,14 @@ class TestXfrin(unittest.TestCase):
         self.assertEqual(self.xfr.config_handler({'transfers_in': 3})['result'][0], 0)
         self.assertEqual(self.xfr._max_transfers_in, 3)
 
+    def test_command_handler_getstats(self):
+        module_spec = isc.config.module_spec_from_file(
+            xfrin.SPECFILE_LOCATION)
+        ans = isc.config.parse_answer(
+            self.xfr.command_handler("getstats", None))
+        self.assertEqual(0, ans[0])
+        self.assertTrue(module_spec.validate_statistics(False, ans[1]))
+
     def _check_zones_config(self, config_given):
         if 'transfers_in' in config_given:
             self.assertEqual(config_given['transfers_in'],
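
Note on the statistics test classes added above: the IPv6 variants
(TestStatisticsXfrinAXFRv6, TestStatisticsIXFRv6) rerun the IPv4 test bodies
unchanged by overriding read-only properties in a subclass.  A minimal,
self-contained sketch of that pattern (class and test names here are
illustrative, not taken from the patch):

    import unittest

    class BaseCase(unittest.TestCase):
        @property
        def _ipver(self):          # parameter the shared tests depend on
            return 'v4'

        def test_counter_name(self):
            # the same body runs for every subclass, using self._ipver
            self.assertIn('axfrreq' + self._ipver,
                          ('axfrreqv4', 'axfrreqv6'))

    class V6Case(BaseCase):
        @property
        def _ipver(self):          # only the parameter changes
            return 'v6'

    if __name__ == '__main__':
        unittest.main()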

+ 0 - 0
src/bin/xfrin/xfrin.py.in


Some files were not shown because too many files changed in this diff