
Merge branch 'master' into trac2507

Mukund Sivaraman 12 years ago
parent
commit
c8dcb23d36
92 changed files with 2679 additions and 1319 deletions
  1. ChangeLog (+51 -10)
  2. configure.ac (+3 -5)
  3. doc/guide/bind10-guide.xml (+21 -28)
  4. src/bin/auth/b10-auth.xml (+38 -10)
  5. src/bin/bind10/bind10.xml (+1 -1)
  6. src/bin/cfgmgr/b10-cfgmgr.xml (+1 -1)
  7. src/bin/cmdctl/b10-certgen.xml (+1 -1)
  8. src/bin/cmdctl/b10-cmdctl.xml (+6 -6)
  9. src/bin/dhcp4/Makefile.am (+1 -1)
  10. src/bin/dhcp4/config_parser.cc (+451 -30)
  11. src/bin/dhcp4/config_parser.h (+1 -0)
  12. src/bin/dhcp4/dhcp4.spec (+65 -3)
  13. src/bin/dhcp4/dhcp4_messages.mes (+16 -11)
  14. src/bin/dhcp4/tests/Makefile.am (+3 -3)
  15. src/bin/dhcp4/tests/config_parser_unittest.cc (+476 -0)
  16. src/bin/dhcp6/Makefile.am (+6 -6)
  17. src/bin/dhcp6/config_parser.cc (+17 -22)
  18. src/bin/dhcp6/dhcp6_messages.mes (+9 -9)
  19. src/bin/dhcp6/tests/Makefile.am (+4 -5)
  20. src/bin/dhcp6/tests/config_parser_unittest.cc (+5 -4)
  21. src/bin/dhcp6/tests/dhcp6_srv_unittest.cc (+3 -0)
  22. src/bin/loadzone/.gitignore (+1 -1)
  23. src/bin/loadzone/Makefile.am (+19 -10)
  24. src/bin/loadzone/TODO (+0 -13)
  25. src/bin/loadzone/b10-loadzone.py.in (+0 -94)
  26. src/bin/loadzone/b10-loadzone.xml (+124 -18)
  27. src/bin/loadzone/loadzone.py.in (+342 -0)
  28. src/bin/loadzone/loadzone_messages.mes (+81 -0)
  29. src/bin/loadzone/run_loadzone.sh.in (+9 -1)
  30. src/bin/loadzone/tests/Makefile.am (+37 -0)
  31. src/bin/loadzone/tests/correct/Makefile.am (+4 -1)
  32. src/bin/loadzone/tests/correct/correct_test.sh.in (+9 -9)
  33. src/bin/loadzone/tests/correct/example.db (+10 -4)
  34. src/bin/loadzone/tests/correct/include.db (+6 -2)
  35. src/bin/loadzone/tests/correct/mix1.db (+6 -2)
  36. src/bin/loadzone/tests/correct/mix2.db (+6 -2)
  37. src/bin/loadzone/tests/correct/mix2sub2.txt (+2 -2)
  38. src/bin/loadzone/tests/correct/ttl1.db (+6 -2)
  39. src/bin/loadzone/tests/correct/ttl2.db (+6 -2)
  40. src/bin/loadzone/tests/correct/ttlext.db (+6 -2)
  41. src/bin/loadzone/tests/error/.gitignore (+0 -1)
  42. src/bin/loadzone/tests/error/Makefile.am (+0 -28)
  43. src/bin/loadzone/tests/error/error.known (+0 -11)
  44. src/bin/loadzone/tests/error/error_test.sh.in (+0 -82)
  45. src/bin/loadzone/tests/error/formerr1.db (+0 -13)
  46. src/bin/loadzone/tests/error/formerr2.db (+0 -12)
  47. src/bin/loadzone/tests/error/formerr3.db (+0 -12)
  48. src/bin/loadzone/tests/error/formerr4.db (+0 -12)
  49. src/bin/loadzone/tests/error/formerr5.db (+0 -13)
  50. src/bin/loadzone/tests/error/include.txt (+0 -1)
  51. src/bin/loadzone/tests/error/keyerror1.db (+0 -12)
  52. src/bin/loadzone/tests/error/keyerror2.db (+0 -12)
  53. src/bin/loadzone/tests/error/keyerror3.db (+0 -13)
  54. src/bin/loadzone/tests/error/originerr1.db (+0 -11)
  55. src/bin/loadzone/tests/error/originerr2.db (+0 -12)
  56. src/bin/loadzone/tests/loadzone_test.py (+342 -0)
  57. src/bin/loadzone/tests/testdata/broken-example.org.zone (+11 -0)
  58. src/bin/loadzone/tests/testdata/example-nons.org.zone (+10 -0)
  59. src/bin/loadzone/tests/testdata/example-nosoa.org.zone (+3 -0)
  60. src/bin/loadzone/tests/testdata/example.org.zone (+10 -0)
  61. src/bin/msgq/msgq.xml (+1 -1)
  62. src/bin/resolver/b10-resolver.xml (+3 -1)
  63. src/bin/stats/b10-stats-httpd.xml (+4 -4)
  64. src/bin/stats/b10-stats.xml (+1 -1)
  65. src/lib/dhcpsrv/alloc_engine.cc (+71 -3)
  66. src/lib/dhcpsrv/alloc_engine.h (+18 -0)
  67. src/lib/dhcpsrv/lease_mgr.cc (+8 -0)
  68. src/lib/dhcpsrv/lease_mgr.h (+4 -0)
  69. src/lib/dhcpsrv/memfile_lease_mgr.cc (+2 -3)
  70. src/lib/dhcpsrv/tests/Makefile.am (+1 -0)
  71. src/lib/dhcpsrv/tests/alloc_engine_unittest.cc (+160 -24)
  72. src/lib/dhcpsrv/tests/lease_mgr_unittest.cc (+26 -1)
  73. src/lib/dhcpsrv/tests/mysql_lease_mgr_unittest.cc (+2 -42)
  74. src/lib/dhcpsrv/tests/test_utils.cc (+58 -0)
  75. src/lib/dhcpsrv/tests/test_utils.h (+49 -0)
  76. src/lib/dns/master_loader.cc (+13 -4)
  77. src/lib/dns/tests/master_loader_unittest.cc (+4 -3)
  78. src/lib/python/isc/datasrc/Makefile.am (+1 -1)
  79. src/lib/python/isc/datasrc/master.py (+0 -616)
  80. src/lib/python/isc/datasrc/tests/Makefile.am (+1 -2)
  81. src/lib/python/isc/datasrc/tests/master_test.py (+0 -35)
  82. src/lib/python/isc/log_messages/Makefile.am (+2 -0)
  83. src/lib/python/isc/log_messages/loadzone_messages.py (+1 -0)
  84. src/lib/util/encode/base_n.cc (+2 -1)
  85. tests/system/bindctl/setup.sh (+2 -2)
  86. tests/system/bindctl/tests.sh (+4 -0)
  87. tests/system/conf.sh.in (+4 -1)
  88. tests/system/glue/setup.sh.in (+4 -4)
  89. tests/system/ixfr/in-1/setup.sh.in (+1 -1)
  90. tests/system/ixfr/in-2/setup.sh.in (+1 -1)
  91. tests/system/ixfr/in-3/setup.sh.in (+1 -1)
  92. tests/system/ixfr/in-4/setup.sh.in (+1 -1)

+ 51 - 10
ChangeLog

@@ -1,14 +1,55 @@
+bind10-1.0.0-beta released on December 20, 2012
+
+533.	[build]*		jreed
+	Changed the package name in configure.ac from bind10-devel
+	to bind10. This means the default sub-directories for
+	etc, include, libexec, share, share/doc, and var are changed.
+	If upgrading from a previous version, you may need to move
+	and update your configurations or change references for the
+	and update your configurations or change references to the
+	(git bf53fbd4e92ae835280d49fbfdeeebd33e0ce3f2)
+
+532.	[func]		marcin
+	Implemented configuration of DHCPv4 option values using
+	the configuration manager. In order to set values for the
+	data fields carried by a particular option, the user
+	specifies a string of hexadecimal digits that is converted
+	to binary data and stored in the option buffer. A more
+	user-friendly way of specifying option content is planned.
+	(Trac #2544, git fed1aab5a0f813c41637807f8c0c5f8830d71942)
+
+531.	[func]		tomek
+	b10-dhcp6: Added support for expired leases. Leases for IPv6
+	addresses that are past their valid lifetime may be recycled, i.e.
+	reallocated to other clients if needed.
+	(Trac #2327, git 62a23854f619349d319d02c3a385d9bc55442d5e)
+
+530.	[func]*		team
+	b10-loadzone was fully overhauled.  It now uses a C++-based
+	zone parser and loader library, performs stricter checks, has
+	more complete support for master file formats, produces more
+	helpful logs, is more extensible for various types of data
+	sources, and is much faster than the old version.  In terms of
+	functionality the new version should be generally backward
+	compatible with the old version, but there are some
+	incompatibilities: name fields of RDATA (in NS, SOA, etc.) must
+	be absolute for now; due to the stricter checks some input that was
+	(incorrectly) accepted by the old version may now be rejected;
+	command line options and arguments are not compatible.
+	(Trac #2380, git 689b015753a9e219bc90af0a0b818ada26cc5968)
+
 529.	[func]*		team
-	The in-memory data source now uses a more complete master file
-	parser to load textual zone files.  As of this change it supports
-	multi-line RR representation and more complete support for escaped
-	and quoted strings.  It also produces more helpful log when there
-	is an error in the zone file.  It will be enhanced as more
-	specific tasks in the #2368 meta ticket are completed.  The new
-	parser is generally upper compatible to the previous one, but due
-	to the tighter checks some input that has been accepted so far
-	could now be rejected, so it's advisable to check if you use
-	textual zone files directly loaded to memory.
+	The in-memory data source now uses a more complete master
+	file parser to load textual zone files.  As of this change
+	it supports multi-line RR representation and more complete
+	support for escaped and quoted strings.  It also produces
+	more helpful log messages when there is an error in the zone
+	file.  It will be enhanced as more specific tasks in the
+	#2368 meta ticket are completed.  The new parser is generally
+	backward compatible with the previous one, but due to the
+	tighter checks some input that has been accepted so far
+	could now be rejected, so it's advisable to check if you
+	use textual zone files directly loaded to memory.
 	(Trac #2470, git c4cf36691115c15440b65cac16f1c7fcccc69521)
 
 528.	[func]		marcin

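As a side note on ChangeLog entry 532 above: the new DHCPv4 option configuration carries the option payload as a string of hexadecimal digits. The fragment below is a hypothetical illustration (not part of this commit) of such an "option-data" entry, written as a C++ string literal in the style of the unit tests added in src/bin/dhcp4/tests/config_parser_unittest.cc; the values ("option_foo", code 56, "AB CDEF0105") are taken from those tests, and the helper name makeOptionDataConfig is invented.

    // Hypothetical sketch: an "option-data" entry as consumed by the new parser.
    // The "data" field is a case-insensitive string of hexadecimal digits that
    // the server decodes into the option's binary payload.
    #include <cc/data.h>

    #include <string>

    isc::data::ConstElementPtr makeOptionDataConfig() {
        const std::string config =
            "{ \"option-data\": [ {"
            "      \"name\": \"option_foo\","
            "      \"code\": 56,"
            "      \"data\": \"AB CDEF0105\""
            "  } ] }";
        // Element::fromJSON() produces the element tree that
        // configureDhcp4Server() and the option parsers operate on.
        return (isc::data::Element::fromJSON(config));
    }
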
+ 3 - 5
configure.ac

@@ -2,7 +2,7 @@
 # Process this file with autoconf to produce a configure script.
 
 AC_PREREQ([2.59])
-AC_INIT(bind10-devel, 20120817, bind10-dev@isc.org)
+AC_INIT(bind10, 20121219, bind10-dev@isc.org)
 AC_CONFIG_SRCDIR(README)
 AM_INIT_AUTOMAKE([foreign])
 m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])dnl be backward compatible
@@ -1176,8 +1176,8 @@ AC_CONFIG_FILES([Makefile
                  src/bin/dbutil/tests/Makefile
                  src/bin/dbutil/tests/testdata/Makefile
                  src/bin/loadzone/Makefile
+                 src/bin/loadzone/tests/Makefile
                  src/bin/loadzone/tests/correct/Makefile
-                 src/bin/loadzone/tests/error/Makefile
                  src/bin/msgq/Makefile
                  src/bin/msgq/tests/Makefile
                  src/bin/auth/Makefile
@@ -1350,8 +1350,7 @@ AC_OUTPUT([doc/version.ent
            src/bin/bindctl/tests/bindctl_test
            src/bin/loadzone/run_loadzone.sh
            src/bin/loadzone/tests/correct/correct_test.sh
-           src/bin/loadzone/tests/error/error_test.sh
-           src/bin/loadzone/b10-loadzone.py
+           src/bin/loadzone/loadzone.py
            src/bin/usermgr/run_b10-cmdctl-usermgr.sh
            src/bin/usermgr/b10-cmdctl-usermgr.py
            src/bin/msgq/msgq.py
@@ -1418,7 +1417,6 @@ AC_OUTPUT([doc/version.ent
            chmod +x src/bin/bindctl/run_bindctl.sh
            chmod +x src/bin/loadzone/run_loadzone.sh
            chmod +x src/bin/loadzone/tests/correct/correct_test.sh
-           chmod +x src/bin/loadzone/tests/error/error_test.sh
            chmod +x src/bin/sysinfo/run_sysinfo.sh
            chmod +x src/bin/usermgr/run_b10-cmdctl-usermgr.sh
            chmod +x src/bin/msgq/run_msgq.sh

+ 21 - 28
doc/guide/bind10-guide.xml

@@ -449,8 +449,10 @@ var/
 
         <listitem>
           <para>Load desired zone file(s), for example:
-            <screen>$ <userinput>b10-loadzone <replaceable>your.zone.example.org</replaceable></userinput></screen>
+            <screen>$ <userinput>b10-loadzone <replaceable>-c '{"database_file": "/usr/local/var/bind10/zone.sqlite3"}'</replaceable> <replaceable>your.zone.example.org</replaceable> <replaceable>your.zone.file</replaceable></userinput></screen>
           </para>
+	  (If you use the sqlite3 data source with the default DB
+	  file, you can omit the -c option.)
         </listitem>
 
         <listitem>
@@ -501,7 +503,7 @@ var/
           </listitem>
           <listitem>
           <simpara>
-            <filename>etc/bind10-devel/</filename> &mdash;
+            <filename>etc/bind10/</filename> &mdash;
             configuration files.
           </simpara>
           </listitem>
@@ -513,7 +515,7 @@ var/
           </listitem>
           <listitem>
             <simpara>
-              <filename>libexec/bind10-devel/</filename> &mdash;
+              <filename>libexec/bind10/</filename> &mdash;
               executables that a user wouldn't normally run directly and
               are not run independently.
               These are the BIND 10 modules which are daemons started by
@@ -528,13 +530,13 @@ var/
           </listitem>
           <listitem>
             <simpara>
-              <filename>share/bind10-devel/</filename> &mdash;
+              <filename>share/bind10/</filename> &mdash;
               configuration specifications.
             </simpara>
           </listitem>
           <listitem>
             <simpara>
-              <filename>share/doc/bind10-devel/</filename> &mdash;
+              <filename>share/doc/bind10/</filename> &mdash;
               this guide and other supplementary documentation.
             </simpara>
           </listitem>
@@ -546,7 +548,7 @@ var/
           </listitem>
           <listitem>
             <simpara>
-              <filename>var/bind10-devel/</filename> &mdash;
+              <filename>var/bind10/</filename> &mdash;
               data source and configuration databases.
             </simpara>
           </listitem>
@@ -908,7 +910,7 @@ as a dependency earlier -->
         Administrators do not communicate directly with the
         <command>b10-msgq</command> daemon.
         By default, BIND 10 uses a UNIX domain socket file named
-        <filename>/usr/local/var/bind10-devel/msg_socket</filename>
+        <filename>/usr/local/var/bind10/msg_socket</filename>
         for this interprocess communication.
       </para>
 
@@ -970,7 +972,7 @@ config changes are actually commands to cfgmgr
 <!-- TODO: what about command line switch to change this? -->
       <para>
         The stored configuration file is at
-        <filename>/usr/local/var/bind10-devel/b10-config.db</filename>.
+        <filename>/usr/local/var/bind10/b10-config.db</filename>.
         (The directory is what was defined at build configure time for
         <option>--localstatedir</option>.
         The default is <filename>/usr/local/var/</filename>.)
@@ -1063,13 +1065,13 @@ but you might wanna check with likun
     <para>The HTTPS server requires a private key,
       such as a RSA PRIVATE KEY.
       The default location is at
-      <filename>/usr/local/etc/bind10-devel/cmdctl-keyfile.pem</filename>.
+      <filename>/usr/local/etc/bind10/cmdctl-keyfile.pem</filename>.
       (A sample key is at
-      <filename>/usr/local/share/bind10-devel/cmdctl-keyfile.pem</filename>.)
+      <filename>/usr/local/share/bind10/cmdctl-keyfile.pem</filename>.)
       It also uses a certificate located at
-      <filename>/usr/local/etc/bind10-devel/cmdctl-certfile.pem</filename>.
+      <filename>/usr/local/etc/bind10/cmdctl-certfile.pem</filename>.
       (A sample certificate is at
-      <filename>/usr/local/share/bind10-devel/cmdctl-certfile.pem</filename>.)
+      <filename>/usr/local/share/bind10/cmdctl-certfile.pem</filename>.)
       This may be a self-signed certificate or purchased from a
       certification authority.
     </para>
@@ -1105,11 +1107,11 @@ but that is a single file, maybe this should go back to that format?
     <para>
       The <command>b10-cmdctl</command> daemon also requires
       the user account file located at
-      <filename>/usr/local/etc/bind10-devel/cmdctl-accounts.csv</filename>.
+      <filename>/usr/local/etc/bind10/cmdctl-accounts.csv</filename>.
       This comma-delimited file lists the accounts with a user name,
       hashed password, and salt.
       (A sample file is at
-      <filename>/usr/local/share/bind10-devel/cmdctl-accounts.csv</filename>.
+      <filename>/usr/local/share/bind10/cmdctl-accounts.csv</filename>.
       It contains the user named <quote>root</quote> with the password
       <quote>bind10</quote>.)
     </para>
@@ -1139,14 +1141,14 @@ or accounts database -->
         The configuration items for <command>b10-cmdctl</command> are:
         <varname>accounts_file</varname> which defines the path to the
         user accounts database (the default is
-        <filename>/usr/local/etc/bind10-devel/cmdctl-accounts.csv</filename>);
+        <filename>/usr/local/etc/bind10/cmdctl-accounts.csv</filename>);
         <varname>cert_file</varname> which defines the path to the
         PEM certificate file (the default is
-        <filename>/usr/local/etc/bind10-devel/cmdctl-certfile.pem</filename>);
+        <filename>/usr/local/etc/bind10/cmdctl-certfile.pem</filename>);
         and
 	<varname>key_file</varname> which defines the path to the
 	PEM private key file (the default is
-        <filename>/usr/local/etc/bind10-devel/cmdctl-keyfile.pem</filename>).
+        <filename>/usr/local/etc/bind10/cmdctl-keyfile.pem</filename>).
       </para>
 
     </section>
@@ -2457,7 +2459,7 @@ can use various data source backends.
         data source &mdash; one that serves things like
         <quote>AUTHORS.BIND.</quote>. The IN class contains single SQLite3
         data source with database file located at
-        <filename>/usr/local/var/bind10-devel/zone.sqlite3</filename>.
+        <filename>/usr/local/var/bind10/zone.sqlite3</filename>.
       </para>
 
       <para>
@@ -2636,19 +2638,10 @@ can use various data source backends.
 
       </para>
 
-      <para>
-        The <option>-o</option> argument may be used to define the
-        default origin for loaded zone file records.
-      </para>
-
       <note>
       <para>
         In the current release, only the SQLite3 back
         end is used by <command>b10-loadzone</command>.
-        By default, it stores the zone data in
-        <filename>/usr/local/var/bind10-devel/zone.sqlite3</filename>
-        unless the <option>-d</option> switch is used to set the
-        database filename.
         Multiple zones are stored in a single SQLite3 zone database.
       </para>
       </note>
@@ -3680,7 +3673,7 @@ mysql></screen>
          <para>
           3. Create the database tables:
           <screen>mysql> <userinput>CONNECT kea;</userinput>
-mysql> <userinput>SOURCE <replaceable>&lt;path-to-bind10&gt;</replaceable>/share/bind10-devel/dhcpdb_create.mysql</userinput></screen>
+mysql> <userinput>SOURCE <replaceable>&lt;path-to-bind10&gt;</replaceable>/share/bind10/dhcpdb_create.mysql</userinput></screen>
         </para>
          <para>
           4. Create the user under which BIND 10 will access the database and grant it access to the database tables:

+ 38 - 10
src/bin/auth/b10-auth.xml

@@ -20,7 +20,7 @@
 <refentry>
 
   <refentryinfo>
-    <date>June 20, 2012</date>
+    <date>December 18, 2012</date>
   </refentryinfo>
 
   <refmeta>
@@ -100,7 +100,7 @@
       <varname>database_file</varname> defines the path to the
       SQLite3 zone file when using the sqlite datasource.
       The default is
-      <filename>/usr/local/var/bind10-devel/zone.sqlite3</filename>.
+      <filename>/usr/local/var/bind10/zone.sqlite3</filename>.
     </para>
 
     <para>
@@ -157,6 +157,7 @@
       incoming TCP connections, in milliseconds. If the query
       is not sent within this time, the connection is closed.
       Setting this to 0 will disable TCP timeouts completely.
+      The default is 5000 (five seconds).
     </para>
 
 <!-- TODO: formating -->
@@ -165,6 +166,15 @@
     </para>
 
     <para>
+      <command>getstats</command> tells <command>b10-auth</command>
+      to report its defined statistics data in JSON format.
+      It will not report unused counters.
+      This is used by the
+      <citerefentry><refentrytitle>b10-stats</refentrytitle><manvolnum>8</manvolnum></citerefentry> daemon.
+      (The <command>sendstats</command> command is deprecated.)
+    </para>
+
+    <para>
       <command>loadzone</command> tells <command>b10-auth</command>
       to load or reload a zone file. The arguments include:
       <varname>class</varname> which optionally defines the class
@@ -181,13 +191,6 @@
     </para>
 
     <para>
-      <command>sendstats</command> tells <command>b10-auth</command>
-      to send its statistics data to
-      <citerefentry><refentrytitle>b10-stats</refentrytitle><manvolnum>8</manvolnum></citerefentry>
-      immediately.
-    </para>
-
-    <para>
       <command>shutdown</command> exits <command>b10-auth</command>.
       This has an optional <varname>pid</varname> argument to
       select the process ID to stop.
@@ -195,6 +198,28 @@
       if configured.)
     </para>
 
+    <para>
+      <command>start_ddns_forwarder</command> starts (or restarts) the
+      internal forwarding of DDNS Update messages.
+      This is used by the
+      <citerefentry><refentrytitle>b10-ddns</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+      daemon to tell <command>b10-auth</command> that DDNS Update
+      messages can be forwarded.
+      <note><simpara>This is not expected to be called by administrators;
+        it will be removed as a public command in the future.</simpara></note>
+    </para>
+
+    <para>
+      <command>stop_ddns_forwarder</command> stops the internal
+      forwarding of DDNS Update messages.
+      This is used by the
+      <citerefentry><refentrytitle>b10-ddns</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+      daemon to tell <command>b10-auth</command> that DDNS Update
+      messages should not be forwarded.
+      <note><simpara>This is not expected to be called by administrators;
+        it will be removed as a public command in the future.</simpara></note>
+    </para>
+
   </refsect1>
 
   <refsect1>
@@ -230,7 +255,7 @@
   <refsect1>
     <title>FILES</title>
     <para>
-      <filename>/usr/local/var/bind10-devel/zone.sqlite3</filename>
+      <filename>/usr/local/var/bind10/zone.sqlite3</filename>
       &mdash; Location for the SQLite3 zone database
       when <emphasis>database_file</emphasis> configuration is not
       defined.
@@ -244,6 +269,9 @@
         <refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum>
       </citerefentry>,
       <citerefentry>
+        <refentrytitle>b10-ddns</refentrytitle><manvolnum>8</manvolnum>
+      </citerefentry>,
+      <citerefentry>
         <refentrytitle>b10-loadzone</refentrytitle><manvolnum>8</manvolnum>
       </citerefentry>,
       <citerefentry>

+ 1 - 1
src/bin/bind10/bind10.xml

@@ -160,7 +160,7 @@
 	    <citerefentry><refentrytitle>b10-msgq</refentrytitle><manvolnum>8</manvolnum></citerefentry>
             daemon to use.
             The default is
-            <filename>/usr/local/var/bind10-devel/msg_socket</filename>.
+            <filename>/usr/local/var/bind10/msg_socket</filename>.
 <!-- @localstatedir@/@PACKAGE_NAME@/msg_socket -->
            </para>
          </listitem>

+ 1 - 1
src/bin/cfgmgr/b10-cfgmgr.xml

@@ -136,7 +136,7 @@
   <refsect1>
     <title>FILES</title>
 <!-- TODO: fix path -->
-    <para><filename>/usr/local/var/bind10-devel/b10-config.db</filename>
+    <para><filename>/usr/local/var/bind10/b10-config.db</filename>
       &mdash; Configuration storage file.
     </para>
   </refsect1>

+ 1 - 1
src/bin/cmdctl/b10-certgen.xml

@@ -190,7 +190,7 @@
       To update an expired certificate in BIND 10 that has been installed to
       /usr/local:
       <screen>
-$> cd /usr/local/etc/bind10-devel/
+$> cd /usr/local/etc/bind10/
 
 $> b10-certgen
 cmdctl-certfile.pem failed to verify: certificate has expired

+ 6 - 6
src/bin/cmdctl/b10-cmdctl.xml

@@ -147,21 +147,21 @@
       <varname>accounts_file</varname> defines the path to the
       user accounts database.
       The default is
-      <filename>/usr/local/etc/bind10-devel/cmdctl-accounts.csv</filename>.
+      <filename>/usr/local/etc/bind10/cmdctl-accounts.csv</filename>.
     </para>
 
     <para>
       <varname>cert_file</varname> defines the path to the
       PEM certificate file.
       The default is
-      <filename>/usr/local/etc/bind10-devel/cmdctl-certfile.pem</filename>.
+      <filename>/usr/local/etc/bind10/cmdctl-certfile.pem</filename>.
     </para>
 
     <para>
       <varname>key_file</varname> defines the path to the PEM private key
       file.
       The default is
-      <filename>/usr/local/etc/bind10-devel/cmdctl-keyfile.pem</filename>.
+      <filename>/usr/local/etc/bind10/cmdctl-keyfile.pem</filename>.
     </para>
 
 <!-- TODO: formating -->
@@ -187,17 +187,17 @@
 <!-- TODO: permissions -->
 <!-- TODO: what about multiple accounts? -->
 <!-- TODO: shouldn't the password file name say cmdctl in it? -->
-    <para><filename>/usr/local/etc/bind10-devel/cmdctl-accounts.csv</filename>
+    <para><filename>/usr/local/etc/bind10/cmdctl-accounts.csv</filename>
       &mdash; account database containing the name, hashed password,
       and the salt.
     </para>
 <!-- TODO: replace /usr/local -->
 <!-- TODO: permissions -->
 <!-- TODO: shouldn't have both in same file, will be configurable -->
-    <para><filename>/usr/local/etc/bind10-devel/cmdctl-keyfile.pem</filename>
+    <para><filename>/usr/local/etc/bind10/cmdctl-keyfile.pem</filename>
       &mdash; contains the Private key.
     </para>
-    <para><filename>/usr/local/etc/bind10-devel/cmdctl-certfile.pem</filename>
+    <para><filename>/usr/local/etc/bind10/cmdctl-certfile.pem</filename>
       &mdash; contains the Certificate.
     </para>
   </refsect1>

+ 1 - 1
src/bin/dhcp4/Makefile.am

@@ -58,6 +58,7 @@ b10_dhcp4_CXXFLAGS = -Wno-unused-parameter
 endif
 
 b10_dhcp4_LDADD  = $(top_builddir)/src/lib/dhcp/libb10-dhcp++.la
+b10_dhcp4_LDADD += $(top_builddir)/src/lib/util/libb10-util.la
 b10_dhcp4_LDADD += $(top_builddir)/src/lib/dhcpsrv/libb10-dhcpsrv.la
 b10_dhcp4_LDADD += $(top_builddir)/src/lib/exceptions/libb10-exceptions.la
 b10_dhcp4_LDADD += $(top_builddir)/src/lib/asiolink/libb10-asiolink.la
@@ -65,6 +66,5 @@ b10_dhcp4_LDADD += $(top_builddir)/src/lib/log/libb10-log.la
 b10_dhcp4_LDADD += $(top_builddir)/src/lib/config/libb10-cfgclient.la
 b10_dhcp4_LDADD += $(top_builddir)/src/lib/cc/libb10-cc.la
 
-
 b10_dhcp4dir = $(pkgdatadir)
 b10_dhcp4_DATA = dhcp4.spec

+ 451 - 30
src/bin/dhcp4/config_parser.cc

@@ -13,9 +13,12 @@
 // PERFORMANCE OF THIS SOFTWARE.
 
 #include <config/ccsession.h>
-#include <dhcpsrv/cfgmgr.h>
 #include <dhcp4/config_parser.h>
 #include <dhcp4/dhcp4_log.h>
+#include <dhcp/libdhcp++.h>
+#include <dhcp/option_definition.h>
+#include <dhcpsrv/cfgmgr.h>
+#include <util/encode/hex.h>
 #include <boost/foreach.hpp>
 #include <boost/lexical_cast.hpp>
 #include <boost/algorithm/string.hpp>
@@ -46,12 +49,20 @@ typedef std::map<std::string, ParserFactory*> FactoryMap;
 /// no subnet object created yet to store them.
 typedef std::vector<Pool4Ptr> PoolStorage;
 
+/// @brief Collection of option descriptors. This container allows searching for
+/// options using the option code or persistency flag. This is useful when merging
+/// existing options with newly configured options.
+typedef Subnet::OptionContainer OptionStorage;
+
 /// @brief Global uint32 parameters that will be used as defaults.
 Uint32Storage uint32_defaults;
 
 /// @brief global string parameters that will be used as defaults.
 StringStorage string_defaults;
 
+/// @brief Global storage for options that will be used as defaults.
+OptionStorage option_defaults;
+
 /// @brief a dummy configuration parser
 ///
 /// It is a debugging parser. It does not configure anything,
@@ -451,6 +462,344 @@ private:
     PoolStorage* pools_;
 };
 
+/// @brief Parser for option data value.
+///
+/// This parser parses configuration entries that specify value of
+/// a single option. These entries include option name, option code
+/// and data carried by the option. If parsing is successful then an
+/// instance of an option is created and added to the storage provided
+/// by the calling class.
+///
+/// @todo This class parses and validates the option name. However it is
+/// not used anywhere until support for option spaces is implemented
+/// (see tickets #2319, #2314). When option spaces are implemented
+/// there will be a way to reference the particular option using
+/// its type (code) or option name.
+class OptionDataParser : public Dhcp4ConfigParser {
+public:
+
+    /// @brief Constructor.
+    ///
+    /// Class constructor.
+    OptionDataParser(const std::string&)
+        : options_(NULL),
+          // initialize option to NULL ptr
+          option_descriptor_(false) { }
+
+    /// @brief Parses the single option data.
+    ///
+    /// This method parses the data of a single option from the configuration.
+    /// The option data includes option name, option code and data being
+    /// carried by this option. Eventually it creates the instance of the
+    /// option.
+    ///
+    /// @warning setStorage must be called with valid storage pointer prior
+    /// to calling this method.
+    ///
+    /// @param option_data_entries collection of entries that define value
+    /// for a particular option.
+    /// @throw Dhcp4ConfigError if invalid parameter specified in
+    /// the configuration.
+    /// @throw isc::InvalidOperation if failed to set storage prior to
+    /// calling build.
+    /// @throw isc::BadValue if option data storage is invalid.
+    virtual void build(ConstElementPtr option_data_entries) {
+        if (options_ == NULL) {
+            isc_throw(isc::InvalidOperation, "Parser logic error: storage must be set before "
+                      "parsing option data.");
+        }
+        BOOST_FOREACH(ConfigPair param, option_data_entries->mapValue()) {
+            ParserPtr parser;
+            if (param.first == "name") {
+                boost::shared_ptr<StringParser>
+                    name_parser(dynamic_cast<StringParser*>(StringParser::Factory(param.first)));
+                if (name_parser) {
+                    name_parser->setStorage(&string_values_);
+                    parser = name_parser;
+                }
+            } else if (param.first == "code") {
+                boost::shared_ptr<Uint32Parser>
+                    code_parser(dynamic_cast<Uint32Parser*>(Uint32Parser::Factory(param.first)));
+                if (code_parser) {
+                    code_parser->setStorage(&uint32_values_);
+                    parser = code_parser;
+                }
+            } else if (param.first == "data") {
+                boost::shared_ptr<StringParser>
+                    value_parser(dynamic_cast<StringParser*>(StringParser::Factory(param.first)));
+                if (value_parser) {
+                    value_parser->setStorage(&string_values_);
+                    parser = value_parser;
+                }
+            } else {
+                isc_throw(Dhcp4ConfigError,
+                          "Parser error: option-data parameter not supported: "
+                          << param.first);
+            }
+            parser->build(param.second);
+        }
+        // Try to create the option instance.
+        createOption();
+    }
+
+    /// @brief Commits option value.
+    ///
+    /// This function adds a new option to the storage or replaces an existing option
+    /// with the same code.
+    ///
+    /// @throw isc::InvalidOperation if the storage pointer was not set or build()
+    /// was not called prior to commit. If that happens, data in the storage
+    /// remains unmodified.
+    virtual void commit() {
+        if (options_ == NULL) {
+            isc_throw(isc::InvalidOperation, "Parser logic error: storage must be set before "
+                      "committing option data.");
+        } else if (!option_descriptor_.option) {
+            // Before we can commit, the new option must have been configured. If it
+            // has not been, then commit() was called before build().
+            isc_throw(isc::InvalidOperation, "Parser logic error: no option has been configured and"
+                      " thus there is nothing to commit. Has build() been called?");
+        }
+        uint16_t opt_type = option_descriptor_.option->getType();
+        Subnet::OptionContainerTypeIndex& idx = options_->get<1>();
+        // Try to find options with the particular option code in the main
+        // storage. If found, remove these options because they will be
+        // replaced with new one.
+        Subnet::OptionContainerTypeRange range =
+            idx.equal_range(opt_type);
+        if (std::distance(range.first, range.second) > 0) {
+            idx.erase(range.first, range.second);
+        }
+        // Append new option to the main storage.
+        options_->push_back(option_descriptor_);
+    }
+
+    /// @brief Set storage for the parser.
+    ///
+    /// Sets storage for the parser. This storage points to the
+    /// vector of options and is used by multiple instances of
+    /// OptionDataParser. Each instance creates exactly one object
+    /// of dhcp::Option or derived type and appends it to this
+    /// storage.
+    ///
+    /// @param storage pointer to the options storage
+    void setStorage(OptionStorage* storage) {
+        options_ = storage;
+    }
+
+private:
+
+    /// @brief Create option instance.
+    ///
+    /// Creates an instance of an option and adds it to the provided
+    /// options storage. If the option data parsed by the \ref build function
+    /// is invalid or insufficient, this function throws an exception.
+    ///
+    /// @warning this function does not check whether the options_ storage pointer
+    /// is initialized, but this check is not needed here because it is done
+    /// in the \ref build function.
+    ///
+    /// @throw Dhcp4ConfigError if parameters provided in the configuration
+    /// are invalid.
+    void createOption() {
+        // Option code is held in the uint32_t storage but is supposed to
+        // be uint16_t value. We need to check that value in the configuration
+        // does not exceed range of uint16_t and is not zero.
+        uint32_t option_code = getUint32Param("code");
+        if (option_code == 0) {
+            isc_throw(Dhcp4ConfigError, "Parser error: value of 'code' must not"
+                      << " be equal to zero. Option code '0' is reserved in"
+                      << " DHCPv4.");
+        } else if (option_code > std::numeric_limits<uint16_t>::max()) {
+            isc_throw(Dhcp4ConfigError, "Parser error: value of 'code' must not"
+                      << " exceed " << std::numeric_limits<uint16_t>::max());
+        }
+        // Check that the option name has been specified, is non-empty and does not
+        // contain spaces.
+        // @todo possibly some more restrictions apply here?
+        std::string option_name = getStringParam("name");
+        if (option_name.empty()) {
+            isc_throw(Dhcp4ConfigError, "Parser error: option name must not be"
+                      << " empty");
+        } else if (option_name.find(" ") != std::string::npos) {
+            isc_throw(Dhcp4ConfigError, "Parser error: option name must not contain"
+                      << " spaces");
+        }
+
+        // Get option data from the configuration database ('data' field).
+        // Option data is specified by the user as case insensitive string
+        // of hexadecimal digits for each option.
+        std::string option_data = getStringParam("data");
+        // Transform string of hexadecimal digits into binary format.
+        std::vector<uint8_t> binary;
+        try {
+            util::encode::decodeHex(option_data, binary);
+        } catch (...) {
+            isc_throw(Dhcp4ConfigError, "Parser error: option data is not a valid"
+                      << " string of hexadecimal digits: " << option_data);
+        }
+        // Get all existing DHCPv4 option definitions. The one that matches
+        // our option will be picked and used to create it.
+        OptionDefContainer option_defs = LibDHCP::getOptionDefs(Option::V4);
+        // Get search index #1. It allows searching for options definitions
+        // using option type value.
+        const OptionDefContainerTypeIndex& idx = option_defs.get<1>();
+        // Get all option definitions matching option code we want to create.
+        const OptionDefContainerTypeRange& range = idx.equal_range(option_code);
+        size_t num_defs = std::distance(range.first, range.second);
+        OptionPtr option;
+        // Currently we do not allow duplicated definitions and if there are
+        // any duplicates we report an internal server error.
+        if (num_defs > 1) {
+            isc_throw(Dhcp4ConfigError, "Internal error: currently it is not"
+                      << " supported to initialize multiple option definitions"
+                      << " for the same option code. This will be supported once"
+                      << " option spaces are implemented.");
+        } else if (num_defs == 0) {
+            // @todo We have a limited set of option definitions initialized at the moment.
+            // In the future we want to initialize option definitions for all options.
+            // Consequently an error will be issued if an option definition does not exist
+            // for a particular option code. For now it is ok to create generic option
+            // if definition does not exist.
+            OptionPtr option(new Option(Option::V4, static_cast<uint16_t>(option_code),
+                                        binary));
+            // The created option is stored in option_descriptor_ class member until the
+            // commit stage when it is inserted into the main storage. If an option with the
+            // same code exists in main storage already the old option is replaced.
+            option_descriptor_.option = option;
+            option_descriptor_.persistent = false;
+        } else {
+            // We have exactly one option definition for the particular option code,
+            // so use it to create the option instance.
+            const OptionDefinitionPtr& def = *(range.first);
+            try {
+                OptionPtr option = def->optionFactory(Option::V4, option_code, binary);
+                Subnet::OptionDescriptor desc(option, false);
+                option_descriptor_.option = option;
+                option_descriptor_.persistent = false;
+            } catch (const isc::Exception& ex) {
+                isc_throw(Dhcp4ConfigError, "Parser error: option data does not match"
+                          << " option definition (code " << option_code << "): "
+                          << ex.what());
+            }
+        }
+    }
+
+    /// @brief Get a parameter from the strings storage.
+    ///
+    /// @param param_id parameter identifier.
+    /// @throw Dhcp4ConfigError if parameter has not been found.
+    std::string getStringParam(const std::string& param_id) const {
+        StringStorage::const_iterator param = string_values_.find(param_id);
+        if (param == string_values_.end()) {
+            isc_throw(Dhcp4ConfigError, "Parser error: option-data parameter"
+                      << " '" << param_id << "' not specified");
+        }
+        return (param->second);
+    }
+
+    /// @brief Get a parameter from the uint32 values storage.
+    ///
+    /// @param param_id parameter identifier.
+    /// @throw Dhcp4ConfigError if parameter has not been found.
+    uint32_t getUint32Param(const std::string& param_id) const {
+        Uint32Storage::const_iterator param = uint32_values_.find(param_id);
+        if (param == uint32_values_.end()) {
+            isc_throw(Dhcp4ConfigError, "Parser error: option-data parameter"
+                      << " '" << param_id << "' not specified");
+        }
+        return (param->second);
+    }
+
+    /// Storage for uint32 values (e.g. option code).
+    Uint32Storage uint32_values_;
+    /// Storage for string values (e.g. option name or data).
+    StringStorage string_values_;
+    /// Pointer to options storage. This storage is provided by
+    /// the calling class and is shared by all OptionDataParser objects.
+    OptionStorage* options_;
+    /// Option descriptor holds newly configured option.
+    Subnet::OptionDescriptor option_descriptor_;
+};
+
+/// @brief Parser for option data values within a subnet.
+///
+/// This parser iterates over all entries that define options
+/// data for a particular subnet and creates a collection of options.
+/// If parsing is successful, all these options are added to the Subnet
+/// object.
+class OptionDataListParser : public Dhcp4ConfigParser {
+public:
+
+    /// @brief Constructor.
+    ///
+    /// Unless otherwise specified, parsed options will be stored in
+    /// a global option container (option_defaults). That storage location
+    /// is overridden on a subnet basis.
+    OptionDataListParser(const std::string&)
+        : options_(&option_defaults), local_options_() { }
+
+    /// @brief Parses entries that define options' data for a subnet.
+    ///
+    /// This method iterates over all entries that define option data
+    /// for options within a single subnet and creates options' instances.
+    ///
+    /// @param option_data_list pointer to a list of options' data sets.
+    /// @throw Dhcp4ConfigError if option parsing failed.
+    void build(ConstElementPtr option_data_list) {
+        BOOST_FOREACH(ConstElementPtr option_value, option_data_list->listValue()) {
+            boost::shared_ptr<OptionDataParser> parser(new OptionDataParser("option-data"));
+            // The local_options_ member will hold instances of all options, thus
+            // each OptionDataParser takes it as its storage.
+            parser->setStorage(&local_options_);
+            // Build the instance of a single option.
+            parser->build(option_value);
+            // Store a parser as it will be used to commit.
+            parsers_.push_back(parser);
+        }
+    }
+
+    /// @brief Set storage for option instances.
+    ///
+    /// @param storage pointer to options storage.
+    void setStorage(OptionStorage* storage) {
+        options_ = storage;
+    }
+
+
+    /// @brief Commit all option values.
+    ///
+    /// This function invokes commit for all option values.
+    void commit() {
+        BOOST_FOREACH(ParserPtr parser, parsers_) {
+            parser->commit();
+        }
+        // Parsing was successful and we have all configured
+        // options in local storage. We can now replace old values
+        // with new values.
+        std::swap(local_options_, *options_);
+    }
+
+    /// @brief Create OptionDataListParser object
+    ///
+    /// @param param_name param name.
+    ///
+    /// @return Dhcp4ConfigParser object.
+    static Dhcp4ConfigParser* Factory(const std::string& param_name) {
+        return (new OptionDataListParser(param_name));
+    }
+
+    /// Intermediate option storage. This storage is used by
+    /// lower level parsers to add new options.  Values held
+    /// in this storage are assigned to main storage (options_)
+    /// if overall parsing was successful.
+    OptionStorage local_options_;
+    /// Pointer to options instances storage.
+    OptionStorage* options_;
+    /// Collection of parsers;
+    ParserCollection parsers_;
+};
+
 /// @brief this class parses a single subnet
 ///
 /// This class parses the whole subnet definition. It creates parsers
@@ -470,35 +819,31 @@ public:
     void build(ConstElementPtr subnet) {
 
         BOOST_FOREACH(ConfigPair param, subnet->mapValue()) {
-
             ParserPtr parser(createSubnet4ConfigParser(param.first));
-
-            // if this is an Uint32 parser, tell it to store the values
-            // in values_, rather than in global storage
-            boost::shared_ptr<Uint32Parser> uint_parser =
-                boost::dynamic_pointer_cast<Uint32Parser>(parser);
-            if (uint_parser) {
-                uint_parser->setStorage(&uint32_values_);
-            } else {
-
-                boost::shared_ptr<StringParser> string_parser =
-                    boost::dynamic_pointer_cast<StringParser>(parser);
-                if (string_parser) {
-                    string_parser->setStorage(&string_values_);
-                } else {
-
-                    boost::shared_ptr<PoolParser> pool_parser =
-                        boost::dynamic_pointer_cast<PoolParser>(parser);
-                    if (pool_parser) {
-                        pool_parser->setStorage(&pools_);
-                    }
-                }
+            // The actual type of the parser is unknown here. We have to discover
+            // the parser type here to invoke the corresponding setStorage function
+            // on it.  We discover the parser type by trying to cast the parser to the
+            // various parser types and checking which cast succeeds. For the one that
+            // succeeds, the setStorage and build methods are invoked.
+
+            // Try uint32 type parser.
+            if (!buildParser<Uint32Parser, Uint32Storage >(parser, uint32_values_,
+                                                          param.second) &&
+                // Try string type parser.
+                !buildParser<StringParser, StringStorage >(parser, string_values_,
+                                                           param.second) &&
+                // Try pool parser.
+                !buildParser<PoolParser, PoolStorage >(parser, pools_,
+                                                       param.second) &&
+                // Try option data parser.
+                !buildParser<OptionDataListParser, OptionStorage >(parser, options_,
+                                                                   param.second)) {
+                // Appropriate parsers are created in createSubnet4ConfigParser
+                // and they should be limited to those that we check here for. Thus,
+                // if we fail to find a matching parser here it is a programming error.
+                isc_throw(Dhcp4ConfigError, "failed to find suitable parser");
             }
-
-            parser->build(param.second);
-            parsers_.push_back(parser);
         }
-
         // Ok, we now have subnet parsed
     }
 
@@ -510,6 +855,10 @@ public:
     /// objects. Subnet4 are then added to DHCP CfgMgr.
     /// @throw Dhcp4ConfigError if there are any issues encountered during commit
     void commit() {
+        // Invoke commit on all sub-data parsers.
+        BOOST_FOREACH(ParserPtr parser, parsers_) {
+            parser->commit();
+        }
 
         StringStorage::const_iterator it = string_values_.find("subnet");
         if (it == string_values_.end()) {
@@ -545,11 +894,79 @@ public:
             subnet->addPool4(*it);
         }
 
+        const Subnet::OptionContainer& options = subnet->getOptions();
+        const Subnet::OptionContainerTypeIndex& idx = options.get<1>();
+
+        // Add subnet specific options.
+        BOOST_FOREACH(Subnet::OptionDescriptor desc, options_) {
+            Subnet::OptionContainerTypeRange range = idx.equal_range(desc.option->getType());
+            if (std::distance(range.first, range.second) > 0) {
+                LOG_WARN(dhcp4_logger, DHCP4_CONFIG_OPTION_DUPLICATE)
+                    .arg(desc.option->getType()).arg(addr.toText());
+            }
+            subnet->addOption(desc.option);
+        }
+
+        // Check all global options and add them to the subnet object if
+        // they have been configured in the global scope. If they have been
+        // configured in the subnet scope, we don't add the global option because
+        // the one configured in the subnet scope always takes precedence.
+        BOOST_FOREACH(Subnet::OptionDescriptor desc, option_defaults) {
+            // Get all options specified locally in the subnet and having
+            // code equal to global option's code.
+            Subnet::OptionContainerTypeRange range = idx.equal_range(desc.option->getType());
+            // @todo: In the future we will be searching for options using either
+            // an option code or namespace. Currently we have only the option
+            // code available so if there is at least one option found with the
+            // specific code we don't add the globally configured option.
+            // @todo with this code the first globally configured option
+            // with the given code will be added to a subnet. We may
+            // want to issue a warning about dropping the configuration of
+            // a global option if one already exists.
+            if (std::distance(range.first, range.second) == 0) {
+                subnet->addOption(desc.option);
+            }
+        }
+
         CfgMgr::instance().addSubnet4(subnet);
     }
 
 private:
 
+    /// @brief Set storage for a parser and invoke build.
+    ///
+    /// This helper method casts the provided parser pointer to the specified
+    /// type. If the cast is successful it sets the corresponding storage for
+    /// this parser, invokes build on it and saves the parser.
+    ///
+    /// @tparam T parser type to which parser argument should be cast.
+    /// @tparam Y storage type for the specified parser type.
+    /// @param parser parser on which build must be invoked.
+    /// @param storage reference to a storage that will be set for a parser.
+    /// @param subnet subnet element read from the configuration and being parsed.
+    /// @return true if parser pointer was successfully cast to specialized
+    /// parser type provided as Y.
+    template<typename T, typename Y>
+    bool buildParser(const ParserPtr& parser, Y& storage, const ConstElementPtr& subnet) {
+        // We need to cast to T in order to set storage for the parser.
+        boost::shared_ptr<T> cast_parser = boost::dynamic_pointer_cast<T>(parser);
+        // It is common that this cast is not successful because we try to cast to all
+        // supported parser types as we don't know the type of a parser in advance.
+        if (cast_parser) {
+            // The cast was successful, so we go ahead with setting storage and the actual parse.
+            cast_parser->setStorage(&storage);
+            parser->build(subnet);
+            parsers_.push_back(parser);
+            // We indicate that the cast was successful so that the calling function
+            // may skip attempts to cast to other parser types and proceed to the
+            // next element.
+            return (true);
+        }
+        // It was not successful. Indicate that another parser type
+        // should be tried.
+        return (false);
+    }
+
     /// @brief creates parsers for entries in subnet definition
     ///
     /// @todo Add subnet-specific things here (e.g. subnet-specific options)
@@ -565,6 +982,7 @@ private:
         factories["rebind-timer"] = Uint32Parser::Factory;
         factories["subnet"] = StringParser::Factory;
         factories["pool"] = PoolParser::Factory;
+        factories["option-data"] = OptionDataListParser::Factory;
 
         FactoryMap::iterator f = factories.find(config_id);
         if (f == factories.end()) {
@@ -620,6 +1038,9 @@ private:
     /// storage for pools belonging to this subnet
     PoolStorage pools_;
 
+    /// storage for options belonging to this subnet
+    OptionStorage options_;
+
     /// parsers are stored here
     ParserCollection parsers_;
 };
@@ -650,7 +1071,6 @@ public:
         // used: Subnet4ConfigParser
 
         BOOST_FOREACH(ConstElementPtr subnet, subnets_list->listValue()) {
-
             ParserPtr parser(new Subnet4ConfigParser("subnet"));
             parser->build(subnet);
             subnets_.push_back(parser);
@@ -702,6 +1122,7 @@ Dhcp4ConfigParser* createGlobalDhcp4ConfigParser(const std::string& config_id) {
     factories["rebind-timer"] = Uint32Parser::Factory;
     factories["interface"] = InterfaceListConfigParser::Factory;
     factories["subnet4"] = Subnets4ListConfigParser::Factory;
+    factories["option-data"] = OptionDataListParser::Factory;
     factories["version"] = StringParser::Factory;
 
     FactoryMap::iterator f = factories.find(config_id);
@@ -739,7 +1160,7 @@ configureDhcp4Server(Dhcpv4Srv& , ConstElementPtr config_set) {
         }
     } catch (const isc::Exception& ex) {
         ConstElementPtr answer = isc::config::createAnswer(1,
-                                 string("Configuration parsing failed:") + ex.what());
+                                 string("Configuration parsing failed: ") + ex.what());
         return (answer);
     } catch (...) {
         // for things like bad_cast in boost::lexical_cast
@@ -754,7 +1175,7 @@ configureDhcp4Server(Dhcpv4Srv& , ConstElementPtr config_set) {
     }
     catch (const isc::Exception& ex) {
         ConstElementPtr answer = isc::config::createAnswer(2,
-                                 string("Configuration commit failed:") + ex.what());
+                                 string("Configuration commit failed: ") + ex.what());
         return (answer);
     } catch (...) {
         // for things like bad_cast in boost::lexical_cast

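The core of the OptionDataParser added above is turning the user-supplied hex string into an option instance. The standalone sketch below (editorial illustration, not part of the commit; error handling and the option-definition lookup are omitted) condenses what createOption() does when no matching definition is found and a generic option is built.

    // Simplified sketch of the hex-to-option step in OptionDataParser::createOption().
    // The real code first consults LibDHCP::getOptionDefs(Option::V4) and only
    // falls back to a generic Option when no definition matches the code.
    #include <dhcp/option.h>
    #include <util/encode/hex.h>

    #include <stdint.h>
    #include <string>
    #include <vector>

    isc::dhcp::OptionPtr
    makeGenericV4Option(uint16_t code, const std::string& hex_data) {
        std::vector<uint8_t> binary;
        // Throws if hex_data is not a valid string of hexadecimal digits;
        // the parser reports that as a Dhcp4ConfigError.
        isc::util::encode::decodeHex(hex_data, binary);
        return (isc::dhcp::OptionPtr(
            new isc::dhcp::Option(isc::dhcp::Option::V4, code, binary)));
    }
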
+ 1 - 0
src/bin/dhcp4/config_parser.h

@@ -14,6 +14,7 @@
 
 #include <exceptions/exceptions.h>
 #include <cc/data.h>
+#include <stdint.h>
 #include <string>
 
 #ifndef DHCP4_CONFIG_PARSER_H

+ 65 - 3
src/bin/dhcp4/dhcp4.spec

@@ -34,6 +34,37 @@
         "item_default": 4000
       },
 
+      { "item_name": "option-data",
+        "item_type": "list",
+        "item_optional": false,
+        "item_default": [],
+        "list_item_spec":
+        {
+          "item_name": "single-option-data",
+          "item_type": "map",
+          "item_optional": false,
+          "item_default": {},
+          "map_item_spec": [
+          {
+            "item_name": "name",
+            "item_type": "string",
+            "item_optional": false,
+            "item_default": ""
+          },
+
+          { "item_name": "code",
+            "item_type": "integer",
+            "item_optional": false,
+            "item_default": 0
+          },
+          { "item_name": "data",
+            "item_type": "string",
+            "item_optional": false,
+            "item_default": ""
+          } ]
+        }
+      },
+
       { "item_name": "subnet4",
         "item_type": "list",
         "item_optional": false,
@@ -80,9 +111,40 @@
                         "item_optional": false,
                         "item_default": ""
                     }
-                }
-            ]
-        }
+                },
+
+                { "item_name": "option-data",
+                  "item_type": "list",
+                  "item_optional": false,
+                  "item_default": [],
+                  "list_item_spec":
+                  {
+                    "item_name": "single-option-data",
+                    "item_type": "map",
+                    "item_optional": false,
+                    "item_default": {},
+                    "map_item_spec": [
+                    {
+                      "item_name": "name",
+                      "item_type": "string",
+                      "item_optional": false,
+                      "item_default": ""
+                    },
+                    {
+                      "item_name": "code",
+                      "item_type": "integer",
+                      "item_optional": false,
+                      "item_default": 0
+                    },
+                    {
+                      "item_name": "data",
+                      "item_type": "string",
+                      "item_optional": false,
+                      "item_default": ""
+                    } ]
+                  }
+                } ]
+         }
       }
     ],
     "commands": [

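dhcp4.spec above allows "option-data" both at the global scope and inside each "subnet4" entry; as the Subnet4ConfigParser::commit() changes earlier in this diff show, a subnet-scope option with a given code takes precedence over a globally configured one. The fragment below is a hypothetical configuration (field names follow this spec; the addresses, code, and payloads are invented for illustration), written as a C++ string literal the way the unit tests embed their configurations.

    // Hypothetical sketch: global vs. subnet-scope option-data.  For the subnet
    // below, the subnet-scope value for code 56 ("01") would be used and the
    // global value ("AB") ignored, per Subnet4ConfigParser::commit().
    const char* const kSampleDhcp4Config =
        "{ \"interface\": [ \"all\" ],"
        "  \"valid-lifetime\": 4000,"
        "  \"option-data\": [ {"
        "      \"name\": \"option_foo\", \"code\": 56, \"data\": \"AB\" } ],"
        "  \"subnet4\": [ {"
        "      \"subnet\": \"192.0.2.0/24\","
        "      \"pool\": [ \"192.0.2.1 - 192.0.2.100\" ],"
        "      \"option-data\": [ {"
        "          \"name\": \"option_foo\", \"code\": 56, \"data\": \"01\" } ]"
        "  } ] }";
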
+ 16 - 11
src/bin/dhcp4/dhcp4_messages.mes

@@ -26,29 +26,34 @@ to establish a session with the BIND 10 control channel.
 A debug message listing the command (and possible arguments) received
 from the BIND 10 control system by the IPv4 DHCP server.
 
+% DHCP4_CONFIG_COMPLETE DHCPv4 server has completed configuration: %1
+This is an informational message announcing the successful processing of a
+new configuration. It is output during server startup, and when an updated
+configuration is committed by the administrator.  Additional information
+may be provided.
+
 % DHCP4_CONFIG_LOAD_FAIL failed to load configuration: %1
 This critical error message indicates that the initial DHCPv4
 configuration has failed. The server will start, but nothing will be
 served until the configuration has been corrected.
 
-% DHCP4_CONFIG_UPDATE updated configuration received: %1
-A debug message indicating that the IPv4 DHCP server has received an
-updated configuration from the BIND 10 configuration system.
+% DHCP4_CONFIG_NEW_SUBNET A new subnet has been added to configuration: %1
+This is an informational message reporting that the configuration has
+been extended to include the specified IPv4 subnet.
 
 % DHCP4_CONFIG_START DHCPv4 server is processing the following configuration: %1
 This is a debug message that is issued every time the server receives a
 configuration. That happens at start up and also when a server configuration
 change is committed by the administrator.
 
-% DHCP4_CONFIG_NEW_SUBNET A new subnet has been added to configuration: %1
-This is an informational message reporting that the configuration has
-been extended to include the specified IPv4 subnet.
+% DHCP4_CONFIG_UPDATE updated configuration received: %1
+A debug message indicating that the IPv4 DHCP server has received an
+updated configuration from the BIND 10 configuration system.
 
-% DHCP4_CONFIG_COMPLETE DHCPv4 server has completed configuration: %1
-This is an informational message announcing the successful processing of a
-new configuration. it is output during server startup, and when an updated
-configuration is committed by the administrator.  Additional information
-may be provided.
+% DHCP4_CONFIG_OPTION_DUPLICATE multiple options with the code: %1 added to the subnet: %2
+This warning message is issued on an attempt to configure multiple options with the
+same option code for the particular subnet. Adding multiple options is uncommon
+for DHCPv4, but it is not prohibited.
 
 % DHCP4_NOT_RUNNING IPv4 DHCP server is not running
 A warning message is issued when an attempt is made to shut down the

+ 3 - 3
src/bin/dhcp4/tests/Makefile.am

@@ -66,13 +66,13 @@ dhcp4_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 dhcp4_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
 dhcp4_unittests_LDADD = $(GTEST_LDADD)
 dhcp4_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libb10-asiolink.la
+dhcp4_unittests_LDADD += $(top_builddir)/src/lib/cc/libb10-cc.la
+dhcp4_unittests_LDADD += $(top_builddir)/src/lib/config/libb10-cfgclient.la
 dhcp4_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libb10-dhcp++.la
 dhcp4_unittests_LDADD += $(top_builddir)/src/lib/dhcpsrv/libb10-dhcpsrv.la
 dhcp4_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libb10-exceptions.la
 dhcp4_unittests_LDADD += $(top_builddir)/src/lib/log/libb10-log.la
-dhcp4_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libb10-asiolink.la
-dhcp4_unittests_LDADD += $(top_builddir)/src/lib/config/libb10-cfgclient.la
-dhcp4_unittests_LDADD += $(top_builddir)/src/lib/cc/libb10-cc.la
+dhcp4_unittests_LDADD += $(top_builddir)/src/lib/util/libb10-util.la
 endif
 
 noinst_PROGRAMS = $(TESTS)

+ 476 - 0
src/bin/dhcp4/tests/config_parser_unittest.cc

@@ -22,6 +22,7 @@
 #include <config/ccsession.h>
 #include <dhcpsrv/subnet.h>
 #include <dhcpsrv/cfgmgr.h>
+#include <boost/foreach.hpp>
 #include <iostream>
 #include <fstream>
 #include <sstream>
@@ -73,9 +74,188 @@ public:
     }
 
     ~Dhcp4ParserTest() {
+        resetConfiguration();
         delete srv_;
     };
 
+    /// @brief Create a simple configuration with a single option.
+    ///
+    /// This function allows setting one of the parameters that configure
+    /// the option value. These parameters are: "name", "code" and "data".
+    ///
+    /// @param param_value string holding the option parameter value to be
+    /// injected into the configuration string.
+    /// @param parameter name of the parameter to be configured with
+    /// param value.
+    /// @return configuration string containing custom values of parameters
+    /// describing an option.
+    std::string createConfigWithOption(const std::string& param_value,
+                                       const std::string& parameter) {
+        std::map<std::string, std::string> params;
+        if (parameter == "name") {
+            params["name"] = param_value;
+            params["code"] = "56";
+            params["data"] = "AB CDEF0105";
+        } else if (parameter == "code") {
+            params["name"] = "option_foo";
+            params["code"] = param_value;
+            params["data"] = "AB CDEF0105";
+        } else if (parameter == "data") {
+            params["name"] = "option_foo";
+            params["code"] = "56";
+            params["data"] = param_value;
+        }
+        return (createConfigWithOption(params));
+    }
+
+    /// @brief Create a simple configuration with a single option.
+    ///
+    /// This function creates a configuration for a single option with
+    /// custom values for all parameters that describe the option.
+    ///
+    /// @param params map holding parameters and their values.
+    /// @return configuration string containing custom values of parameters
+    /// describing an option.
+    std::string createConfigWithOption(const std::map<std::string, std::string>& params) {
+        std::ostringstream stream;
+        stream << "{ \"interface\": [ \"all\" ],"
+            "\"rebind-timer\": 2000, "
+            "\"renew-timer\": 1000, "
+            "\"subnet4\": [ { "
+            "    \"pool\": [ \"192.0.2.1 - 192.0.2.100\" ],"
+            "    \"subnet\": \"192.0.2.0/24\", "
+            "    \"option-data\": [ {";
+        bool first = true;
+        typedef std::pair<std::string, std::string> ParamPair;
+        BOOST_FOREACH(ParamPair param, params) {
+            if (!first) {
+                stream << ", ";
+            } else {
+                // cppcheck-suppress unreadVariable
+                first = false;
+            }
+            if (param.first == "name") {
+                stream << "\"name\": \"" << param.second << "\"";
+            } else if (param.first == "code") {
+                stream << "\"code\": " << param.second << "";
+            } else if (param.first == "data") {
+                stream << "\"data\": \"" << param.second << "\"";
+            }
+        }
+        stream <<
+            "        } ]"
+            " } ],"
+            "\"valid-lifetime\": 4000 }";
+        return (stream.str());
+    }
+
+    /// @brief Test invalid option parameter value.
+    ///
+    /// This test function constructs the simple configuration
+    /// string and injects invalid option configuration into it.
+    /// It expects that the parser will fail for the provided parameter value.
+    ///
+    /// @param param_value string holding invalid option parameter value
+    /// to be injected into configuration string.
+    /// @param parameter name of the parameter to be configured with
+    /// param_value (can be any of "name", "code", "data")
+    void testInvalidOptionParam(const std::string& param_value,
+                                const std::string& parameter) {
+        ConstElementPtr x;
+        std::string config = createConfigWithOption(param_value, parameter);
+        ElementPtr json = Element::fromJSON(config);
+        EXPECT_NO_THROW(x = configureDhcp4Server(*srv_, json));
+        ASSERT_TRUE(x);
+        comment_ = parseAnswer(rcode_, x);
+        ASSERT_EQ(1, rcode_);
+    }
+
+    /// @brief Test option against given code and data.
+    ///
+    /// @param option_desc option descriptor that carries the option to
+    /// be tested.
+    /// @param expected_code expected code of the option.
+    /// @param expected_data expected data in the option.
+    /// @param expected_data_len length of the reference data.
+    /// @param extra_data if true extra data is allowed in an option
+    /// after tested data.
+    void testOption(const Subnet::OptionDescriptor& option_desc,
+                    uint16_t expected_code, const uint8_t* expected_data,
+                    size_t expected_data_len,
+                    bool extra_data = false) {
+        // Check if option descriptor contains valid option pointer.
+        ASSERT_TRUE(option_desc.option);
+        // Verify option type.
+        EXPECT_EQ(expected_code, option_desc.option->getType());
+        // We may have many different option types being created. Some of them
+        // have dedicated classes derived from the Option class. In such a case, if
+        // we want to verify the option contents against expected_data we have
+        // to prepare a raw buffer with the contents of the option. The easiest
+        // way is to call pack() which will prepare on-wire data.
+        util::OutputBuffer buf(option_desc.option->getData().size());
+        option_desc.option->pack(buf);
+        if (extra_data) {
+            // The length of the buffer must be at least equal to size of the
+            // reference data but it can sometimes be greater than that. This is
+            // because some options carry suboptions that increase the overall
+            // length.
+            ASSERT_GE(buf.getLength() - option_desc.option->getHeaderLen(),
+                      expected_data_len);
+        } else {
+            ASSERT_EQ(buf.getLength() - option_desc.option->getHeaderLen(),
+                      expected_data_len);
+        }
+        // Verify that the data is correct. Do not verify suboptions or the header.
+        const uint8_t* data = static_cast<const uint8_t*>(buf.getData());
+        EXPECT_EQ(0, memcmp(expected_data, data + option_desc.option->getHeaderLen(),
+                            expected_data_len));
+    }
+
+    /// @brief Reset configuration database.
+    ///
+    /// This function resets the configuration database by
+    /// removing all subnets and option-data. The reset must
+    /// be performed after each test to make sure that the
+    /// contents of the database do not affect the results of
+    /// subsequent tests.
+    void resetConfiguration() {
+        ConstElementPtr status;
+
+        string config = "{ \"interface\": [ \"all\" ],"
+            "\"rebind-timer\": 2000, "
+            "\"renew-timer\": 1000, "
+            "\"valid-lifetime\": 4000, "
+            "\"subnet4\": [ ], "
+            "\"option-data\": [ ] }";
+
+        try {
+            ElementPtr json = Element::fromJSON(config);
+            status = configureDhcp4Server(*srv_, json);
+        } catch (const std::exception& ex) {
+            FAIL() << "Fatal error: unable to reset configuration database"
+                   << " after the test. The following configuration was used"
+                   << " to reset database: " << std::endl
+                   << config << std::endl
+                   << " and the following error message was returned:"
+                   << ex.what() << std::endl;
+        }
+
+        // status object must not be NULL
+        if (!status) {
+            FAIL() << "Fatal error: unable to reset configuration database"
+                   << " after the test. Configuration function returned"
+                   << " NULL pointer" << std::endl;
+        }
+
+        comment_ = parseAnswer(rcode_, status);
+        // returned value should be 0 (configuration success)
+        if (rcode_ != 0) {
+            FAIL() << "Fatal error: unable to reset configuration database"
+                   << " after the test. Configuration function returned"
+                   << " error code " << rcode_ << std::endl;
+        }
+    }
+
     Dhcpv4Srv* srv_;
 
     int rcode_;
@@ -248,6 +428,302 @@ TEST_F(Dhcp4ParserTest, poolPrefixLen) {
     EXPECT_EQ(4000, subnet->getValid());
 }
 
+// Goal of this test is to verify that global option
+// data is configured for the subnet if the subnet
+// configuration does not include options configuration.
+TEST_F(Dhcp4ParserTest, optionDataDefaults) {
+    ConstElementPtr x;
+    string config = "{ \"interface\": [ \"all\" ],"
+        "\"rebind-timer\": 2000,"
+        "\"renew-timer\": 1000,"
+        "\"option-data\": [ {"
+        "    \"name\": \"option_foo\","
+        "    \"code\": 56,"
+        "    \"data\": \"AB CDEF0105\""
+        " },"
+        " {"
+        "    \"name\": \"option_foo2\","
+        "    \"code\": 23,"
+        "    \"data\": \"01\""
+        " } ],"
+        "\"subnet4\": [ { "
+        "    \"pool\": [ \"192.0.2.1 - 192.0.2.100\" ],"
+        "    \"subnet\": \"192.0.2.0/24\""
+        " } ],"
+        "\"valid-lifetime\": 4000 }";
+
+    ElementPtr json = Element::fromJSON(config);
+
+    EXPECT_NO_THROW(x = configureDhcp4Server(*srv_, json));
+    ASSERT_TRUE(x);
+    comment_ = parseAnswer(rcode_, x);
+    ASSERT_EQ(0, rcode_);
+
+    Subnet4Ptr subnet = CfgMgr::instance().getSubnet4(IOAddress("192.0.2.200"));
+    ASSERT_TRUE(subnet);
+    const Subnet::OptionContainer& options = subnet->getOptions();
+    ASSERT_EQ(2, options.size());
+
+    // Get the search index. Index #1 is to search using option code.
+    const Subnet::OptionContainerTypeIndex& idx = options.get<1>();
+
+    // Get the options for specified index. Expecting one option to be
+    // returned but in theory we may have multiple options with the same
+    // code so we get the range.
+    std::pair<Subnet::OptionContainerTypeIndex::const_iterator,
+              Subnet::OptionContainerTypeIndex::const_iterator> range =
+        idx.equal_range(56);
+    // Expect single option with the code equal to 56.
+    ASSERT_EQ(1, std::distance(range.first, range.second));
+    const uint8_t foo_expected[] = {
+        0xAB, 0xCD, 0xEF, 0x01, 0x05
+    };
+    // Check if option is valid in terms of code and carried data.
+    testOption(*range.first, 56, foo_expected, sizeof(foo_expected));
+
+    range = idx.equal_range(23);
+    ASSERT_EQ(1, std::distance(range.first, range.second));
+    // Do another round of testing with second option.
+    const uint8_t foo2_expected[] = {
+        0x01
+    };
+    testOption(*range.first, 23, foo2_expected, sizeof(foo2_expected));
+}
+
+// Goal of this test is to verify options configuration
+// for a single subnet. In particular this test checks
+// that local options configuration overrides global
+// option setting.
+TEST_F(Dhcp4ParserTest, optionDataInSingleSubnet) {
+    ConstElementPtr x;
+    string config = "{ \"interface\": [ \"all\" ],"
+        "\"rebind-timer\": 2000, "
+        "\"renew-timer\": 1000, "
+        "\"option-data\": [ {"
+        "      \"name\": \"option_foo\","
+        "      \"code\": 56,"
+        "      \"data\": \"AB\""
+        " } ],"
+        "\"subnet4\": [ { "
+        "    \"pool\": [ \"192.0.2.1 - 192.0.2.100\" ],"
+        "    \"subnet\": \"192.0.2.0/24\", "
+        "    \"option-data\": [ {"
+        "          \"name\": \"option_foo\","
+        "          \"code\": 56,"
+        "          \"data\": \"AB CDEF0105\""
+        "        },"
+        "        {"
+        "          \"name\": \"option_foo2\","
+        "          \"code\": 23,"
+        "          \"data\": \"01\""
+        "        } ]"
+        " } ],"
+        "\"valid-lifetime\": 4000 }";
+
+    ElementPtr json = Element::fromJSON(config);
+
+    EXPECT_NO_THROW(x = configureDhcp4Server(*srv_, json));
+    ASSERT_TRUE(x);
+    comment_ = parseAnswer(rcode_, x);
+    ASSERT_EQ(0, rcode_);
+
+    Subnet4Ptr subnet = CfgMgr::instance().getSubnet4(IOAddress("192.0.2.24"));
+    ASSERT_TRUE(subnet);
+    const Subnet::OptionContainer& options = subnet->getOptions();
+    ASSERT_EQ(2, options.size());
+
+    // Get the search index. Index #1 is to search using option code.
+    const Subnet::OptionContainerTypeIndex& idx = options.get<1>();
+
+    // Get the options for specified index. Expecting one option to be
+    // returned but in theory we may have multiple options with the same
+    // code so we get the range.
+    std::pair<Subnet::OptionContainerTypeIndex::const_iterator,
+              Subnet::OptionContainerTypeIndex::const_iterator> range =
+        idx.equal_range(56);
+    // Expect single option with the code equal to 56.
+    ASSERT_EQ(1, std::distance(range.first, range.second));
+    const uint8_t foo_expected[] = {
+        0xAB, 0xCD, 0xEF, 0x01, 0x05
+    };
+    // Check if option is valid in terms of code and carried data.
+    testOption(*range.first, 56, foo_expected, sizeof(foo_expected));
+
+    range = idx.equal_range(23);
+    ASSERT_EQ(1, std::distance(range.first, range.second));
+    // Do another round of testing with second option.
+    const uint8_t foo2_expected[] = {
+        0x01
+    };
+    testOption(*range.first, 23, foo2_expected, sizeof(foo2_expected));
+}
+
+// Goal of this test is to verify options configuration
+// for multiple subnets.
+TEST_F(Dhcp4ParserTest, optionDataInMultipleSubnets) {
+    ConstElementPtr x;
+    string config = "{ \"interface\": [ \"all\" ],"
+        "\"rebind-timer\": 2000, "
+        "\"renew-timer\": 1000, "
+        "\"subnet4\": [ { "
+        "    \"pool\": [ \"192.0.2.1 - 192.0.2.100\" ],"
+        "    \"subnet\": \"192.0.2.0/24\", "
+        "    \"option-data\": [ {"
+        "          \"name\": \"option_foo\","
+        "          \"code\": 56,"
+        "          \"data\": \"0102030405060708090A\""
+        "        } ]"
+        " },"
+        " {"
+        "    \"pool\": [ \"192.0.3.101 - 192.0.3.150\" ],"
+        "    \"subnet\": \"192.0.3.0/24\", "
+        "    \"option-data\": [ {"
+        "          \"name\": \"option_foo2\","
+        "          \"code\": 23,"
+        "          \"data\": \"FF\""
+        "        } ]"
+        " } ],"
+        "\"valid-lifetime\": 4000 }";
+
+    ElementPtr json = Element::fromJSON(config);
+
+    EXPECT_NO_THROW(x = configureDhcp4Server(*srv_, json));
+    ASSERT_TRUE(x);
+    comment_ = parseAnswer(rcode_, x);
+    ASSERT_EQ(0, rcode_);
+
+    Subnet4Ptr subnet1 = CfgMgr::instance().getSubnet4(IOAddress("192.0.2.100"));
+    ASSERT_TRUE(subnet1);
+    const Subnet::OptionContainer& options1 = subnet1->getOptions();
+    ASSERT_EQ(1, options1.size());
+
+    // Get the search index. Index #1 is to search using option code.
+    const Subnet::OptionContainerTypeIndex& idx1 = options1.get<1>();
+
+    // Get the options for specified index. Expecting one option to be
+    // returned but in theory we may have multiple options with the same
+    // code so we get the range.
+    std::pair<Subnet::OptionContainerTypeIndex::const_iterator,
+              Subnet::OptionContainerTypeIndex::const_iterator> range1 =
+        idx1.equal_range(56);
+    // Expect single option with the code equal to 56.
+    ASSERT_EQ(1, std::distance(range1.first, range1.second));
+    const uint8_t foo_expected[] = {
+        0x01, 0x02, 0x03, 0x04, 0x05,
+        0x06, 0x07, 0x08, 0x09, 0x0A
+    };
+    // Check if option is valid in terms of code and carried data.
+    testOption(*range1.first, 56, foo_expected, sizeof(foo_expected));
+
+    // Test another subnet in the same way.
+    Subnet4Ptr subnet2 = CfgMgr::instance().getSubnet4(IOAddress("192.0.3.102"));
+    ASSERT_TRUE(subnet2);
+    const Subnet::OptionContainer& options2 = subnet2->getOptions();
+    ASSERT_EQ(1, options2.size());
+
+    const Subnet::OptionContainerTypeIndex& idx2 = options2.get<1>();
+    std::pair<Subnet::OptionContainerTypeIndex::const_iterator,
+              Subnet::OptionContainerTypeIndex::const_iterator> range2 =
+        idx2.equal_range(23);
+    ASSERT_EQ(1, std::distance(range2.first, range2.second));
+
+    const uint8_t foo2_expected[] = { 0xFF };
+    testOption(*range2.first, 23, foo2_expected, sizeof(foo2_expected));
+}
+
+// Verify that empty option name is rejected in the configuration.
+TEST_F(Dhcp4ParserTest, optionNameEmpty) {
+    // Empty option names not allowed.
+    testInvalidOptionParam("", "name");
+}
+
+// Verify that an option name containing spaces is rejected
+// in the configuration.
+TEST_F(Dhcp4ParserTest, optionNameSpaces) {
+    // Spaces in option names not allowed.
+    testInvalidOptionParam("option foo", "name");
+}
+
+// Verify that negative option code is rejected in the configuration.
+TEST_F(Dhcp4ParserTest, optionCodeNegative) {
+    // Check negative option code -4. This should fail too.
+    testInvalidOptionParam("-4", "code");
+}
+
+// Verify that out of bounds option code is rejected in the configuration.
+TEST_F(Dhcp4ParserTest, optionCodeNonUint8) {
+    // Valid DHCPv4 option codes are uint8_t values, so passing
+    // a value that exceeds the uint8_t maximum (255) should result
+    // in failure.
+    testInvalidOptionParam("257", "code");
+}
+
+// Verify that zero option code is rejected in the configuration.
+TEST_F(Dhcp4ParserTest, optionCodeZero) {
+    // Option code 0 is reserved and should not be accepted
+    // by configuration parser.
+    testInvalidOptionParam("0", "code");
+}
+
+// Verify that option data which contains non-hexadecimal characters
+// is rejected by the configuration.
+TEST_F(Dhcp4ParserTest, optionDataInvalidChar) {
+    // The option data contains an invalid (non-hexadecimal) character 'R'
+    // and should be rejected by the configuration parser.
+    testInvalidOptionParam("01020R", "data");
+}
+
+// Verify that option data containing a '0x' prefix is rejected
+// by the configuration.
+TEST_F(Dhcp4ParserTest, optionDataUnexpectedPrefix) {
+    // The option data must be specified as plain hexadecimal digits;
+    // a value with the '0x' prefix should be rejected by the parser.
+    testInvalidOptionParam("0x0102", "data");
+}
+
+// Verify that option data consisting of an odd number of
+// hexadecimal digits is rejected in the configuration.
+TEST_F(Dhcp4ParserTest, optionDataOddLength) {
+    // An odd number of hexadecimal digits does not form a sequence of
+    // full octets and should be rejected by the configuration parser.
+    testInvalidOptionParam("123", "data");
+}
+
+// Verify that either lower or upper case characters are allowed
+// to specify the option data.
+TEST_F(Dhcp4ParserTest, optionDataLowerCase) {
+    ConstElementPtr x;
+    std::string config = createConfigWithOption("0a0b0C0D", "data");
+    ElementPtr json = Element::fromJSON(config);
+
+    EXPECT_NO_THROW(x = configureDhcp4Server(*srv_, json));
+    ASSERT_TRUE(x);
+    comment_ = parseAnswer(rcode_, x);
+    ASSERT_EQ(0, rcode_);
+
+    Subnet4Ptr subnet = CfgMgr::instance().getSubnet4(IOAddress("192.0.2.5"));
+    ASSERT_TRUE(subnet);
+    const Subnet::OptionContainer& options = subnet->getOptions();
+    ASSERT_EQ(1, options.size());
+
+    // Get the search index. Index #1 is to search using option code.
+    const Subnet::OptionContainerTypeIndex& idx = options.get<1>();
+
+    // Get the options for specified index. Expecting one option to be
+    // returned but in theory we may have multiple options with the same
+    // code so we get the range.
+    std::pair<Subnet::OptionContainerTypeIndex::const_iterator,
+              Subnet::OptionContainerTypeIndex::const_iterator> range =
+        idx.equal_range(56);
+    // Expect single option with the code equal to 56.
+    ASSERT_EQ(1, std::distance(range.first, range.second));
+    const uint8_t foo_expected[] = {
+        0x0A, 0x0B, 0x0C, 0x0D
+    };
+    // Check if option is valid in terms of code and carried data.
+    testOption(*range.first, 56, foo_expected, sizeof(foo_expected));
+}
+
 /// This test checks if Uint32Parser can really parse the whole range
 /// and properly err of out of range values. As we can't call Uint32Parser
 /// directly, we are exploiting the fact that it is used to parse global

+ 6 - 6
src/bin/dhcp6/Makefile.am

@@ -59,14 +59,14 @@ if USE_CLANGPP
 b10_dhcp6_CXXFLAGS = -Wno-unused-parameter
 endif
 
-b10_dhcp6_LDADD  = $(top_builddir)/src/lib/exceptions/libb10-exceptions.la
-b10_dhcp6_LDADD += $(top_builddir)/src/lib/util/libb10-util.la
-b10_dhcp6_LDADD += $(top_builddir)/src/lib/asiolink/libb10-asiolink.la
-b10_dhcp6_LDADD += $(top_builddir)/src/lib/log/libb10-log.la
+b10_dhcp6_LDADD  = $(top_builddir)/src/lib/asiolink/libb10-asiolink.la
+b10_dhcp6_LDADD += $(top_builddir)/src/lib/cc/libb10-cc.la
+b10_dhcp6_LDADD += $(top_builddir)/src/lib/config/libb10-cfgclient.la
 b10_dhcp6_LDADD += $(top_builddir)/src/lib/dhcp/libb10-dhcp++.la
 b10_dhcp6_LDADD += $(top_builddir)/src/lib/dhcpsrv/libb10-dhcpsrv.la
-b10_dhcp6_LDADD += $(top_builddir)/src/lib/config/libb10-cfgclient.la
-b10_dhcp6_LDADD += $(top_builddir)/src/lib/cc/libb10-cc.la
+b10_dhcp6_LDADD += $(top_builddir)/src/lib/exceptions/libb10-exceptions.la
+b10_dhcp6_LDADD += $(top_builddir)/src/lib/log/libb10-log.la
+b10_dhcp6_LDADD += $(top_builddir)/src/lib/util/libb10-util.la
 
 b10_dhcp6dir = $(pkgdatadir)
 b10_dhcp6_DATA = dhcp6.spec

+ 17 - 22
src/bin/dhcp6/config_parser.cc

@@ -496,12 +496,12 @@ private:
 ///
 /// This parser parses configuration entries that specify value of
 /// a single option. These entries include option name, option code
-/// and data carried by the option. If parsing is successful than an
+/// and data carried by the option. If parsing is successful then an
 /// instance of an option is created and added to the storage provided
 /// by the calling class.
 ///
 /// @todo This class parses and validates the option name. However it is
-/// not used anywhere util support for option spaces is implemented
+/// not used anywhere until support for option spaces is implemented
 /// (see tickets #2319, #2314). When option spaces are implemented
 /// there will be a way to reference the particular option using
 /// its type (code) or option name.
@@ -857,26 +857,21 @@ public:
             // a setStorage and build methods are invoked.
 
             // Try uint32 type parser.
-            if (buildParser<Uint32Parser, Uint32Storage >(parser, uint32_values_,
-                                                          param.second)) {
-                // Storage set, build invoked on the parser, proceed with
-                // next configuration element.
-                continue;
-            }
-            // Try string type parser.
-            if (buildParser<StringParser, StringStorage >(parser, string_values_,
-                                                          param.second)) {
-                continue;
-            }
-            // Try pools parser.
-            if (buildParser<PoolParser, PoolStorage >(parser, pools_,
-                                                      param.second)) {
-                continue;
-            }
-            // Try option data parser.
-            if (buildParser<OptionDataListParser, OptionStorage >(parser, options_,
-                                                                  param.second)) {
-                continue;
+            if (!buildParser<Uint32Parser, Uint32Storage >(parser, uint32_values_,
+                                                           param.second) &&
+                // Try string type parser.
+                !buildParser<StringParser, StringStorage >(parser, string_values_,
+                                                           param.second) &&
+                // Try pool parser.
+                !buildParser<PoolParser, PoolStorage >(parser, pools_,
+                                                       param.second) &&
+                // Try option data parser.
+                !buildParser<OptionDataListParser, OptionStorage >(parser, options_,
+                                                                   param.second)) {
+                // Appropriate parsers are created in createSubnet6ConfigParser
+                // and they should be limited to those that we check for here. Thus,
+                // if we fail to find a matching parser here, it is a programming error.
+                isc_throw(Dhcp6ConfigError, "failed to find suitable parser");
             }
         }
         // Ok, we now have subnet parsed

+ 9 - 9
src/bin/dhcp6/dhcp6_messages.mes

@@ -47,9 +47,9 @@ This is an informational message reporting that the configuration has
 been extended to include the specified subnet.
 
 % DHCP6_CONFIG_OPTION_DUPLICATE multiple options with the code: %1 added to the subnet: %2
-This warning message is issued on attempt to configure multiple options with the
+This warning message is issued on an attempt to configure multiple options with the
 same option code for the particular subnet. Adding multiple options is uncommon
-for DHCPv6, yet it is not prohibited.
+for DHCPv6, but it is not prohibited.
 
 % DHCP6_CONFIG_START DHCPv6 server is processing the following configuration: %1
 This is a debug message that is issued every time the server receives a
@@ -86,13 +86,6 @@ This message indicates that the server failed to grant (in response to
 received REQUEST) a lease for a given client. There may be many reasons for
 such failure. Each specific failure is logged in a separate log entry.
 
-% DHCP6_REQUIRED_OPTIONS_CHECK_FAIL %1 message received from %2 failed the following check: %3
-This message indicates that received DHCPv6 packet is invalid.  This may be due
-to a number of reasons, e.g. the mandatory client-id option is missing,
-the server-id forbidden in that particular type of message is present,
-there is more than one instance of client-id or server-id present,
-etc. The exact reason for rejecting the packet is included in the message.
-
 % DHCP6_NOT_RUNNING IPv6 DHCP server is not running
 A warning message is issued when an attempt is made to shut down the
 IPv6 DHCP server but it is not running.
@@ -149,6 +142,13 @@ as a hint for possible requested address.
 % DHCP6_QUERY_DATA received packet length %1, data length %2, data is %3
 A debug message listing the data received from the client or relay.
 
+% DHCP6_REQUIRED_OPTIONS_CHECK_FAIL %1 message received from %2 failed the following check: %3
+This message indicates that the received DHCPv6 packet is invalid.  This may be due
+to a number of reasons, e.g. the mandatory client-id option is missing,
+the server-id forbidden in that particular type of message is present,
+there is more than one instance of client-id or server-id present,
+etc. The exact reason for rejecting the packet is included in the message.
+
 % DHCP6_RESPONSE_DATA responding with packet type %1 data is %2
 A debug message listing the data returned to the client.
 

+ 4 - 5
src/bin/dhcp6/tests/Makefile.am

@@ -63,14 +63,13 @@ dhcp6_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 dhcp6_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
 dhcp6_unittests_LDADD = $(GTEST_LDADD)
 dhcp6_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libb10-asiolink.la
-dhcp6_unittests_LDADD += $(top_builddir)/src/lib/util/libb10-util.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/cc/libb10-cc.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/config/libb10-cfgclient.la
 dhcp6_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libb10-dhcp++.la
 dhcp6_unittests_LDADD += $(top_builddir)/src/lib/dhcpsrv/libb10-dhcpsrv.la
-dhcp6_unittests_LDADD += $(top_builddir)/src/lib/log/libb10-log.la
 dhcp6_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libb10-exceptions.la
-dhcp6_unittests_LDADD += $(top_builddir)/src/lib/config/libb10-cfgclient.la
-dhcp6_unittests_LDADD += $(top_builddir)/src/lib/cc/libb10-cc.la
-
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/log/libb10-log.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/util/libb10-util.la
 endif
 
 noinst_PROGRAMS = $(TESTS)

+ 5 - 4
src/bin/dhcp6/tests/config_parser_unittest.cc

@@ -147,14 +147,14 @@ public:
                    << ex.what() << std::endl;
         }
 
-
-        // returned value should be 0 (configuration success)
+        // status object must not be NULL
         if (!status) {
             FAIL() << "Fatal error: unable to reset configuration database"
                    << " after the test. Configuration function returned"
                    << " NULL pointer" << std::endl;
         }
         comment_ = parseAnswer(rcode_, status);
+        // returned value should be 0 (configuration success)
         if (rcode_ != 0) {
             FAIL() << "Fatal error: unable to reset configuration database"
                    << " after the test. Configuration function returned"
@@ -218,9 +218,10 @@ public:
             ASSERT_EQ(buf.getLength() - option_desc.option->getHeaderLen(),
                       expected_data_len);
         }
-        // Verify that the data is correct. However do not verify suboptions.
+        // Verify that the data is correct. Do not verify suboptions or the header.
         const uint8_t* data = static_cast<const uint8_t*>(buf.getData());
-        EXPECT_TRUE(memcmp(expected_data, data, expected_data_len));
+        EXPECT_EQ(0, memcmp(expected_data, data + option_desc.option->getHeaderLen(),
+                            expected_data_len));
     }
 
     Dhcpv6Srv srv_;

+ 3 - 0
src/bin/dhcp6/tests/dhcp6_srv_unittest.cc

@@ -575,6 +575,9 @@ TEST_F(Dhcpv6SrvTest, SolicitInvalidHint) {
     checkClientId(reply, clientid);
 }
 
+/// @todo: Add a test in which the client sends a hint that is in the pool,
+/// but is currently being used by a different client.
+
 // This test checks that the server is offering different addresses to different
 // clients in ADVERTISEs. Please note that ADVERTISE is not a guarantee that such
 // and address will be assigned. Had the pool was very small and contained only

+ 1 - 1
src/bin/loadzone/.gitignore

@@ -1,4 +1,4 @@
 /b10-loadzone
-/b10-loadzone.py
+/loadzone.py
 /run_loadzone.sh
 /b10-loadzone.8

+ 19 - 10
src/bin/loadzone/Makefile.am

@@ -1,12 +1,17 @@
-SUBDIRS = . tests/correct tests/error
+SUBDIRS = . tests
 bin_SCRIPTS = b10-loadzone
 noinst_SCRIPTS = run_loadzone.sh
 
-CLEANFILES = b10-loadzone
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/loadzone_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = b10-loadzone loadzone.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/loadzone_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/loadzone_messages.pyc
 
 man_MANS = b10-loadzone.8
 DISTCLEANFILES = $(man_MANS)
-EXTRA_DIST = $(man_MANS) b10-loadzone.xml
+EXTRA_DIST = $(man_MANS) b10-loadzone.xml loadzone_messages.mes
 
 if GENERATE_DOCS
 
@@ -21,10 +26,13 @@ $(man_MANS):
 
 endif
 
-b10-loadzone: b10-loadzone.py
-	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-	       -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" \
-	       -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" b10-loadzone.py >$@
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/loadzone_messages.py : loadzone_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message \
+	-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/loadzone_messages.mes
+
+b10-loadzone: loadzone.py $(PYTHON_LOGMSGPKG_DIR)/work/loadzone_messages.py
+	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" loadzone.py >$@
 	chmod a+x $@
 
 EXTRA_DIST += tests/normal/README
@@ -48,6 +56,7 @@ EXTRA_DIST += tests/normal/sql1.example.com.signed
 EXTRA_DIST += tests/normal/sql2.example.com
 EXTRA_DIST += tests/normal/sql2.example.com.signed
 
-pytest:
-	$(SHELL) tests/correct/correct_test.sh
-	$(SHELL) tests/error/error_test.sh
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)

+ 0 - 13
src/bin/loadzone/TODO

@@ -1,16 +1,3 @@
-Support optional origin in $INCLUDE:
-$INCLUDE filename origin
-
-Support optional comment in $INCLUDE:
-$INCLUDE filename origin comment
-
-Support optional comment in $TTL (RFC 2308):
-$TTL number comment
-
-Do not assume "." is origin if origin is not set and sees a @ or
-a label without a ".". It should probably fail.  (Don't assume a
-mistake means it is a root level label.)
-
 Add verbose option to show what it is adding, not necessarily
 in master file format, but in the context of the data source.
 

+ 0 - 94
src/bin/loadzone/b10-loadzone.py.in

@@ -1,94 +0,0 @@
-#!@PYTHON@
-
-# Copyright (C) 2010  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-import sys; sys.path.append ('@@PYTHONPATH@@')
-import re, getopt
-import isc.datasrc
-import isc.util.process
-from isc.datasrc.master import MasterFile
-import time
-import os
-
-isc.util.process.rename()
-
-#########################################################################
-# usage: print usage note and exit
-#########################################################################
-def usage():
-    print("Usage: %s [-d <database>] [-o <origin>] <file>" % sys.argv[0], \
-          file=sys.stderr)
-    exit(1)
-
-#########################################################################
-# main
-#########################################################################
-def main():
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "d:o:h", \
-                                                ["dbfile", "origin", "help"])
-    except getopt.GetoptError as e:
-        print(str(e))
-        usage()
-        exit(2)
-
-    dbfile = '@@LOCALSTATEDIR@@/@PACKAGE@/zone.sqlite3'
-    initial_origin = ''
-    for o, a in opts:
-        if o in ("-d", "--dbfile"):
-            dbfile = a
-        elif o in ("-o", "--origin"):
-            if a[-1] != '.':
-                a += '.'
-            initial_origin = a
-        elif o in ("-h", "--help"):
-            usage()
-        else:
-            assert False, "unhandled option"
-
-    if len(args) != 1:
-        usage()
-    zonefile = args[0]
-    verbose = os.isatty(sys.stdout.fileno())
-    try:
-        master = MasterFile(zonefile, initial_origin, verbose)
-    except Exception as e:
-        sys.stderr.write("Error reading zone file: %s\n" % str(e))
-        exit(1)
-
-    try:
-        zone = master.zonename()
-        if verbose:
-            sys.stdout.write("Using SQLite3 database file %s\n" % dbfile)
-            sys.stdout.write("Zone name is %s\n" % zone)
-            sys.stdout.write("Loading file \"%s\"\n" % zonefile)
-    except Exception as e:
-        sys.stdout.write("\n")
-        sys.stderr.write("Error reading zone file: %s\n" % str(e))
-        exit(1)
-
-    try:
-        isc.datasrc.sqlite3_ds.load(dbfile, zone, master.zonedata)
-        if verbose:
-            master.closeverbose()
-            sys.stdout.write("\nDone.\n")
-    except Exception as e:
-        sys.stdout.write("\n")
-        sys.stderr.write("Error loading database: %s\n"% str(e))
-        exit(1)
-
-if __name__ == "__main__":
-    main()

+ 124 - 18
src/bin/loadzone/b10-loadzone.xml

@@ -2,7 +2,7 @@
                "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
 	       [<!ENTITY mdash "&#8212;">]>
 <!--
- - Copyright (C) 2010  Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2012  Internet Systems Consortium, Inc. ("ISC")
  -
  - Permission to use, copy, modify, and/or distribute this software for any
  - purpose with or without fee is hereby granted, provided that the above
@@ -20,7 +20,7 @@
 <refentry>
 
   <refentryinfo>
-    <date>March 26, 2012</date>
+    <date>December 15, 2012</date>
   </refentryinfo>
 
   <refmeta>
@@ -36,7 +36,7 @@
 
   <docinfo>
     <copyright>
-      <year>2010</year>
+      <year>2012</year>
       <holder>Internet Systems Consortium, Inc. ("ISC")</holder>
     </copyright>
   </docinfo>
@@ -44,9 +44,13 @@
   <refsynopsisdiv>
     <cmdsynopsis>
       <command>b10-loadzone</command>
-      <arg><option>-d <replaceable class="parameter">database</replaceable></option></arg>
-      <arg><option>-o <replaceable class="parameter">origin</replaceable></option></arg>
-      <arg choice="req">filename</arg>
+      <arg><option>-c <replaceable class="parameter">datasrc_config</replaceable></option></arg>
+      <arg><option>-d <replaceable class="parameter">debug_level</replaceable></option></arg>
+      <arg><option>-i <replaceable class="parameter">report_interval</replaceable></option></arg>
+      <arg><option>-t <replaceable class="parameter">datasrc_type</replaceable></option></arg>
+      <arg><option>-C <replaceable class="parameter">zone_class</replaceable></option></arg>
+      <arg choice="req">zone name</arg>
+      <arg choice="req">zone file</arg>
     </cmdsynopsis>
   </refsynopsisdiv>
 
@@ -66,8 +70,6 @@
     $ORIGIN is followed by a domain name, and sets the the origin
     that will be used for relative domain names in subsequent records.
     $INCLUDE is followed by a filename to load.
-<!-- TODO: and optionally a
-    domain name used to set the relative domain name origin. -->
     The previous origin is restored after the file is included.
 <!-- the current domain name is also restored -->
     $TTL is followed by a time-to-live value which is used
@@ -75,11 +77,31 @@
     </para>
 
     <para>
+      If the specified zone does not exist in the specified data
+      source, <command>b10-loadzone</command> will first create a
+      new empty zone in the data source, then fill it with the RRs
+      given in the specified master zone file.  In this case, if
+      loading fails for some reason, the creation of the new zone
+      is also canceled.
+      <note><simpara>
+	Due to an implementation limitation, the current version
+	does not make the zone creation and subsequent loading an
+	atomic operation; an empty zone will be visible and used by
+	other applications (e.g., the <command>b10-auth</command>
+	authoritative server) while loading.  If this is an issue,
+	make sure the initial loading of a new zone is done before
+	starting other BIND 10 applications.
+      </simpara></note>
+    </para>
+
+    <para>
       When re-loading an existing zone, the prior version is completely
       removed.  While the new version of the zone is being loaded, the old
       version remains accessible to queries.  After the new version is
       completely loaded, the old version is swapped out and replaced
-      with the new one in a single operation.
+      with the new one in a single operation.  If loading fails for
+      some reason, the loaded RRs will be effectively deleted, and the
+      old version will still remain accessible for other applications.
     </para>
 
   </refsect1>
@@ -88,21 +110,82 @@
     <title>ARGUMENTS</title>
 
     <variablelist>
+      <varlistentry>
+        <term>-c <replaceable class="parameter">datasrc_config</replaceable></term>
+        <listitem><para>
+          Specifies configuration of the data source in the JSON
+          format.  The configuration contents depend on the type of
+          the data source, and are the same as what would be
+	  specified for the BIND 10 servers (see the data source
+          configuration section of the BIND 10 guide).  For example,
+	  for an SQLite3 data source, it would look like
+	  '{"database_file": "path-to-sqlite3-db-file"}'.
+	  <note>
+	    <simpara>For SQLite3 data source with the default DB file,
+	      this option can be omitted; in other cases including
+	      for any other types of data sources when supported,
+	      this option is currently mandatory in practice.
+	      In a future version it will be possible to retrieve the
+	      configuration from the BIND 10 server configuration (if
+	      it exists).
+	  </simpara></note>
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>-d <replaceable class="parameter">debug_level</replaceable> </term>
+        <listitem><para>
+	    Enable debug logging at the specified debug level.
+	    By default, only log messages at the informational severity
+	    level or higher will be produced.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>-i <replaceable class="parameter">report_interval</replaceable></term>
+        <listitem><para>
+          Specifies the interval of status updates, in terms of the number
+	  of RRs loaded in that interval.
+	  The <command>b10-loadzone</command> tool periodically
+          reports the progress of loading with the total number of
+          loaded RRs and elapsed time.  This option specifies the
+	  interval of the reports.  If set to 0, status reports will
+          be suppressed.  The default is 10,000.
+        </para></listitem>
+      </varlistentry>
 
       <varlistentry>
-        <term>-d <replaceable class="parameter">database</replaceable> </term>
+        <term>-t <replaceable class="parameter">datasrc_type</replaceable></term>
         <listitem><para>
-          Defines the filename for the database.
-	  The default is
-	  <filename>/usr/local/var/bind10-devel/zone.sqlite3</filename>.
-<!-- TODO: fix filename -->
+          Specifies the type of data source in which to store the zone.
+	  Currently, only the "sqlite3" type (i.e., the SQLite3 data
+          source) is supported, and it is the default of this option.
         </para></listitem>
       </varlistentry>
 
       <varlistentry>
-        <term>-o <replaceable class="parameter">origin</replaceable></term>
+        <term>-C <replaceable class="parameter">zone_class</replaceable></term>
         <listitem><para>
-          Defines the default origin for the zone file records.
+          Specifies the RR class of the zone.
+	  Currently, only class IN is supported (which is the default
+          of this option) due to a limitation of the underlying data
+          source implementation.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><replaceable class="parameter">zone name</replaceable></term>
+        <listitem><para>
+          The name of the zone to create or update.  This must be a valid DNS
+	  domain name.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><replaceable class="parameter">zone file</replaceable></term>
+        <listitem><para>
+          A path to the master zone file to be loaded.
         </para></listitem>
       </varlistentry>
 
@@ -131,8 +214,31 @@
   <refsect1>
     <title>AUTHORS</title>
     <para>
-      The <command>b10-loadzone</command> tool was initial written
-      by Evan Hunt of ISC.
+      A prior version of the <command>b10-loadzone</command> tool was
+      written by Evan Hunt of ISC.
+      The new version that this manual refers to was rewritten from
+      scratch by the BIND 10 development team around December 2012.
+    </para>
+  </refsect1>
+
+  <refsect1>
+    <title>BUGS</title>
+    <para>
+      As of the initial implementation, the underlying library that
+      this tool uses does not fully validate the loaded zone; for
+      example, loading will succeed even if it doesn't have the SOA or
+      NS record at its origin name.  Such checks will be implemented
+      in a near-future version, but until then,
+      <command>b10-loadzone</command> checks for the existence of the
+      SOA and NS records by itself.  However, <command>b10-loadzone</command>
+      only warns about them, and does not cancel the load itself.
+      If this warning message is produced, it is the user's
+      responsibility to fix the errors and reload the zone.  When the
+      library is updated with the post-load checks, it will be more
+      sophisticated and such a zone won't be successfully loaded.
+    </para>
+    <para>
+      There are some other issues noted in the DESCRIPTION section.
     </para>
   </refsect1>
 </refentry><!--
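
For illustration, an invocation following the synopsis above might look as follows; the database path is a placeholder, and with the default SQLite3 database file the -c option could be omitted as noted in its description:

    $ b10-loadzone -c '{"database_file": "/path/to/zone.sqlite3"}' \
          example.org example.org.zone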

+ 342 - 0
src/bin/loadzone/loadzone.py.in

@@ -0,0 +1,342 @@
+#!@PYTHON@
+
+# Copyright (C) 2012  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import sys
+sys.path.append('@@PYTHONPATH@@')
+import time
+import signal
+from optparse import OptionParser
+from isc.dns import *
+from isc.datasrc import *
+import isc.util.process
+import isc.log
+from isc.log_messages.loadzone_messages import *
+
+isc.util.process.rename()
+
+# These are needed for logger settings
+import bind10_config
+import json
+from isc.config import module_spec_from_file
+from isc.config.ccsession import path_search
+
+isc.log.init("b10-loadzone")
+logger = isc.log.Logger("loadzone")
+
+# The default value for the interval of progress report in terms of the
+# number of RRs loaded in that interval.  Arbitrary choice, but intended to
+# be reasonably small to handle emergency exit.
+LOAD_INTERVAL_DEFAULT = 10000
+
+class BadArgument(Exception):
+    '''An exception indicating an error in command line argument.
+
+    '''
+    pass
+
+class LoadFailure(Exception):
+    '''An exception indicating failure in loading operation.
+
+    '''
+    pass
+
+def set_cmd_options(parser):
+    '''Helper function to set command-line options.
+
+    '''
+    parser.add_option("-c", "--datasrc-conf", dest="conf", action="store",
+                      help="""configuration of datasrc to load the zone in.
+Example: '{"database_file": "/path/to/dbfile/db.sqlite3"}'""",
+                      metavar='CONFIG')
+    parser.add_option("-d", "--debug", dest="debug_level",
+                      type='int', action="store", default=None,
+                      help="enable debug logs with the specified level [0-99]")
+    parser.add_option("-i", "--report-interval", dest="report_interval",
+                      type='int', action="store",
+                      default=LOAD_INTERVAL_DEFAULT,
+                      help="""report progress per specified number of RRs
+(specify 0 to suppress report) [default: %default]""")
+    parser.add_option("-t", "--datasrc-type", dest="datasrc_type",
+                      action="store", default='sqlite3',
+                      help="""type of data source (e.g., 'sqlite3')\n
+[default: %default]""")
+    parser.add_option("-C", "--class", dest="zone_class", action="store",
+                      default='IN',
+                      help="""RR class of the zone; currently must be 'IN'
+[default: %default]""")
+
+class LoadZoneRunner:
+    '''Main logic for the loadzone.
+
+    This is implemented as a class mainly for the convenience of tests.
+
+    '''
+    def __init__(self, command_args):
+        self.__command_args = command_args
+        self.__loaded_rrs = 0
+        self.__interrupted = False # will be set to True on receiving signal
+
+        # system-wide log configuration.  We need to configure logging this
+        # way so that the logging policy applies to underlying libraries, too.
+        self.__log_spec = json.dumps(isc.config.module_spec_from_file(
+                path_search('logging.spec', bind10_config.PLUGIN_PATHS)).
+                                     get_full_spec())
+        # "severity" and "debuglevel" are the tunable parameters, which will
+        # be set in _config_log().
+        self.__log_conf_base = {"loggers":
+                                    [{"name": "*",
+                                      "output_options":
+                                          [{"output": "stderr",
+                                            "destination": "console"}]}]}
+
+        # These are essentially private, and defined as "protected" for the
+        # convenience of tests inspecting them
+        self._zone_class = None
+        self._zone_name = None
+        self._zone_file = None
+        self._datasrc_config = None
+        self._datasrc_type = None
+        self._log_severity = 'INFO'
+        self._log_debuglevel = 0
+        self._report_interval = LOAD_INTERVAL_DEFAULT
+
+        self._config_log()
+
+    def _config_log(self):
+        '''Configure logging policy.
+
+        This is essentially private, but defined as "protected" for tests.
+
+        '''
+        self.__log_conf_base['loggers'][0]['severity'] = self._log_severity
+        self.__log_conf_base['loggers'][0]['debuglevel'] = self._log_debuglevel
+        isc.log.log_config_update(json.dumps(self.__log_conf_base),
+                                  self.__log_spec)
+
+    def _parse_args(self):
+        '''Parse command line options and other arguments.
+
+        This is essentially private, but defined as "protected" for tests.
+
+        '''
+
+        usage_txt = \
+            'usage: %prog [options] -c datasrc_config zonename zonefile'
+        parser = OptionParser(usage=usage_txt)
+        set_cmd_options(parser)
+        (options, args) = parser.parse_args(args=self.__command_args)
+
+        # Configure logging policy as early as possible
+        if options.debug_level is not None:
+            self._log_severity = 'DEBUG'
+            # optparse performs type check
+            self._log_debuglevel = int(options.debug_level)
+            if self._log_debuglevel < 0:
+                raise BadArgument(
+                    'Invalid debug level (must be non negative): %d' %
+                    self._log_debuglevel)
+        self._config_log()
+
+        self._datasrc_type = options.datasrc_type
+        self._datasrc_config = options.conf
+        if options.conf is None:
+            self._datasrc_config = self._get_datasrc_config(self._datasrc_type)
+        try:
+            self._zone_class = RRClass(options.zone_class)
+        except isc.dns.InvalidRRClass as ex:
+            raise BadArgument('Invalid zone class: ' + str(ex))
+        if self._zone_class != RRClass.IN():
+            raise BadArgument("RR class is not supported: " +
+                              str(self._zone_class))
+
+        self._report_interval = int(options.report_interval)
+        if self._report_interval < 0:
+            raise BadArgument(
+                'Invalid report interval (must be non negative): %d' %
+                self._report_interval)
+
+        if len(args) != 2:
+            raise BadArgument('Unexpected number of arguments: %d (must be 2)'
+                              % (len(args)))
+        try:
+            self._zone_name = Name(args[0])
+        except Exception as ex: # too broad, but there's no better granularity
+            raise BadArgument("Invalid zone name '" + args[0] + "': " +
+                              str(ex))
+        self._zone_file = args[1]
+
+    def _get_datasrc_config(self, datasrc_type):
+        '''Return the default data source configuration of the given type.
+
+        Right now, it only supports SQLite3, and hardcodes the syntax
+        of the default configuration.  It's a kind of workaround to balance
+        convenience of users and minimizing hardcoding of data source
+        specific logic in the entire tool.  In the future this should be
+        more sophisticated.
+
+        This is essentially a private helper method for _parse_args(),
+        but defined as "protected" so tests can use it directly.
+
+        '''
+        if datasrc_type != 'sqlite3':
+            raise BadArgument('default config is not available for ' +
+                              datasrc_type)
+
+        default_db_file = bind10_config.DATA_PATH + '/zone.sqlite3'
+        logger.info(LOADZONE_SQLITE3_USING_DEFAULT_CONFIG, default_db_file)
+        return '{"database_file": "' + default_db_file + '"}'
+
+    def __cancel_create(self):
+        '''sqlite3-only hack: delete the zone just created on load failure.
+
+        This should eventually be done via generic datasrc API, but right now
+        we don't have that interface.  Leaving the zone in this situation
+        is too bad, so we handle it with a workaround.
+
+        '''
+        if self._datasrc_type != 'sqlite3':
+            return
+
+        import sqlite3          # we need the module only here
+        import json
+
+        # If we are here, the following should basically succeed; since
+        # this is considered a temporary workaround we don't bother to catch
+        # and recover rare failure cases.
+        dbfile = json.loads(self._datasrc_config)['database_file']
+        with sqlite3.connect(dbfile) as conn:
+            cur = conn.cursor()
+            cur.execute("DELETE FROM zones WHERE name = ?",
+                        [self._zone_name.to_text()])
+
+    def _report_progress(self, loaded_rrs):
+        '''Dump the current progress report to stdout.
+
+        This is essentially private, but defined as "protected" for tests.
+
+        '''
+        elapsed = time.time() - self.__start_time
+        sys.stdout.write("\r" + (80 * " "))
+        sys.stdout.write("\r%d RRs loaded in %.2f seconds" %
+                         (loaded_rrs, elapsed))
+
+    def _do_load(self):
+        '''Main part of the load logic.
+
+        This is essentially private, but defined as "protected" for tests.
+
+        '''
+        created = False
+        try:
+            datasrc_client = DataSourceClient(self._datasrc_type,
+                                              self._datasrc_config)
+            created = datasrc_client.create_zone(self._zone_name)
+            if created:
+                logger.info(LOADZONE_ZONE_CREATED, self._zone_name,
+                            self._zone_class)
+            else:
+                logger.info(LOADZONE_ZONE_UPDATING, self._zone_name,
+                            self._zone_class)
+            loader = ZoneLoader(datasrc_client, self._zone_name,
+                                self._zone_file)
+            self.__start_time = time.time()
+            if self._report_interval > 0:
+                limit = self._report_interval
+            else:
+                # Even if progress report is suppressed, we still load
+                # incrementally so we won't delay catching signals too long.
+                limit = LOAD_INTERVAL_DEFAULT
+            while (not self.__interrupted and
+                   not loader.load_incremental(limit)):
+                self.__loaded_rrs += self._report_interval
+                if self._report_interval > 0:
+                    self._report_progress(self.__loaded_rrs)
+            if self.__interrupted:
+                raise LoadFailure('loading interrupted by signal')
+
+            # On successful completion, add final '\n' to the progress
+            # report output (on failure don't bother to make it prettier).
+            if (self._report_interval > 0 and
+                self.__loaded_rrs >= self._report_interval):
+                sys.stdout.write('\n')
+        except Exception as ex:
+            # release any remaining lock held in the client/loader
+            loader, datasrc_client = None, None
+            if created:
+                self.__cancel_create()
+                logger.error(LOADZONE_CANCEL_CREATE_ZONE, self._zone_name,
+                             self._zone_class)
+            raise LoadFailure(str(ex))
+
+    def _post_load_checks(self):
+        '''Perform minimal validity checks on the loaded zone.
+
+        We do this ourselves because the underlying library currently
+        doesn't do any checks.  Once the library supports post-load
+        validation, this check should be removed.
+
+        '''
+        datasrc_client = DataSourceClient(self._datasrc_type,
+                                          self._datasrc_config)
+        _, finder = datasrc_client.find_zone(self._zone_name) # should succeed
+        result = finder.find(self._zone_name, RRType.SOA())[0]
+        if result != finder.SUCCESS:
+            self._post_load_warning('zone has no SOA')
+        result = finder.find(self._zone_name, RRType.NS())[0]
+        if result != finder.SUCCESS:
+            self._post_load_warning('zone has no NS')
+
+    def _post_load_warning(self, msg):
+        logger.warn(LOADZONE_POSTLOAD_ISSUE, self._zone_name,
+                    self._zone_class, msg)
+
+    def _set_signal_handlers(self):
+        signal.signal(signal.SIGINT, self._interrupt_handler)
+        signal.signal(signal.SIGTERM, self._interrupt_handler)
+
+    def _interrupt_handler(self, signal=None, frame=None):
+        self.__interrupted = True
+
+    def run(self):
+        '''Top-level method, simply calling other helpers'''
+
+        try:
+            self._set_signal_handlers()
+            self._parse_args()
+            self._do_load()
+            total_elapsed_txt = "%.2f" % (time.time() - self.__start_time)
+            logger.info(LOADZONE_DONE, self.__loaded_rrs, self._zone_name,
+                        self._zone_class, total_elapsed_txt)
+            self._post_load_checks()
+            return 0
+        except BadArgument as ex:
+            logger.error(LOADZONE_ARGUMENT_ERROR, ex)
+        except LoadFailure as ex:
+            logger.error(LOADZONE_LOAD_ERROR, self._zone_name,
+                         self._zone_class, ex)
+        except Exception as ex:
+            logger.error(LOADZONE_UNEXPECTED_FAILURE, ex)
+        return 1
+
+if '__main__' == __name__:
+    runner = LoadZoneRunner(sys.argv[1:])
+    ret = runner.run()
+    sys.exit(ret)
+
+## Local Variables:
+## mode: python
+## End:
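
For reference, the loading logic above boils down to creating the zone if
needed and then driving ZoneLoader.load_incremental() in fixed-size chunks.
A minimal standalone sketch of the same pattern (assuming the isc.datasrc and
isc.dns Python bindings are importable; load_in_chunks and its arguments are
illustrative, not part of the tool):

    from isc.datasrc import DataSourceClient, ZoneLoader
    from isc.dns import Name

    def load_in_chunks(db_file, zone_name, zone_file, chunk=10000):
        # Open the sqlite3 data source; create_zone() returns False (and does
        # nothing) if the zone already exists.
        client = DataSourceClient('sqlite3',
                                  '{"database_file": "' + db_file + '"}')
        client.create_zone(Name(zone_name))
        loader = ZoneLoader(client, Name(zone_name), zone_file)
        loaded = 0
        # load_incremental() returns False until the whole file is committed.
        while not loader.load_incremental(chunk):
            loaded += chunk
            print('\r%d RRs loaded (at least)' % loaded, end='')
        print()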

+ 81 - 0
src/bin/loadzone/loadzone_messages.mes

@@ -0,0 +1,81 @@
+# Copyright (C) 2012  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# When you add a message to this file, it is a good idea to run
+# <topsrcdir>/tools/reorder_message_file.py to make sure the
+# messages are in the correct order.
+
+% LOADZONE_ARGUMENT_ERROR Error in command line arguments: %1
+A semantic error was detected in the command line arguments or options
+given to b10-loadzone.  In this case b10-loadzone does nothing and
+terminates immediately.
+
+% LOADZONE_CANCEL_CREATE_ZONE Creation of new zone %1/%2 was canceled
+b10-loadzone has created a new zone in the data source (see
+LOADZONE_ZONE_CREATED), but the loading operation has subsequently
+failed.  The newly created zone has been removed from the data source,
+so the data source goes back to its original state.
+
+% LOADZONE_DONE Loaded (at least) %1 RRs into zone %2/%3 in %4 seconds
+b10-loadzone has successfully loaded the specified zone.  If there was
+an old version of the zone in the data source, it is now deleted.
+The message also shows (a lower bound of) the number of RRs that have
+been loaded and the time spent on the load.  Due to a limitation of the
+current implementation of the underlying library, it cannot show the
+exact number of loaded RRs; the count is only updated for every N-th RR,
+where N is the value of the -i command line option (10000 by default).
+So, for smaller zones that don't even contain N RRs (for example, a
+2500-RR zone loaded with the default interval), the reported value will
+be 0.  This will be improved in a future version.
+
+% LOADZONE_LOAD_ERROR Failed to load zone %1/%2: %3
+Loading a zone with b10-loadzone failed partway through.  This is most
+likely due to an error in the arguments given to b10-loadzone (such as a
+non-existent zone file) or an error in the zone file itself.  When this
+happens, the RRs loaded so far are effectively discarded, and the old
+version of the zone (if it exists) remains valid for operations.
+
+% LOADZONE_POSTLOAD_ISSUE New version of zone %1/%2 has an issue: %3
+b10-loadzone detected a problem after a successful load of the zone:
+the SOA record, the NS records, or both are missing at the zone origin.
+In the current implementation the load is not canceled for such
+problems.  The operator will need to fix the issue and reload the
+zone; otherwise applications (such as b10-auth) that use this data
+source will not work as expected.
+
+% LOADZONE_SQLITE3_USING_DEFAULT_CONFIG Using default configuration with SQLite3 DB file %1
+The SQLite3 data source was specified as the data source type, but no
+data source configuration was given.  b10-loadzone uses the default
+configuration with the default DB file for the BIND 10 system.
+
+% LOADZONE_UNEXPECTED_FAILURE Unexpected exception: %1
+b10-loadzone encountered an unexpected failure and terminated.
+This generally indicates a bug in b10-loadzone itself or in the
+underlying data source library, so it's advisable to submit a bug
+report if this message is logged.  The incomplete load attempt should
+have been cleanly canceled in this case, too.
+
+% LOADZONE_ZONE_CREATED Zone %1/%2 does not exist in the data source, newly created
+The zone that b10-loadzone was asked to load does not exist in the
+specified data source.  b10-loadzone has created a new empty zone
+in the data source.
+
+% LOADZONE_ZONE_UPDATING Started updating zone %1/%2 with removing old data (this can take a while)
+b10-loadzone started loading a new version of the specified zone,
+beginning by removing the current contents of the zone (within a
+transaction, so the removal does not take effect unless the entire
+load completes successfully).  If the old version of the zone is
+large, this can take some time, possibly a few minutes or more,
+without any visible feedback.  This is not a problem as long as the
+b10-loadzone process is running at a moderate load.

+ 9 - 1
src/bin/loadzone/run_loadzone.sh.in

@@ -18,7 +18,7 @@
 PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
 export PYTHON_EXEC
 
-PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs
 export PYTHONPATH
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
@@ -32,5 +32,13 @@ fi
 BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
 export BIND10_MSGQ_SOCKET_FILE
 
+# For bind10_config
+B10_FROM_SOURCE=@abs_top_srcdir@
+export B10_FROM_SOURCE
+
+# For data source loadable modules
+B10_FROM_BUILD=@abs_top_builddir@
+export B10_FROM_BUILD
+
 LOADZONE_PATH=@abs_top_builddir@/src/bin/loadzone
 exec ${LOADZONE_PATH}/b10-loadzone "$@"

File diff suppressed because it is too large
+ 37 - 0
src/bin/loadzone/tests/Makefile.am


+ 4 - 1
src/bin/loadzone/tests/correct/Makefile.am

@@ -26,5 +26,8 @@ endif
 # TODO: maybe use TESTS?
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
-	echo Running test: correct_test.sh 
+	echo Running test: correct_test.sh
+	B10_FROM_SOURCE=$(abs_top_srcdir) \
+	B10_FROM_BUILD=$(abs_top_builddir) \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/loadzone:$(abs_top_builddir)/src/lib/dns/python/.libs \
 	$(LIBRARY_PATH_PLACEHOLDER) $(SHELL) $(abs_builddir)/correct_test.sh

+ 9 - 9
src/bin/loadzone/tests/correct/correct_test.sh.in

@@ -18,7 +18,7 @@
 PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
 export PYTHON_EXEC
 
-PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python:$PYTHONPATH
 export PYTHONPATH
 
 LOADZONE_PATH=@abs_top_builddir@/src/bin/loadzone
@@ -28,28 +28,28 @@ TEST_OUTPUT_PATH=@abs_top_builddir@/src/bin/loadzone//tests/correct
 status=0
 echo "Loadzone include. from include.db file"
 cd ${TEST_FILE_PATH}
-${LOADZONE_PATH}/b10-loadzone -d ${TEST_OUTPUT_PATH}/zone.sqlite3 include.db >> /dev/null
+${LOADZONE_PATH}/b10-loadzone -c '{"database_file": "'${TEST_OUTPUT_PATH}/zone.sqlite3'"}' include. include.db >> /dev/null
 
 echo "loadzone  ttl1. from ttl1.db file"
-${LOADZONE_PATH}/b10-loadzone -d ${TEST_OUTPUT_PATH}/zone.sqlite3 ttl1.db >> /dev/null
+${LOADZONE_PATH}/b10-loadzone -c '{"database_file": "'${TEST_OUTPUT_PATH}/zone.sqlite3'"}' ttl1. ttl1.db >> /dev/null
 
 echo "loadzone ttl2. from ttl2.db file"
-${LOADZONE_PATH}/b10-loadzone -d ${TEST_OUTPUT_PATH}/zone.sqlite3 ttl2.db >> /dev/null
+${LOADZONE_PATH}/b10-loadzone -c '{"database_file": "'${TEST_OUTPUT_PATH}/zone.sqlite3'"}' ttl2. ttl2.db >> /dev/null
 
 echo "loadzone mix1. from mix1.db"
-${LOADZONE_PATH}/b10-loadzone -d ${TEST_OUTPUT_PATH}/zone.sqlite3 mix1.db >> /dev/null
+${LOADZONE_PATH}/b10-loadzone -c '{"database_file": "'${TEST_OUTPUT_PATH}/zone.sqlite3'"}' mix1. mix1.db >> /dev/null
 
 echo "loadzone mix2. from mix2.db"
-${LOADZONE_PATH}/b10-loadzone -d ${TEST_OUTPUT_PATH}/zone.sqlite3 mix2.db >> /dev/null
+${LOADZONE_PATH}/b10-loadzone -c '{"database_file": "'${TEST_OUTPUT_PATH}/zone.sqlite3'"}' mix2. mix2.db >> /dev/null
 
 echo "loadzone ttlext. from ttlext.db"
-${LOADZONE_PATH}/b10-loadzone -d ${TEST_OUTPUT_PATH}/zone.sqlite3 ttlext.db >> /dev/null
+${LOADZONE_PATH}/b10-loadzone -c '{"database_file": "'${TEST_OUTPUT_PATH}/zone.sqlite3'"}' ttlext. ttlext.db >> /dev/null
 
 echo "loadzone example.com. from example.db"
-${LOADZONE_PATH}/b10-loadzone -d ${TEST_OUTPUT_PATH}/zone.sqlite3 example.db >> /dev/null
+${LOADZONE_PATH}/b10-loadzone -c '{"database_file": "'${TEST_OUTPUT_PATH}/zone.sqlite3'"}' example.com. example.db >> /dev/null
 
 echo "loadzone comment.example.com. from comment.db"
-${LOADZONE_PATH}/b10-loadzone -d ${TEST_OUTPUT_PATH}/zone.sqlite3 comment.db >> /dev/null
+${LOADZONE_PATH}/b10-loadzone -c '{"database_file": "'${TEST_OUTPUT_PATH}/zone.sqlite3'"}' comment.example.com. comment.db >> /dev/null
 
 echo "I:test master file \$INCLUDE semantics"
 echo "I:test master file BIND 8 compatibility TTL and \$TTL semantics"

+ 10 - 4
src/bin/loadzone/tests/correct/example.db

@@ -2,11 +2,17 @@
 $ORIGIN example.com.
 $TTL 60
 @    IN SOA   ns1.example.com. hostmaster.example.com. (1 43200 900 1814400 7200)
-     IN     20      NS  ns1
-                    NS  ns2
+; these need #2390
+;     IN     20      NS  ns1
+;                    NS  ns2
+     IN     20      NS  ns1.example.com.
+                    NS  ns2.example.com.
 ns1  IN     30      A   192.168.1.102
-            70      NS  ns3
-     IN             NS  ns4
+; these need #2390
+;            70      NS  ns3
+;     IN             NS  ns4
+            70      NS  ns3.example.com.
+     IN             NS  ns4.example.com.
      10     IN      MX  10  mail.example.com.
 ns2         80      A   1.1.1.1
 ns3  IN             A   2.2.2.2

+ 6 - 2
src/bin/loadzone/tests/correct/include.db

@@ -1,13 +1,17 @@
 $ORIGIN include.   ; initialize origin
 $TTL 300
-@			IN SOA	ns hostmaster (
+; this needs #2500
+;@			IN SOA	ns hostmaster (
+@			IN SOA	ns.include. hostmaster.include. (
 				1        ; serial
 				3600
 				1800
 				1814400
 				3600
 				)
-			NS	ns
+; this needs #2390
+;			NS	ns
+			NS	ns.include.
 
 ns			A	127.0.0.1
 

+ 6 - 2
src/bin/loadzone/tests/correct/mix1.db

@@ -1,12 +1,16 @@
 $ORIGIN mix1.
-@			IN SOA	ns hostmaster (
+; this needs #2500
+;@			IN SOA	ns hostmaster (
+@			IN SOA	ns.mix1. hostmaster.mix1. (
 				1        ; serial
 				3600
 				1800
 				1814400
 				3
 				)
-			NS	ns
+; this needs #2390
+;			NS	ns
+			NS	ns.mix1.
 ns			A	10.53.0.1
 a			TXT	"soa minttl 3"
 b		2	TXT	"explicit ttl 2"

+ 6 - 2
src/bin/loadzone/tests/correct/mix2.db

@@ -1,12 +1,16 @@
 $ORIGIN mix2.
-@		1	IN SOA	ns hostmaster (
+; this needs #2500
+;@		1	IN SOA	ns hostmaster (
+@		1	IN SOA	ns.mix2. hostmaster.mix2. (
 				1        ; serial
 				3600
 				1800
 				1814400
 				3
 				)
-			NS	ns
+; this needs #2390
+;			NS	ns
+			NS	ns.mix2.
 ns			A	10.53.0.1
 a			TXT	"inherited ttl 1"
 $INCLUDE mix2sub1.txt

+ 2 - 2
src/bin/loadzone/tests/correct/mix2sub2.txt

@@ -1,3 +1,3 @@
-f                       TXT     "default  ttl 3"
+f                       TXT     "default ttl 3"
 $TTL 5
-g                       TXT     "default  ttl 5"
+g                       TXT     "default ttl 5"

+ 6 - 2
src/bin/loadzone/tests/correct/ttl1.db

@@ -1,12 +1,16 @@
 $ORIGIN ttl1.
-@			IN SOA	ns hostmaster (
+; this needs #2500
+;@			IN SOA	ns hostmaster (
+@			IN SOA	ns.ttl1. hostmaster.ttl1. (
 				1        ; serial
 				3600
 				1800
 				1814400
 				3
 				)
-			NS	ns
+; this needs #2390
+;			NS	ns
+			NS	ns.ttl1.
 ns			A	10.53.0.1
 a			TXT	"soa minttl 3"
 b		2	TXT	"explicit ttl 2"

+ 6 - 2
src/bin/loadzone/tests/correct/ttl2.db

@@ -1,12 +1,16 @@
 $ORIGIN ttl2.
-@		1	IN SOA	ns hostmaster (
+; this needs #2500
+;@		1	IN SOA	ns hostmaster (
+@		1	IN SOA	ns.ttl2. hostmaster.ttl2. (
 				1        ; serial
 				3600
 				1800
 				1814400
 				3
 				)
-			NS	ns
+; this needs #2390
+;			NS	ns
+			NS	ns.ttl2.
 ns			A	10.53.0.1
 a			TXT	"inherited ttl 1"
 b		2	TXT	"explicit ttl 2"

+ 6 - 2
src/bin/loadzone/tests/correct/ttlext.db

@@ -1,12 +1,16 @@
 $ORIGIN ttlext.
-@			IN SOA	ns hostmaster (
+; this needs #2500
+;@			IN SOA	ns hostmaster (
+@			IN SOA	ns.ttlext. hostmaster.ttlext. (
 				1        ; serial
 				3600
 				1800
 				1814400
 				3
 				)
-			NS	ns
+; this needs #2390
+;			NS	ns
+			NS	ns.ttlext.
 ns			A	10.53.0.1
 a			TXT	"soa minttl 3"
 b		2S	TXT	"explicit ttl 2"

+ 0 - 1
src/bin/loadzone/tests/error/.gitignore

@@ -1 +0,0 @@
-/error_test.sh

+ 0 - 28
src/bin/loadzone/tests/error/Makefile.am

@@ -1,28 +0,0 @@
-EXTRA_DIST = error.known
-EXTRA_DIST += formerr1.db 
-EXTRA_DIST += formerr2.db
-EXTRA_DIST += formerr3.db
-EXTRA_DIST += formerr4.db
-EXTRA_DIST += formerr5.db
-EXTRA_DIST += include.txt
-EXTRA_DIST += keyerror1.db
-EXTRA_DIST += keyerror2.db
-EXTRA_DIST += keyerror3.db
-#EXTRA_DIST += nofilenane.db
-EXTRA_DIST += originerr1.db
-EXTRA_DIST += originerr2.db
-
-noinst_SCRIPTS = error_test.sh
-
-# If necessary (rare cases), explicitly specify paths to dynamic libraries
-# required by loadable python modules.
-LIBRARY_PATH_PLACEHOLDER =
-if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
-endif
-
-# TODO: use TESTS ?
-# test using command-line arguments, so use check-local target instead of TESTS
-check-local:
-	echo Running test: error_test.sh
-	$(LIBRARY_PATH_PLACEHOLDER) $(SHELL) $(abs_builddir)/error_test.sh

+ 0 - 11
src/bin/loadzone/tests/error/error.known

@@ -1,11 +0,0 @@
-Error reading zone file: Cannot parse RR, No $ORIGIN: @ IN SOA ns hostmaster 1 3600 1800 1814400 3600
-Error reading zone file: $ORIGIN is not absolute in record: $ORIGIN com
-Error reading zone file: Cannot parse RR: $TL 300
-Error reading zone file: Cannot parse RR: $OIGIN com.
-Error loading database: Error while loading com.: Cannot parse RR: $INLUDE file.txt
-Error loading database: Error while loading com.: Invalid $include format
-Error loading database: Error while loading com.: Cannot parse RR, No $ORIGIN:  include.txt sub
-Error reading zone file: Invalid TTL: ""
-Error reading zone file: Invalid TTL: "M"
-Error loading database: Error while loading com.: Cannot parse RR: b "no type error!"
-Error reading zone file: Could not open bogusfile

+ 0 - 82
src/bin/loadzone/tests/error/error_test.sh.in

@@ -1,82 +0,0 @@
-#! /bin/sh
-
-# Copyright (C) 2010  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
-export PYTHON_EXEC
-
-PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
-export PYTHONPATH
-
-LOADZONE_PATH=@abs_top_builddir@/src/bin/loadzone
-TEST_OUTPUT_PATH=@abs_top_builddir@/src/bin/loadzone/tests/error
-TEST_FILE_PATH=@abs_top_srcdir@/src/bin/loadzone/tests/error
-
-cd ${LOADZONE_PATH}/tests/error
-
-export LOADZONE_PATH
-status=0
-
-echo "PYTHON PATH: $PYTHONPATH"
-
-echo "Test no \$ORIGIN error in zone file"
-${LOADZONE_PATH}/b10-loadzone -d zone.sqlite3  ${TEST_FILE_PATH}/originerr1.db 1> /dev/null 2> error.out
-${LOADZONE_PATH}/b10-loadzone -d zone.sqlite3  ${TEST_FILE_PATH}/originerr2.db 1> /dev/null 2>> error.out
-
-echo "Test: key word TTL spell error"
-${LOADZONE_PATH}/b10-loadzone -d zone.sqlite3  ${TEST_FILE_PATH}/keyerror1.db 1> /dev/null 2>> error.out
-
-echo "Test: key word ORIGIN spell error"
-${LOADZONE_PATH}/b10-loadzone -d zone.sqlite3  ${TEST_FILE_PATH}/keyerror2.db 1> /dev/null 2>> error.out
-
-echo "Test: key INCLUDE spell error"
-${LOADZONE_PATH}/b10-loadzone -d zone.sqlite3  ${TEST_FILE_PATH}/keyerror3.db 1> /dev/null 2>> error.out
-
-echo "Test: include formal error, miss filename"
-${LOADZONE_PATH}/b10-loadzone -d zone.sqlite3  ${TEST_FILE_PATH}/formerr1.db 1> /dev/null 2>>error.out
-
-echo "Test: include form error, domain is not absolute"
-${LOADZONE_PATH}/b10-loadzone -d zone.sqlite3  ${TEST_FILE_PATH}/formerr2.db 1> /dev/null 2>> error.out
-
-echo "Test: TTL form error, no ttl value"
-${LOADZONE_PATH}/b10-loadzone -d zone.sqlite3  ${TEST_FILE_PATH}/formerr3.db 1> /dev/null 2>> error.out
-
-echo "Test: TTL form error, ttl value error"
-${LOADZONE_PATH}/b10-loadzone -d zone.sqlite3  ${TEST_FILE_PATH}/formerr4.db 1> /dev/null 2>> error.out
-
-echo "Test: rr form error, no type"
-${LOADZONE_PATH}/b10-loadzone -d zone.sqlite3  ${TEST_FILE_PATH}/formerr5.db 1> /dev/null 2>> error.out
-
-echo "Test: zone file is bogus"
-# since bogusfile doesn't exist anyway, we *don't* specify the directory
-${LOADZONE_PATH}/b10-loadzone -d zone.sqlite3  bogusfile 1> /dev/null 2>> error.out
-
-diff error.out ${TEST_FILE_PATH}/error.known || status=1
-
-echo "Clean tmp file."
-rm -f error.out
-rm -f zone.sqlite3
-
-echo "I:exit status:$status"
-echo "-----------------------------------------------------------------------------"
-echo "Ran 11 test files"
-echo ""
-if [ "$status" -eq 1 ];then
-    echo "ERROR"
-else 
-    echo "OK"
-fi
-exit $status

+ 0 - 13
src/bin/loadzone/tests/error/formerr1.db

@@ -1,13 +0,0 @@
-$TTL 300
-$ORIGIN com.
-@			IN SOA	ns hostmaster (
-				1        ; serial
-				3600
-				1800
-				1814400
-				3600
-				)
-			NS	ns
-ns			A	127.0.0.1
-$INCLUDE
-a			A	10.0.0.1

+ 0 - 12
src/bin/loadzone/tests/error/formerr2.db

@@ -1,12 +0,0 @@
-$TTL 300
-com.			IN SOA	ns.com. hostmaster.com. (
-				1        ; serial
-				3600
-				1800
-				1814400
-				3600
-				)
-			NS	ns.example.com.
-ns.com.			A	127.0.0.1
-$INCLUDE include.txt sub
-a.com.			A	10.0.0.1

+ 0 - 12
src/bin/loadzone/tests/error/formerr3.db

@@ -1,12 +0,0 @@
-$TTL 
-$ORIGIN com.
-@			IN SOA	ns hostmaster (
-				1        ; serial
-				3600
-				1800
-				1814400
-				3600
-				)
-			NS	ns
-ns			A	127.0.0.1
-a			A	10.0.0.1

+ 0 - 12
src/bin/loadzone/tests/error/formerr4.db

@@ -1,12 +0,0 @@
-$TTL M
-$ORIGIN com.
-@			IN SOA	ns hostmaster (
-				1        ; serial
-				3600
-				1800
-				1814400
-				3600
-				)
-			NS	ns
-ns			A	127.0.0.1
-a			A	10.0.0.1

+ 0 - 13
src/bin/loadzone/tests/error/formerr5.db

@@ -1,13 +0,0 @@
-$TTL 2M
-$ORIGIN com.
-@			IN SOA	ns hostmaster (
-				1        ; serial
-				3600
-				1800
-				1814400
-				3600
-				)
-			NS	ns
-ns			A	127.0.0.1 ; ip value
-b               "no type error!"
-a			A	10.0.0.1

+ 0 - 1
src/bin/loadzone/tests/error/include.txt

@@ -1 +0,0 @@
-a  300 A 127.0.0.1

+ 0 - 12
src/bin/loadzone/tests/error/keyerror1.db

@@ -1,12 +0,0 @@
-$TL 300
-@ORIGIN com.
-@			IN SOA	ns hostmaster (
-				1        ; serial
-				3600
-				1800
-				1814400
-				3600
-				)
-			NS	ns
-ns			A	127.0.0.1
-a			A	10.0.0.1

+ 0 - 12
src/bin/loadzone/tests/error/keyerror2.db

@@ -1,12 +0,0 @@
-$TTL 300
-$OIGIN com.
-@			IN SOA	ns hostmaster (
-				1        ; serial
-				3600
-				1800
-				1814400
-				3600
-				)
-			NS	ns
-ns			A	127.0.0.1
-a			A	10.0.0.1

+ 0 - 13
src/bin/loadzone/tests/error/keyerror3.db

@@ -1,13 +0,0 @@
-$TTL 300
-$ORIGIN com.
-@			IN SOA	ns hostmaster (
-				1        ; serial
-				3600
-				1800
-				1814400
-				3600
-				)
-			NS	ns
-ns			A	127.0.0.1
-$INLUDE file.txt
-a			A	10.0.0.1

+ 0 - 11
src/bin/loadzone/tests/error/originerr1.db

@@ -1,11 +0,0 @@
-$TTL 300
-@			IN SOA	ns hostmaster (
-				1        ; serial
-				3600
-				1800
-				1814400
-				3600
-				)
-			NS	ns
-ns			A	127.0.0.1
-a			A	10.0.0.1

+ 0 - 12
src/bin/loadzone/tests/error/originerr2.db

@@ -1,12 +0,0 @@
-$TTL 300
-$ORIGIN com
-@			IN SOA	ns hostmaster (
-				1        ; serial
-				3600
-				1800
-				1814400
-				3600
-				)
-			NS	ns
-ns			A	127.0.0.1
-a			A	10.0.0.1

+ 342 - 0
src/bin/loadzone/tests/loadzone_test.py

@@ -0,0 +1,342 @@
+# Copyright (C) 2012  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''Tests for the loadzone module'''
+
+import unittest
+from loadzone import *
+from isc.dns import *
+from isc.datasrc import *
+import isc.log
+import bind10_config
+import os
+import shutil
+
+# Some common test parameters
+TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
+READ_ZONE_DB_FILE = TESTDATA_PATH + "rwtest.sqlite3" # original, to be copied
+LOCAL_TESTDATA_PATH = os.environ['LOCAL_TESTDATA_PATH'] + os.sep
+NEW_ZONE_TXT_FILE = LOCAL_TESTDATA_PATH + "example.org.zone"
+ALT_NEW_ZONE_TXT_FILE = TESTDATA_PATH + "example.com.zone"
+TESTDATA_WRITE_PATH = os.environ['TESTDATA_WRITE_PATH'] + os.sep
+WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "rwtest.sqlite3.copied"
+TEST_ZONE_NAME = Name('example.org')
+DATASRC_CONFIG = '{"database_file": "' + WRITE_ZONE_DB_FILE + '"}'
+
+# before/after SOAs: different in mname and serial
+ORIG_SOA_TXT = 'example.org. 3600 IN SOA ns1.example.org. ' +\
+    'admin.example.org. 1234 3600 1800 2419200 7200\n'
+NEW_SOA_TXT = 'example.org. 3600 IN SOA ns.example.org. ' +\
+    'admin.example.org. 1235 3600 1800 2419200 7200\n'
+# This is the brandnew SOA for a newly created zone
+ALT_NEW_SOA_TXT = 'example.com. 3600 IN SOA ns.example.com. ' +\
+    'admin.example.com. 1234 3600 1800 2419200 7200\n'
+
+class TestLoadZoneRunner(unittest.TestCase):
+    def setUp(self):
+        shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+
+        # default command line arguments
+        self.__args = ['-c', DATASRC_CONFIG, 'example.org', NEW_ZONE_TXT_FILE]
+        self.__runner = LoadZoneRunner(self.__args)
+
+    def tearDown(self):
+        # Delete the used DB file; if some of the tests unexpectedly fail
+        # in the middle of updating the DB, a lock could remain there and
+        # affect the other tests that would otherwise succeed.
+        os.unlink(WRITE_ZONE_DB_FILE)
+
+    def test_init(self):
+        '''
+        Checks initial class attributes
+        '''
+        self.assertIsNone(self.__runner._zone_class)
+        self.assertIsNone(self.__runner._zone_name)
+        self.assertIsNone(self.__runner._zone_file)
+        self.assertIsNone(self.__runner._datasrc_config)
+        self.assertIsNone(self.__runner._datasrc_type)
+        self.assertEqual(10000, self.__runner._report_interval)
+        self.assertEqual('INFO', self.__runner._log_severity)
+        self.assertEqual(0, self.__runner._log_debuglevel)
+
+    def test_parse_args(self):
+        self.__runner._parse_args()
+        self.assertEqual(TEST_ZONE_NAME, self.__runner._zone_name)
+        self.assertEqual(NEW_ZONE_TXT_FILE, self.__runner._zone_file)
+        self.assertEqual(DATASRC_CONFIG, self.__runner._datasrc_config)
+        self.assertEqual('sqlite3', self.__runner._datasrc_type) # default
+        self.assertEqual(10000, self.__runner._report_interval) # default
+        self.assertEqual(RRClass.IN(), self.__runner._zone_class) # default
+        self.assertEqual('INFO', self.__runner._log_severity) # default
+        self.assertEqual(0, self.__runner._log_debuglevel)
+
+    def test_set_loglevel(self):
+        runner = LoadZoneRunner(['-d', '1'] + self.__args)
+        runner._parse_args()
+        self.assertEqual('DEBUG', runner._log_severity)
+        self.assertEqual(1, runner._log_debuglevel)
+
+    def test_parse_bad_args(self):
+        # There must be exactly 2 non-option arguments: zone name and zone file
+        self.assertRaises(BadArgument, LoadZoneRunner([])._parse_args)
+        self.assertRaises(BadArgument, LoadZoneRunner(['example']).
+                          _parse_args)
+        self.assertRaises(BadArgument, LoadZoneRunner(self.__args + ['0']).
+                          _parse_args)
+
+        # Bad zone name
+        args = ['example.org', 'example.zone'] # otherwise valid args
+        self.assertRaises(BadArgument,
+                          LoadZoneRunner(['bad..name', 'example.zone']).
+                          _parse_args)
+
+        # Bad class name
+        self.assertRaises(BadArgument,
+                          LoadZoneRunner(['-C', 'badclass'] + args).
+                          _parse_args)
+        # Unsupported class
+        self.assertRaises(BadArgument,
+                          LoadZoneRunner(['-C', 'CH'] + args)._parse_args)
+
+        # bad debug level
+        self.assertRaises(BadArgument,
+                          LoadZoneRunner(['-d', '-10'] + args)._parse_args)
+
+        # bad report interval
+        self.assertRaises(BadArgument,
+                          LoadZoneRunner(['-i', '-5'] + args)._parse_args)
+
+        # -c cannot be omitted unless it's type sqlite3 (right now)
+        self.assertRaises(BadArgument,
+                          LoadZoneRunner(['-t', 'memory'] + args)._parse_args)
+
+    def test_get_datasrc_config(self):
+        # For sqlite3, we use the config with the well-known DB file.
+        expected_conf = \
+            '{"database_file": "' + bind10_config.DATA_PATH + '/zone.sqlite3"}'
+        self.assertEqual(expected_conf,
+                         self.__runner._get_datasrc_config('sqlite3'))
+
+        # For other types, config must be given by hand for now
+        self.assertRaises(BadArgument, self.__runner._get_datasrc_config,
+                          'memory')
+
+    def __common_load_setup(self):
+        self.__runner._zone_class = RRClass.IN()
+        self.__runner._zone_name = TEST_ZONE_NAME
+        self.__runner._zone_file = NEW_ZONE_TXT_FILE
+        self.__runner._datasrc_type = 'sqlite3'
+        self.__runner._datasrc_config = DATASRC_CONFIG
+        self.__runner._report_interval = 1
+        self.__reports = []
+        self.__runner._report_progress = lambda x: self.__reports.append(x)
+
+    def __check_zone_soa(self, soa_txt, zone_name=TEST_ZONE_NAME):
+        """Check that the given SOA RR exists and matches the expected string
+
+        If soa_txt is None, the zone is expected to be non-existent.
+        Otherwise, if soa_txt is False, the zone should exist but SOA is
+        expected to be missing.
+
+        """
+
+        client = DataSourceClient('sqlite3', DATASRC_CONFIG)
+        result, finder = client.find_zone(zone_name)
+        if soa_txt is None:
+            self.assertEqual(client.NOTFOUND, result)
+            return
+        self.assertEqual(client.SUCCESS, result)
+        result, rrset, _ = finder.find(zone_name, RRType.SOA())
+        if soa_txt:
+            self.assertEqual(finder.SUCCESS, result)
+            self.assertEqual(soa_txt, rrset.to_text())
+        else:
+            self.assertEqual(finder.NXRRSET, result)
+
+    def test_load_update(self):
+        '''Successful case of loading new contents into an existing zone.'''
+        self.__common_load_setup()
+        self.__check_zone_soa(ORIG_SOA_TXT)
+        self.__runner._do_load()
+        # In this test setup every loaded RR will be reported, and there will
+        # be 3 RRs
+        self.assertEqual([1, 2, 3], self.__reports)
+        self.__check_zone_soa(NEW_SOA_TXT)
+
+    def test_load_update_skipped_report(self):
+        '''successful loading, with reports for every 2 RRs'''
+        self.__common_load_setup()
+        self.__runner._report_interval = 2
+        self.__runner._do_load()
+        self.assertEqual([2], self.__reports)
+
+    def test_load_update_no_report(self):
+        '''successful loading, without progress reports'''
+        self.__common_load_setup()
+        self.__runner._report_interval = 0
+        self.__runner._do_load()
+        self.assertEqual([], self.__reports) # no report
+        self.__check_zone_soa(NEW_SOA_TXT)   # but load is completed
+
+    def test_create_and_load(self):
+        '''Successful case of loading contents into a newly created zone.'''
+        self.__common_load_setup()
+        self.__runner._zone_name = Name('example.com')
+        self.__runner._zone_file = ALT_NEW_ZONE_TXT_FILE
+        self.__check_zone_soa(None, zone_name=Name('example.com'))
+        self.__runner._do_load()
+        self.__check_zone_soa(ALT_NEW_SOA_TXT, zone_name=Name('example.com'))
+
+    def test_load_fail_badconfig(self):
+        '''Load attempt fails due to broken datasrc config.'''
+        self.__common_load_setup()
+        self.__runner._datasrc_config = "invalid config"
+        self.__check_zone_soa(ORIG_SOA_TXT)
+        self.assertRaises(LoadFailure, self.__runner._do_load)
+        self.__check_zone_soa(ORIG_SOA_TXT) # no change to the zone
+
+    def test_load_fail_badzone(self):
+        '''Load attempt fails due to broken zone file.'''
+        self.__common_load_setup()
+        self.__runner._zone_file = \
+            LOCAL_TESTDATA_PATH + '/broken-example.org.zone'
+        self.__check_zone_soa(ORIG_SOA_TXT)
+        self.assertRaises(LoadFailure, self.__runner._do_load)
+        self.__check_zone_soa(ORIG_SOA_TXT)
+
+    def test_load_fail_noloader(self):
+        '''Load attempt fails because loading isn't supported'''
+        self.__common_load_setup()
+        self.__runner._datasrc_type = 'memory'
+        self.__runner._datasrc_config = '{"type": "memory"}'
+        self.__check_zone_soa(ORIG_SOA_TXT)
+        self.assertRaises(LoadFailure, self.__runner._do_load)
+        self.__check_zone_soa(ORIG_SOA_TXT)
+
+    def test_load_fail_create_cancel(self):
+        '''Load attempt fails and new creation of zone is canceled'''
+        self.__common_load_setup()
+        self.__runner._zone_name = Name('example.com')
+        self.__runner._zone_file = 'no-such-file'
+        self.__check_zone_soa(None, zone_name=Name('example.com'))
+        self.assertRaises(LoadFailure, self.__runner._do_load)
+        # _do_load() should have once created the zone but then canceled it.
+        self.__check_zone_soa(None, zone_name=Name('example.com'))
+
+    def __common_post_load_setup(self, zone_file):
+        '''Common setup procedure for post load tests.'''
+        # replace the LoadZoneRunner's original _post_load_warning() for
+        # inspection
+        self.__warnings = []
+        self.__runner._post_load_warning = \
+            lambda msg: self.__warnings.append(msg)
+
+        # perform load and invoke checks
+        self.__common_load_setup()
+        self.__runner._zone_file = zone_file
+        self.__check_zone_soa(ORIG_SOA_TXT)
+        self.__runner._do_load()
+        self.__runner._post_load_checks()
+
+    def test_load_post_check_fail_soa(self):
+        '''Load succeeds but the zone has no SOA, which should cause a warning'''
+        self.__common_load_setup()
+        self.__common_post_load_setup(LOCAL_TESTDATA_PATH +
+                                      '/example-nosoa.org.zone')
+        self.__check_zone_soa(False)
+        self.assertEqual(1, len(self.__warnings))
+        self.assertEqual('zone has no SOA', self.__warnings[0])
+
+    def test_load_post_check_fail_ns(self):
+        '''Load succeeds but the zone has no NS, which should cause a warning'''
+        self.__common_load_setup()
+        self.__common_post_load_setup(LOCAL_TESTDATA_PATH +
+                                      '/example-nons.org.zone')
+        self.__check_zone_soa(NEW_SOA_TXT)
+        self.assertEqual(1, len(self.__warnings))
+        self.assertEqual('zone has no NS', self.__warnings[0])
+
+    def __interrupt_progress(self, loaded_rrs):
+        '''A helper emulating a signal in the middle of loading.
+
+        On the second progress report, it internally invokes the signal
+        handler to see if it stops the loading.
+
+        '''
+        self.__reports.append(loaded_rrs)
+        if len(self.__reports) == 2:
+            self.__runner._interrupt_handler()
+
+    def test_load_interrupted(self):
+        '''Load attempt fails due to signal interruption'''
+        self.__common_load_setup()
+        self.__runner._report_progress = lambda x: self.__interrupt_progress(x)
+        # The interrupting _report_progress() will terminate the loading
+        # in the middle.  The number of reports is smaller than in the
+        # uninterrupted case, and the zone won't be changed.
+        self.assertRaises(LoadFailure, self.__runner._do_load)
+        self.assertEqual([1, 2], self.__reports)
+        self.__check_zone_soa(ORIG_SOA_TXT)
+
+    def test_load_interrupted_create_cancel(self):
+        '''Load attempt for a new zone fails due to signal interruption
+
+        It cancels the zone creation.
+
+        '''
+        self.__common_load_setup()
+        self.__runner._report_progress = lambda x: self.__interrupt_progress(x)
+        self.__runner._zone_name = Name('example.com')
+        self.__runner._zone_file = ALT_NEW_ZONE_TXT_FILE
+        self.__check_zone_soa(None, zone_name=Name('example.com'))
+        self.assertRaises(LoadFailure, self.__runner._do_load)
+        self.assertEqual([1, 2], self.__reports)
+        self.__check_zone_soa(None, zone_name=Name('example.com'))
+
+    def test_run_success(self):
+        '''Check for the top-level method.
+
+        Detailed behavior is tested in other tests.  We only check the
+        return value of run() and that the zone is successfully loaded.
+
+        '''
+        self.__check_zone_soa(ORIG_SOA_TXT)
+        self.assertEqual(0, self.__runner.run())
+        self.__check_zone_soa(NEW_SOA_TXT)
+
+    def test_run_fail(self):
+        '''Check for the top-level method, failure case.
+
+        Similar to the success test, but loading will fail, and the
+        return value should be 1.
+
+        '''
+        runner = LoadZoneRunner(['-c', DATASRC_CONFIG, 'example.org',
+                                 LOCAL_TESTDATA_PATH +
+                                 '/broken-example.org.zone'])
+        self.__check_zone_soa(ORIG_SOA_TXT)
+        self.assertEqual(1, runner.run())
+        self.__check_zone_soa(ORIG_SOA_TXT)
+
+if __name__ == "__main__":
+    isc.log.resetUnitTestRootLogger()
+    # Disable the internal logging setup so the test output won't be too
+    # verbose by default.
+    LoadZoneRunner._config_log = lambda x: None
+
+    # Cancel signal handlers so we can stop tests when they hang
+    LoadZoneRunner._set_signal_handlers = lambda x: None
+    unittest.main()

+ 11 - 0
src/bin/loadzone/tests/testdata/broken-example.org.zone

@@ -0,0 +1,11 @@
+example.org.    3600    IN  SOA (
+		ns.example.org.
+		admin.example.org.
+		1235
+		3600		;1H
+		1800		;30M
+		2419200
+		7200)
+example.org.    3600    IN  NS ns.example.org.
+ns.example.org.	3600    IN  A 192.0.2.1
+bad..name.example.org. 3600 IN AAAA 2001:db8::1

+ 10 - 0
src/bin/loadzone/tests/testdata/example-nons.org.zone

@@ -0,0 +1,10 @@
+;; Intentionally missing NS for testing post-load checks
+example.org.    3600    IN  SOA (
+		ns.example.org.
+		admin.example.org.
+		1235
+		3600		;1H
+		1800		;30M
+		2419200
+		7200)
+ns.example.org.	3600    IN  A 192.0.2.1

+ 3 - 0
src/bin/loadzone/tests/testdata/example-nosoa.org.zone

@@ -0,0 +1,3 @@
+;; Intentionally missing SOA for testing post-load checks
+example.org.    3600    IN  NS ns.example.org.
+ns.example.org.	3600    IN  A 192.0.2.1

+ 10 - 0
src/bin/loadzone/tests/testdata/example.org.zone

@@ -0,0 +1,10 @@
+example.org.    3600    IN  SOA (
+		ns.example.org.
+		admin.example.org.
+		1235
+		3600		;1H
+		1800		;30M
+		2419200
+		7200)
+example.org.    3600    IN  NS ns.example.org.
+ns.example.org.	3600    IN  A 192.0.2.1

+ 1 - 1
src/bin/msgq/msgq.xml

@@ -111,7 +111,7 @@
         <listitem><para>
           The UNIX domain socket file this daemon will use.
           The default is
-          <filename>/usr/local/var/bind10-devel/msg_socket</filename>.
+          <filename>/usr/local/var/bind10/msg_socket</filename>.
 <!-- @localstatedir@/@PACKAGE_NAME@/msg_socket -->
           </para></listitem>
       </varlistentry>

+ 3 - 1
src/bin/resolver/b10-resolver.xml

@@ -20,7 +20,7 @@
 <refentry>
 
   <refentryinfo>
-    <date>February 28, 2012</date>
+    <date>August 16, 2012</date>
   </refentryinfo>
 
   <refmeta>
@@ -148,6 +148,8 @@ once that is merged you can for instance do 'config add Resolver/forward_address
       address or special keyword.
       The <varname>key</varname> is a TSIG key name.
       The default configuration accepts queries from 127.0.0.1 and ::1.
+      The default action is REJECT for newly added
+      <varname>query_acl</varname> items.
     </para>
 
     <para>

+ 4 - 4
src/bin/stats/b10-stats-httpd.xml

@@ -103,7 +103,7 @@
   <refsect1>
     <title>FILES</title>
     <para>
-      <filename>/usr/local/share/bind10-devel/stats-httpd.spec</filename>
+      <filename>/usr/local/share/bind10/stats-httpd.spec</filename>
       <!--TODO: The filename should be computed from prefix-->
       &mdash; the spec file of <command>b10-stats-httpd</command>. This file
       contains configurable settings
@@ -115,17 +115,17 @@
       how to configure the settings.
     </para>
     <para>
-      <filename>/usr/local/share/bind10-devel/stats-httpd-xml.tpl</filename>
+      <filename>/usr/local/share/bind10/stats-httpd-xml.tpl</filename>
       <!--TODO: The filename should be computed from prefix-->
       &mdash; the template file of XML document.
     </para>
     <para>
-      <filename>/usr/local/share/bind10-devel/stats-httpd-xsd.tpl</filename>
+      <filename>/usr/local/share/bind10/stats-httpd-xsd.tpl</filename>
       <!--TODO: The filename should be computed from prefix-->
       &mdash; the template file of XSD document.
     </para>
     <para>
-      <filename>/usr/local/share/bind10-devel/stats-httpd-xsl.tpl</filename>
+      <filename>/usr/local/share/bind10/stats-httpd-xsl.tpl</filename>
       <!--TODO: The filename should be computed from prefix-->
       &mdash; the template file of XSL document.
     </para>

+ 1 - 1
src/bin/stats/b10-stats.xml

@@ -210,7 +210,7 @@
 
   <refsect1>
     <title>FILES</title>
-    <para><filename>/usr/local/share/bind10-devel/stats.spec</filename>
+    <para><filename>/usr/local/share/bind10/stats.spec</filename>
       <!--TODO: The filename should be computed from prefix-->
       &mdash; This is a spec file for <command>b10-stats</command>. It
       contains commands for <command>b10-stats</command>. They can be

+ 71 - 3
src/lib/dhcpsrv/alloc_engine.cc

@@ -44,9 +44,10 @@ AllocEngine::IterativeAllocator::increaseAddress(const isc::asiolink::IOAddress&
     // Copy the address. It can be either V4 or V6.
     std::memcpy(packed, &vec[0], len);
 
-    // Increase the address.
+    // Start increasing the least significant byte
     for (int i = len - 1; i >= 0; --i) {
         ++packed[i];
+        // if we haven't overflowed (0xff -> 0x0), then we are done
         if (packed[i] != 0) {
             break;
         }
@@ -198,9 +199,31 @@ AllocEngine::allocateAddress6(const Subnet6Ptr& subnet,
             if (lease) {
                 return (lease);
             }
+        } else {
+            if (existing->expired()) {
+                return (reuseExpiredLease(existing, subnet, duid, iaid,
+                                          fake_allocation));
+            }
+
         }
     }
 
+    // The hint is in the pool but is not available. Search the pool until
+    // the first of the following occurs:
+    // - we find a free address
+    // - we find an address for which the lease has expired
+    // - we exhaust the number of tries
+    //
+    // @todo: Current code does not handle pool exhaustion well. It will be
+    // improved. Current problems:
+    // 1. with attempts set to too large a value (e.g. 1000) and a small pool
+    // (e.g. 10 addresses), we will iterate over it 100 times before giving up
+    // 2. attempts set to 0 means unlimited (this is really UINT_MAX, not
+    // infinite)
+    // 3. the whole concept of unlimited attempts is just asking for an
+    // infinite loop
+    // We may consider some form of reference counting (this pool has X
+    // addresses left), but that has one major problem: we exactly control the
+    // allocation moment, but we currently do not control the expiration time
+    // at all.
+
     unsigned int i = attempts_;
     do {
         IOAddress candidate = allocator_->pickAddress(subnet, duid, hint);
@@ -209,9 +232,9 @@ AllocEngine::allocateAddress6(const Subnet6Ptr& subnet,
         /// implemented
 
         Lease6Ptr existing = LeaseMgrFactory::instance().getLease6(candidate);
-        // there's no existing lease for selected candidate, so it is
-        // free. Let's allocate it.
         if (!existing) {
+            // there's no existing lease for selected candidate, so it is
+            // free. Let's allocate it.
             Lease6Ptr lease = createLease(subnet, duid, iaid, candidate,
                                           fake_allocation);
             if (lease) {
@@ -221,6 +244,11 @@ AllocEngine::allocateAddress6(const Subnet6Ptr& subnet,
             // Although the address was free just microseconds ago, it may have
             // been taken just now. If the lease insertion fails, we continue
             // allocation attempts.
+        } else {
+            if (existing->expired()) {
+                return (reuseExpiredLease(existing, subnet, duid, iaid,
+                                          fake_allocation));
+            }
         }
 
         // continue trying allocation until we run out of attempts
@@ -232,6 +260,46 @@ AllocEngine::allocateAddress6(const Subnet6Ptr& subnet,
               << " tries");
 }
 
+Lease6Ptr AllocEngine::reuseExpiredLease(Lease6Ptr& expired,
+                                         const Subnet6Ptr& subnet,
+                                         const DuidPtr& duid,
+                                         uint32_t iaid,
+                                         bool fake_allocation /*= false */ ) {
+
+    if (!expired->expired()) {
+        isc_throw(BadValue, "Attempt to recycle lease that is still valid");
+    }
+
+    // address, lease type and prefixlen (0) stay the same
+    expired->iaid_ = iaid;
+    expired->duid_ = duid;
+    expired->preferred_lft_ = subnet->getPreferred();
+    expired->valid_lft_ = subnet->getValid();
+    expired->t1_ = subnet->getT1();
+    expired->t2_ = subnet->getT2();
+    expired->cltt_ = time(NULL);
+    expired->subnet_id_ = subnet->getID();
+    expired->fixed_ = false;
+    expired->hostname_ = std::string("");
+    expired->fqdn_fwd_ = false;
+    expired->fqdn_rev_ = false;
+
+    /// @todo: log here that the lease was reused (there's ticket #2524 for
+    /// logging in libdhcpsrv)
+
+    if (!fake_allocation) {
+        // for REQUEST we do update the lease
+        LeaseMgrFactory::instance().updateLease6(expired);
+    }
+
+    // We do nothing for SOLICIT. We'll just update the database when
+    // the client gets back to us with a REQUEST message.
+
+    // it's not really expired at this stage anymore - let's return it as
+    // an updated lease
+    return (expired);
+}
+
 Lease6Ptr AllocEngine::createLease(const Subnet6Ptr& subnet,
                                    const DuidPtr& duid,
                                    uint32_t iaid,
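
The increaseAddress() change near the top of this file increments the packed
address bytes from the least significant byte upward, stopping as soon as a
byte does not wrap around to zero.  A minimal Python sketch of the same carry
logic (the helper name is illustrative, not part of the library):

    def increase_address(packed):
        """Return a big-endian address (4 or 16 bytes) increased by one."""
        packed = bytearray(packed)
        for i in reversed(range(len(packed))):
            packed[i] = (packed[i] + 1) & 0xff
            if packed[i] != 0:  # no wrap-around in this byte, the carry stops
                break
        return bytes(packed)

    # increase_address(bytes([192, 0, 2, 255])) == bytes([192, 0, 3, 0])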

+ 18 - 0
src/lib/dhcpsrv/alloc_engine.h

@@ -216,6 +216,24 @@ private:
                           uint32_t iaid, const isc::asiolink::IOAddress& addr,
                           bool fake_allocation = false);
 
+    /// @brief reuses expired lease
+    ///
+    /// Updates an existing expired lease with new information. The lease
+    /// database is updated only if this is a real allocation (i.e. REQUEST,
+    /// fake_allocation = false), not a dummy allocation request (i.e.
+    /// SOLICIT, fake_allocation = true).
+    ///
+    /// @param expired old, expired lease
+    /// @param subnet subnet the lease is allocated from
+    /// @param duid client's DUID
+    /// @param iaid IAID from the IA_NA container the client sent to us
+    /// @param fake_allocation is this real i.e. REQUEST (false) or just picking
+    ///        an address for SOLICIT that is not really allocated (true)
+    /// @return refreshed lease
+    /// @throw BadValue if trying to recycle lease that is still valid
+    Lease6Ptr reuseExpiredLease(Lease6Ptr& expired, const Subnet6Ptr& subnet,
+                                const DuidPtr& duid, uint32_t iaid,
+                                bool fake_allocation = false);
+
     /// @brief a pointer to currently used allocator
     boost::shared_ptr<Allocator> allocator_;
 

+ 8 - 0
src/lib/dhcpsrv/lease_mgr.cc

@@ -46,6 +46,14 @@ Lease6::Lease6(LeaseType type, const isc::asiolink::IOAddress& addr,
     cltt_ = time(NULL);
 }
 
+bool Lease6::expired() const {
+
+    // Let's use int64 to avoid problems with negative/large uint32 values
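+    // (for example, a valid lifetime of 0xffffffff, which DHCPv6 uses for
+    // "infinity", added to the current time would wrap around in 32 bits)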
+    int64_t expire_time = cltt_ + valid_lft_;
+    return (expire_time < time(NULL));
+}
+
+
 std::string LeaseMgr::getParameter(const std::string& name) const {
     ParameterMap::const_iterator param = parameters_.find(name);
     if (param == parameters_.end()) {

+ 4 - 0
src/lib/dhcpsrv/lease_mgr.h

@@ -379,6 +379,10 @@ struct Lease6 {
     /// @return String form of the lease
     std::string toText();
 
+    /// @brief returns true if the lease is expired
+    /// @return true if the lease is expired
+    bool expired() const;
+
     /// @brief Compare two leases for equality
     ///
     /// @param other lease6 object with which to compare

+ 2 - 3
src/lib/dhcpsrv/memfile_lease_mgr.cc

@@ -20,9 +20,8 @@ using namespace isc::dhcp;
 
 Memfile_LeaseMgr::Memfile_LeaseMgr(const ParameterMap& parameters)
     : LeaseMgr(parameters) {
-    std::cout << "Warning: Using memfile database backend. It is usable for" << std::endl;
-    std::cout << "Warning: limited testing only. File support not implemented yet." << std::endl;
-    std::cout << "Warning: Leases will be lost after restart." << std::endl;
+    std::cout << "Warning: Using memfile database backend. It is usable for limited"
+              << " testing only. Leases will be lost after restart." << std::endl;
 }
 
 Memfile_LeaseMgr::~Memfile_LeaseMgr() {

+ 1 - 0
src/lib/dhcpsrv/tests/Makefile.am

@@ -40,6 +40,7 @@ libdhcpsrv_unittests_SOURCES += pool_unittest.cc
 libdhcpsrv_unittests_SOURCES += schema_copy.h
 libdhcpsrv_unittests_SOURCES += subnet_unittest.cc
 libdhcpsrv_unittests_SOURCES += triplet_unittest.cc
+libdhcpsrv_unittests_SOURCES += test_utils.cc test_utils.h
 
 libdhcpsrv_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES) $(LOG4CPLUS_INCLUDES)
 if HAVE_MYSQL

+ 160 - 24
src/lib/dhcpsrv/tests/alloc_engine_unittest.cc

@@ -22,6 +22,8 @@
 #include <dhcpsrv/lease_mgr_factory.h>
 #include <dhcpsrv/memfile_lease_mgr.h>
 
+#include <dhcpsrv/tests/test_utils.h>
+
 #include <boost/shared_ptr.hpp>
 #include <boost/scoped_ptr.hpp>
 #include <gtest/gtest.h>
@@ -29,11 +31,13 @@
 #include <iostream>
 #include <sstream>
 #include <map>
+#include <time.h>
 
 using namespace std;
 using namespace isc;
 using namespace isc::asiolink;
 using namespace isc::dhcp;
+using namespace isc::dhcp::test;
 
 namespace {
 
@@ -107,26 +111,6 @@ TEST_F(AllocEngineTest, constructor) {
     ASSERT_NO_THROW(x.reset(new AllocEngine(AllocEngine::ALLOC_ITERATIVE, 100)));
 }
 
-/// @todo: This method is taken from mysql_lease_mgr_utilities.cc from ticket
-/// #2342. Get rid of one instance once the code is merged
-void
-detailCompareLease6(const Lease6Ptr& first, const Lease6Ptr& second) {
-    EXPECT_EQ(first->type_, second->type_);
-
-    // Compare address strings - odd things happen when they are different
-    // as the EXPECT_EQ appears to call the operator uint32_t() function,
-    // which causes an exception to be thrown for IPv6 addresses.
-    EXPECT_EQ(first->addr_.toText(), second->addr_.toText());
-    EXPECT_EQ(first->prefixlen_, second->prefixlen_);
-    EXPECT_EQ(first->iaid_, second->iaid_);
-    EXPECT_TRUE(*first->duid_ == *second->duid_);
-    EXPECT_EQ(first->preferred_lft_, second->preferred_lft_);
-    EXPECT_EQ(first->valid_lft_, second->valid_lft_);
-    EXPECT_EQ(first->cltt_, second->cltt_);
-    EXPECT_EQ(first->subnet_id_, second->subnet_id_);
-}
-
-
 // This test checks if the simple allocation can succeed
 TEST_F(AllocEngineTest, simpleAlloc) {
     boost::scoped_ptr<AllocEngine> engine;
@@ -147,7 +131,7 @@ TEST_F(AllocEngineTest, simpleAlloc) {
     ASSERT_TRUE(from_mgr);
 
     // Now check that the lease in LeaseMgr has the same parameters
-    detailCompareLease6(lease, from_mgr);
+    detailCompareLease(lease, from_mgr);
 }
 
 // This test checks if the fake allocation (for SOLICIT) can succeed
@@ -195,7 +179,7 @@ TEST_F(AllocEngineTest, allocWithValidHint) {
     ASSERT_TRUE(from_mgr);
 
     // Now check that the lease in LeaseMgr has the same parameters
-    detailCompareLease6(lease, from_mgr);
+    detailCompareLease(lease, from_mgr);
 }
 
 // This test checks if the allocation with a hint that is in range,
@@ -234,7 +218,7 @@ TEST_F(AllocEngineTest, allocWithUsedHint) {
     ASSERT_TRUE(from_mgr);
 
     // Now check that the lease in LeaseMgr has the same parameters
-    detailCompareLease6(lease, from_mgr);
+    detailCompareLease(lease, from_mgr);
 }
 
 // This test checks if the allocation with a hint that is out the blue
@@ -264,7 +248,7 @@ TEST_F(AllocEngineTest, allocBogusHint) {
     ASSERT_TRUE(from_mgr);
 
     // Now check that the lease in LeaseMgr has the same parameters
-    detailCompareLease6(lease, from_mgr);
+    detailCompareLease(lease, from_mgr);
 }
 
 // This test verifies that the allocator picks addresses that belong to the
@@ -337,4 +321,156 @@ TEST_F(AllocEngineTest, IterativeAllocator_manyPools) {
     delete alloc;
 }
 
+// This test checks if really small pools are working
+TEST_F(AllocEngineTest, smallPool) {
+    boost::scoped_ptr<AllocEngine> engine;
+    ASSERT_NO_THROW(engine.reset(new AllocEngine(AllocEngine::ALLOC_ITERATIVE, 100)));
+    ASSERT_TRUE(engine);
+
+    IOAddress addr("2001:db8:1::ad");
+    CfgMgr& cfg_mgr = CfgMgr::instance();
+    cfg_mgr.deleteSubnets6(); // Get rid of the default test configuration
+
+    // Create configuration similar to other tests, but with a single address pool
+    subnet_ = Subnet6Ptr(new Subnet6(IOAddress("2001:db8:1::"), 56, 1, 2, 3, 4));
+    pool_ = Pool6Ptr(new Pool6(Pool6::TYPE_IA, addr, addr)); // just a single address
+    subnet_->addPool6(pool_);
+    cfg_mgr.addSubnet6(subnet_);
+
+    Lease6Ptr lease = engine->allocateAddress6(subnet_, duid_, iaid_, IOAddress("::"),
+                                               false);
+
+    // Check that we got that single lease
+    ASSERT_TRUE(lease);
+
+    EXPECT_EQ("2001:db8:1::ad", lease->addr_.toText());
+
+    // do all checks on the lease
+    checkLease6(lease);
+
+    // Check that the lease is indeed in LeaseMgr
+    Lease6Ptr from_mgr = LeaseMgrFactory::instance().getLease6(lease->addr_);
+    ASSERT_TRUE(from_mgr);
+
+    // Now check that the lease in LeaseMgr has the same parameters
+    detailCompareLease(lease, from_mgr);
+}
+
+// This test checks that if all addresses in a pool are currently used, the
+// attempt to allocate a new lease fails.
+TEST_F(AllocEngineTest, outOfAddresses) {
+    boost::scoped_ptr<AllocEngine> engine;
+    ASSERT_NO_THROW(engine.reset(new AllocEngine(AllocEngine::ALLOC_ITERATIVE, 100)));
+    ASSERT_TRUE(engine);
+
+    IOAddress addr("2001:db8:1::ad");
+    CfgMgr& cfg_mgr = CfgMgr::instance();
+    cfg_mgr.deleteSubnets6(); // Get rid of the default test configuration
+
+    // Create configuration similar to other tests, but with a single address pool
+    subnet_ = Subnet6Ptr(new Subnet6(IOAddress("2001:db8:1::"), 56, 1, 2, 3, 4));
+    pool_ = Pool6Ptr(new Pool6(Pool6::TYPE_IA, addr, addr)); // just a single address
+    subnet_->addPool6(pool_);
+    cfg_mgr.addSubnet6(subnet_);
+
+    // Just a different duid
+    DuidPtr other_duid = DuidPtr(new DUID(vector<uint8_t>(12, 0xff)));
+    const uint32_t other_iaid = 3568;
+    Lease6Ptr lease(new Lease6(Lease6::LEASE_IA_NA, addr, other_duid, other_iaid,
+                               501, 502, 503, 504, subnet_->getID(), 0));
+    lease->cltt_ = time(NULL) - 10; // Allocated 10 seconds ago
+    ASSERT_TRUE(LeaseMgrFactory::instance().addLease(lease));
+
+    // There is just a single address in the pool and we have allocated it to
+    // someone else, so the allocation should fail.
+
+    EXPECT_THROW(engine->allocateAddress6(subnet_, duid_, iaid_, IOAddress("::"),false),
+                 AllocFailed);
+}
+
+// This test checks if an expired lease can be reused in SOLICIT (fake allocation)
+TEST_F(AllocEngineTest, solicitReuseExpiredLease) {
+    boost::scoped_ptr<AllocEngine> engine;
+    ASSERT_NO_THROW(engine.reset(new AllocEngine(AllocEngine::ALLOC_ITERATIVE, 100)));
+    ASSERT_TRUE(engine);
+
+    IOAddress addr("2001:db8:1::ad");
+    CfgMgr& cfg_mgr = CfgMgr::instance();
+    cfg_mgr.deleteSubnets6(); // Get rid of the default test configuration
+
+    // Create configuration similar to other tests, but with a single address pool
+    subnet_ = Subnet6Ptr(new Subnet6(IOAddress("2001:db8:1::"), 56, 1, 2, 3, 4));
+    pool_ = Pool6Ptr(new Pool6(Pool6::TYPE_IA, addr, addr)); // just a single address
+    subnet_->addPool6(pool_);
+    cfg_mgr.addSubnet6(subnet_);
+
+    // Just a different duid
+    DuidPtr other_duid = DuidPtr(new DUID(vector<uint8_t>(12, 0xff)));
+    const uint32_t other_iaid = 3568;
+    Lease6Ptr lease(new Lease6(Lease6::LEASE_IA_NA, addr, other_duid, other_iaid,
+                               501, 502, 503, 504, subnet_->getID(), 0));
+    lease->cltt_ = time(NULL) - 500; // Allocated 500 seconds ago
+    lease->valid_lft_ = 495; // Lease was valid for 495 seconds
+    ASSERT_TRUE(LeaseMgrFactory::instance().addLease(lease));
+
+    // CASE 1: Asking for any address
+    lease = engine->allocateAddress6(subnet_, duid_, iaid_, IOAddress("::"),
+                                     true);
+    // Check that we got that single lease
+    ASSERT_TRUE(lease);
+    EXPECT_EQ(addr.toText(), lease->addr_.toText());
+
+    // Do all checks on the lease (subnet-id, preferred/valid lifetimes, etc.)
+    checkLease6(lease);
+
+    // CASE 2: Asking specifically for this address
+    lease = engine->allocateAddress6(subnet_, duid_, iaid_, IOAddress(addr.toText()),
+                                     true);
+    // Check that we got that single lease
+    ASSERT_TRUE(lease);
+    EXPECT_EQ(addr.toText(), lease->addr_.toText());
+}
+
+// This test checks if an expired lease can be reused in REQUEST (actual allocation)
+TEST_F(AllocEngineTest, requestReuseExpiredLease) {
+    boost::scoped_ptr<AllocEngine> engine;
+    ASSERT_NO_THROW(engine.reset(new AllocEngine(AllocEngine::ALLOC_ITERATIVE, 100)));
+    ASSERT_TRUE(engine);
+
+    IOAddress addr("2001:db8:1::ad");
+    CfgMgr& cfg_mgr = CfgMgr::instance();
+    cfg_mgr.deleteSubnets6(); // Get rid of the default test configuration
+
+    // Create configuration similar to other tests, but with a single address pool
+    subnet_ = Subnet6Ptr(new Subnet6(IOAddress("2001:db8:1::"), 56, 1, 2, 3, 4));
+    pool_ = Pool6Ptr(new Pool6(Pool6::TYPE_IA, addr, addr)); // just a single address
+    subnet_->addPool6(pool_);
+    cfg_mgr.addSubnet6(subnet_);
+
+    // Let's create an expired lease
+    DuidPtr other_duid = DuidPtr(new DUID(vector<uint8_t>(12, 0xff)));
+    const uint32_t other_iaid = 3568;
+    const SubnetID other_subnetid = 999;
+    Lease6Ptr lease(new Lease6(Lease6::LEASE_IA_NA, addr, other_duid, other_iaid,
+                               501, 502, 503, 504, other_subnetid, 0));
+    lease->cltt_ = time(NULL) - 500; // Allocated 500 seconds ago
+    lease->valid_lft_ = 495; // Lease was valid for 495 seconds
+    ASSERT_TRUE(LeaseMgrFactory::instance().addLease(lease));
+
+    // A client comes along, asking specifically for this address
+    lease = engine->allocateAddress6(subnet_, duid_, iaid_,
+                                     IOAddress(addr.toText()), false);
+
+    // Check that the client got that single lease
+    ASSERT_TRUE(lease);
+    EXPECT_EQ(addr.toText(), lease->addr_.toText());
+
+    // Check that the lease is indeed updated in LeaseMgr
+    Lease6Ptr from_mgr = LeaseMgrFactory::instance().getLease6(addr);
+    ASSERT_TRUE(from_mgr);
+
+    // Now check that the lease in LeaseMgr has the same parameters
+    detailCompareLease(lease, from_mgr);
+}
+
 }; // end of anonymous namespace
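Note on the API exercised above: judging by these tests, the last boolean argument of allocateAddress6() selects between a fake allocation (SOLICIT: the engine only picks and offers an address) and an actual allocation (REQUEST: the resulting lease is stored or updated in the LeaseMgr). A minimal usage sketch, with placeholder variable names:

    // SOLICIT processing: fake allocation, nothing is committed to the lease database.
    Lease6Ptr offered = engine->allocateAddress6(subnet, duid, iaid, hint, true);

    // REQUEST processing: actual allocation, the resulting lease is stored in the LeaseMgr.
    Lease6Ptr granted = engine->allocateAddress6(subnet, duid, iaid, hint, false);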

+ 26 - 1
src/lib/dhcpsrv/tests/lease_mgr_unittest.cc

@@ -258,7 +258,7 @@ TEST(Lease4, Lease4Constructor) {
     // ...and a time
     const time_t current_time = time(NULL);
 
-    // Other random constants. 
+    // Other random constants.
     const uint32_t SUBNET_ID = 42;
     const uint32_t VALID_LIFETIME = 500;
 
@@ -605,4 +605,29 @@ TEST(Lease6, OperatorEquals) {
     EXPECT_TRUE(lease1 == lease2);  // Check that the reversion has made the
     EXPECT_FALSE(lease1 != lease2); // ... leases equal
 }
+
+// Checks if lease expiration is calculated properly
+TEST(Lease6, Lease6Expired) {
+    const IOAddress addr("2001:db8:1::456");
+    const uint8_t duid_array[] = {0, 1, 2, 3, 4, 5, 6, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
+    const DuidPtr duid(new DUID(duid_array, sizeof(duid_array)));
+    const uint32_t iaid = 7; // just a number
+    const SubnetID subnet_id = 8; // just another number
+    Lease6 lease(Lease6::LEASE_IA_NA, addr, duid, iaid, 100, 200, 50, 80,
+                               subnet_id);
+
+    // case 1: a second before expiration
+    lease.cltt_ = time(NULL) - 100;
+    lease.valid_lft_ = 101;
+    EXPECT_FALSE(lease.expired());
+
+    // case 2: the lease will expire after this second is concluded
+    lease.cltt_ = time(NULL) - 101;
+    EXPECT_FALSE(lease.expired());
+
+    // case 3: the lease is expired
+    lease.cltt_ = time(NULL) - 102;
+    EXPECT_TRUE(lease.expired());
+}
+
 }; // end of anonymous namespace
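The three cases above pin down the expected semantics: a lease is expired only once cltt_ plus valid_lft_ lies strictly in the past. A sketch of that predicate, consistent with the test but not necessarily the library's literal implementation:

    #include <ctime>

    bool Lease6::expired() const {
        // Expiration instant is the client's last transmission time plus the
        // valid lifetime; the lease is expired once that instant has passed.
        return (cltt_ + valid_lft_ < time(NULL));
    }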

+ 2 - 42
src/lib/dhcpsrv/tests/mysql_lease_mgr_unittest.cc

@@ -17,6 +17,7 @@
 #include <asiolink/io_address.h>
 #include <dhcpsrv/lease_mgr_factory.h>
 #include <dhcpsrv/mysql_lease_mgr.h>
+#include <dhcpsrv/tests/test_utils.h>
 
 #include <gtest/gtest.h>
 
@@ -29,6 +30,7 @@
 using namespace isc;
 using namespace isc::asiolink;
 using namespace isc::dhcp;
+using namespace isc::dhcp::test;
 using namespace std;
 
 namespace {
@@ -537,48 +539,6 @@ public:
     vector<IOAddress> ioaddress6_;  ///< IOAddress forms of IPv6 addresses
 };
 
-///@{
-/// @brief Test Utilities
-///
-/// The follow are a set of functions used during the tests.
-
-/// @brief Compare two Lease4 structures for equality
-void
-detailCompareLease(const Lease4Ptr& first, const Lease4Ptr& second) {
-    // Compare address strings.  Comparison of address objects is not used, as
-    // odd things happen when they are different: the EXPECT_EQ macro appears to
-    // call the operator uint32_t() function, which causes an exception to be
-    // thrown for IPv6 addresses.
-    EXPECT_EQ(first->addr_.toText(), second->addr_.toText());
-    EXPECT_TRUE(first->hwaddr_ == second->hwaddr_);
-    EXPECT_TRUE(*first->client_id_ == *second->client_id_);
-    EXPECT_EQ(first->valid_lft_, second->valid_lft_);
-    EXPECT_EQ(first->cltt_, second->cltt_);
-    EXPECT_EQ(first->subnet_id_, second->subnet_id_);
-}
-
-/// @brief Compare two Lease6 structures for equality
-void
-detailCompareLease(const Lease6Ptr& first, const Lease6Ptr& second) {
-    EXPECT_EQ(first->type_, second->type_);
-
-    // Compare address strings.  Comparison of address objects is not used, as
-    // odd things happen when they are different: the EXPECT_EQ macro appears to
-    // call the operator uint32_t() function, which causes an exception to be
-    // thrown for IPv6 addresses.
-    EXPECT_EQ(first->addr_.toText(), second->addr_.toText());
-    EXPECT_EQ(first->prefixlen_, second->prefixlen_);
-    EXPECT_EQ(first->iaid_, second->iaid_);
-    EXPECT_TRUE(*first->duid_ == *second->duid_);
-    EXPECT_EQ(first->preferred_lft_, second->preferred_lft_);
-    EXPECT_EQ(first->valid_lft_, second->valid_lft_);
-    EXPECT_EQ(first->cltt_, second->cltt_);
-    EXPECT_EQ(first->subnet_id_, second->subnet_id_);
-}
-
-///@}
-
-
 /// @brief Check that database can be opened
 ///
 /// This test checks if the MySqlLeaseMgr can be instantiated.  This happens

+ 58 - 0
src/lib/dhcpsrv/tests/test_utils.cc

@@ -0,0 +1,58 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "test_utils.h"
+#include <gtest/gtest.h>
+
+namespace isc {
+namespace dhcp {
+namespace test {
+
+void
+detailCompareLease(const Lease4Ptr& first, const Lease4Ptr& second) {
+    // Compare address strings.  Comparison of address objects is not used, as
+    // odd things happen when they are different: the EXPECT_EQ macro appears to
+    // call the operator uint32_t() function, which causes an exception to be
+    // thrown for IPv6 addresses.
+    EXPECT_EQ(first->addr_.toText(), second->addr_.toText());
+    EXPECT_TRUE(first->hwaddr_ == second->hwaddr_);
+    EXPECT_TRUE(*first->client_id_ == *second->client_id_);
+    EXPECT_EQ(first->valid_lft_, second->valid_lft_);
+    EXPECT_EQ(first->cltt_, second->cltt_);
+    EXPECT_EQ(first->subnet_id_, second->subnet_id_);
+}
+
+void
+detailCompareLease(const Lease6Ptr& first, const Lease6Ptr& second) {
+    EXPECT_EQ(first->type_, second->type_);
+
+    // Compare address strings.  Comparison of address objects is not used, as
+    // odd things happen when they are different: the EXPECT_EQ macro appears to
+    // call the operator uint32_t() function, which causes an exception to be
+    // thrown for IPv6 addresses.
+    EXPECT_EQ(first->addr_.toText(), second->addr_.toText());
+    EXPECT_EQ(first->prefixlen_, second->prefixlen_);
+    EXPECT_EQ(first->iaid_, second->iaid_);
+    ASSERT_TRUE(first->duid_);
+    ASSERT_TRUE(second->duid_);
+    EXPECT_TRUE(*first->duid_ == *second->duid_);
+    EXPECT_EQ(first->preferred_lft_, second->preferred_lft_);
+    EXPECT_EQ(first->valid_lft_, second->valid_lft_);
+    EXPECT_EQ(first->cltt_, second->cltt_);
+    EXPECT_EQ(first->subnet_id_, second->subnet_id_);
+}
+
+};
+};
+};

+ 49 - 0
src/lib/dhcpsrv/tests/test_utils.h

@@ -0,0 +1,49 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef LIBDHCPSRV_TEST_UTILS_H
+#define LIBDHCPSRV_TEST_UTILS_H
+
+#include <dhcpsrv/lease_mgr.h>
+
+namespace isc {
+namespace dhcp {
+namespace test {
+
+// @brief Performs a detailed comparison between two IPv6 leases
+//
+// @param first first lease to compare
+// @param second second lease to compare
+//
+// This function is intended to be called from gtest tests, as it
+// uses gtest macros and may report gtest failures.
+void
+detailCompareLease(const Lease6Ptr& first, const Lease6Ptr& second);
+
+// @brief Performs a detailed comparison between two IPv4 leases
+//
+// @param first first lease to compare
+// @param second second lease to compare
+//
+// This function is intended to be called from gtest tests, as it
+// uses gtest macros and may report gtest failures.
+void
+detailCompareLease(const Lease4Ptr& first, const Lease4Ptr& second);
+
+
+};
+};
+};
+
+#endif
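A minimal usage sketch for the new shared helpers, mirroring the include and using-directive added to the MySQL backend tests above (the fixture name and the lease variable are hypothetical placeholders):

    #include <dhcpsrv/lease_mgr_factory.h>
    #include <dhcpsrv/tests/test_utils.h>
    #include <gtest/gtest.h>

    using namespace isc::dhcp;
    using namespace isc::dhcp::test;

    TEST_F(SomeLeaseMgrTest, storedCopyMatches) {
        // 'lease' is assumed to be a Lease6Ptr already added to the lease manager.
        Lease6Ptr from_mgr = LeaseMgrFactory::instance().getLease6(lease->addr_);
        ASSERT_TRUE(from_mgr);
        // Overload resolution picks the Lease6 variant; any field mismatch is
        // reported as a non-fatal gtest failure.
        detailCompareLease(lease, from_mgr);
    }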

+ 13 - 4
src/lib/dns/master_loader.cc

@@ -79,7 +79,7 @@ public:
         warn_rfc1035_ttl_(true)
     {}
 
-    void pushSource(const std::string& filename) {
+    void pushSource(const std::string& filename, const Name& current_origin) {
         std::string error;
         if (!lexer_.pushSource(filename.c_str(), &error)) {
             if (initialized_) {
@@ -91,7 +91,7 @@ public:
             }
         }
         // Store the current status, so we can recover it upon popSource
-        include_info_.push_back(IncludeInfo(active_origin_, last_name_));
+        include_info_.push_back(IncludeInfo(current_origin, last_name_));
         initialized_ = true;
         previous_name_ = false;
     }
@@ -182,9 +182,18 @@ private:
             filename(lexer_.getNextToken(MasterToken::QSTRING).getString());
 
         // There optionally can be an origin, that applies before the include.
+        // We need to save the currently active origin before calling
+        // doOrigin(), because it would update active_origin_ while we need
+        // to pass the active origin before recognizing the new origin to
+        // pushSource.  Note: RFC 1035 is not really clear on this: it reads
+        // "regardless of changes... within the included file", but the new
+        // origin is not really specified "within the included file".
+        // Nevertheless, this behavior is probably more likely to be the
+        // intent of the RFC, and it's compatible with BIND 9.
+        const Name current_origin = active_origin_;
         doOrigin(true);
 
-        pushSource(filename);
+        pushSource(filename, current_origin);
     }
 
     // A helper method for loadIncremental(). It parses part of an RR
@@ -512,7 +521,7 @@ MasterLoader::MasterLoaderImpl::loadIncremental(size_t count_limit) {
                   "Trying to load when already loaded");
     }
     if (!initialized_) {
-        pushSource(master_file_);
+        pushSource(master_file_, active_origin_);
     }
     size_t count = 0;
     while (ok_ && count < count_limit) {
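In master-file terms, the behaviour the comment describes looks like this (hypothetical file name; the optional origin applies only inside the included file, after which the pre-include origin is restored):

    $ORIGIN example.org.
    $INCLUDE sub.zone child.example.org.  ; child.example.org. is the origin inside sub.zone only
    www  3600  IN  A  192.0.2.1           ; still relative to example.org.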

+ 4 - 3
src/lib/dns/tests/master_loader_unittest.cc

@@ -544,7 +544,7 @@ TEST_F(MasterLoaderTest, includeAndOrigin) {
         "@  1H  IN  A   192.0.2.1\n"
         // Then include the file with data and switch origin back
         "$INCLUDE " TEST_DATA_SRCDIR "/example.org example.org.\n"
-        // Another RR to see the switch survives after we exit include
+        // Another RR to see we fall back to the previous origin.
         "www    1H  IN  A   192.0.2.1\n";
     stringstream ss(include_string);
     setLoader(ss, Name("example.org"), RRClass::IN(),
@@ -557,7 +557,7 @@ TEST_F(MasterLoaderTest, includeAndOrigin) {
     // And check it's the correct data
     checkARR("www.example.org");
     checkBasicRRs();
-    checkARR("www.example.org");
+    checkARR("www.www.example.org");
 }
 
 // Like above, but the origin after include is bogus. The whole line should
@@ -582,7 +582,8 @@ TEST_F(MasterLoaderTest, includeAndBadOrigin) {
 
 // Check the origin doesn't get outside of the included file.
 TEST_F(MasterLoaderTest, includeOriginRestore) {
-    const string include_string = "$INCLUDE " TEST_DATA_SRCDIR "/origincheck.txt\n"
+    const string include_string =
+        "$INCLUDE " TEST_DATA_SRCDIR "/origincheck.txt\n"
         "@  1H  IN  A   192.0.2.1\n";
     stringstream ss(include_string);
     setLoader(ss, Name("example.org"), RRClass::IN(),

+ 1 - 1
src/lib/python/isc/datasrc/Makefile.am

@@ -2,7 +2,7 @@ SUBDIRS = . tests
 
 # old data, should be removed in the near future once conversion is done
 pythondir = $(pyexecdir)/isc/datasrc
-python_PYTHON = __init__.py master.py sqlite3_ds.py
+python_PYTHON = __init__.py sqlite3_ds.py
 
 
 # new data

+ 0 - 616
src/lib/python/isc/datasrc/master.py

@@ -1,616 +0,0 @@
-# Copyright (C) 2010  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-import sys, re, string
-import time
-import os
-#########################################################################
-# define exceptions
-#########################################################################
-class MasterFileError(Exception):
-    pass
-
-#########################################################################
-# pop: remove the first word from a line
-# input: a line
-# returns: first word, rest of the line
-#########################################################################
-def pop(line):
-    list = line.split()
-    first, rest = '', ''
-    if len(list) != 0:
-        first = list[0]
-    if len(list) > 1:
-        rest = ' '.join(list[1:])
-    return first, rest
-
-#########################################################################
-# cleanup: removes excess content from zone file data, including comments
-# and extra whitespace
-# input:
-#   line of text
-# returns:
-#   the same line, with comments removed, leading and trailing
-#   whitespace removed, and all other whitespace compressed to
-#   single spaces
-#########################################################################
-decomment = re.compile('^\s*((?:[^;"]|"[^"]*")*)\s*(?:|;.*)$')
-# Regular expression explained:
-# First, ignore any whitespace at the start. Then take the content,
-# each bit is either a harmless character (no ; nor ") or a string -
-# sequence between " " not containing double quotes. Then there may
-# be a comment at the end.
-def cleanup(s):
-    global decomment
-    s = s.strip().expandtabs()
-    s = decomment.search(s).group(1)
-    return ' '.join(s.split())
-
-#########################################################################
-# istype: check whether a string is a known RR type.
-# returns: boolean
-#########################################################################
-rrtypes = set(['a', 'aaaa', 'afsdb', 'apl', 'cert', 'cname', 'dhcid',
-               'dlv', 'dname', 'dnskey', 'ds', 'gpos', 'hinfo', 'hip',
-               'ipseckey', 'isdn', 'key', 'kx', 'loc', 'mb', 'md',
-               'mf', 'mg', 'minfo', 'mr', 'mx', 'naptr', 'ns', 'nsap',
-               'nsap-ptr', 'nsec', 'nsec3', 'nsec3param', 'null',
-               'nxt', 'opt', 'ptr', 'px', 'rp', 'rrsig', 'rt', 'sig',
-               'soa', 'spf', 'srv', 'sshfp', 'tkey', 'tsig', 'txt',
-               'x25', 'wks'])
-def istype(s):
-    global rrtypes
-    if s.lower() in rrtypes:
-        return True
-    else:
-        return False
-
-#########################################################################
-# isclass: check whether a string is a known RR class.  (only 'IN' is
-# supported, but the others must still be recognizable.)
-# returns: boolean
-#########################################################################
-rrclasses = set(['in', 'ch', 'chaos', 'hs', 'hesiod'])
-def isclass(s):
-    global rrclasses
-    if s.lower() in rrclasses:
-        return True
-    else:
-        return False
-
-#########################################################################
-# isname: check whether a string is a valid DNS name.
-# returns: boolean
-#########################################################################
-name_regex = re.compile('[-\w\$\d\/*]+(?:\.[-\w\$\d\/]+)*\.?')
-def isname(s):
-    global name_regex
-    if s == '.' or name_regex.match(s):
-        return True
-    else:
-        return False
-
-#########################################################################
-# isttl: check whether a string is a valid TTL specifier.
-# returns: boolean
-#########################################################################
-ttl_regex = re.compile('([0-9]+[wdhms]?)+$', re.I)
-def isttl(s):
-    global ttl_regex
-    if ttl_regex.match(s):
-        return True
-    else:
-        return False
-
-#########################################################################
-# parse_ttl: convert a TTL field into an integer TTL value
-# (multiplying as needed for minutes, hours, etc.)
-# input:
-#   string
-# returns:
-#   int
-# throws:
-#   MasterFileError
-#########################################################################
-def parse_ttl(s):
-    sum = 0
-    if not isttl(s):
-        raise MasterFileError('Invalid TTL: ' + s)
-    for ttl_expr in re.findall('\d+[wdhms]?', s, re.I):
-        if ttl_expr.isdigit():
-            ttl = int(ttl_expr)
-            sum += ttl
-            continue
-        ttl = int(ttl_expr[:-1])
-        suffix = ttl_expr[-1].lower()
-        if suffix == 'w':
-            ttl *= 604800
-        elif suffix == 'd':
-            ttl *= 86400
-        elif suffix == 'h':
-            ttl *= 3600
-        elif suffix == 'm':
-            ttl *= 60
-        sum += ttl
-    return str(sum)
-
-#########################################################################
-# records: generator function to return complete RRs from the zone file,
-# combining lines when necessary because of parentheses
-# input:
-#   descriptor for a zone master file (returned from openzone)
-# yields:
-#   complete RR
-#########################################################################
-def records(input):
-    record = []
-    complete = True
-    paren = 0
-    size = 0
-    for line in input:
-        size += len(line)
-        list = cleanup(line).split()
-        for word in list:
-            if paren == 0:
-                left, p, right = word.partition('(')
-                if p == '(':
-                    if left: record.append(left)
-                    if right: record.append(right)
-                    paren += 1
-                else:
-                    record.append(word)
-            else:
-                left, p, right = word.partition(')')
-                if p == ')':
-                    if left: record.append(left)
-                    if right: record.append(right)
-                    paren -= 1
-                else:
-                    record.append(word)
-
-        if paren == 1 or not record:
-            continue
-
-        ret = ' '.join(record)
-        record = []
-        oldsize = size
-        size = 0
-        yield ret, oldsize
-
-#########################################################################
-# define the MasterFile class for reading zone master files
-#########################################################################
-class MasterFile:
-    __rrclass = 'IN'
-    __maxttl = 0x7fffffff
-    __ttl = ''
-    __lastttl = ''
-    __zonefile = ''
-    __name = ''
-    __file_level = 0
-    __file_type = ""
-    __init_time = time.time()
-    __records_num = 0
-
-    def __init__(self, filename, initial_origin = '', verbose = False):
-        self.__initial_origin = initial_origin
-        self.__origin = initial_origin
-        self.__datafile = filename
-
-        try:
-            self.__zonefile = open(filename, 'r')
-        except:
-            raise MasterFileError("Could not open " + filename)
-        self.__filesize = os.fstat(self.__zonefile.fileno()).st_size
-
-        self.__cur = 0
-        self.__numback = 0
-        self.__verbose = verbose
-        try:
-            self.__zonefile = open(filename, 'r')
-        except:
-            raise MasterFileError("Could not open " + filename)
-
-    def __status(self):
-        interval = time.time() - MasterFile.__init_time
-        if self.__filesize == 0:
-            percent = 100
-        else:
-            percent = (self.__cur * 100)/self.__filesize
-
-        sys.stdout.write("\r" + (80 * " "))
-        sys.stdout.write("\r%d RR(s) loaded in %.2f second(s) (%.2f%% of %s%s)"\
-                % (MasterFile.__records_num, interval, percent, MasterFile.__file_type, self.__datafile))
-
-    def __del__(self):
-        if self.__zonefile:
-            self.__zonefile.close()
-    ########################################################################
-    # check if the zonename is relative
-    # no then return
-    # yes , sets the relative domain name to the stated name
-    #######################################################################
-    def __statedname(self, name, record):
-        if name[-1] != '.':
-            if not self.__origin:
-                raise MasterFileError("Cannot parse RR, No $ORIGIN: " + record)
-            elif self.__origin == '.':
-                name += '.'
-            else:
-                name += '.' + self.__origin
-        return name
-    #####################################################################
-    # handle $ORIGIN, $TTL and $GENERATE directives
-    # (currently only $ORIGIN and $TTL are implemented)
-    # input:
-    #   a line from a zone file
-    # returns:
-    #   a boolean indicating whether a directive was found
-    # throws:
-    #   MasterFileError
-    #########################################################################
-    def __directive(self, s):
-        first, more = pop(s)
-        second, more = pop(more)
-        if re.match('\$origin', first, re.I):
-            if not second or not isname(second):
-                raise MasterFileError('Invalid $ORIGIN')
-            if more:
-                raise MasterFileError('Invalid $ORIGIN')
-            if second[-1] == '.':
-                self.__origin = second
-            elif not self.__origin:
-                raise MasterFileError("$ORIGIN is not absolute in record: %s" % s)
-            elif self.__origin != '.':
-                self.__origin = second + '.' + self.__origin
-            else:
-                self.__origin = second + '.'
-            return True
-        elif re.match('\$ttl', first, re.I):
-            if not second or not isttl(second):
-                raise MasterFileError('Invalid TTL: "' + second + '"')
-            if more:
-                raise MasterFileError('Invalid $TTL statement')
-            MasterFile.__ttl = parse_ttl(second)
-            if int(MasterFile.__ttl) > self.__maxttl:
-                raise MasterFileError('TTL too high: ' + second)
-            return True
-        elif re.match('\$generate', first, re.I):
-            raise MasterFileError('$GENERATE not yet implemented')
-        else:
-            return False
-
-    #########################################################################
-    # handle $INCLUDE directives
-    # input:
-    #   a line from a zone file
-    # returns:
-    #   the parsed output of the included file, if any, or an empty array
-    # throws:
-    #   MasterFileError
-    #########################################################################
-    __include_syntax1 = re.compile('\s+(\S+)(?:\s+(\S+))?$', re.I)
-    __include_syntax2 = re.compile('\s+"([^"]+)"(?:\s+(\S+))?$', re.I)
-    __include_syntax3 = re.compile("\s+'([^']+)'(?:\s+(\S+))?$", re.I)
-    def __include(self, s):
-        if not s.lower().startswith('$include'):
-            return "", ""
-        s = s[len('$include'):]
-        m = self.__include_syntax1.match(s)
-        if not m:
-            m = self.__include_syntax2.match(s)
-        if not m:
-            m = self.__include_syntax3.match(s)
-        if not m:
-            raise MasterFileError('Invalid $include format')
-        file = m.group(1)
-        if m.group(2):
-            if not isname(m.group(2)):
-                raise MasterFileError('Invalid $include format (invalid origin)')
-            origin = self.__statedname(m.group(2), s)
-        else:
-            origin = self.__origin
-        return file, origin
-
-    #########################################################################
-    # try parsing an RR on the assumption that the type is specified in
-    # field 4, and name, ttl and class are in fields 1-3
-    # are all specified, with type in field 4
-    # input:
-    #   a record to parse, and the most recent name found in prior records
-    # returns:
-    #   empty list if parse failed, else name, ttl, class, type, rdata
-    #########################################################################
-    def __four(self, record, curname):
-        ret = ''
-        list = record.split()
-        if len(list) <= 4:
-            return ret
-        if istype(list[3]):
-            if isclass(list[2]) and isttl(list[1]) and isname(list[0]):
-                name, ttl, rrclass, rrtype = list[0:4]
-                ttl = parse_ttl(ttl)
-                MasterFile.__lastttl = ttl or MasterFile.__lastttl
-                rdata = ' '.join(list[4:])
-                ret = name, ttl, rrclass, rrtype, rdata
-            elif isclass(list[1]) and isttl(list[2]) and isname(list[0]):
-                name, rrclass, ttl, rrtype = list[0:4]
-                ttl = parse_ttl(ttl)
-                MasterFile.__lastttl = ttl or MasterFile.__lastttl
-                rdata = ' '.join(list[4:])
-                ret = name, ttl, rrclass, rrtype, rdata
-        return ret
-
-    #########################################################################
-    # try parsing an RR on the assumption that the type is specified
-    # in field 3, and one of name, ttl, or class has been omitted
-    # input:
-    #   a record to parse, and the most recent name found in prior records
-    # returns:
-    #   empty list if parse failed, else name, ttl, class, type, rdata
-    #########################################################################
-    def __getttl(self):
-        return MasterFile.__ttl or MasterFile.__lastttl
-
-    def __three(self, record, curname):
-        ret = ''
-        list = record.split()
-        if len(list) <= 3:
-            return ret
-        if istype(list[2]) and not istype(list[1]):
-            if isclass(list[1]) and not isttl(list[0]) and isname(list[0]):
-                rrclass = list[1]
-                ttl = self.__getttl()
-                name = list[0]
-            elif not isclass(list[1]) and isttl(list[1]) and not isclass(list[0]) and isname(list[0]):
-                rrclass = self.__rrclass
-                ttl = parse_ttl(list[1])
-                MasterFile.__lastttl = ttl or MasterFile.__lastttl
-                name = list[0]
-            elif curname and isclass(list[1]) and isttl(list[0]):
-                rrclass = list[1]
-                ttl = parse_ttl(list[0])
-                MasterFile.__lastttl = ttl or MasterFile.__lastttl
-                name = curname
-            elif curname and isttl(list[1]) and isclass(list[0]):
-                rrclass = list[0]
-                ttl = parse_ttl(list[1])
-                MasterFile.__lastttl = ttl or MasterFile.__lastttl
-                name = curname
-            else:
-                return ret
-            rrtype = list[2]
-            rdata = ' '.join(list[3:])
-            ret = name, ttl, rrclass, rrtype, rdata
-        return ret
-
-    #########################################################################
-    # try parsing an RR on the assumption that the type is specified in
-    # field 2, and field 1 is either name or ttl
-    # input:
-    #   a record to parse, and the most recent name found in prior records
-    # returns:
-    #   empty list if parse failed, else name, ttl, class, type, rdata
-    # throws:
-    #   MasterFileError
-    #########################################################################
-    def __two(self, record, curname):
-        ret = ''
-        list = record.split()
-        if len(list) <= 2:
-            return ret
-        if istype(list[1]):
-            rrclass = self.__rrclass
-            rrtype = list[1]
-            if list[0].lower() == 'rrsig':
-                name = curname
-                ttl = self.__getttl()
-                rrtype = list[0]
-                rdata = ' '.join(list[1:])
-            elif isttl(list[0]):
-                ttl = parse_ttl(list[0])
-                name = curname
-                rdata = ' '.join(list[2:])
-            elif isclass(list[0]):
-                ttl = self.__getttl()
-                name = curname
-                rdata = ' '.join(list[2:])
-            elif isname(list[0]):
-                name = list[0]
-                ttl = self.__getttl()
-                rdata = ' '.join(list[2:])
-            else:
-                raise MasterFileError("Cannot parse RR: " + record)
-
-            ret = name, ttl, rrclass, rrtype, rdata
-        return ret
-
-    ########################################################################
-    #close verbose
-    ######################################################################
-    def closeverbose(self):
-        self.__status()
-
-    #########################################################################
-    # zonedata: generator function to parse a zone master file and return
-    # each RR as a (name, ttl, type, class, rdata) tuple
-    #########################################################################
-    def zonedata(self):
-        name = ''
-        last_status = 0.0
-        flag = 1
-
-        for record, size in records(self.__zonefile):
-            if self.__verbose:
-                now = time.time()
-                if flag == 1:
-                    self.__status()
-                    flag = 0
-                if now - last_status >= 1.0:
-                    self.__status()
-                    last_status = now
-
-            self.__cur += size
-            if self.__directive(record):
-                continue
-
-            incl, suborigin = self.__include(record)
-            if incl:
-                if self.__filesize == 0:
-                    percent = 100
-                else:
-                    percent = (self.__cur * 100)/self.__filesize
-                if self.__verbose:
-                    sys.stdout.write("\r" + (80 * " "))
-                    sys.stdout.write("\rIncluding \"%s\" from \"%s\"\n" % (incl, self.__datafile))
-                MasterFile.__file_level += 1
-                MasterFile.__file_type = "included "
-                sub = MasterFile(incl, suborigin, self.__verbose)
-
-                for rrname, ttl, rrclass, rrtype, rdata in sub.zonedata():
-                    yield (rrname, ttl, rrclass, rrtype, rdata)
-                if self.__verbose:
-                    sub.closeverbose()
-                MasterFile.__file_level -= 1
-                if MasterFile.__file_level == 0:
-                    MasterFile.__file_type = ""
-                del sub
-                continue
-
-            # replace @ with origin
-            rl = record.split()
-            if rl[0] == '@':
-                rl[0] = self.__origin
-                if not self.__origin:
-                    raise MasterFileError("Cannot parse RR, No $ORIGIN: " + record)
-                record = ' '.join(rl)
-
-            result = self.__four(record, name)
-
-            if not result:
-                result = self.__three(record, name)
-
-            if not result:
-                result = self.__two(record, name)
-
-            if not result:
-                first, rdata = pop(record)
-                if istype(first):
-                    result = name, self.__getttl(), self.__rrclass, first, rdata
-
-            if not result:
-                raise MasterFileError("Cannot parse RR: " + record)
-
-            name, ttl, rrclass, rrtype, rdata = result
-            name = self.__statedname(name, record)
-
-            if rrclass.lower() != 'in':
-                raise MasterFileError("CH and HS zones not supported")
-
-            # add origin to rdata containing names, if necessary
-            if rrtype.lower() in ('cname', 'dname', 'ns', 'ptr'):
-                if not isname(rdata):
-                    raise MasterFileError("Invalid " + rrtype + ": " + rdata)
-                rdata = self.__statedname(rdata, record)
-
-            if rrtype.lower() == 'soa':
-                soa = rdata.split()
-                if len(soa) < 2 or not isname(soa[0]) or not isname(soa[1]):
-                    raise MasterFileError("Invalid " + rrtype + ": " + rdata)
-                soa[0] = self.__statedname(soa[0], record)
-                soa[1] = self.__statedname(soa[1], record)
-                if not MasterFile.__ttl and not ttl:
-                    MasterFile.__ttl = MasterFile.__ttl or parse_ttl(soa[-1])
-                    ttl = MasterFile.__ttl
-
-                for index in range(3, len(soa)):
-                    if isttl(soa[index]):
-                        soa[index] = parse_ttl(soa[index])
-                    else :
-                        raise MasterFileError("No TTL specified; in soa record!")
-                rdata = ' '.join(soa)
-
-            if not ttl:
-                raise MasterFileError("No TTL specified; zone rejected")
-
-            if rrtype.lower() == 'mx':
-                mx = rdata.split()
-                if len(mx) != 2 or not isname(mx[1]):
-                    raise MasterFileError("Invalid " + rrtype + ": " + rdata)
-                if mx[1][-1] != '.':
-                    mx[1] += '.' + self.__origin
-                    rdata = ' '.join(mx)
-            MasterFile.__records_num += 1
-            yield (name, ttl, rrclass, rrtype, rdata)
-
-    #########################################################################
-    # zonename: scans zone data for an SOA record, returns its name, restores
-    # the zone file to its prior state
-    #########################################################################
-    def zonename(self):
-        if self.__name:
-            return self.__name
-        old_origin = self.__origin
-        self.__origin = self.__initial_origin
-        cur_value = self.__cur
-        old_location = self.__zonefile.tell()
-        old_verbose = self.__verbose
-        self.__verbose = False
-        self.__zonefile.seek(0)
-
-        for name, ttl, rrclass, rrtype, rdata in self.zonedata():
-            if rrtype.lower() == 'soa':
-                break
-        self.__zonefile.seek(old_location)
-        self.__origin = old_origin
-        self.__cur = cur_value
-        if rrtype.lower() != 'soa':
-            raise MasterFileError("No SOA found")
-        self.__name = name
-        self.__verbose = old_verbose
-        return name
-
-    #########################################################################
-    # reset: reset the state of the master file
-    #########################################################################
-    def reset(self):
-        self.__zonefile.seek(0)
-        self.__origin = self.__initial_origin
-        MasterFile.__ttl = ''
-        MasterFile.__lastttl = ''
-
-#########################################################################
-# main: used for testing; parse a zone file and print out each record
-# broken up into separate name, ttl, class, type, and rdata files
-#########################################################################
-def main():
-    try:
-        file = sys.argv[1]
-    except:
-        file = 'testfile'
-    master = MasterFile(file, '.')
-    print ('zone name: ' + master.zonename())
-    print ('---------------------')
-    for name, ttl, rrclass, rrtype, rdata in master.zonedata():
-        print ('name: ' + name)
-        print ('ttl: ' + ttl)
-        print ('rrclass: ' + rrclass)
-        print ('rrtype: ' + rrtype)
-        print ('rdata: ' + rdata)
-        print ('---------------------')
-    del master
-
-if __name__ == "__main__":
-    main()

+ 1 - 2
src/lib/python/isc/datasrc/tests/Makefile.am

@@ -1,6 +1,4 @@
 PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
-# old tests, TODO remove or change to use new API?
-#PYTESTS = master_test.py
 PYTESTS =  datasrc_test.py sqlite3_ds_test.py
 PYTESTS += clientlist_test.py zone_loader_test.py
 EXTRA_DIST = $(PYTESTS)
@@ -29,6 +27,7 @@ LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/data
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
+# We need to define B10_FROM_BUILD for datasrc loadable modules
 check-local:
 if ENABLE_PYTHON_COVERAGE
 	touch $(abs_top_srcdir)/.coverage

+ 0 - 35
src/lib/python/isc/datasrc/tests/master_test.py

@@ -1,35 +0,0 @@
-# Copyright (C) 2010  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-from isc.datasrc.master import *
-import unittest
-
-class TestTTL(unittest.TestCase):
-    def test_ttl(self):
-        self.assertTrue(isttl('3600'))
-        self.assertTrue(isttl('1W'))
-        self.assertTrue(isttl('1w'))
-        self.assertTrue(isttl('2D'))
-        self.assertTrue(isttl('2d'))
-        self.assertTrue(isttl('30M'))
-        self.assertTrue(isttl('30m'))
-        self.assertTrue(isttl('10S'))
-        self.assertTrue(isttl('10s'))
-        self.assertTrue(isttl('2W1D'))
-        self.assertFalse(isttl('not a ttl'))
-        self.assertFalse(isttl('1X'))
-
-if __name__ == '__main__':
-    unittest.main()

+ 2 - 0
src/lib/python/isc/log_messages/Makefile.am

@@ -14,6 +14,7 @@ EXTRA_DIST += config_messages.py
 EXTRA_DIST += notify_out_messages.py
 EXTRA_DIST += libddns_messages.py
 EXTRA_DIST += libxfrin_messages.py
+EXTRA_DIST += loadzone_messages.py
 EXTRA_DIST += server_common_messages.py
 EXTRA_DIST += dbutil_messages.py
 
@@ -31,6 +32,7 @@ CLEANFILES += config_messages.pyc
 CLEANFILES += notify_out_messages.pyc
 CLEANFILES += libddns_messages.pyc
 CLEANFILES += libxfrin_messages.pyc
+CLEANFILES += loadzone_messages.pyc
 CLEANFILES += server_common_messages.pyc
 CLEANFILES += dbutil_messages.pyc
 

+ 1 - 0
src/lib/python/isc/log_messages/loadzone_messages.py

@@ -0,0 +1 @@
+from work.loadzone_messages import *

+ 2 - 1
src/lib/util/encode/base_n.cc

@@ -291,7 +291,8 @@ BaseNTransformer<BitsPerChunk, BaseZeroCode, Encoder, Decoder>::decode(
                 isc_throw(BadValue, "Too many " << algorithm
                           << " padding characters: " << input);
             }
-        } else if (ch < 0 || !isspace(ch)) {
+        } else if (!(ch > 0 && isspace(ch))) {
+            // see the note for DecodeNormalizer::skipSpaces() above for ch > 0
             break;
         }
         ++srit;
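The rewritten condition keeps negative values away from isspace(), whose behaviour is undefined for arguments other than EOF and values representable as unsigned char. The conventional stand-alone form of that guard, shown here only as an illustration and not as the library's code:

    #include <cctype>

    // Promote through unsigned char so a sign-extended byte can never reach
    // isspace() as a negative value.
    inline bool isSpaceChar(char ch) {
        return std::isspace(static_cast<unsigned char>(ch)) != 0;
    }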

+ 2 - 2
tests/system/bindctl/setup.sh

@@ -22,5 +22,5 @@ SUBTEST_TOP=${TEST_TOP}/bindctl
 cp ${SUBTEST_TOP}/nsx1/b10-config.db.template ${SUBTEST_TOP}/nsx1/b10-config.db
 
 rm -f ${SUBTEST_TOP}/*/zone.sqlite3
-${B10_LOADZONE} -o . -d ${SUBTEST_TOP}/nsx1/zone.sqlite3 \
-	${SUBTEST_TOP}//nsx1/root.db
+${B10_LOADZONE} -i 1 -c "{\"database_file\": \"${SUBTEST_TOP}/nsx1/zone.sqlite3\"}" \
+	. ${SUBTEST_TOP}//nsx1/root.db

+ 4 - 0
tests/system/bindctl/tests.sh

@@ -85,6 +85,7 @@ config commit
 quit
 ' | $RUN_BINDCTL \
 	--csv-file-dir=$BINDCTL_CSV_DIR 2>&1 > /dev/null || status=1
+sleep 2
 $DIG +norec @10.53.0.1 -p 53210 ns.example.com. A >dig.out.$n || status=1
 # perform a simple check on the output (digcomp would be too much for this)
 grep 192.0.2.1 dig.out.$n > /dev/null || status=1
@@ -118,6 +119,7 @@ config commit
 quit
 " | $RUN_BINDCTL \
 	--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
+sleep 2
 $DIG +norec @10.53.0.1 -p 53210 ns.example.com. A >dig.out.$n || status=1
 grep 192.0.2.2 dig.out.$n > /dev/null || status=1
 if [ $status != 0 ]; then echo "I:failed"; fi
@@ -148,6 +150,7 @@ quit
 ' | $RUN_BINDCTL \
 	--csv-file-dir=$BINDCTL_CSV_DIR 2>&1 > /dev/null || status=1
 done
+sleep 2
 $DIG +norec @10.53.0.1 -p 53210 ns.example.com. A >dig.out.$n || status=1
 grep 192.0.2.2 dig.out.$n > /dev/null || status=1
 if [ $status != 0 ]; then echo "I:failed"; fi
@@ -183,6 +186,7 @@ quit
 ' | $RUN_BINDCTL \
 	--csv-file-dir=$BINDCTL_CSV_DIR 2>&1 > /dev/null || status=1
 done
+sleep 2
 $DIG +norec @10.53.0.1 -p 53210 ns.example.com. A >dig.out.$n || status=1
 grep 192.0.2.2 dig.out.$n > /dev/null || status=1
 if [ $status != 0 ]; then echo "I:failed"; fi

+ 4 - 1
tests/system/conf.sh.in

@@ -51,7 +51,10 @@ export RNDC=$BIND9_TOP/bin/rndc/rndc
 export TESTSOCK=$BIND9_TOP/bin/tests/system/testsock.pl
 export DIGCOMP=$BIND9_TOP/bin/tests/system/digcomp.pl
 
-export SUBDIRS="bindctl glue ixfr/in-2"
+# bindctl test doesn't work right now and is disabled (see #2568)
+#export SUBDIRS="bindctl glue ixfr/in-2"
+export SUBDIRS="glue ixfr/in-2"
+
 # Add appropriate subdirectories to the above statement as the tests become
 # available.
 #SUBDIRS="dnssec masterfile ixfr/in-1 ixfr/in-2 ixfr/in-4"

+ 4 - 4
tests/system/glue/setup.sh.in

@@ -18,8 +18,8 @@ SYSTEMTESTTOP=..
 . $SYSTEMTESTTOP/conf.sh
 
 rm -f */zone.sqlite3
-${B10_LOADZONE} -o . -d @builddir@/nsx1/zone.sqlite3 @builddir@/nsx1/root.db
-${B10_LOADZONE} -o root-servers.nil -d @builddir@/nsx1/zone.sqlite3 \
+${B10_LOADZONE} -i 1 -c '{"database_file": "@builddir@/nsx1/zone.sqlite3"}' . @builddir@/nsx1/root.db
+${B10_LOADZONE} -i 1 -c '{"database_file": "@builddir@/nsx1/zone.sqlite3"}' root-servers.nil \
 	@builddir@/nsx1/root-servers.nil.db
-${B10_LOADZONE} -o com -d @builddir@/nsx1/zone.sqlite3 @builddir@/nsx1/com.db
-${B10_LOADZONE} -o net -d @builddir@/nsx1/zone.sqlite3 @builddir@/nsx1/net.db
+${B10_LOADZONE} -i 1 -c '{"database_file": "@builddir@/nsx1/zone.sqlite3"}' com @builddir@/nsx1/com.db
+${B10_LOADZONE} -i 1 -c '{"database_file": "@builddir@/nsx1/zone.sqlite3"}' net @builddir@/nsx1/net.db

+ 1 - 1
tests/system/ixfr/in-1/setup.sh.in

@@ -27,4 +27,4 @@ cp -f $IXFR_TOP/db.example.n4 ns1/db.example
 
 # Set up the IXFR client - load the same version of the zone.
 cp -f $IXFR_TOP/b10-config.db nsx2/b10-config.db
-${B10_LOADZONE} -o . -d $IXFR_TOP/zone.sqlite3 $IXFR_TOP/db.example.n4
+${B10_LOADZONE} -c "{\"database_file\": \"$IXFR_TOP/zone.sqlite3\"}" example. $IXFR_TOP/db.example.n4

+ 1 - 1
tests/system/ixfr/in-2/setup.sh.in

@@ -26,4 +26,4 @@ cp -f $IXFR_TOP/db.example.n6 ns1/db.example
 
 # Set up the IXFR client - load an earlier version of the zone
 cp -f $IXFR_TOP/b10-config.db nsx2/b10-config.db
-${B10_LOADZONE} -o . -d $IXFR_TOP/zone.sqlite3 $IXFR_TOP/db.example.n6
+${B10_LOADZONE} -c "{\"database_file\": \"$IXFR_TOP/zone.sqlite3\"}" example. $IXFR_TOP/db.example.n6

+ 1 - 1
tests/system/ixfr/in-3/setup.sh.in

@@ -26,4 +26,4 @@ cp -f $IXFR_TOP/db.example.n0 ns1/db.example
 
 # Set up the IXFR client - load a previous version of the zone.
 cp -f $IXFR_TOP/b10-config.db nsx2/b10-config.db
-${B10_LOADZONE} -o . -d $IXFR_TOP/zone.sqlite3 $IXFR_TOP/db.example.n2
+${B10_LOADZONE} -c "{\"database_file\": \"$IXFR_TOP/zone.sqlite3\"}" example. $IXFR_TOP/db.example.n2

+ 1 - 1
tests/system/ixfr/in-4/setup.sh.in

@@ -27,4 +27,4 @@ cp -f $IXFR_TOP/db.example.n2.refresh ns1/db.example
 # Set up the IXFR client - load a previous version of the zone with a short
 # refresh time.
 cp -f $IXFR_TOP/b10-config.db nsx2/b10-config.db
-${B10_LOADZONE} -o . -d $IXFR_TOP/zone.sqlite3 $IXFR_TOP/db.example.n2.refresh
+${B10_LOADZONE} -c "{\"database_file\": \"$IXFR_TOP/zone.sqlite3\"}" example. $IXFR_TOP/db.example.n2.refresh