Commit 6aa01234 authored by Jelte Jansen's avatar Jelte Jansen
Browse files

[2380merge2] Merge branch 'trac2380' into trac2380merge2

parents ddd815eb 07a0de09
......@@ -1176,8 +1176,7 @@ AC_CONFIG_FILES([Makefile
src/bin/dbutil/tests/Makefile
src/bin/dbutil/tests/testdata/Makefile
src/bin/loadzone/Makefile
src/bin/loadzone/tests/correct/Makefile
src/bin/loadzone/tests/error/Makefile
src/bin/loadzone/tests/Makefile
src/bin/msgq/Makefile
src/bin/msgq/tests/Makefile
src/bin/auth/Makefile
......@@ -1352,6 +1351,7 @@ AC_OUTPUT([doc/version.ent
src/bin/loadzone/tests/correct/correct_test.sh
src/bin/loadzone/tests/error/error_test.sh
src/bin/loadzone/b10-loadzone.py
src/bin/loadzone/loadzone.py
src/bin/usermgr/run_b10-cmdctl-usermgr.sh
src/bin/usermgr/b10-cmdctl-usermgr.py
src/bin/msgq/msgq.py
......
......@@ -449,8 +449,10 @@ var/
<listitem>
<para>Load desired zone file(s), for example:
<screen>$ <userinput>b10-loadzone <replaceable>your.zone.example.org</replaceable></userinput></screen>
<screen>$ <userinput>b10-loadzone <replaceable>-c '{"database_file": "/usr/local/var/bind10/zone.sqlite3"}'</replaceable> <replaceable>your.zone.example.org</replaceable> <replaceable>your.zone.file</replaceable></userinput></screen>
</para>
(If you use the sqlite3 data source with the default DB
file, you can omit the -c option).
</listitem>
<listitem>
......@@ -2636,19 +2638,10 @@ can use various data source backends.
</para>
<para>
The <option>-o</option> argument may be used to define the
default origin for loaded zone file records.
</para>
<note>
<para>
In the current release, only the SQLite3 back
end is used by <command>b10-loadzone</command>.
By default, it stores the zone data in
<filename>/usr/local/var/bind10-devel/zone.sqlite3</filename>
unless the <option>-d</option> switch is used to set the
database filename.
Multiple zones are stored in a single SQLite3 zone database.
</para>
</note>
......
SUBDIRS = . tests/correct tests/error
#SUBDIRS = . tests/correct tests/error <= TBD: clean this up later
SUBDIRS = . tests
bin_SCRIPTS = b10-loadzone
# tentative setup: clean this up:
bin_SCRIPTS += b10-loadzone-ng
noinst_SCRIPTS = run_loadzone.sh
nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/loadzone_messages.py
pylogmessagedir = $(pyexecdir)/isc/log_messages/
CLEANFILES = b10-loadzone
# tentative setup: clean this up:
CLEANFILES += b10-loadzone-ng
CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/loadzone_messages.py
CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/loadzone_messages.pyc
man_MANS = b10-loadzone.8
DISTCLEANFILES = $(man_MANS)
EXTRA_DIST = $(man_MANS) b10-loadzone.xml
EXTRA_DIST = $(man_MANS) b10-loadzone.xml loadzone_messages.mes
if GENERATE_DOCS
......@@ -21,12 +31,22 @@ $(man_MANS):
endif
# Define rule to build logging source files from message file
$(PYTHON_LOGMSGPKG_DIR)/work/loadzone_messages.py : loadzone_messages.mes
$(top_builddir)/src/lib/log/compiler/message \
-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/loadzone_messages.mes
b10-loadzone: b10-loadzone.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" \
-e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" b10-loadzone.py >$@
chmod a+x $@
# tentatively named "-ng".
b10-loadzone-ng: loadzone.py $(PYTHON_LOGMSGPKG_DIR)/work/loadzone_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" loadzone.py >$@
chmod a+x $@
EXTRA_DIST += tests/normal/README
EXTRA_DIST += tests/normal/dsset-subzone.example.com
EXTRA_DIST += tests/normal/example.com
......
Support optional origin in $INCLUDE:
$INCLUDE filename origin
Support optional comment in $INCLUDE:
$INCLUDE filename origin comment
Support optional comment in $TTL (RFC 2308):
$TTL number comment
Do not assume "." is origin if origin is not set and sees a @ or
a label without a ".". It should probably fail. (Don't assume a
mistake means it is a root level label.)
Add verbose option to show what it is adding, not necessarily
in master file format, but in the context of the data source.
......
......@@ -2,7 +2,7 @@
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
[<!ENTITY mdash "&#8212;">]>
<!--
- Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
- Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
......@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
<date>March 26, 2012</date>
<date>December 15, 2012</date>
</refentryinfo>
<refmeta>
......@@ -36,7 +36,7 @@
<docinfo>
<copyright>
<year>2010</year>
<year>2012</year>
<holder>Internet Systems Consortium, Inc. ("ISC")</holder>
</copyright>
</docinfo>
......@@ -44,9 +44,13 @@
<refsynopsisdiv>
<cmdsynopsis>
<command>b10-loadzone</command>
<arg><option>-d <replaceable class="parameter">database</replaceable></option></arg>
<arg><option>-o <replaceable class="parameter">origin</replaceable></option></arg>
<arg choice="req">filename</arg>
<arg><option>-c <replaceable class="parameter">datasrc_config</replaceable></option></arg>
<arg><option>-d <replaceable class="parameter">debug_level</replaceable></option></arg>
<arg><option>-i <replaceable class="parameter">report_interval</replaceable></option></arg>
<arg><option>-t <replaceable class="parameter">datasrc_type</replaceable></option></arg>
<arg><option>-C <replaceable class="parameter">zone_class</replaceable></option></arg>
<arg choice="req">zone name</arg>
<arg choice="req">zone file</arg>
</cmdsynopsis>
</refsynopsisdiv>
......@@ -66,20 +70,38 @@
$ORIGIN is followed by a domain name, and sets the origin
that will be used for relative domain names in subsequent records.
$INCLUDE is followed by a filename to load.
<!-- TODO: and optionally a
domain name used to set the relative domain name origin. -->
The previous origin is restored after the file is included.
<!-- the current domain name is also restored -->
$TTL is followed by a time-to-live value which is used
by any following records that don't specify a TTL.
</para>
<para>
If the specified zone does not exist in the specified data
source, <command>b10-loadzone</command> will first create a
new empty zone in the data source, then fill it with the RRs
given in the specified master zone file. In this case, if
loading fails for some reason, the creation of the new zone
is also canceled.
<note><simpara>
Due to an implementation limitation, the current version
does not make the zone creation and subsequent loading an
atomic operation; an empty zone will be visible and used by
other applications (e.g., the <command>b10-auth</command>
authoritative server) while loading. If this is an issue,
make sure the initial loading of a new zone is done before
starting other BIND 10 applications.
</simpara></note>
</para>
<para>
When re-loading an existing zone, the prior version is completely
removed. While the new version of the zone is being loaded, the old
version remains accessible to queries. After the new version is
completely loaded, the old version is swapped out and replaced
with the new one in a single operation.
with the new one in a single operation. If loading fails for
some reason, the loaded RRs will be effectively deleted, and the
old version will still remain accessible for other applications.
</para>
</refsect1>
......@@ -88,21 +110,82 @@
<title>ARGUMENTS</title>
<variablelist>
<varlistentry>
<term>-c <replaceable class="parameter">datasrc_config</replaceable></term>
<listitem><para>
Specifies configuration of the data source in the JSON
format. The configuration contents depend on the type of
the data source, and that's the same as what would be
specified for the BIND 10 servers (see the data source
configuration section of the BIND 10 guide). For example,
for an SQLite3 data source, it would look like
'{"database_file": "path-to-sqlite3-db-file"}'.
<note>
<simpara>For SQLite3 data source with the default DB file,
this option can be omitted; in other cases including
for any other types of data sources when supported,
this option is currently mandatory in practice.
In a future version it will be possible to retrieve the
configuration from the BIND 10 server configuration (if
it exists).
</simpara></note>
</para></listitem>
</varlistentry>
<varlistentry>
<term>-d <replaceable class="parameter">debug_level</replaceable> </term>
<listitem><para>
Enable dumping debug level logging with the specified
level. By default, only log messages at the severity of
informational or higher levels will be produced.
</para></listitem>
</varlistentry>
<varlistentry>
<term>-i <replaceable class="parameter">report_interval</replaceable></term>
<listitem><para>
Specifies the interval of status update by the number of RRs
loaded in the interval.
The <command>b10-loadzone</command> tool periodically
reports the progress of loading with the total number of
loaded RRs and elapsed time. This option specifies the
interval of the reports. If set to 0, status reports will
be suppressed. The default is 10,000.
</para></listitem>
</varlistentry>
<varlistentry>
<term>-d <replaceable class="parameter">database</replaceable> </term>
<term>-t <replaceable class="parameter">datasrc_type</replaceable></term>
<listitem><para>
Defines the filename for the database.
The default is
<filename>/usr/local/var/bind10-devel/zone.sqlite3</filename>.
<!-- TODO: fix filename -->
Specifies the type of data source to store the zone.
Currently, only the "sqlite3" type is supported (which is
the default of this option), which means the SQLite3 data
source.
</para></listitem>
</varlistentry>
<varlistentry>
<term>-o <replaceable class="parameter">origin</replaceable></term>
<term>-C <replaceable class="parameter">zone_class</replaceable></term>
<listitem><para>
Defines the default origin for the zone file records.
Specifies the RR class of the zone.
Currently, only class IN is supported (which is the default
of this option) due to limitation of the underlying data
source implementation.
</para></listitem>
</varlistentry>
<varlistentry>
<term><replaceable class="parameter">zone name</replaceable></term>
<listitem><para>
The name of the zone to create or update. This must be a valid DNS
domain name.
</para></listitem>
</varlistentry>
<varlistentry>
<term><replaceable class="parameter">zone file</replaceable></term>
<listitem><para>
A path to the master zone file to be loaded.
</para></listitem>
</varlistentry>
......@@ -131,8 +214,31 @@
<refsect1>
<title>AUTHORS</title>
<para>
The <command>b10-loadzone</command> tool was initial written
by Evan Hunt of ISC.
A prior version of the <command>b10-loadzone</command> tool was
written by Evan Hunt of ISC.
The new version that this manual refers to was rewritten from
scratch by the BIND 10 development team around December 2012.
</para>
</refsect1>
<refsect1>
<title>BUGS</title>
<para>
As of the initial implementation, the underlying library that
this tool uses does not fully validate the loaded zone; for
example, loading will succeed even if it doesn't have the SOA or
NS record at its origin name. Such checks will be implemented
in a near future version, but until then, the
<command>b10-loadzone</command> checks for the existence of the
SOA and NS records by itself. However, <command>b10-loadzone</command>
only warns about it, and does not cancel the load itself.
If this warning message is produced, it's the user's
responsibility to fix the errors and reload it. When the
library is updated with the post-load checks, it will be more
sophisticated and such a zone won't be successfully loaded.
</para>
<para>
There are some other issues noted in the DESCRIPTION section.
</para>
</refsect1>
</refentry><!--
......
#!@PYTHON@
# Copyright (C) 2012 Internet Systems Consortium.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import sys
sys.path.append('@@PYTHONPATH@@')
import time
import signal
from optparse import OptionParser
from isc.dns import *
from isc.datasrc import *
import isc.log
from isc.log_messages.loadzone_messages import *
# These are needed for logger settings
import bind10_config
import json
from isc.config import module_spec_from_file
from isc.config.ccsession import path_search
isc.log.init("b10-loadzone")
logger = isc.log.Logger("loadzone")
# The default value for the interval of progress report in terms of the
# number of RRs loaded in that interval. Arbitrary choice, but intended to
# be reasonably small to handle emergency exit.
LOAD_INTERVAL_DEFAULT = 10000
class BadArgument(Exception):
    '''Raised when a command line option or argument is invalid.

    The exception message describes the problem in a user-oriented way.
    '''
class LoadFailure(Exception):
    '''Raised when the zone loading operation itself fails.

    The exception message describes the underlying cause.
    '''
def set_cmd_options(parser):
    '''Helper function to set command-line options.

    Registers all b10-loadzone options on the given
    optparse.OptionParser, in the order they should appear in the help
    output.  The parser is modified in place; nothing is returned.
    '''
    # -c: JSON configuration for the target data source.  When omitted,
    # a default is derived for the sqlite3 type elsewhere
    # (LoadZoneRunner._get_datasrc_config()).
    parser.add_option("-c", "--datasrc-conf", dest="conf", action="store",
                      help="""configuration of datasrc to load the zone in.
                      Example: '{"database_file": "/path/to/dbfile/db.sqlite3"}'""",
                      metavar='CONFIG')
    # -d: debug log level; the default None means "no debug logging".
    parser.add_option("-d", "--debug", dest="debug_level",
                      type='int', action="store", default=None,
                      help="enable debug logs with the specified level [0-99]")
    # -i: number of RRs loaded between progress reports; 0 suppresses
    # the reports (loading is still performed incrementally).
    parser.add_option("-i", "--report-interval", dest="report_interval",
                      type='int', action="store",
                      default=LOAD_INTERVAL_DEFAULT,
                      help="""report logs progress per specified number of RRs
                      (specify 0 to suppress report) [default: %default]""")
    # -t: data source type; only 'sqlite3' is currently usable in practice.
    parser.add_option("-t", "--datasrc-type", dest="datasrc_type",
                      action="store", default='sqlite3',
                      help="""type of data source (e.g., 'sqlite3')\n
                      [default: %default]""")
    # -C: RR class of the zone; validated later (only 'IN' is accepted).
    parser.add_option("-C", "--class", dest="zone_class", action="store",
                      default='IN',
                      help="""RR class of the zone; currently must be 'IN'
                      [default: %default]""")
class LoadZoneRunner:
'''Main logic for the loadzone.
This is implemented as a class mainly for the convenience of tests.
'''
def __init__(self, command_args):
    '''Prepare the runner.

    command_args: the command line arguments (excluding the program
        name); they are kept as-is until _parse_args() processes them.
    '''
    self.__command_args = command_args
    self.__loaded_rrs = 0
    self.__interrupted = False  # flipped to True on receiving a signal

    # System-wide log configuration.  We need to configure logging this
    # way so that the logging policy applies to underlying libraries, too.
    logging_spec = isc.config.module_spec_from_file(
        path_search('logging.spec', bind10_config.PLUGIN_PATHS))
    self.__log_spec = json.dumps(logging_spec.get_full_spec())
    # "severity" and "debuglevel" are the tunable parameters; they are
    # filled in by _config_log().
    root_logger_conf = {"name": "*",
                        "output_options": [{"output": "stderr",
                                            "destination": "console"}]}
    self.__log_conf_base = {"loggers": [root_logger_conf]}

    # These are essentially private, and defined as "protected" for the
    # convenience of tests inspecting them.
    self._zone_class = None
    self._zone_name = None
    self._zone_file = None
    self._datasrc_config = None
    self._datasrc_type = None
    self._log_severity = 'INFO'
    self._log_debuglevel = 0
    self._report_interval = LOAD_INTERVAL_DEFAULT

    self._config_log()
def _config_log(self):
    '''Apply the current logging policy.

    Copies the tunable severity/debuglevel attributes into the base
    logger configuration and pushes it to the logging system.

    This is essentially private, but defined as "protected" for tests.
    '''
    root_conf = self.__log_conf_base['loggers'][0]
    root_conf['severity'] = self._log_severity
    root_conf['debuglevel'] = self._log_debuglevel
    isc.log.log_config_update(json.dumps(self.__log_conf_base),
                              self.__log_spec)
def _parse_args(self):
    '''Parse command line options and other arguments.

    Fills in the _log_*, _datasrc_*, _report_interval, _zone_name and
    _zone_file attributes, raising BadArgument on any invalid input.

    This is essentially private, but defined as "protected" for tests.
    '''
    usage_txt = \
        'usage: %prog [options] -c datasrc_config zonename zonefile'
    parser = OptionParser(usage=usage_txt)
    set_cmd_options(parser)
    options, args = parser.parse_args(args=self.__command_args)

    # Configure logging policy as early as possible so subsequent
    # steps log at the requested level.
    if options.debug_level is not None:
        self._log_severity = 'DEBUG'
        # optparse performs type check
        self._log_debuglevel = int(options.debug_level)
        if self._log_debuglevel < 0:
            raise BadArgument(
                'Invalid debug level (must be non negative): %d' %
                self._log_debuglevel)
        self._config_log()

    self._datasrc_type = options.datasrc_type
    if options.conf is None:
        # No explicit config: fall back to the per-type default.
        self._datasrc_config = self._get_datasrc_config(self._datasrc_type)
    else:
        self._datasrc_config = options.conf

    try:
        self._zone_class = RRClass(options.zone_class)
    except isc.dns.InvalidRRClass as ex:
        raise BadArgument('Invalid zone class: ' + str(ex))
    if self._zone_class != RRClass.IN():
        raise BadArgument("RR class is not supported: " +
                          str(self._zone_class))

    self._report_interval = int(options.report_interval)
    if self._report_interval < 0:
        raise BadArgument(
            'Invalid report interval (must be non negative): %d' %
            self._report_interval)

    if len(args) != 2:
        raise BadArgument('Unexpected number of arguments: %d (must be 2)'
                          % (len(args)))
    try:
        self._zone_name = Name(args[0])
    except Exception as ex:  # too broad, but there's no better granularity
        raise BadArgument("Invalid zone name '" + args[0] + "': " +
                          str(ex))
    self._zone_file = args[1]
def _get_datasrc_config(self, datasrc_type):
''''Return the default data source configuration of given type.
Right now, it only supports SQLite3, and hardcodes the syntax
of the default configuration. It's a kind of workaround to balance
convenience of users and minimizing hardcoding of data source
specific logic in the entire tool. In future this should be
more sophisticated.
This is essentially a private helper method for _parse_arg(),
but defined as "protected" so tests can use it directly.
'''
if datasrc_type != 'sqlite3':
raise BadArgument('default config is not available for ' +
datasrc_type)
default_db_file = bind10_config.DATA_PATH + '/zone.sqlite3'
logger.info(LOADZONE_SQLITE3_USING_DEFAULT_CONFIG, default_db_file)
return '{"database_file": "' + default_db_file + '"}'
def __cancel_create(self):
'''sqlite3-only hack: delete the zone just created on load failure.
This should eventually be done via generic datasrc API, but right now
we don't have that interface. Leaving the zone in this situation
is too bad, so we handle it with a workaround.
'''
if self._datasrc_type is not 'sqlite3':
return
import sqlite3 # we need the module only here
import json
# If we are here, the following should basically succeed; since
# this is considered a temporary workaround we don't bother to catch
# and recover rare failure cases.
dbfile = json.loads(self._datasrc_config)['database_file']
with sqlite3.connect(dbfile) as conn:
cur = conn.cursor()
cur.execute("DELETE FROM zones WHERE name = ?",
[self._zone_name.to_text()])
def _report_progress(self, loaded_rrs):
'''Dump the current progress report to stdout.
This is essentially private, but defined as "protected" for tests.
'''
elapsed = time.time() - self.__start_time
sys.stdout.write("\r" + (80 * " "))
sys.stdout.write("\r%d RRs loaded in %.2f seconds" %
(loaded_rrs, elapsed))
def _do_load(self):
    '''Main part of the load logic.

    Creates the zone in the data source if it doesn't exist yet, then
    incrementally loads the RRs from the master file, reporting progress
    periodically.  On any failure the load is aborted; if the zone was
    newly created here, the creation is rolled back, too.

    Raises LoadFailure on any error, including interruption by signal.

    This is essentially private, but defined as "protected" for tests.
    '''
    created = False
    try:
        datasrc_client = DataSourceClient(self._datasrc_type,
                                          self._datasrc_config)
        created = datasrc_client.create_zone(self._zone_name)
        if created:
            logger.info(LOADZONE_ZONE_CREATED, self._zone_name,
                        self._zone_class)
        loader = ZoneLoader(datasrc_client, self._zone_name,
                            self._zone_file)
        self.__start_time = time.time()
        if self._report_interval > 0:
            limit = self._report_interval
        else:
            # Even if progress report is suppressed, we still load
            # incrementally so we won't delay catching signals too long.
            limit = LOAD_INTERVAL_DEFAULT
        while (not self.__interrupted and
               not loader.load_incremental(limit)):
            # Each completed iteration loaded exactly 'limit' RRs.
            # BUG FIX: the original added self._report_interval here,
            # which is 0 when reporting is suppressed (-i 0), so the
            # final count always reported 0 loaded RRs in that mode.
            self.__loaded_rrs += limit
            if self._report_interval > 0:
                self._report_progress(self.__loaded_rrs)
        if self.__interrupted:
            raise LoadFailure('loading interrupted by signal')
        # On successful completion, add final '\n' to the progress
        # report output (on failure don't bother to make it prettier).
        if (self._report_interval > 0 and
            self.__loaded_rrs >= self._report_interval):
            sys.stdout.write('\n')
    except Exception as ex:
        # release any remaining lock held in the client/loader
        loader, datasrc_client = None, None
        if created:
            # Roll back the zone we created above so a failed load
            # doesn't leave an empty zone behind.
            self.__cancel_create()
            logger.error(LOADZONE_CANCEL_CREATE_ZONE, self._zone_name,
                         self._zone_class)
        raise LoadFailure(str(ex))
def _post_load_checks(self):
    '''Perform minimal validity checks on the loaded zone.

    We do this ourselves because the underlying library currently
    doesn't do any checks.  Once the library supports post-load
    validation this check should be removed.

    Warns via _post_load_warning() when the zone apex lacks an SOA or
    NS record; the load itself is never canceled here.
    '''
    datasrc_client = DataSourceClient(self._datasrc_type,
                                      self._datasrc_config)
    _, finder = datasrc_client.find_zone(self._zone_name)  # should succeed
    # FIX: compare result codes with '!=' rather than 'is not'; identity
    # comparison against such constants is fragile and could silently
    # miss an equal-but-distinct result object.
    result = finder.find(self._zone_name, RRType.SOA())[0]
    if result != finder.SUCCESS:
        self._post_load_warning('zone has no SOA')
    result = finder.find(self._zone_name, RRType.NS())[0]
    if result != finder.SUCCESS:
        self._post_load_warning('zone has no NS')
def _post_load_warning(self, msg):
    # Log a warning about an issue found in the freshly loaded zone
    # (see _post_load_checks()); msg is a short human-readable summary.
    logger.warn(LOADZONE_POSTLOAD_ISSUE, self._zone_name,
                self._zone_class, msg)
def _set_signal_handlers(self):
signal.signal(signal.SIGINT, self._interrupt_handler)
signal.signal(signal.SIGTERM, self._interrupt_handler)
def _interrupt_handler(self, signal, frame):
self.__interrupted = True
def run(self):
'''Top-level method, simply calling other helpers'''
try:
self._set_signal_handlers()
self._parse_args()
self._do_load()
total_elapsed_txt = "%.2f" % (time.time() - self.__start_time)
logger.info(LOADZONE_DONE, self.__loaded_rrs, self._zone_name,