Commit afddaf4c authored by Yoshitaka Aharen

Merge branch 'master' into trac510

Conflicts:
	src/lib/statistics/counter_dict.cc
	src/lib/statistics/counter_dict.h
	src/lib/statistics/tests/counter_dict_unittest.cc
parents b1486679 2f4433fb
344. [func] y-aharen
src/lib/statistics: Added a statistics counter library for entire-server
items and per-zone items. Also modified b10-auth to use it. It is
intended to be used by other modules such as b10-resolver as well.
(Trac #510, git TBD)
343. [func] jelte
Added IXFR-out system tests, based on the first two test sets of
http://bind10.isc.org/wiki/IxfrSystemTests.
(Trac #1314, git 1655bed624866a766311a01214597db01b4c7cec)
342. [bug] stephen
In the resolver, a FORMERR received from an upstream nameserver
now results in a SERVFAIL being returned as a response to the original
query. Additional debug messages added to distinguish between
different errors in packets received from upstream nameservers.
(Trac #1383, git 9b2b249d23576c999a65d8c338e008cabe45f0c9)
341. [func] tomek
libdhcp++: Support for handling both IPv4 and IPv6 added.
Also added support for binding IPv4 sockets.
(Trac #1238, git 86a4ce45115dab4d3978c36dd2dbe07edcac02ac)
340. [build] jelte
Fixed several linker issues related to recent gcc versions, botan
and gtest.
(Trac #1442, git 91fb141bfb3aadfdf96f13e157a26636f6e9f9e3)
339. [bug] jinmei
libxfr, used by b10-auth to share TCP sockets with b10-xfrout,
incorrectly propagated ASIO-specific exceptions to the application
if the given file name was too long. This could lead to an
unexpected shutdown of b10-auth.
(Trac #1387, git a5e9d9176e9c60ef20c0f5ef59eeb6838ed47ab2)
338. [bug] jinmei
b10-xfrin didn't check SOA serials of SOA and IXFR responses,
which resulted in unnecessary transfer or unexpected IXFR
timeouts (these issues were not overlooked but deferred to be
fixed until #1278 was completed). Validation of responses to SOA
queries was tightened, too.
(Trac #1299, git 6ff03bb9d631023175df99248e8cc0cda586c30a)
337. [func] tomek
@@ -51,12 +80,12 @@
potential problems and were fixed.
(Trac #1389, git 3fdce88046bdad392bd89ea656ec4ac3c858ca2f)
333. [bug] dvv
Solaris needs "-z now" to force non-lazy binding and prevent
g++ static initialization code from deadlocking.
(Trac #1439, git c789138250b33b6b08262425a08a2a0469d90433)
332. [bug] vorner
C++ exceptions in the isc.dns.Rdata wrapper are now converted
to python ones instead of just aborting the interpreter.
(Trac #1407, git 5b64e839be2906b8950f5b1e42a3fadd72fca033)
@@ -480,23 +480,33 @@ else
fi
fi
BOTAN_LIBS=`${BOTAN_CONFIG} --libs`
BOTAN_INCLUDES=`${BOTAN_CONFIG} --cflags`
# We expect botan-config --libs to contain -L<path_to_libbotan>, but
# this is not always the case. As a heuristic workaround we add
# -L`botan-config --prefix`/lib in this case (if not present already).
# Same for BOTAN_INCLUDES (but using include instead of lib) below.
if [ $BOTAN_CONFIG --prefix >/dev/null 2>&1 ] ; then
echo ${BOTAN_LIBS} | grep -- -L > /dev/null || \
BOTAN_LIBS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LIBS}"
echo ${BOTAN_INCLUDES} | grep -- -I > /dev/null || \
BOTAN_INCLUDES="-I`${BOTAN_CONFIG} --prefix`/include ${BOTAN_INCLUDES}"
fi
# botan-config script (and the way we call pkg-config) returns -L and -l
# as one string, but we need them in separate values
BOTAN_LDFLAGS=
BOTAN_NEWLIBS=
for flag in ${BOTAN_LIBS}; do
BOTAN_LDFLAGS="${BOTAN_LDFLAGS} `echo $flag | sed -ne '/^\(\-L\)/p'`"
BOTAN_LIBS="${BOTAN_LIBS} `echo $flag | sed -ne '/^\(\-l\)/p'`"
done
# See python_rpath for some info on why we do this
if test $rpath_available = yes; then
BOTAN_RPATH=
for flag in ${BOTAN_LIBS}; do
BOTAN_RPATH="${BOTAN_RPATH} `echo $flag | sed -ne 's/^\(\-L\)/-R/p'`"
done
AC_SUBST(BOTAN_RPATH)
@@ -512,13 +522,13 @@ AC_SUBST(BOTAN_RPATH)
fi
AC_SUBST(BOTAN_LDFLAGS)
AC_SUBST(BOTAN_LIBS)
AC_SUBST(BOTAN_INCLUDES)
CPPFLAGS_SAVED=$CPPFLAGS
CPPFLAGS="$BOTAN_INCLUDES $CPPFLAGS"
LDFLAGS_SAVED="$LDFLAGS"
LDFLAGS="$BOTAN_LDFLAGS $LDFLAGS"
LIBS_SAVED="$LIBS"
LIBS="$LIBS $BOTAN_LIBS"
AC_CHECK_HEADERS([botan/botan.h],,AC_MSG_ERROR([Missing required header files.]))
AC_LINK_IFELSE(
[AC_LANG_PROGRAM([#include <botan/botan.h>
@@ -533,7 +543,7 @@ AC_LINK_IFELSE(
AC_MSG_ERROR([Needs Botan library 1.8 or higher])]
)
CPPFLAGS=$CPPFLAGS_SAVED
LDFLAGS=$LDFLAGS_SAVED
LIBS=$LIBS_SAVED
# Check for log4cplus
log4cplus_path="yes"
@@ -545,7 +555,7 @@ if test "${log4cplus_path}" = "no" ; then
AC_MSG_ERROR([Need log4cplus])
elif test "${log4cplus_path}" != "yes" ; then
LOG4CPLUS_INCLUDES="-I${log4cplus_path}/include"
LOG4CPLUS_LDFLAGS="-L${log4cplus_path}/lib"
LOG4CPLUS_LIBS="-L${log4cplus_path}/lib"
else
# If not specified, try some common paths.
log4cplusdirs="/usr/local /usr/pkg /opt /opt/local"
@@ -553,21 +563,21 @@ else
do
if test -f $d/include/log4cplus/logger.h; then
LOG4CPLUS_INCLUDES="-I$d/include"
LOG4CPLUS_LDFLAGS="-L$d/lib"
LOG4CPLUS_LIBS="-L$d/lib"
break
fi
done
fi
LOG4CPLUS_LDFLAGS="$LOG4CPLUS_LDFLAGS -llog4cplus $MULTITHREADING_FLAG"
LOG4CPLUS_LIBS="$LOG4CPLUS_LIBS -llog4cplus $MULTITHREADING_FLAG"
AC_SUBST(LOG4CPLUS_LDFLAGS)
AC_SUBST(LOG4CPLUS_LIBS)
AC_SUBST(LOG4CPLUS_INCLUDES)
CPPFLAGS_SAVED=$CPPFLAGS
CPPFLAGS="$LOG4CPLUS_INCLUDES $CPPFLAGS"
LDFLAGS_SAVED="$LDFLAGS"
LDFLAGS="$LOG4CPLUS_LDFLAGS $LDFLAGS"
LIBS_SAVED="$LIBS"
LIBS="$LOG4CPLUS_LIBS $LIBS"
AC_CHECK_HEADERS([log4cplus/logger.h],,AC_MSG_ERROR([Missing required header files.]))
AC_LINK_IFELSE(
@@ -582,7 +592,7 @@ AC_LINK_IFELSE(
)
CPPFLAGS=$CPPFLAGS_SAVED
LIBS=$LIBS_SAVED
#
# Configure Boost header path
@@ -675,6 +685,13 @@ else
AM_CONDITIONAL(NEED_LIBBOOST_THREAD, test "${use_boost_threads}" = "yes")
fi
# I can't get some of the #include <asio.hpp> right without this
# TODO: find the real cause of asio/boost wanting pthreads
# (this currently only occurs for src/lib/cc/session_unittests)
PTHREAD_LDFLAGS=
AC_CHECK_LIB(pthread, pthread_create,[ PTHREAD_LDFLAGS=-lpthread ], [])
AC_SUBST(PTHREAD_LDFLAGS)
AC_SUBST(MULTITHREADING_FLAG)
#
# Check availability of gtest, which will be used for unit tests.
@@ -711,6 +728,48 @@ then
GTEST_LDFLAGS="-L$dir/lib"
GTEST_LDADD="-lgtest"
GTEST_FOUND="true"
# There is no gtest-config script on this
# system, which is supposed to inform us
# whether we need pthreads as well (a
# gtest compile-time option). So we still
# need to test that manually.
CPPFLAGS_SAVED="$CPPFLAGS"
CPPFLAGS="$CPPFLAGS $GTEST_INCLUDES"
LDFLAGS_SAVED="$LDFLAGS"
LDFLAGS="$LDFLAGS $GTEST_LDFLAGS"
LIBS_SAVED=$LIBS
LIBS="$LIBS $GTEST_LDADD"
AC_MSG_CHECKING([whether gtest tests need pthreads])
# First try to compile without pthreads
AC_TRY_LINK([
#include <gtest/gtest.h>
],[
int i = 0;
char* c = NULL;
::testing::InitGoogleTest(&i, &c);
return (0);
],
[ AC_MSG_RESULT(no) ],
[
LIBS="$SAVED_LIBS $GTEST_LDADD $PTHREAD_LDFLAGS"
# Now try to compile with pthreads
AC_TRY_LINK([
#include <gtest/gtest.h>
],[
int i = 0;
char* c = NULL;
::testing::InitGoogleTest(&i, &c);
return (0);
],
[ AC_MSG_RESULT(yes)
GTEST_LDADD="$GTEST_LDADD $PTHREAD_LDFLAGS"
],
# Apparently we can't compile it at all
[ AC_MSG_ERROR(unable to compile with gtest) ])
])
CPPFLAGS=$CPPFLAGS_SAVED
LDFLAGS=$LDFLAGS_SAVED
LIBS=$LIBS_SAVED
break
fi
done
@@ -737,15 +796,6 @@ if test "x$HAVE_PKG_CONFIG" = "xno" ; then
fi
PKG_CHECK_MODULES(SQLITE, sqlite3 >= 3.3.9, enable_features="$enable_features SQLite3")
# I can't get some of the #include <asio.hpp> right without this
# TODO: find the real cause of asio/boost wanting pthreads
# (this currently only occurs for src/lib/cc/session_unittests)
PTHREAD_LDFLAGS=
AC_CHECK_LIB(pthread, pthread_create,[ PTHREAD_LDFLAGS=-lpthread ], [])
AC_SUBST(PTHREAD_LDFLAGS)
AC_SUBST(MULTITHREADING_FLAG)
#
# ASIO: we extensively use it as the C++ event management module.
#
@@ -912,6 +962,7 @@ AC_CONFIG_FILES([Makefile
src/lib/datasrc/tests/Makefile
src/lib/datasrc/tests/testdata/Makefile
src/lib/xfr/Makefile
src/lib/xfr/tests/Makefile
src/lib/log/Makefile
src/lib/log/compiler/Makefile
src/lib/log/tests/Makefile
@@ -933,8 +984,6 @@ AC_CONFIG_FILES([Makefile
src/lib/util/tests/Makefile
src/lib/acl/Makefile
src/lib/acl/tests/Makefile
src/lib/statistics/Makefile
src/lib/statistics/tests/Makefile
tests/Makefile
tests/system/Makefile
tests/tools/Makefile
@@ -1089,8 +1138,9 @@ dnl includes too
Boost: ${BOOST_INCLUDES}
Botan: ${BOTAN_INCLUDES}
${BOTAN_LDFLAGS}
${BOTAN_LIBS}
Log4cplus: ${LOG4CPLUS_INCLUDES}
${LOG4CPLUS_LIBS}
SQLite: $SQLITE_CFLAGS
$SQLITE_LIBS
@@ -32,9 +32,9 @@ query_bench_LDADD += $(top_builddir)/src/lib/cc/libcc.la
query_bench_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
query_bench_LDADD += $(top_builddir)/src/lib/log/liblog.la
query_bench_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
query_bench_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
query_bench_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
query_bench_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
query_bench_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
query_bench_LDADD += $(top_builddir)/src/lib/statistics/libstatistics.la
query_bench_LDADD += $(SQLITE_LIBS)
@@ -30,10 +30,10 @@ using namespace isc::auth;
using namespace isc::statistics;
// TODO: We need a namespace ("auth_server"?) to hold
// AuthSrv and AuthCounters.
// TODO: Make use of wrappers like isc::dns::Opcode
// for counter item type.
class AuthCountersImpl : boost::noncopyable {
public:
@@ -64,8 +64,8 @@ run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
run_unittests_LDADD += $(top_builddir)/src/lib/statistics/libstatistics.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/statistics/libstatistics.la
endif
noinst_PROGRAMS = $(TESTS)
@@ -99,6 +99,12 @@ The boss module is sending a kill signal to process with the given name,
as part of the process of killing all started processes during a failed
startup, as described for BIND10_KILLING_ALL_PROCESSES
% BIND10_LOST_SOCKET_CONSUMER consumer %1 of sockets disconnected, considering all its sockets closed
A connection from one of the applications which requested a socket was
closed. This means the application has terminated, so all the sockets it was
using are now closed and the bind10 process can release them as well, unless the
same sockets are used by yet another application.
% BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start
There already appears to be a message bus daemon running. Either an
old process was not shut down correctly, and needs to be killed, or
@@ -110,6 +116,11 @@ While listening on the message bus channel for messages, it suddenly
disappeared. The msgq daemon may have died. This might lead to an
inconsistent state of the system, and BIND 10 will now shut down.
% BIND10_NO_SOCKET couldn't send a socket for token %1 because of error: %2
An error occurred when the bind10 process was asked to send a socket file
descriptor. The error is mentioned; the most common reason is that the request
is invalid and may not have come from a bind10 process at all.
% BIND10_PROCESS_ENDED process %2 of %1 ended with status %3
This indicates that a previously started process has terminated. The process id
and the component owning the process are indicated, as well as the exit code.
@@ -72,6 +72,9 @@ import isc.log
from isc.log_messages.bind10_messages import *
import isc.bind10.component
import isc.bind10.special_component
import isc.bind10.socket_cache
import libutil_io_python
import tempfile
isc.log.init("b10-boss")
logger = isc.log.Logger("boss")
@@ -81,6 +84,10 @@ logger = isc.log.Logger("boss")
DBG_PROCESS = logger.DBGLVL_TRACE_BASIC
DBG_COMMANDS = logger.DBGLVL_TRACE_DETAIL
# Messages sent over the unix domain socket to indicate if it is followed by a real socket
CREATOR_SOCKET_OK = "1\n"
CREATOR_SOCKET_UNAVAILABLE = "0\n"
# Assign this process some longer name
isc.util.process.rename(sys.argv[0])
@@ -241,6 +248,12 @@ class BoB:
# If -v was set, enable full debug logging.
if self.verbose:
logger.set_severity("DEBUG", 99)
# This is set in init_socket_srv
self._socket_path = None
self._socket_cache = None
self._tmpdir = None
self._srv_socket = None
self._unix_sockets = {}
def __propagate_component_config(self, config):
comps = dict(config)
@@ -315,6 +328,18 @@
elif command == "show_processes":
answer = isc.config.ccsession. \
create_answer(0, self.get_processes())
elif command == "get_socket":
answer = self._get_socket(args)
elif command == "drop_socket":
if "token" not in args:
answer = isc.config.ccsession. \
create_answer(1, "Missing token parameter")
else:
try:
self._socket_cache.drop_socket(args["token"])
answer = isc.config.ccsession.create_answer(0)
except Exception as e:
answer = isc.config.ccsession.create_answer(1, str(e))
else:
answer = isc.config.ccsession.create_answer(1,
"Unknown command")
@@ -769,6 +794,209 @@ class BoB:
return next_restart_time
def _get_socket(self, args):
"""
Implementation of the get_socket CC command. It asks the cache
to provide the token and sends the information back.
"""
try:
try:
addr = isc.net.parse.addr_parse(args['address'])
port = isc.net.parse.port_parse(args['port'])
protocol = args['protocol']
if protocol not in ['UDP', 'TCP']:
raise ValueError("Protocol must be either UDP or TCP")
share_mode = args['share_mode']
if share_mode not in ['ANY', 'SAMEAPP', 'NO']:
raise ValueError("Share mode must be one of ANY, SAMEAPP" +
" or NO")
share_name = args['share_name']
except KeyError as ke:
return \
isc.config.ccsession.create_answer(1,
"Missing parameter " +
str(ke))
# FIXME: This call contains blocking IPC. It is expected to be
# short, but if it turns out to be problem, we'll need to do
# something about it.
token = self._socket_cache.get_token(protocol, addr, port,
share_mode, share_name)
return isc.config.ccsession.create_answer(0, {
'token': token,
'path': self._socket_path
})
except Exception as e:
return isc.config.ccsession.create_answer(1, str(e))
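# Illustrative sketch only, not part of this commit: a module would send the
# "get_socket" CC command with arguments shaped like the hypothetical dict
# below; on success the answer wraps a token plus the path of the unix domain
# socket where the real descriptor can be picked up, and "drop_socket" with
# that token releases it again.
#
#   args = {'address': '::1', 'port': 5300, 'protocol': 'UDP',
#           'share_mode': 'ANY', 'share_name': 'b10-auth'}
#   answer = boss._get_socket(args)
#   # success -> create_answer(0, {'token': ..., 'path': ...})
#   # failure -> create_answer(1, <error message>)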
def socket_request_handler(self, token, unix_socket):
"""
This function handles a token that comes over a unix domain socket.
The function looks into the _socket_cache and sends the socket
identified by the token back over the unix_socket.
"""
try:
fd = self._socket_cache.get_socket(token, unix_socket.fileno())
# FIXME: These two calls are blocking in their nature. An OS-level
# buffer is likely to be large enough to hold all these data, but
# if it wasn't and the remote application got stuck, we would have
# a problem. If there appear such problems, we should do something
# about it.
unix_socket.sendall(CREATOR_SOCKET_OK)
libutil_io_python.send_fd(unix_socket.fileno(), fd)
except Exception as e:
logger.info(BIND10_NO_SOCKET, token, e)
unix_socket.sendall(CREATOR_SOCKET_UNAVAILABLE)
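# Illustrative sketch only, not part of this commit, of the consumer side of
# this exchange; it assumes libutil_io_python offers a recv_fd counterpart to
# send_fd. The application connects to the path returned by get_socket, writes
# the token terminated by "\n", reads the status byte defined by
# CREATOR_SOCKET_OK / CREATOR_SOCKET_UNAVAILABLE and, on success, receives the
# real file descriptor:
#
#   sock = socket.socket(socket.AF_UNIX)
#   sock.connect(path_from_get_socket)
#   sock.sendall(token.encode() + b"\n")
#   if sock.recv(1) == b"1":
#       fd = libutil_io_python.recv_fd(sock.fileno())
#   else:
#       pass  # the boss could not provide the requested socket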
def socket_consumer_dead(self, unix_socket):
"""
This function handles when a unix_socket closes. This means all
sockets sent to it are to be considered closed. This function signals
so to the _socket_cache.
"""
logger.info(BIND10_LOST_SOCKET_CONSUMER, unix_socket.fileno())
try:
self._socket_cache.drop_application(unix_socket.fileno())
except ValueError:
# This means the application holds no sockets. It's harmless, as it
# can happen in real life - for example, it requests a socket, but
# get_socket doesn't find it, so the application dies. It should be
# rare, though.
pass
def set_creator(self, creator):
"""
Registers a socket creator into the boss. The socket creator is not
used directly, but through a cache. The cache is created in this
method.
If called more than once, it raises a ValueError.
"""
if self._socket_cache is not None:
raise ValueError("A creator was inserted previously")
self._socket_cache = isc.bind10.socket_cache.Cache(creator)
def init_socket_srv(self):
"""
Creates and listens on a unix-domain socket to be able to send out
the sockets.
This method should be called after switching user, or the switched
applications won't be able to access the socket.
"""
self._srv_socket = socket.socket(socket.AF_UNIX)
# We create a temporary directory somewhere safe and unique, to avoid
# the need to find the place ourselves or bother users. Also, this
# secures the socket on some platforms, as it creates a private
# directory.
self._tmpdir = tempfile.mkdtemp()
# Get the name
self._socket_path = os.path.join(self._tmpdir, "sockcreator")
# And bind the socket to the name
self._srv_socket.bind(self._socket_path)
self._srv_socket.listen(5)
def remove_socket_srv(self):
"""
Closes and removes the listening socket and the directory where it
lives, as we created both.
It does nothing if the _srv_socket is not set (e.g. it was not yet
initialized).
"""
if self._srv_socket is not None:
self._srv_socket.close()
os.remove(self._socket_path)
os.rmdir(self._tmpdir)
def _srv_accept(self):
"""
Accept a connection from the unix domain socket server and add it to the
others we care about.
"""
(sock, _) = self._srv_socket.accept()
self._unix_sockets[sock.fileno()] = (sock, b'')
def _socket_data(self, socket_fileno):
"""
This is called when a socket identified by the socket_fileno needs
attention. We try to read data from there. If it is closed, we remove
it.
"""
(sock, previous) = self._unix_sockets[socket_fileno]
while True:
try:
data = sock.recv(1, socket.MSG_DONTWAIT)
except socket.error as se:
# These two might be different on some systems
if se.errno == errno.EAGAIN or se.errno == errno.EWOULDBLOCK:
# No more data now. Oh, well, just store what we have.
self._unix_sockets[socket_fileno] = (sock, previous)
return
else:
data = b'' # Pretend it got closed
if len(data) == 0: # The socket got to its end
del self._unix_sockets[socket_fileno]
self.socket_consumer_dead(sock)
sock.close()
return
else:
if data == b"\n":
# Handle this token and clear it
self.socket_request_handler(previous, sock)
previous = b''
else:
previous += data
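# Illustrative note, not part of this commit: if a token arrives in pieces,
# say b'tok', b'en1' and finally b'\n', the loop above accumulates b'token1'
# in 'previous' and only then hands it to socket_request_handler; bytes read
# after the newline start the next token.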
def run(self, wakeup_fd):
"""
The main loop, waiting for sockets, commands and dead processes.
Runs as long as the runnable is true.
The wakeup_fd descriptor is the read end of the pipe where the SIGCHLD
signal handler writes.
"""
ccs_fd = self.ccs.get_socket().fileno()
while self.runnable:
# clean up any processes that exited
self.reap_children()
next_restart = self.restart_processes()
if next_restart is None:
wait_time = None
else:
wait_time = max(next_restart - time.time(), 0)
# select() can raise EINTR when a signal arrives,
# even if they are resumable, so we have to catch
# the exception
try:
(rlist, wlist, xlist) = \
select.select([wakeup_fd, ccs_fd,
self._srv_socket.fileno()] +
list(self._unix_sockets.keys()), [], [],
wait_time)
except select.error as err:
if err.args[0] == errno.EINTR:
(rlist, wlist, xlist) = ([], [], [])
else:
logger.fatal(BIND10_SELECT_ERROR, err)
break
for fd in rlist + xlist:
if fd == ccs_fd:
try:
self.ccs.check_command()
except isc.cc.session.ProtocolError:
logger.fatal(BIND10_MSGQ_DISAPPEARED)
self.runnable = False
break
elif fd == wakeup_fd:
os.read(wakeup_fd, 32)
elif fd == self._srv_socket.fileno():
self._srv_accept()
elif fd in self._unix_sockets:
self._socket_data(fd)
# global variables, needed for signal handlers
options = None
boss_of_bind = None
@@ -931,60 +1159,32 @@ def main():
# Block SIGPIPE, as we don't want it to end this process
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
# Go bob!
boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
options.config_file, options.nocache, options.verbose,
setuid, username, options.cmdctl_port,
options.wait_time)
startup_result = boss_of_bind.startup()
if startup_result:
logger.fatal(BIND10_STARTUP_ERROR, startup_result)
sys.exit(1)
logger.info(BIND10_STARTUP_COMPLETE)
dump_pid(options.pid_file)
# In our main loop, we check for dead processes or messages
# on the c-channel.
wakeup_fd = wakeup_pipe[0]
ccs_fd = boss_of_bind.ccs.get_socket().fileno()
while boss_of_bind.runnable:
# clean up any processes that exited
boss_of_bind.reap_children()
next_restart = boss_of_bind.restart_processes()
if next_restart is None:
wait_time = None
else:
wait_time = max(next_restart - time.time(), 0)
# select() can raise EINTR when a signal arrives,
# even if they are resumable, so we have to catch
# the exception
try:
(rlist, wlist, xlist) = select.select([wakeup_fd, ccs_fd], [], [],
wait_time)
except select.error as err:
if err.args[0] == errno.EINTR:
(rlist, wlist, xlist) = ([], [], [])
else:
logger.fatal(BIND10_SELECT_ERROR, err)
break
for fd in rlist + xlist:
if fd == ccs_fd:
try:
boss_of_bind.ccs.check_command()
except isc.cc.session.ProtocolError:
logger.fatal(BIND10_MSGQ_DISAPPEARED)
self.runnable = False
break
elif fd == wakeup_fd:
os.read(wakeup_fd, 32)