diff --git a/.gitignore b/.gitignore index da9d3b4bdb..3480cb68f5 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +*.gcda +*.gcno +*.gcov *.la *.lo *.o @@ -28,4 +31,7 @@ TAGS /py-compile /stamp-h1 +/all.info +/coverage-cpp-html /dns++.pc +/report.info diff --git a/AUTHORS b/AUTHORS index e69de29bb2..67cb090193 100644 --- a/AUTHORS +++ b/AUTHORS @@ -0,0 +1,21 @@ +Chen Zhengzhang +Dmitriy Volodin +Evan Hunt +Haidong Wang +Haikuo Zhang +Han Feng +Jelte Jansen +Jeremy C. Reed +Xie Jiagui +Jin Jian +JINMEI Tatuya +Kazunori Fujiwara +Michael Graff +Michal Vaner +Mukund Sivaraman +Naoki Kambe +Shane Kerr +Shen Tingting +Stephen Morris +Yoshitaka Aharen +Zhang Likun diff --git a/ChangeLog b/ChangeLog index 7f76c0c487..38e1eb5940 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,9 +1,108 @@ -4XX. [func]* tomek +450. [func]* tomek b10-dhcp4: DHCPv4 server component is now integrated into BIND10 framework. It can be started from BIND10 (using bindctl) and can receive commands. The only supported command for now is 'Dhcp4 shutdown'. +bind10-devel-20120621 released on June 21. 2012 + +449. [bug] muks + b10-xfin: fixed a bug where xfrin sent the wrong notification + message to zonemgr on successful zone transfer. This also + solves other reported problems such as too frequent attempts + of zone refreshing (see Trac #1786 and #1834). + (Trac #2023, git b5fbf8a408a047a2552e89ef435a609f5df58d8c) + +448. [func] team + b10-ddns is now functional and handles dynamic update requests + per RFC 2136. See BIND 10 guide for configuration and operation + details. + (Multiple Trac tickets) + +447. [bug] jinmei + Fixed a bug in b10-xfrout where a helper thread could fall into + an infinite loop if b10-auth stops while the thread is waiting for + forwarded requests from b10-auth. + (Trac #988 and #1833, git 95a03bbefb559615f3f6e529d408b749964d390a) + +446. [bug] muks + A number of warnings reported by Python about unclosed file and + socket objects were fixed. 
Some related code was also made safer. + (Trac #1828, git 464682a2180c672f1ed12d8a56fd0a5ab3eb96ed) + +445. [bug]* jinmei + The pre-install check for older SQLite3 DB now refers to the DB + file with the prefix of DESTDIR. This ensures that 'make install' + with specific DESTDIR works regardless of the version of the DB + file installed in the default path. + (Trac #1982, git 380b3e8ec02ef45555c0113ee19329fe80539f71) + +444. [bug] jinmei + libdatasrc: fixed ZoneFinder for database-based data sources so + that it handles type DS query correctly, i.e., treating it as + authoritative data even on a delegation point. + (Trac #1912, git 7130da883f823ce837c10cbf6e216a15e1996e5d) + +443. [func]* muks + The logger now uses a lockfile named `logger_lockfile' that is + created in the local state directory to mutually separate + individual logging operations from various processes. This is + done so that log messages from different processes don't mix + together in the middle of lines. The `logger_lockfile` is created + with file permission mode 0660. BIND 10's local state directory + should be writable and perhaps have g+s mode bit so that the + `logger_lockfile` can be opened by a group of processes. + (Trac #1704, git ad8d445dd0ba208107eb239405166c5c2070bd8b) + +442. [func] tomek + b10-dhcp4, b10-dhcp6: Both DHCP servers now accept -p parameter + that can be used to specify listening port number. This capability + is useful only for testing purposes. + (Trac #1503, git e60af9fa16a6094d2204f27c40a648fae313bdae) + +441. [func] tomek + libdhcp++: Stub interface detection (support for interfaces.txt + file) was removed. + (Trac #1281, git 900fc8b420789a8c636bcf20fdaffc60bc1041e0) + +bind10-devel-20120517 released on May 17. 2012 + +440. [func] muks + bindctl: improved some error messages so they will be more + helpful. Those include the one when the zone name is unspecified + or the name is invalid in the b10-auth configuration. 
+ (Trac #1627, git 1a4d0ae65b2c1012611f4c15c5e7a29d65339104) + +439. [func] team + The in-memory data source can now load zones from the + sqlite3 data source, so that zones stored in the database + (and updated for example by xfrin) can be served from memory. + (Trac #1789,#1790,#1792,#1793,#1911, + git 93f11d2a96ce4dba9308889bdb9be6be4a765b27) + +438. [bug] naokikambe + b10-stats-httpd now sends the system a notification that + it is shutting down if it encounters a fatal error during + startup. + (Trac #1852, git a475ef271d4606f791e5ed88d9b8eb8ed8c90ce6) + +437. [build] jinmei + Building BIND 10 may fail on MacOS if Python has been + installed via Homebrew unless --without-werror is specified. + The configure script now includes a URL that explains this + issue when it detects failure that is possibly because of + this problem. + (Trac #1907, git 0d03b06138e080cc0391fb912a5a5e75f0f97cec) + +436. [bug] jelte + The --config-file option now works correctly with relative paths if + --data-path is not given. + (Trac #1889, git ce7d1aef2ca88084e4dacef97132337dd3e50d6c) + +435. [func] team + The in-memory datasource now supports NSEC-signed zones. + (Trac #1802-#1810, git 2f9aa4a553a05aa1d9eac06f1140d78f0c99408b) + 434. [func] tomek libdhcp++: Linux interface detection refactored. The code is now cleaner. Tests better support certain versions of ifconfig. @@ -37,8 +136,8 @@ (Trac #1843, git 551657702a4197ef302c567b5c0eaf2fded3e121) 428. [bug] marcin - perfdhcp: bind to local address to allow reception of replies from IPv6 - DHCP servers. + perfdhcp: bind to local address to allow reception of + replies from IPv6 DHCP servers. (Trac #1908, git 597e059afaa4a89e767f8f10d2a4d78223af3940) 427. [bug] jinmei @@ -48,10 +147,11 @@ now manipulates them in the separate table for the NSEC3 namespace. As a result b10-xfrin now correctly updates NSEC3-signed zones by inbound zone transfers. 
- (Trac #1891, git 672f129700dae33b701bb02069cf276238d66be3) + (Trac #1781,#1788,#1891, git 672f129700dae33b701bb02069cf276238d66be3) 426. [bug] vorner - The NSEC3 records are now included when transferring a signed zone out. + The NSEC3 records are now included when transferring a + signed zone out. (Trac #1782, git 36efa7d10ecc4efd39d2ce4dfffa0cbdeffa74b0) 425. [func]* muks @@ -202,7 +302,7 @@ bind10-devel-20120329 released on March 29, 2012 providing result for random instance. (Trac #1751, git 3285353a660e881ec2b645e1bc10d94e5020f357) -403. [build]* jelte +403. [build]* jelte The configure option for botan (--with-botan=PATH) is replaced by --with-botan-config=PATH, which takes a full path to a botan-config script, instead of the botan 'install' directory. Also, if not diff --git a/Makefile.am b/Makefile.am index 54216b612a..7024294276 100644 --- a/Makefile.am +++ b/Makefile.am @@ -16,6 +16,26 @@ DISTCHECK_CONFIGURE_FLAGS = --disable-install-configurations # Use same --with-gtest flag if set DISTCHECK_CONFIGURE_FLAGS += $(DISTCHECK_GTEST_CONFIGURE_FLAG) +dist_doc_DATA = AUTHORS COPYING ChangeLog README + +.PHONY: check-valgrind check-valgrind-suppress + +check-valgrind: +if HAVE_VALGRIND + @VALGRIND_COMMAND="$(VALGRIND) -q --gen-suppressions=all --track-origins=yes --num-callers=48 --leak-check=full --fullpath-after=" \ + make -C $(abs_top_builddir) check +else + @echo "*** Valgrind is required for check-valgrind ***"; exit 1; +endif + +check-valgrind-suppress: +if HAVE_VALGRIND + @VALGRIND_COMMAND="$(VALGRIND) -q --gen-suppressions=all --error-exitcode=1 --suppressions=$(abs_top_srcdir)/src/valgrind-suppressions --suppressions=$(abs_top_srcdir)/src/valgrind-suppressions.revisit --num-callers=48 --leak-check=full --fullpath-after=" \ + make -C $(abs_top_builddir) check +else + @echo "*** Valgrind is required for check-valgrind-suppress ***"; exit 1; +endif + clean-cpp-coverage: @if [ $(USE_LCOV) = yes ] ; then \ $(LCOV) --directory . 
--zerocounters; \ @@ -405,3 +425,5 @@ EXTRA_DIST += ext/coroutine/coroutine.h pkgconfigdir = $(libdir)/pkgconfig pkgconfig_DATA = dns++.pc + +CLEANFILES = $(abs_top_builddir)/logger_lockfile diff --git a/README b/README index 3f6892395e..4ef941e886 100644 --- a/README +++ b/README @@ -2,17 +2,14 @@ This is the source for the development version of BIND 10. BIND is the popular implementation of a DNS server, developer -interfaces, and DNS tools. BIND 10 is a rewrite of BIND 9. BIND 10 -is written in C++ and Python and provides a modular environment -for serving, maintaining, and developing DNS. +interfaces, and DNS tools. BIND 10 is a rewrite of BIND 9 and ISC +DHCP. BIND 10 is written in C++ and Python and provides a modular +environment for serving, maintaining, and developing DNS and DHCP. BIND10-devel is new development leading up to the production BIND 10 release. It contains prototype code and experimental interfaces. Nevertheless it is ready to use now for testing the -new BIND 10 infrastructure ideas. The Year 3 goals of the five -year plan are described here: - - http://bind10.isc.org/wiki/Year3Goals +new BIND 10 infrastructure ideas. This release includes the bind10 master process, b10-msgq message bus, b10-auth authoritative DNS server (with SQLite3 and in-memory diff --git a/compatcheck/Makefile.am b/compatcheck/Makefile.am index 15ef017008..6cc4036161 100644 --- a/compatcheck/Makefile.am +++ b/compatcheck/Makefile.am @@ -1,12 +1,17 @@ -# We're going to abuse install-data-local for a pre-install check. -# This is to be considered a short term hack and is expected to be removed -# in a near future version. +# We're going to abuse install-data-local for a pre-install check. This may +# not be the cleanest way to do this type of job, but that's the least ugly +# one we've found. 
+# +# Note also that if any test needs to examine some file that has possibly +# been installed before (e.g., older DB or configuration file), it should be +# referenced with the prefix of DESTDIR. Otherwise +# 'make DESTDIR=/somewhere install' may not work. install-data-local: - if test -e $(localstatedir)/$(PACKAGE)/zone.sqlite3; then \ + if test -e $(DESTDIR)$(localstatedir)/$(PACKAGE)/zone.sqlite3; then \ $(SHELL) $(top_builddir)/src/bin/dbutil/run_dbutil.sh --check \ - $(localstatedir)/$(PACKAGE)/zone.sqlite3 || \ + $(DESTDIR)$(localstatedir)/$(PACKAGE)/zone.sqlite3 || \ (echo "\nSQLite3 DB file schema version is old. " \ "Please run: " \ "$(abs_top_builddir)/src/bin/dbutil/run_dbutil.sh --upgrade " \ - "$(localstatedir)/$(PACKAGE)/zone.sqlite3"; exit 1) \ + "$(DESTDIR)$(localstatedir)/$(PACKAGE)/zone.sqlite3"; exit 1) \ fi diff --git a/configure.ac b/configure.ac index af9125f5af..70df25d7e8 100644 --- a/configure.ac +++ b/configure.ac @@ -362,7 +362,7 @@ if test $werror_ok = 1; then PYTHON_CXXFLAGS="${PYTHON_CXXFLAGS} -Wno-unused-parameter" AC_SUBST(PYTHON_CXXFLAGS) ], - [AC_MSG_ERROR([Can't compile against Python.h])] + [AC_MSG_ERROR([Can't compile against Python.h. 
If you're using MacOS X and have installed Python with Homebrew, see http://bind10.isc.org/wiki/SystemNotesMacOSX])] ) ] ) @@ -407,9 +407,9 @@ case $system in OS_TYPE="BSD" CPPFLAGS="$CPPFLAGS -DOS_BSD" ;; - Solaris) + SunOS) OS_TYPE="Solaris" - CPPFLAGS="$CPPFLAGS -DOS_SOLARIS" + CPPFLAGS="$CPPFLAGS -DOS_SUN" ;; *) OS_TYPE="Unknown" @@ -982,6 +982,15 @@ AC_ARG_ENABLE(logger-checks, [AC_HELP_STRING([--enable-logger-checks], AM_CONDITIONAL(ENABLE_LOGGER_CHECKS, test x$enable_logger_checks != xno) AM_COND_IF([ENABLE_LOGGER_CHECKS], [AC_DEFINE([ENABLE_LOGGER_CHECKS], [1], [Check logger messages?])]) +# Check for valgrind +AC_PATH_PROG(VALGRIND, valgrind, no) +AM_CONDITIONAL(HAVE_VALGRIND, test "x$VALGRIND" != "xno") + +found_valgrind="not found" +if test "x$VALGRIND" != "xno"; then + found_valgrind="found" +fi + AC_CONFIG_FILES([Makefile doc/Makefile doc/guide/Makefile @@ -1067,6 +1076,8 @@ AC_CONFIG_FILES([Makefile src/lib/python/isc/testutils/Makefile src/lib/python/isc/bind10/Makefile src/lib/python/isc/bind10/tests/Makefile + src/lib/python/isc/ddns/Makefile + src/lib/python/isc/ddns/tests/Makefile src/lib/python/isc/xfrin/Makefile src/lib/python/isc/xfrin/tests/Makefile src/lib/python/isc/server_common/Makefile @@ -1120,6 +1131,7 @@ AC_CONFIG_FILES([Makefile tests/tools/badpacket/Makefile tests/tools/badpacket/tests/Makefile tests/tools/perfdhcp/Makefile + tests/tools/perfdhcp/tests/Makefile dns++.pc ]) AC_OUTPUT([doc/version.ent @@ -1184,6 +1196,7 @@ AC_OUTPUT([doc/version.ent src/lib/log/tests/destination_test.sh src/lib/log/tests/init_logger_test.sh src/lib/log/tests/local_file_test.sh + src/lib/log/tests/logger_lock_test.sh src/lib/log/tests/severity_test.sh src/lib/log/tests/tempdir.h src/lib/util/python/mkpywrapper.py @@ -1232,6 +1245,7 @@ AC_OUTPUT([doc/version.ent chmod +x src/lib/log/tests/destination_test.sh chmod +x src/lib/log/tests/init_logger_test.sh chmod +x src/lib/log/tests/local_file_test.sh + chmod +x src/lib/log/tests/logger_lock_test.sh chmod 
+x src/lib/log/tests/severity_test.sh chmod +x src/lib/util/python/mkpywrapper.py chmod +x src/lib/util/python/gen_wiredata.py @@ -1287,8 +1301,10 @@ Features: Developer: Google Tests: $gtest_path + Valgrind: $found_valgrind C++ Code Coverage: $USE_LCOV Python Code Coverage: $USE_PYCOVERAGE + Logger checks: $enable_logger_checks Generate Manuals: $enable_man END diff --git a/doc/Doxyfile b/doc/Doxyfile index 8730ae4c8d..6d91bf273d 100644 --- a/doc/Doxyfile +++ b/doc/Doxyfile @@ -579,7 +579,7 @@ INPUT = ../src/lib/exceptions ../src/lib/cc \ ../src/lib/testutils ../src/lib/cache ../src/lib/server_common/ \ ../src/bin/sockcreator/ ../src/lib/util/ ../src/lib/util/io/ \ ../src/lib/resolve ../src/lib/acl ../src/bin/dhcp6 ../src/lib/dhcp \ - ../src/bin/dhcp4 devel + ../src/bin/dhcp4 ../tests/tools/perfdhcp devel # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is diff --git a/doc/guide/Makefile.am b/doc/guide/Makefile.am index ffe89c9437..7d90d37b6b 100644 --- a/doc/guide/Makefile.am +++ b/doc/guide/Makefile.am @@ -1,6 +1,7 @@ -EXTRA_DIST = bind10-guide.css -EXTRA_DIST += bind10-guide.xml bind10-guide.html bind10-guide.txt -EXTRA_DIST += bind10-messages.xml bind10-messages.html +dist_doc_DATA = bind10-guide.txt +dist_html_DATA = bind10-guide.css bind10-guide.html bind10-messages.html + +EXTRA_DIST = bind10-guide.xml bind10-messages.xml # This is not a "man" manual, but reuse this for now for docbook. if ENABLE_MAN diff --git a/doc/guide/bind10-guide.html b/doc/guide/bind10-guide.html index b2286f4602..7a1a120a7b 100644 --- a/doc/guide/bind10-guide.html +++ b/doc/guide/bind10-guide.html @@ -1,4 +1,4 @@ -BIND 10 Guide

BIND 10 Guide

Administrator Reference for BIND 10

This is the reference guide for BIND 10 version +BIND 10 Guide

BIND 10 Guide

Administrator Reference for BIND 10

This is the reference guide for BIND 10 version 20120405.

Abstract

BIND 10 is a framework that features Domain Name System (DNS) suite and Dynamic Host Configuration Protocol (DHCP) servers managed by Internet Systems Consortium (ISC). It @@ -10,9 +10,9 @@ The most up-to-date version of this document (in PDF, HTML, and plain text formats), along with other documents for BIND 10, can be found at http://bind10.isc.org/docs. -


List of Tables

3.1.

Preface

Table of Contents

1. Acknowledgements

1. Acknowledgements

ISC would like to acknowledge generous support for +


Table of Contents

Preface
1. Acknowledgements
1. Introduction
1.1. Supported Platforms
1.2. Required Software
1.3. Starting and Stopping the Server
1.4. Managing BIND 10
2. Installation
2.1. Building Requirements
2.2. Quick start
2.3. Installation from source
2.3.1. Download Tar File
2.3.2. Retrieve from Git
2.3.3. Configure before the build
2.3.4. Build
2.3.5. Install
2.3.6. Install Hierarchy
3. Starting BIND10 with bind10
3.1. Starting BIND 10
3.2. Configuration of started processes
4. Command channel
5. Configuration manager
6. Remote control daemon
6.1. Configuration specification for b10-cmdctl
7. Control and configure user interface
8. Authoritative Server
8.1. Server Configurations
8.2. Data Source Backends
8.2.1. In-memory Data Source
8.2.2. In-memory Data Source With SQLite3 Backend
8.2.3. Reloading an In-memory Data Source
8.2.4. Disabling In-memory Data Sources
8.3. Loading Master Zones Files
9. Incoming Zone Transfers
9.1. Configuration for Incoming Zone Transfers
9.2. Enabling IXFR
9.3. Secondary Manager
9.4. Trigger an Incoming Zone Transfer Manually
9.5. Incoming Transfers with In-memory Datasource
10. Outbound Zone Transfers
11. Dynamic DNS Update
11.1. Enabling Dynamic Update
11.2. Access Control
11.3. Miscellaneous Operational Issues
12. Recursive Name Server
12.1. Access Control
12.2. Forwarding
13. DHCPv4 Server
13.1. DHCPv4 Server Usage
13.2. DHCPv4 Server Configuration
13.3. Supported standards
13.4. DHCPv4 Server Limitations
14. DHCPv6 Server
14.1. DHCPv6 Server Usage
14.2. DHCPv6 Server Configuration
14.3. Supported DHCPv6 Standards
14.4. DHCPv6 Server Limitations
15. libdhcp++ library
15.1. Interface detection
15.2. DHCPv4/DHCPv6 packet handling
16. Statistics
17. Logging
17.1. Logging configuration
17.1.1. Loggers
17.1.2. Output Options
17.1.3. Example session
17.2. Logging Message Format

List of Tables

3.1.

Preface

Table of Contents

1. Acknowledgements

1. Acknowledgements

ISC would like to acknowledge generous support for BIND 10 development of DHCPv4 and DHCPv6 components provided - by Comcast.

Chapter 1. Introduction

BIND is the popular implementation of a DNS server, developer interfaces, and DNS tools. BIND 10 is a rewrite of BIND 9. BIND 10 is written in C++ and Python @@ -23,10 +23,11 @@

This guide covers the experimental prototype of BIND 10 version 20120405. -

1.1. Supported Platforms

- BIND 10 builds have been tested on Debian GNU/Linux 5 and unstable, - Ubuntu 9.10, NetBSD 5, Solaris 10, FreeBSD 7 and 8, CentOS - Linux 5.3, and MacOS 10.6. +

1.1. Supported Platforms

+ BIND 10 builds have been tested on (in no particular order) + Debian GNU/Linux 5 and unstable, Ubuntu 9.10, NetBSD 5, + Solaris 10 and 11, FreeBSD 7 and 8, CentOS Linux 5.3, + MacOS 10.6 and 10.7, and OpenBSD 5.1. It has been tested on Sparc, i386, and amd64 hardware platforms. @@ -51,11 +52,13 @@ It needs at least SQLite version 3.3.9.

- The b10-xfrin, b10-xfrout, - and b10-zonemgr components require the - libpython3 library and the Python _sqlite3.so module - (which is included with Python). - The Python module needs to be built for the corresponding Python 3. + The b10-ddns, b10-xfrin, + b10-xfrout, and b10-zonemgr + components require the libpython3 library and the Python + _sqlite3.so module (which is included with Python). + The b10-stats-httpd component uses the + Python pyexpat.so module. + The Python modules need to be built for the corresponding Python 3.

Note

Some operating systems do not provide these dependencies in their default installation nor standard packages @@ -89,6 +92,12 @@ b10-cmdctl — Command and control service. This process allows external control of the BIND 10 system. +

  • + b10-ddns — + Dynamic DNS update service. + This process is used to handle incoming DNS update + requests to allow granted clients to update zones + for which BIND 10 is serving as a primary server.
  • b10-msgq — Message bus daemon. @@ -162,7 +171,7 @@ and, of course, DNS. These include detailed developer documentation and code examples. -

  • Chapter 2. Installation

    2.1. Building Requirements

    +

    Chapter 2. Installation

    2.1. Building Requirements

    In addition to the run-time requirements, building BIND 10 from source code requires various development include headers.

    Note

    @@ -224,14 +233,14 @@ the Git code revision control system or as a downloadable tar file. It may also be available in pre-compiled ready-to-use packages from operating system vendors. -

    2.3.1. Download Tar File

    +

    2.3.1. Download Tar File

    Downloading a release tar file is the recommended method to obtain the source code.

    The BIND 10 releases are available as tar file downloads from ftp://ftp.isc.org/isc/bind10/. Periodic development snapshots may also be available. -

    2.3.2. Retrieve from Git

    +

    2.3.2. Retrieve from Git

    Downloading this "bleeding edge" code is recommended only for developers or advanced users. Using development code in a production environment is not recommended. @@ -265,7 +274,7 @@ autoheader, automake, and related commands. -

    2.3.3. Configure before the build

    +

    2.3.3. Configure before the build

    BIND 10 uses the GNU Build System to discover build environment details. To generate the makefiles using the defaults, simply run: @@ -296,16 +305,16 @@

    If the configure fails, it may be due to missing or old dependencies. -

    2.3.4. Build

    +

    2.3.4. Build

    After the configure step is complete, to build the executables from the C++ code and prepare the Python scripts, run:

    $ make

    -

    2.3.5. Install

    +

    2.3.5. Install

    To install the BIND 10 executables, support files, and documentation, run:

    $ make install

    -

    Note

    The install step may require superuser privileges.

    2.3.6. Install Hierarchy

    +

    Note

    The install step may require superuser privileges.

    2.3.6. Install Hierarchy

    The following is the layout of the complete BIND 10 installation:

    • bin/ — @@ -397,7 +406,7 @@ during startup or shutdown. Unless specified, the component is started in usual way. This is the list of components that need to be started in a special way, with the value of special used for them: -

      Table 3.1. 

      ComponentSpecialDescription
      b10-authauthAuthoritative server
      b10-resolverresolverThe resolver
      b10-cmdctlcmdctlThe command control (remote control interface)


      +

      Table 3.1. 

      ComponentSpecialDescription
      b10-authauthAuthoritative server
      b10-resolverresolverThe resolver
      b10-cmdctlcmdctlThe command control (remote control interface)


      The kind specifies how a failure of the component should be handled. If it is set to dispensable @@ -425,7 +434,7 @@ message bus. The special components already know their address, but the usual ones don't. The address is by convention the thing after b10-, with - the first letter capital (eg. b10-stats + the first letter capitalized (eg. b10-stats would have Stats as its address).

      @@ -625,12 +634,12 @@ shutdown the details and relays (over a b10-msgq command channel) the configuration on to the specified module.

      -

    Chapter 8. Authoritative Server

    The b10-auth is the authoritative DNS server. It supports EDNS0 and DNSSEC. It supports IPv6. Normally it is started by the bind10 master process. -

    8.1. Server Configurations

    +

    8.1. Server Configurations

    b10-auth is configured via the b10-cfgmgr configuration manager. The module name is Auth. @@ -649,9 +658,10 @@ This may be a temporary setting until then. class to optionally select the class (it defaults to IN); and - zones to define the - file path name and the - origin (default domain). + zones to define + the file path name, + the filetype (e.g., sqlite3), + and the origin (default domain). By default, this is empty. @@ -661,7 +671,8 @@ This may be a temporary setting until then. Only the IN class is supported at this time. By default, the memory data source is disabled. Also, currently the zone file must be canonical such as - generated by named-compilezone -D. + generated by named-compilezone -D, or + must be an SQLite3 database.

    listen_on
    @@ -671,6 +682,21 @@ This may be a temporary setting until then. and port number. By default, b10-auth listens on port 53 on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses. +

    Note

    + The default configuration is currently not appropriate for a multi-homed host. + In case you have multiple public IP addresses, it is possible the + query UDP packet comes through one interface and the answer goes out + through another. The answer will probably be dropped by the client, as it + has a different source address than the one it sent the query to. The + client would fallback on TCP after several attempts, which works + well in this situation, but is clearly not ideal. +

    + There are plans to solve the problem such that the server handles + it by itself. But until it is actually implemented, it is recommended to + alter the configuration — remove the wildcard addresses and list all + addresses explicitly. Then the server will answer on the same + interface the request came on, preserving the correct address. +

    statistics-interval
    statistics-interval is the timer interval in seconds for b10-auth to share its @@ -710,7 +736,7 @@ This may be a temporary setting until then. if configured.)

    -

    8.2. Data Source Backends

    Note

    +

    8.2. Data Source Backends

    Note

    For the development prototype release, b10-auth supports a SQLite3 data source backend and in-memory data source backend. @@ -742,7 +768,26 @@ This may be a temporary setting until then. The authoritative server will begin serving it immediately after it is loaded. -

    +

    8.2.2. In-memory Data Source With SQLite3 Backend

    + + The following commands to bindctl + provide an example of configuring an in-memory data + source containing the example.org zone + with a SQLite3 backend file named example.org.sqlite3: + + + +

    > config add Auth/datasources
    +> config set Auth/datasources[1]/type "memory"
    +> config add Auth/datasources[1]/zones
    +> config set Auth/datasources[1]/zones[0]/origin "example.org"
    +> config set Auth/datasources[1]/zones[0]/file "example.org.sqlite3"
    +> config set Auth/datasources[1]/zones[0]/filetype "sqlite3"
    +> config commit

    + + The authoritative server will begin serving it immediately + after it is loaded. +

    8.2.3. Reloading an In-memory Data Source

    Use the Auth loadzone command in bindctl to reload a changed master file into memory; for example: @@ -750,7 +795,7 @@ This may be a temporary setting until then.

    > Auth loadzone origin="example.com"
     

    -

    +

    8.2.4. Disabling In-memory Data Sources

    By default, the memory data source is disabled; it must be configured explicitly. To disable all the in-memory zones, specify a null list for Auth/datasources: @@ -770,7 +815,7 @@ This may be a temporary setting until then. and/or zones[0] for the relevant zone as needed.) -

    8.3. Loading Master Zones Files

    +

    8.3. Loading Master Zones Files

    RFC 1035 style DNS master zone files may imported into a BIND 10 SQLite3 data source by using the b10-loadzone utility. @@ -799,7 +844,7 @@ This may be a temporary setting until then. If you reload a zone already existing in the database, all records from that prior zone disappear and a whole new set appears. -

    Chapter 9. Incoming Zone Transfers

    Incoming zones are transferred using the b10-xfrin process which is started by bind10. When received, the zone is stored in the corresponding BIND 10 @@ -813,11 +858,7 @@ This may be a temporary setting until then. IXFR. Due to some implementation limitations of the current development release, however, it only tries AXFR by default, and care should be taken to enable IXFR. -

    Note

    - In the current development release of BIND 10, incoming zone - transfers are only available for SQLite3-based data sources, - that is, they don't work for an in-memory data source. -

    9.1. Configuration for Incoming Zone Transfers

    +

    9.1. Configuration for Incoming Zone Transfers

    In practice, you need to specify a list of secondary zones to enable incoming zone transfers for these zones (you can still trigger a zone transfer manually, without a prior configuration @@ -833,7 +874,7 @@ This may be a temporary setting until then. > config commit

    (We assume there has been no zone configuration before). -

    9.2. Enabling IXFR

    +

    9.2. Enabling IXFR

    As noted above, b10-xfrin uses AXFR for zone transfers by default. To enable IXFR for zone transfers for a particular zone, set the use_ixfr @@ -885,12 +926,24 @@ This may be a temporary setting until then. (i.e. no SOA record for it), b10-zonemgr will automatically tell b10-xfrin to transfer the zone in. -

    9.4. Trigger an Incoming Zone Transfer Manually

    +

    9.4. Trigger an Incoming Zone Transfer Manually

    To manually trigger a zone transfer to retrieve a remote zone, you may use the bindctl utility. For example, at the bindctl prompt run:

    > Xfrin retransfer zone_name="foo.example.org" master=192.0.2.99

    +

    9.5. Incoming Transfers with In-memory Datasource

    + In the case of an incoming zone transfer, the received zone is + first stored in the corresponding BIND 10 datasource. In + case the secondary zone is served by an in-memory datasource + with an SQLite3 backend, b10-auth is + automatically sent a loadzone command to + reload the corresponding zone into memory from the backend. +

    + The administrator doesn't have to do anything for + b10-auth to serve the new version of the + zone, except for the configuration such as the one described in + Section 8.2.2, “In-memory Data Source With SQLite3 Backend”.

    Chapter 10. Outbound Zone Transfers

    The b10-xfrout process is started by bind10. @@ -934,7 +987,254 @@ Xfrout/transfer_acl[0] {"action": "ACCEPT"} any (default)

    TSIGs in the incoming messages and to sign responses.

    Note

    The way to specify zone specific configuration (ACLs, etc) is likely to be changed. -

    Chapter 11. Recursive Name Server

    +

    Chapter 11. Dynamic DNS Update

    + BIND 10 supports the server side of the Dynamic DNS Update + (DDNS) protocol as defined in RFC 2136. + This service is provided by the b10-ddns + component, which is started by the bind10 + process if configured so. +

    + When the b10-auth authoritative DNS server + receives an UPDATE request, it internally forwards the request + to b10-ddns, which handles the rest of + request processing. + When the processing is completed b10-ddns + will send a response to the client with the RCODE set to the + value as specified in RFC 2136 (NOERROR for successful update, + REFUSED if rejected due to ACL check, etc). + If the zone has been changed as a result, it will internally + notify b10-xfrout so that other secondary + servers will be notified via the DNS notify protocol. + In addition, if b10-auth serves the updated + zone from its in-memory cache (as described in + Section 8.2.2, “In-memory Data Source With SQLite3 Backend”), + b10-ddns will also + notify b10-auth so that b10-auth + will re-cache the updated zone content. +

    + The b10-ddns component supports requests over + both UDP and TCP, and both IPv6 and IPv4; for TCP requests, + however, it terminates the TCP connection immediately after + each single request has been processed. Clients cannot reuse the + same TCP connection for multiple requests. (This is a current + implementation limitation of b10-ddns. + While RFC 2136 doesn't specify anything about such reuse of TCP + connection, there is no reason for disallowing it as RFC 1035 + generally allows multiple requests sent over a single TCP + connection. BIND 9 supports such reuse.) +

    + As of this writing b10-ddns does not support + update forwarding for secondary zones. + If it receives an update request for a secondary zone, it will + immediately return a response with an RCODE of NOTIMP. +

    Note

    + For feature completeness update forwarding should be + eventually supported. But right now it's considered a lower + priority task and there is no specific plan of implementing + this feature. + +

    +

    11.1. Enabling Dynamic Update

    + First off, it must be made sure that a few components on which + b10-ddns depends are configured to run, + which are b10-auth + and b10-zonemgr. + In addition, b10-xfrout should also be + configured to run; otherwise the notification after an update + (see above) will fail with a timeout, suspending the DDNS + service while b10-ddns waits for the + response (see the description of the DDNS_UPDATE_NOTIFY_FAIL + log message for further details). + If BIND 10 is already configured to provide authoritative DNS + service they should normally be configured to run already. +

    + Second, for the obvious reason dynamic update requires that the + underlying data source storing the zone data be writable. + In the current implementation this means the zone must be stored + in an SQLite3-based data source. + Also, right now, the b10-ddns component + configures itself with the data source referring to the + database_file configuration parameter of + b10-auth. + So this information must be configured correctly before starting + b10-ddns. + +

    Note

    + The way to configure data sources is now being revised. + Configuration on the data source for DDNS will be very + likely to be changed in a backward incompatible manner in + a near future version. +

    +

    + In general, if something goes wrong regarding the dependency + described above, b10-ddns will log the + related event at the warning or error level. + It's advisable to check the log message when you first enable + DDNS or if it doesn't work as you expect to see if there's any + warning or error log message. +

    + Next, to enable the DDNS service, b10-ddns + needs to be explicitly configured to run. + It can be done by using the bindctl + utility. For example: +

    +> config add Boss/components b10-ddns
    +> config set Boss/components/b10-ddns/address DDNS
    +> config set Boss/components/b10-ddns/kind dispensable
    +> config commit
    +

    +

    Note

    + In theory "kind" could be omitted because "dispensable" is its + default. But there's some peculiar behavior (which should + be a bug and should be fixed eventually; see Trac ticket + #2064) with bindctl and you'll still need to specify that explicitly. + Likewise, "address" may look unnecessary because + b10-ddns would start and work without + specifying it. But for it to shut down gracefully this + parameter should also be specified. +

    +

    11.2. Access Control

    + By default b10-ddns rejects any update + requests from any clients by returning a response with an RCODE + of REFUSED. + To allow updates to take effect, an access control rule + (called update ACL) with a policy allowing updates must explicitly be + configured. + Update ACL must be configured per zone basis in the + zones configuration parameter of + b10-ddns. + This is a list of per-zone configurations regarding DDNS. + Each list element consists of the following parameters: +

    origin
    The zone's origin name
    class
    The RR class of the zone + (normally IN, and in that case + can be omitted in configuration)
    update_acl
    List of access control rules (ACL) for the zone

    + The syntax of the ACL is the same as ACLs for other + components. + Specific examples are given below. +

    + In general, an update ACL rule that allows an update request + should be configured with a TSIG key. + This is an example update ACL that allows updates to the zone + named example.org of RR class IN + from clients that send requests signed with a TSIG whose + key name is "key.example.org" (and refuses all others): +

    +> config add DDNS/zones
    +> config set DDNS/zones[0]/origin example.org
    +> config set DDNS/zones[0]/class IN
    +(Note: "class" can be omitted)
    +> config add DDNS/zones[0]/update_acl {"action": "ACCEPT", "key": "key.example.org"}
    +> config commit
    +

    + The TSIG key must be configured system wide + (see Chapter 10, Outbound Zone Transfers.) +

    + Multiple rules can be specified in the ACL, and an ACL rule + can consist of multiple constraints, such as a combination of + IP address and TSIG. + The following configuration sequence will add a new rule to + the ACL created in the above example. This additional rule + allows update requests sent from a client + using TSIG key name of "key.example" (different from the + key used in the previous example) and has an IPv6 address of ::1. +

    +> config add DDNS/zones[0]/update_acl {"action": "ACCEPT", "from": "::1", "key": "key.example"}
    +> config show DDNS/zones[0]/update_acl
    +DDNS/zones[0]/update_acl[0]     {"action": "ACCEPT", "key": "key.example.org"} any (modified)
    +DDNS/zones[0]/update_acl[1]     {"action": "ACCEPT", "from": "::1", "key": "key.example"} any (modified)
    +> config commit
    +

    + (Note the "add" in the first line. Before this sequence, we + have had only one entry in zones[0]/update_acl. The "add" command + with a value (rule) adds a new entry and sets it to the given rule. + Due to a limitation of the current implementation, it doesn't + work if you first try to just add a new entry and then set it to + a given rule). +

    Note

    + The b10-ddns component accepts an ACL + rule that just allows updates from a specific IP address + (i.e., without requiring TSIG), but this is highly + discouraged (remember that requests can be made over UDP and + spoofing the source address of a UDP packet is often pretty + easy). + Unless you know what you are doing and that you can accept + its consequence, any update ACL rule that allows updates + should have a TSIG key in its constraints. +

    + The ACL rules will be checked in the listed order, and the + first matching one will apply. + If none of the rules matches, the default rule will apply, + which is rejecting any requests in the case of + b10-ddns. +

    + Other actions than "ACCEPT", namely "REJECT" and "DROP", can be + used, too. + See Chapter 12, Recursive Name Server about their effects. +

    + Currently update ACL can only control updates per zone basis; + it's not possible to specify access control with higher + granularity such as for particular domain names or specific + types of RRs. + +

    Note

    + Contrary to what RFC 2136 (literally) specifies, + b10-ddns checks the update ACL before + checking the prerequisites of the update request. + This is a deliberate implementation decision. + This counter intuitive specification has been repeatedly + discussed among implementers and in the IETF, and it is now + widely agreed that it does not make sense to strictly follow + that part of RFC. + One known specific bad result of following the RFC is that it + could leak information about which name or record exists or does not + exist in the zone as a result of prerequisite checks even if a + zone is somehow configured to reject normal queries from + arbitrary clients. + There have been other troubles that could have been avoided if + the ACL could be checked before the prerequisite check. +

    11.3. Miscellaneous Operational Issues

    + Unlike BIND 9, BIND 10 currently does not support automatic + resigning of DNSSEC-signed zone when it's updated via DDNS. + It could be possible to resign the updated zone afterwards + or make sure the update request also updates related DNSSEC + records, but that will be pretty error-prone operation. + In general, it's not advisable to allow DDNS for a signed zone + at this moment. +

    + Also unlike BIND 9, it's currently not possible + to freeze a zone temporarily in order to + suspend DDNS while you manually update the zone. + If you need to make manual updates to a dynamic zone, + you'll need to temporarily reject any updates to the zone via + the update ACLs. +

    + Dynamic updates are only applicable to primary zones. + In order to avoid updating secondary zones via DDNS requests, + b10-ddns refers to the + secondary_zones configuration of + b10-zonemgr. Zones listed in + secondary_zones will never be updated via DDNS + regardless of the update ACL configuration; + b10-ddns will return a response with an + RCODE of NOTAUTH as specified in RFC 2136. + If you have a "conceptual" secondary zone whose content is a + copy of some external source but is not updated via the + standard zone transfers and therefore not listed in + secondary_zones, be careful not to allow DDNS + for the zone; it would be quite likely to lead to inconsistent + state between different servers. + Normally this should not be a problem because the default + update ACL rejects any update requests, but you may want to + take an extra care about the configuration if you have such + type of secondary zones. +

    + The difference of two versions of a zone, before and after a + DDNS transaction, is automatically recorded in the underlying + data source, and can be retrieved in the form of outbound + IXFR. + This is done automatically; it does not require specific + configuration to make this possible. +

    Chapter 12. Recursive Name Server

    The b10-resolver process is started by bind10. @@ -968,7 +1268,7 @@ Xfrout/transfer_acl[0] {"action": "ACCEPT"} any (default)

    (Replace the 2 as needed; run config show - Resolver/listen_on if needed.)

    11.1. Access Control

    + Resolver/listen_on” if needed.)

    12.1. Access Control

    By default, the b10-resolver daemon only accepts DNS queries from the localhost (127.0.0.1 and ::1). The Resolver/query_acl configuration may @@ -1001,7 +1301,7 @@ Xfrout/transfer_acl[0] {"action": "ACCEPT"} any (default)

    (Replace the 2 as needed; run config show Resolver/query_acl if needed.)

    Note

    This prototype access control configuration - syntax may be changed.

    11.2. Forwarding

    + syntax may be changed.

    12.2. Forwarding

    To enable forwarding, the upstream address and port must be configured to forward queries to, such as: @@ -1021,7 +1321,7 @@ Xfrout/transfer_acl[0] {"action": "ACCEPT"} any (default)

    > config set Resolver/forward_addresses [] > config commit

    -

    Chapter 12. DHCPv4 Server

    Dynamic Host Configuration Protocol for IPv4 (DHCP or +

    Chapter 13. DHCPv4 Server

    Dynamic Host Configuration Protocol for IPv4 (DHCP or DHCPv4) and Dynamic Host Configuration Protocol for IPv6 (DHCPv6) are protocols that allow one node (server) to provision configuration parameters to many hosts and devices (clients). To @@ -1031,7 +1331,7 @@ Xfrout/transfer_acl[0] {"action": "ACCEPT"} any (default)

    somewhat similar, these are two radically different protocols. BIND10 offers server implementations for both DHCPv4 and DHCPv6. This chapter is about DHCP for IPv4. For a description - of the DHCPv6 server, see Chapter 13, DHCPv6 Server.

    The DHCPv4 server component is currently under intense + of the DHCPv6 server, see Chapter 14, DHCPv6 Server.

    The DHCPv4 server component is currently under intense development. You may want to check out BIND10 DHCP (Kea) wiki and recent posts on BIND10 developers mailing list.

    The DHCPv4 and DHCPv6 components in BIND10 architecture are @@ -1041,13 +1341,13 @@ Xfrout/transfer_acl[0] {"action": "ACCEPT"} any (default)

    performing DHCP configuration, they are not fully functional yet. In particular, neither has functional lease databases. This means that they will assign the same, fixed, - hardcoded addresses to any client that will ask. See Section 12.4, “DHCPv4 Server Limitations” and Section 13.4, “DHCPv6 Server Limitations” for + hardcoded addresses to any client that will ask. See Section 13.4, “DHCPv4 Server Limitations” and Section 14.4, “DHCPv6 Server Limitations” for detailed description. -

    12.1. DHCPv4 Server Usage

    BIND10 provides the DHCPv4 server component since December +

    13.1. DHCPv4 Server Usage

    BIND10 provides the DHCPv4 server component since December 2011. It is a skeleton server and can be described as an early prototype that is not fully functional yet. It is mature enough to conduct first tests in lab environment, but it has - significant limitations. See Section 12.4, “DHCPv4 Server Limitations” for + significant limitations. See Section 13.4, “DHCPv4 Server Limitations” for details.

    b10-dhcp4 is a BIND10 component and is being @@ -1078,7 +1378,7 @@ Xfrout/transfer_acl[0] {"action": "ACCEPT"} any (default)

    be started directly, but rather via bind10. Please be aware of this planned change. -

    12.2. DHCPv4 Server Configuration

    +

    13.2. DHCPv4 Server Configuration

    The DHCPv4 server does not have a lease database implemented yet nor any support for configuration, so every time the same set of configuration options (including the same fixed address) @@ -1098,12 +1398,12 @@ const std::string HARDCODED_DOMAIN_NAME = "isc.example.com"; const std::string HARDCODED_SERVER_ID = "192.0.2.1";

    Lease database and configuration support is planned for 2012. -

    12.3. Supported standards

    The following standards and draft standards are currently +

    13.3. Supported standards

    The following standards and draft standards are currently supported:

    • RFC2131: Supported messages are DISCOVER, OFFER, REQUEST, and ACK.
    • RFC2132: Supported options are: PAD (0), END(255), Message Type(53), DHCP Server Identifier (54), Domain Name (15), DNS Servers (6), IP Address Lease Time - (51), Subnet mask (1), and Routers (3).

    12.4. DHCPv4 Server Limitations

    These are the current limitations of the DHCPv4 server + (51), Subnet mask (1), and Routers (3).

    13.4. DHCPv4 Server Limitations

    These are the current limitations of the DHCPv4 server software. Most of them are reflections of the early stage of development and should be treated as not implemented yet, rather than actual limitations.

    • During initial IPv4 node configuration, the @@ -1119,7 +1419,7 @@ const std::string HARDCODED_SERVER_ID = "192.0.2.1";

      address.

    • b10-dhcp4 does not support any configuration mechanisms yet. The whole configuration is currently hardcoded. The only way to tweak configuration - is to directly modify source code. See see Section 12.2, “DHCPv4 Server Configuration” for details.
    • Upon start, the server will open sockets on all + is to directly modify source code. See see Section 13.2, “DHCPv4 Server Configuration” for details.
    • Upon start, the server will open sockets on all interfaces that are not loopback, are up and running and have IPv4 address.
    • PRL (Parameter Request List, a list of options requested by a client) is currently ignored and server @@ -1128,16 +1428,16 @@ const std::string HARDCODED_SERVER_ID = "192.0.2.1";

      permanent. If you have legacy nodes that can't use DHCP and require BOOTP support, please use latest version of ISC DHCP http://www.isc.org/software/dhcp.

    • Interface detection is currently working on Linux - only. See Section 14.1, “Interface detection” for details.
    • b10-dhcp4 does not verify that + only. See Section 15.1, “Interface detection” for details.
    • b10-dhcp4 does not verify that assigned address is unused. According to RFC2131, the allocating server should verify that address is no used by sending ICMP echo request.
    • Address renewal (RENEW), rebinding (REBIND), confirmation (CONFIRM), duplication report (DECLINE) and release (RELEASE) are not supported yet.
    • DNS Update is not supported yet.
    • -v (verbose) command line option is currently - the default, and cannot be disabled.

    Chapter 13. DHCPv6 Server

    Dynamic Host Configuration Protocol for IPv6 (DHCPv6) is + the default, and cannot be disabled.

    Chapter 14. DHCPv6 Server

    Dynamic Host Configuration Protocol for IPv6 (DHCPv6) is specified in RFC3315. BIND10 provides DHCPv6 server implementation that is described in this chapter. For a description of the DHCPv4 - server implementation, see Chapter 12, DHCPv4 Server. + server implementation, see Chapter 13, DHCPv4 Server.

    The DHCPv6 server component is currently under intense development. You may want to check out BIND10 DHCP (Kea) wiki and recent posts on BIND10 @@ -1148,14 +1448,14 @@ const std::string HARDCODED_SERVER_ID = "192.0.2.1";

    performing DHCP configuration, they are not fully functional yet. In particular, neither has functional lease databases. This means that they will assign the same, fixed, - hardcoded addresses to any client that will ask. See Section 12.4, “DHCPv4 Server Limitations” and Section 13.4, “DHCPv6 Server Limitations” for + hardcoded addresses to any client that will ask. See Section 13.4, “DHCPv4 Server Limitations” and Section 14.4, “DHCPv6 Server Limitations” for detailed description. -

    13.1. DHCPv6 Server Usage

    +

    14.1. DHCPv6 Server Usage

    BIND10 provides the DHCPv6 server component since September 2011. It is a skeleton server and can be described as an early prototype that is not fully functional yet. It is mature enough to conduct first tests in lab environment, but it has - significant limitations. See Section 13.4, “DHCPv6 Server Limitations” for + significant limitations. See Section 14.4, “DHCPv6 Server Limitations” for details.

    The DHCPv6 server is implemented as b10-dhcp6 @@ -1190,7 +1490,7 @@ const std::string HARDCODED_SERVER_ID = "192.0.2.1";

    be started directly, but rather via bind10. Please be aware of this planned change. -

    13.2. DHCPv6 Server Configuration

    +

    14.2. DHCPv6 Server Configuration

    The DHCPv6 server does not have lease database implemented yet or any support for configuration, so every time the same set of configuration options (including the same fixed address) @@ -1209,10 +1509,10 @@ const uint32_t HARDCODED_VALID_LIFETIME = 7200; // in seconds const std::string HARDCODED_DNS_SERVER = "2001:db8:1::1";

    Lease database and configuration support is planned for 2012. -

    13.3. Supported DHCPv6 Standards

    The following standards and draft standards are currently +

    14.3. Supported DHCPv6 Standards

    The following standards and draft standards are currently supported:

    13.4. DHCPv6 Server Limitations

    These are the current limitations of the DHCPv6 server + SERVER_ID, CLIENT_ID, IA_NA, and IAADDRESS.

  • RFC3646: Supported option is DNS_SERVERS.
  • 14.4. DHCPv6 Server Limitations

    These are the current limitations of the DHCPv6 server software. Most of them are reflections of the early stage of development and should be treated as not implemented yet, rather than actual limitations.

    @@ -1222,7 +1522,7 @@ const std::string HARDCODED_DNS_SERVER = "2001:db8:1::1";

    they will both get the same fixed address.

  • b10-dhcp6 does not support any configuration mechanisms yet. The whole configuration is currently hardcoded. The only way to tweak configuration - is to directly modify source code. See see Section 13.2, “DHCPv6 Server Configuration” for details.
  • Upon start, the server will open sockets on all + is to directly modify source code. See see Section 14.2, “DHCPv6 Server Configuration” for details.
  • Upon start, the server will open sockets on all interfaces that are not loopback, are up, running and are multicast capable and have IPv6 address. Support for multiple interfaces is not coded in reception routines yet, @@ -1235,9 +1535,9 @@ const std::string HARDCODED_DNS_SERVER = "2001:db8:1::1";

    assigns DNS SERVER option.

  • Temporary addresses are not supported yet.
  • Prefix delegation is not supported yet.
  • Address renewal (RENEW), rebinding (REBIND), confirmation (CONFIRM), duplication report (DECLINE) and release (RELEASE) are not supported yet.
  • DNS Update is not supported yet.
  • Interface detection is currently working on Linux - only. See Section 14.1, “Interface detection” for details.
  • -v (verbose) command line option is currently the + only. See Section 15.1, “Interface detection” for details.
  • -v (verbose) command line option is currently the default, and cannot be disabled.
  • -

    Chapter 14. libdhcp++ library

    Table of Contents

    14.1. Interface detection
    14.2. DHCPv4/DHCPv6 packet handling

    libdhcp++ is a common library written in C++ that handles +

    Chapter 15. libdhcp++ library

    Table of Contents

    15.1. Interface detection
    15.2. DHCPv4/DHCPv6 packet handling

    libdhcp++ is a common library written in C++ that handles many DHCP-related tasks, like DHCPv4 and DHCPv6 packets parsing, manipulation and assembly, option parsing, manipulation and assembly, network interface detection and socket operations, like @@ -1248,13 +1548,13 @@ const std::string HARDCODED_DNS_SERVER = "2001:db8:1::1";

    b10-dhcp4 and b10-dhcp6 only, it is designed to be portable, universal library useful for any kind of DHCP-related software. -

    14.1. Interface detection

    Both DHCPv4 and DHCPv6 components share network +

    15.1. Interface detection

    Both DHCPv4 and DHCPv6 components share network interface detection routines. Interface detection is currently only supported on Linux systems.

    For non-Linux systems, there is currently stub implementation provided. Interface manager detects loopback interfaces only as their name (lo or lo0) can be easily predicted. Please contact BIND10 development team if you are interested - in running DHCP components on systems other than Linux.

    14.2. DHCPv4/DHCPv6 packet handling

    TODO: Describe packet handling here, with pointers to wiki

    Chapter 15. Statistics

    + in running DHCP components on systems other than Linux.

    15.2. DHCPv4/DHCPv6 packet handling

    TODO: Describe packet handling here, with pointers to wiki

    Chapter 16. Statistics

    The b10-stats process is started by bind10. It periodically collects statistics data from various modules @@ -1292,7 +1592,7 @@ const std::string HARDCODED_DNS_SERVER = "2001:db8:1::1";

    } }

    -

    Chapter 16. Logging

    Table of Contents

    16.1. Logging configuration
    16.1.1. Loggers
    16.1.2. Output Options
    16.1.3. Example session
    16.2. Logging Message Format

    16.1. Logging configuration

    +

    Chapter 17. Logging

    17.1. Logging configuration

    The logging system in BIND 10 is configured through the Logging module. All BIND 10 modules will look at the @@ -1301,7 +1601,7 @@ const std::string HARDCODED_DNS_SERVER = "2001:db8:1::1";

    -

    16.1.1. Loggers

    +

    17.1.1. Loggers

    Within BIND 10, a message is logged through a component called a "logger". Different parts of BIND 10 log messages @@ -1322,7 +1622,7 @@ const std::string HARDCODED_DNS_SERVER = "2001:db8:1::1";

    (what to log), and the output_options (where to log). -

    16.1.1.1. name (string)

    +

    17.1.1.1. name (string)

    Each logger in the system has a name, the name being that of the component using it to log messages. For instance, if you want to configure logging for the resolver module, @@ -1395,7 +1695,7 @@ const std::string HARDCODED_DNS_SERVER = "2001:db8:1::1";

    Auth.cache logger will appear in the output with a logger name of b10-auth.cache). -

    16.1.1.2. severity (string)

    +

    17.1.1.2. severity (string)

    This specifies the category of messages logged. Each message is logged with an associated severity which @@ -1411,7 +1711,7 @@ const std::string HARDCODED_DNS_SERVER = "2001:db8:1::1";

    -

    16.1.1.3. output_options (list)

    +

    17.1.1.3. output_options (list)

    Each logger can have zero or more output_options. These specify where log @@ -1421,7 +1721,7 @@ const std::string HARDCODED_DNS_SERVER = "2001:db8:1::1";

    The other options for a logger are: -

    16.1.1.4. debuglevel (integer)

    +

    17.1.1.4. debuglevel (integer)

    When a logger's severity is set to DEBUG, this value specifies what debug messages should be printed. It ranges @@ -1430,7 +1730,7 @@ const std::string HARDCODED_DNS_SERVER = "2001:db8:1::1";

    If severity for the logger is not DEBUG, this value is ignored. -

    16.1.1.5. additive (true or false)

    +

    17.1.1.5. additive (true or false)

    If this is true, the output_options from the parent will be used. For example, if there are two @@ -1444,18 +1744,18 @@ const std::string HARDCODED_DNS_SERVER = "2001:db8:1::1";

    -

    16.1.2. Output Options

    +

    17.1.2. Output Options

    The main settings for an output option are the destination and a value called output, the meaning of which depends on the destination that is set. -

    16.1.2.1. destination (string)

    +

    17.1.2.1. destination (string)

    The destination is the type of output. It can be one of: -

    • console
    • file
    • syslog

    16.1.2.2. output (string)

    +

    • console
    • file
    • syslog

    17.1.2.2. output (string)

    Depending on what is set as the output destination, this value is interpreted as follows: @@ -1485,12 +1785,12 @@ const std::string HARDCODED_DNS_SERVER = "2001:db8:1::1";

    The other options for output_options are: -

    16.1.2.2.1. flush (true of false)

    +

    17.1.2.2.1. flush (true or false)

    Flush buffers after each log message. Doing this will reduce performance but will ensure that if the program terminates abnormally, all messages up to the point of termination are output. -

    16.1.2.2.2. maxsize (integer)

    +

    17.1.2.2.2. maxsize (integer)

    Only relevant when destination is file, this is maximum file size of output files in bytes. When the maximum size is reached, the file is renamed and a new file opened. @@ -1499,11 +1799,11 @@ const std::string HARDCODED_DNS_SERVER = "2001:db8:1::1";

    etc.)

    If this is 0, no maximum file size is used. -

    16.1.2.2.3. maxver (integer)

    +

    17.1.2.2.3. maxver (integer)

    Maximum number of old log files to keep around when rolling the output file. Only relevant when destination is file. -

    16.1.3. Example session

    +

    17.1.3. Example session

    In this example we want to set the global logging to write to the file /var/log/my_bind10.log, @@ -1664,7 +1964,7 @@ Logging/loggers[0]/output_options[0]/maxver 8 integer (modified) And every module will now be using the values from the logger named *. -

    16.2. Logging Message Format

    +

    17.2. Logging Message Format

    Each message written by BIND 10 to the configured logging destinations comprises a number of components that identify the origin of the message and, if the message indicates diff --git a/doc/guide/bind10-guide.txt b/doc/guide/bind10-guide.txt index cf81af6b3f..e38b43a34b 100644 --- a/doc/guide/bind10-guide.txt +++ b/doc/guide/bind10-guide.txt @@ -4,7 +4,7 @@ Administrator Reference for BIND 10 This is the reference guide for BIND 10 version 20120405. - Copyright © 2010-2012 Internet Systems Consortium, Inc. + Copyright (c) 2010-2012 Internet Systems Consortium, Inc. Abstract @@ -81,6 +81,13 @@ Administrator Reference for BIND 10 8.2.1. In-memory Data Source + 8.2.2. In-memory Data Source With SQLite3 + Backend + + 8.2.3. Reloading an In-memory Data Source + + 8.2.4. Disabling In-memory Data Sources + 8.3. Loading Master Zones Files 9. Incoming Zone Transfers @@ -93,53 +100,63 @@ Administrator Reference for BIND 10 9.4. Trigger an Incoming Zone Transfer Manually + 9.5. Incoming Transfers with In-memory Datasource + 10. Outbound Zone Transfers - 11. Recursive Name Server + 11. Dynamic DNS Update - 11.1. Access Control + 11.1. Enabling Dynamic Update - 11.2. Forwarding + 11.2. Access Control - 12. DHCPv4 Server + 11.3. Miscellaneous Operational Issues - 12.1. DHCPv4 Server Usage + 12. Recursive Name Server - 12.2. DHCPv4 Server Configuration + 12.1. Access Control - 12.3. Supported standards + 12.2. Forwarding - 12.4. DHCPv4 Server Limitations + 13. DHCPv4 Server - 13. DHCPv6 Server + 13.1. DHCPv4 Server Usage - 13.1. DHCPv6 Server Usage + 13.2. DHCPv4 Server Configuration - 13.2. DHCPv6 Server Configuration + 13.3. Supported standards - 13.3. Supported DHCPv6 Standards + 13.4. DHCPv4 Server Limitations - 13.4. DHCPv6 Server Limitations + 14. DHCPv6 Server - 14. libdhcp++ library + 14.1. DHCPv6 Server Usage - 14.1. Interface detection + 14.2. DHCPv6 Server Configuration - 14.2. DHCPv4/DHCPv6 packet handling + 14.3. Supported DHCPv6 Standards - 15. 
Statistics + 14.4. DHCPv6 Server Limitations - 16. Logging + 15. libdhcp++ library - 16.1. Logging configuration + 15.1. Interface detection - 16.1.1. Loggers + 15.2. DHCPv4/DHCPv6 packet handling - 16.1.2. Output Options + 16. Statistics - 16.1.3. Example session + 17. Logging - 16.2. Logging Message Format + 17.1. Logging configuration + + 17.1.1. Loggers + + 17.1.2. Output Options + + 17.1.3. Example session + + 17.2. Logging Message Format List of Tables @@ -151,12 +168,12 @@ Preface 1. Acknowledgements -1. Acknowledgements +1. Acknowledgements ISC would like to acknowledge generous support for BIND 10 development of DHCPv4 and DHCPv6 components provided by Comcast. -Chapter 1. Introduction +Chapter 1. Introduction Table of Contents @@ -176,15 +193,16 @@ Chapter 1. Introduction This guide covers the experimental prototype of BIND 10 version 20120405. -1.1. Supported Platforms +1.1. Supported Platforms - BIND 10 builds have been tested on Debian GNU/Linux 5 and unstable, Ubuntu - 9.10, NetBSD 5, Solaris 10, FreeBSD 7 and 8, CentOS Linux 5.3, and MacOS - 10.6. It has been tested on Sparc, i386, and amd64 hardware platforms. It - is planned for BIND 10 to build, install and run on Windows and standard + BIND 10 builds have been tested on (in no particular order) Debian + GNU/Linux 5 and unstable, Ubuntu 9.10, NetBSD 5, Solaris 10 and 11, + FreeBSD 7 and 8, CentOS Linux 5.3, MacOS 10.6 and 10.7, and OpenBSD 5.1. + It has been tested on Sparc, i386, and amd64 hardware platforms. It is + planned for BIND 10 to build, install and run on Windows and standard Unix-type platforms. -1.2. Required Software +1.2. Required Software BIND 10 requires at least Python 3.1 (http://www.python.org/). It has also been tested with Python 3.2. @@ -199,10 +217,11 @@ Chapter 1. Introduction The authoritative DNS server uses SQLite3 (http://www.sqlite.org/). It needs at least SQLite version 3.3.9. 
- The b10-xfrin, b10-xfrout, and b10-zonemgr components require the - libpython3 library and the Python _sqlite3.so module (which is included - with Python). The Python module needs to be built for the corresponding - Python 3. + The b10-ddns, b10-xfrin, b10-xfrout, and b10-zonemgr components require + the libpython3 library and the Python _sqlite3.so module (which is + included with Python). The b10-stats-httpd component uses the Python + pyexpat.so module. The Python modules need to be built for the + corresponding Python 3. Note @@ -210,7 +229,7 @@ Chapter 1. Introduction installation nor standard packages collections. You may need to install them separately. -1.3. Starting and Stopping the Server +1.3. Starting and Stopping the Server BIND 10 is modular. Part of this modularity is accomplished using multiple cooperating processes which, together, provide the server functionality. @@ -223,43 +242,47 @@ Chapter 1. Introduction processes as needed. The processes started by the bind10 command have names starting with "b10-", including: - o b10-auth — Authoritative DNS server. This process serves DNS requests. - o b10-cfgmgr — Configuration manager. This process maintains all of the + o b10-auth -- Authoritative DNS server. This process serves DNS + requests. + o b10-cfgmgr -- Configuration manager. This process maintains all of the configuration for BIND 10. - o b10-cmdctl — Command and control service. This process allows external - control of the BIND 10 system. - o b10-msgq — Message bus daemon. This process coordinates communication + o b10-cmdctl -- Command and control service. This process allows + external control of the BIND 10 system. + o b10-ddns -- Dynamic DNS update service. This process is used to handle + incoming DNS update requests to allow granted clients to update zones + for which BIND 10 is serving as a primary server. + o b10-msgq -- Message bus daemon. This process coordinates communication between all of the other BIND 10 processes. 
- o b10-resolver — Recursive name server. This process handles incoming + o b10-resolver -- Recursive name server. This process handles incoming queries. - o b10-sockcreator — Socket creator daemon. This process creates sockets + o b10-sockcreator -- Socket creator daemon. This process creates sockets used by network-listening BIND 10 processes. - o b10-stats — Statistics collection daemon. This process collects and + o b10-stats -- Statistics collection daemon. This process collects and reports statistics data. - o b10-stats-httpd — HTTP server for statistics reporting. This process + o b10-stats-httpd -- HTTP server for statistics reporting. This process reports statistics data in XML format over HTTP. - o b10-xfrin — Incoming zone transfer service. This process is used to + o b10-xfrin -- Incoming zone transfer service. This process is used to transfer a new copy of a zone into BIND 10, when acting as a secondary server. - o b10-xfrout — Outgoing zone transfer service. This process is used to + o b10-xfrout -- Outgoing zone transfer service. This process is used to handle transfer requests to send a local zone to a remote secondary server, when acting as a master server. - o b10-zonemgr — Secondary manager. This process keeps track of timers + o b10-zonemgr -- Secondary manager. This process keeps track of timers and other necessary information for BIND 10 to act as a slave server. These are ran automatically by bind10 and do not need to be run manually. -1.4. Managing BIND 10 +1.4. Managing BIND 10 Once BIND 10 is running, a few commands are used to interact directly with the system: - o bindctl — interactive administration interface. This is a low-level + o bindctl -- interactive administration interface. This is a low-level command-line tool which allows a developer or an experienced administrator to control BIND 10. - o b10-loadzone — zone file loader. This tool will load standard + o b10-loadzone -- zone file loader. 
This tool will load standard masterfile-format zone files into BIND 10. - o b10-cmdctl-usermgr — user access control. This tool allows an + o b10-cmdctl-usermgr -- user access control. This tool allows an administrator to authorize additional users to manage BIND 10. The tools and modules are covered in full detail in this guide. In @@ -269,7 +292,7 @@ Chapter 1. Introduction Python for the message bus, configuration backend, and, of course, DNS. These include detailed developer documentation and code examples. -Chapter 2. Installation +Chapter 2. Installation Table of Contents @@ -291,7 +314,7 @@ Chapter 2. Installation 2.3.6. Install Hierarchy -2.1. Building Requirements +2.1. Building Requirements In addition to the run-time requirements, building BIND 10 from source code requires various development include headers. @@ -317,7 +340,7 @@ Chapter 2. Installation Visit the wiki at http://bind10.isc.org/wiki/SystemSpecificNotes for system-specific installation tips. -2.2. Quick start +2.2. Quick start Note @@ -328,48 +351,48 @@ Chapter 2. Installation To quickly get started with BIND 10, follow these steps. -  1. Install required run-time and build dependencies. -  2. Download the BIND 10 source tar file from + 1. Install required run-time and build dependencies. + 2. Download the BIND 10 source tar file from ftp://ftp.isc.org/isc/bind10/. -  3. Extract the tar file: + 3. Extract the tar file: $ gzcat bind10-VERSION.tar.gz | tar -xvf - -  4. Go into the source and run configure: + 4. Go into the source and run configure: $ cd bind10-VERSION $ ./configure -  5. Build it: + 5. Build it: $ make -  6. Install it (to default /usr/local): + 6. Install it (to default /usr/local): $ make install -  7. Start the server: + 7. Start the server: $ /usr/local/sbin/bind10 -  8. Test it; for example: + 8. Test it; for example: $ dig @127.0.0.1 -c CH -t TXT authors.bind -  9. Load desired zone file(s), for example: + 9. 
Load desired zone file(s), for example: $ b10-loadzone your.zone.example.org - 10. Test the new zone. + 10. Test the new zone. -2.3. Installation from source +2.3. Installation from source BIND 10 is open source software written in C++ and Python. It is freely available in source code form from ISC via the Git code revision control system or as a downloadable tar file. It may also be available in pre-compiled ready-to-use packages from operating system vendors. - 2.3.1. Download Tar File + 2.3.1. Download Tar File Downloading a release tar file is the recommended method to obtain the source code. @@ -378,7 +401,7 @@ Chapter 2. Installation ftp://ftp.isc.org/isc/bind10/. Periodic development snapshots may also be available. - 2.3.2. Retrieve from Git + 2.3.2. Retrieve from Git Downloading this "bleeding edge" code is recommended only for developers or advanced users. Using development code in a production environment is @@ -393,7 +416,7 @@ Chapter 2. Installation The latest development code, including temporary experiments and un-reviewed code, is available via the BIND 10 code revision control system. This is powered by Git and all the BIND 10 development is public. - The leading development is done in the “masterâ€. + The leading development is done in the "master". The code can be checked out from git://git.bind10.isc.org/bind10; for example: @@ -406,7 +429,7 @@ Chapter 2. Installation the --install switch. This will run autoconf, aclocal, libtoolize, autoheader, automake, and related commands. - 2.3.3. Configure before the build + 2.3.3. Configure before the build BIND 10 uses the GNU Build System to discover build environment details. To generate the makefiles using the defaults, simply run: @@ -441,14 +464,14 @@ Chapter 2. Installation If the configure fails, it may be due to missing or old dependencies. - 2.3.4. Build + 2.3.4. 
Build After the configure step is complete, to build the executables from the C++ code and prepare the Python scripts, run: $ make - 2.3.5. Install + 2.3.5. Install To install the BIND 10 executables, support files, and documentation, run: @@ -458,22 +481,22 @@ Chapter 2. Installation The install step may require superuser privileges. - 2.3.6. Install Hierarchy + 2.3.6. Install Hierarchy The following is the layout of the complete BIND 10 installation: - o bin/ — general tools and diagnostic clients. - o etc/bind10-devel/ — configuration files. - o lib/ — libraries and python modules. - o libexec/bind10-devel/ — executables that a user wouldn't normally run + o bin/ -- general tools and diagnostic clients. + o etc/bind10-devel/ -- configuration files. + o lib/ -- libraries and python modules. + o libexec/bind10-devel/ -- executables that a user wouldn't normally run directly and are not run independently. These are the BIND 10 modules which are daemons started by the bind10 tool. - o sbin/ — commands used by the system administrator. - o share/bind10-devel/ — configuration specifications. - o share/man/ — manual pages (online documentation). - o var/bind10-devel/ — data source and configuration databases. + o sbin/ -- commands used by the system administrator. + o share/bind10-devel/ -- configuration specifications. + o share/man/ -- manual pages (online documentation). + o var/bind10-devel/ -- data source and configuration databases. -Chapter 3. Starting BIND10 with bind10 +Chapter 3. Starting BIND10 with bind10 Table of Contents @@ -497,11 +520,10 @@ Chapter 3. Starting BIND10 with bind10 b10-sockcreator will allocate sockets for the rest of the system. In its default configuration, the bind10 master process will also start up - b10-cmdctl for administration tools to communicate with the system, - b10-stats for statistics collection, and b10-stats-httpd for statistics - reporting. 
+ b10-cmdctl for administration tools to communicate with the system, and + b10-stats for statistics collection. -3.1. Starting BIND 10 +3.1. Starting BIND 10 To start the BIND 10 service, simply run bind10. Run it with the --verbose switch to get additional debugging or diagnostic output. @@ -510,9 +532,9 @@ Chapter 3. Starting BIND10 with bind10 If the setproctitle Python module is detected at start up, the process names for the Python-based daemons will be renamed to better identify them - instead of just “pythonâ€. This is not needed on some operating systems. + instead of just "python". This is not needed on some operating systems. -3.2. Configuration of started processes +3.2. Configuration of started processes The processes to be started can be configured, with the exception of the b10-sockcreator, b10-msgq and b10-cfgmgr. @@ -539,7 +561,7 @@ Chapter 3. Starting BIND10 with bind10 usual way. This is the list of components that need to be started in a special way, with the value of special used for them: - Table 3.1.  + Table 3.1. +------------------------------------------------------------------------+ | Component | Special | Description | @@ -553,11 +575,11 @@ Chapter 3. Starting BIND10 with bind10 +------------------------------------------------------------------------+ The kind specifies how a failure of the component should be handled. If it - is set to “dispensable†(the default unless you set something else), it - will get started again if it fails. If it is set to “needed†and it fails + is set to "dispensable" (the default unless you set something else), it + will get started again if it fails. If it is set to "needed" and it fails at startup, the whole bind10 shuts down and exits with error exit code. But if it fails some time later, it is just started again. 
If you set it - to “coreâ€, you indicate that the system is not usable without the + to "core", you indicate that the system is not usable without the component and if such component fails, the system shuts down no matter when the failure happened. This is the behaviour of the core components (the ones you can't turn off), but you can declare any other components as @@ -570,10 +592,10 @@ Chapter 3. Starting BIND10 with bind10 the default is enough. There are other parameters we didn't use in our example. One of them is - “addressâ€. It is the address used by the component on the b10-msgq message + "address". It is the address used by the component on the b10-msgq message bus. The special components already know their address, but the usual ones don't. The address is by convention the thing after b10-, with the first - letter capital (eg. b10-stats would have “Stats†as its address). + letter capitalized (eg. b10-stats would have "Stats" as its address). The last one is process. It is the name of the process to be started. It defaults to the name of the component if not set, but you can use this to @@ -614,11 +636,11 @@ Chapter 3. Starting BIND10 with bind10 locking the sqlite database, if used. The configuration might be changed to something more convenient in future. -Chapter 4. Command channel +Chapter 4. Command channel The BIND 10 components use the b10-msgq message routing daemon to communicate with other BIND 10 components. The b10-msgq implements what is - called the “Command Channelâ€. Processes intercommunicate by sending + called the "Command Channel". Processes intercommunicate by sending messages on the command channel. Example messages include shutdown, get configurations, and set configurations. This Command Channel is not used for DNS message passing. It is used only to control and monitor the BIND @@ -628,7 +650,7 @@ Chapter 4. Command channel default, BIND 10 uses port 9912 for the b10-msgq service. It listens on 127.0.0.1. -Chapter 5. 
Configuration manager +Chapter 5. Configuration manager The configuration manager, b10-cfgmgr, handles all BIND 10 system configuration. It provides persistent storage for configuration, and @@ -640,7 +662,7 @@ Chapter 5. Configuration manager The administrator doesn't connect to it directly, but uses a user interface to communicate with the configuration manager via b10-cmdctl's - REST-ful interface. b10-cmdctl is covered in Chapter 6, Remote control + REST-ful interface. b10-cmdctl is covered in Chapter 6, Remote control daemon. Note @@ -664,10 +686,10 @@ Chapter 5. Configuration manager The configuration manager does not have any command line arguments. Normally it is not started manually, but is automatically started using - the bind10 master process (as covered in Chapter 3, Starting BIND10 with + the bind10 master process (as covered in Chapter 3, Starting BIND10 with bind10). -Chapter 6. Remote control daemon +Chapter 6. Remote control daemon Table of Contents @@ -680,7 +702,7 @@ Chapter 6. Remote control daemon When b10-cmdctl starts, it firsts asks b10-cfgmgr about what modules are running and what their configuration is (over the b10-msgq channel). Then - it will start listening on HTTPS for clients — the user interface — such + it will start listening on HTTPS for clients -- the user interface -- such as bindctl. b10-cmdctl directly sends commands (received from the user interface) to @@ -707,7 +729,7 @@ Chapter 6. Remote control daemon /usr/local/etc/bind10-devel/cmdctl-accounts.csv. This comma-delimited file lists the accounts with a user name, hashed password, and salt. (A sample file is at /usr/local/share/bind10-devel/cmdctl-accounts.csv. It contains - the user named “root†with the password “bind10â€.) + the user named "root" with the password "bind10".) The administrator may create a user account with the b10-cmdctl-usermgr tool. @@ -718,14 +740,14 @@ Chapter 6. Remote control daemon connection is stateless and times out in 1200 seconds by default. 
This can be redefined by using the --idle-timeout command line argument. -6.1. Configuration specification for b10-cmdctl +6.1. Configuration specification for b10-cmdctl The configuration items for b10-cmdctl are: key_file cert_file accounts_file The control commands are: print_settings shutdown -Chapter 7. Control and configure user interface +Chapter 7. Control and configure user interface Note @@ -745,7 +767,7 @@ Chapter 7. Control and configure user interface b10-cfgmgr which then stores the details and relays (over a b10-msgq command channel) the configuration on to the specified module. -Chapter 8. Authoritative Server +Chapter 8. Authoritative Server Table of Contents @@ -755,16 +777,22 @@ Chapter 8. Authoritative Server 8.2.1. In-memory Data Source + 8.2.2. In-memory Data Source With SQLite3 Backend + + 8.2.3. Reloading an In-memory Data Source + + 8.2.4. Disabling In-memory Data Sources + 8.3. Loading Master Zones Files The b10-auth is the authoritative DNS server. It supports EDNS0 and DNSSEC. It supports IPv6. Normally it is started by the bind10 master process. -8.1. Server Configurations +8.1. Server Configurations b10-auth is configured via the b10-cfgmgr configuration manager. The - module name is “Authâ€. The configuration data items are: + module name is "Auth". The configuration data items are: database_file This is an optional string to define the path to find the SQLite3 @@ -773,10 +801,10 @@ Chapter 8. Authoritative Server datasources datasources configures data sources. The list items include: type - to define the required data source type (such as “memoryâ€); class - to optionally select the class (it defaults to “INâ€); and zones to - define the file path name and the origin (default domain). By - default, this is empty. 
+ to define the required data source type (such as "memory"); class + to optionally select the class (it defaults to "IN"); and zones to + define the file path name, the filetype (e.g., sqlite3), and the + origin (default domain). By default, this is empty. Note @@ -784,7 +812,7 @@ Chapter 8. Authoritative Server memory data source. Only the IN class is supported at this time. By default, the memory data source is disabled. Also, currently the zone file must be canonical such as generated by - named-compilezone -D. + named-compilezone -D, or must be an SQLite3 database. listen_on listen_on is a list of addresses and ports for b10-auth to listen @@ -792,6 +820,24 @@ Chapter 8. Authoritative Server default, b10-auth listens on port 53 on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses. + Note + + The default configuration is currently not appropriate for a + multi-homed host. In case you have multiple public IP addresses, + it is possible the query UDP packet comes through one interface + and the answer goes out through another. The answer will probably + be dropped by the client, as it has a different source address + than the one it sent the query to. The client would fallback on + TCP after several attempts, which works well in this situation, + but is clearly not ideal. + + There are plans to solve the problem such that the server handles + it by itself. But until it is actually implemented, it is + recommended to alter the configuration -- remove the wildcard + addresses and list all addresses explicitly. Then the server will + answer on the same interface the request came on, preserving the + correct address. + statistics-interval statistics-interval is the timer interval in seconds for b10-auth to share its statistics information to b10-stats(8). Statistics @@ -802,9 +848,9 @@ Chapter 8. Authoritative Server loadzone loadzone tells b10-auth to load or reload a zone file. 
The arguments include: class which optionally defines the class (it - defaults to “INâ€); origin is the domain name of the zone; and + defaults to "IN"); origin is the domain name of the zone; and datasrc optionally defines the type of datasource (it defaults to - “memoryâ€). + "memory"). Note @@ -820,7 +866,7 @@ Chapter 8. Authoritative Server argument to select the process ID to stop. (Note that the BIND 10 boss process may restart this service if configured.) -8.2. Data Source Backends +8.2. Data Source Backends Note @@ -833,13 +879,13 @@ Chapter 8. Authoritative Server /usr/local/var/bind10-devel/zone.sqlite3. (The full path is what was defined at build configure time for --localstatedir. The default is /usr/local/var/.) This data file location may be changed by defining the - “database_file†configuration. + "database_file" configuration. - 8.2.1. In-memory Data Source + 8.2.1. In-memory Data Source The following commands to bindctl provide an example of configuring an - in-memory data source containing the “example.com†zone with the zone file - named “example.com.zoneâ€: + in-memory data source containing the "example.com" zone with the zone file + named "example.com.zone": > config add Auth/datasources > config set Auth/datasources[0]/type "memory" @@ -851,11 +897,32 @@ Chapter 8. Authoritative Server The authoritative server will begin serving it immediately after it is loaded. + 8.2.2. 
In-memory Data Source With SQLite3 Backend + + The following commands to bindctl provide an example of configuring an + in-memory data source containing the "example.org" zone with a SQLite3 + backend file named "example.org.sqlite3": + + > config add Auth/datasources + > config set Auth/datasources[1]/type "memory" + > config add Auth/datasources[1]/zones + > config set Auth/datasources[1]/zones[0]/origin "example.org" + > config set Auth/datasources[1]/zones[0]/file "example.org.sqlite3" + > config set Auth/datasources[1]/zones[0]/filetype "sqlite3" + > config commit + + The authoritative server will begin serving it immediately after it is + loaded. + + 8.2.3. Reloading an In-memory Data Source + Use the Auth loadzone command in bindctl to reload a changed master file into memory; for example: > Auth loadzone origin="example.com" + 8.2.4. Disabling In-memory Data Sources + By default, the memory data source is disabled; it must be configured explicitly. To disable all the in-memory zones, specify a null list for Auth/datasources: @@ -871,7 +938,7 @@ Chapter 8. Authoritative Server (Replace the list number(s) in datasources[0] and/or zones[0] for the relevant zone as needed.) -8.3. Loading Master Zones Files +8.3. Loading Master Zones Files RFC 1035 style DNS master zone files may imported into a BIND 10 SQLite3 data source by using the b10-loadzone utility. @@ -902,7 +969,7 @@ Chapter 8. Authoritative Server If you reload a zone already existing in the database, all records from that prior zone disappear and a whole new set appears. -Chapter 9. Incoming Zone Transfers +Chapter 9. Incoming Zone Transfers Table of Contents @@ -914,23 +981,19 @@ Chapter 9. Incoming Zone Transfers 9.4. Trigger an Incoming Zone Transfer Manually + 9.5. Incoming Transfers with In-memory Datasource + Incoming zones are transferred using the b10-xfrin process which is started by bind10. 
When received, the zone is stored in the corresponding BIND 10 data source, and its records can be served by b10-auth. In combination with b10-zonemgr (for automated SOA checks), this allows the - BIND 10 server to provide “secondary†service. + BIND 10 server to provide "secondary" service. The b10-xfrin process supports both AXFR and IXFR. Due to some implementation limitations of the current development release, however, it only tries AXFR by default, and care should be taken to enable IXFR. - Note - - In the current development release of BIND 10, incoming zone transfers are - only available for SQLite3-based data sources, that is, they don't work - for an in-memory data source. - -9.1. Configuration for Incoming Zone Transfers +9.1. Configuration for Incoming Zone Transfers In practice, you need to specify a list of secondary zones to enable incoming zone transfers for these zones (you can still trigger a zone @@ -947,7 +1010,7 @@ Chapter 9. Incoming Zone Transfers (We assume there has been no zone configuration before). -9.2. Enabling IXFR +9.2. Enabling IXFR As noted above, b10-xfrin uses AXFR for zone transfers by default. To enable IXFR for zone transfers for a particular zone, set the use_ixfr @@ -970,7 +1033,7 @@ Chapter 9. Incoming Zone Transfers be implemented in a near future version, at which point we will enable IXFR by default. -9.3. Secondary Manager +9.3. Secondary Manager The b10-zonemgr process is started by bind10. It keeps track of SOA refresh, retry, and expire timers and other details for BIND 10 to perform @@ -996,14 +1059,26 @@ Chapter 9. Incoming Zone Transfers for it), b10-zonemgr will automatically tell b10-xfrin to transfer the zone in. -9.4. Trigger an Incoming Zone Transfer Manually +9.4. Trigger an Incoming Zone Transfer Manually To manually trigger a zone transfer to retrieve a remote zone, you may use the bindctl utility. 
For example, at the bindctl prompt run: > Xfrin retransfer zone_name="foo.example.org" master=192.0.2.99 -Chapter 10. Outbound Zone Transfers +9.5. Incoming Transfers with In-memory Datasource + + In the case of an incoming zone transfer, the received zone is first + stored in the corresponding BIND 10 datasource. In case the secondary zone + is served by an in-memory datasource with an SQLite3 backend, b10-auth is + automatically sent a loadzone command to reload the corresponding zone + into memory from the backend. + + The administrator doesn't have to do anything for b10-auth to serve the + new version of the zone, except for the configuration such as the one + described in Section 8.2.2, "In-memory Data Source With SQLite3 Backend". + +Chapter 10. Outbound Zone Transfers The b10-xfrout process is started by bind10. When the b10-auth authoritative DNS server receives an AXFR or IXFR request, b10-auth @@ -1052,13 +1127,232 @@ Chapter 10. Outbound Zone Transfers The way to specify zone specific configuration (ACLs, etc) is likely to be changed. -Chapter 11. Recursive Name Server +Chapter 11. Dynamic DNS Update Table of Contents - 11.1. Access Control + 11.1. Enabling Dynamic Update - 11.2. Forwarding + 11.2. Access Control + + 11.3. Miscellaneous Operational Issues + + BIND 10 supports the server side of the Dynamic DNS Update (DDNS) protocol + as defined in RFC 2136. This service is provided by the b10-ddns + component, which is started by the bind10 process if configured so. + + When the b10-auth authoritative DNS server receives an UPDATE request, it + internally forwards the request to b10-ddns, which handles the rest of + request processing. When the processing is completed b10-ddns will send a + response to the client with the RCODE set to the value as specified in RFC + 2136 (NOERROR for successful update, REFUSED if rejected due to ACL check, + etc). 
If the zone has been changed as a result, it will internally notify + b10-xfrout so that other secondary servers will be notified via the DNS + notify protocol. In addition, if b10-auth serves the updated zone from its + in-memory cache (as described in Section 8.2.2, "In-memory Data Source + With SQLite3 Backend"), b10-ddns will also notify b10-auth so that + b10-auth will re-cache the updated zone content. + + The b10-ddns component supports requests over both UDP and TCP, and both + IPv6 and IPv4; for TCP requests, however, it terminates the TCP connection + immediately after each single request has been processed. Clients cannot + reuse the same TCP connection for multiple requests. (This is a current + implementation limitation of b10-ddns. While RFC 2136 doesn't specify + anything about such reuse of TCP connection, there is no reason for + disallowing it as RFC 1035 generally allows multiple requests sent over a + single TCP connection. BIND 9 supports such reuse.) + + As of this writing b10-ddns does not support update forwarding for + secondary zones. If it receives an update request for a secondary zone, it + will immediately return a response with an RCODE of NOTIMP. + + Note + + For feature completeness update forwarding should be eventually supported. + But right now it's considered a lower priority task and there is no + specific plan of implementing this feature. + +11.1. Enabling Dynamic Update + + First off, it must be made sure that a few components on which b10-ddns + depends are configured to run, which are b10-auth and b10-zonemgr. In + addition, b10-xfrout should also be configured to run; otherwise the + notification after an update (see above) will fail with a timeout, + suspending the DDNS service while b10-ddns waits for the response (see the + description of the DDNS_UPDATE_NOTIFY_FAIL log message for further + details). 
If BIND 10 is already configured to provide authoritative DNS + service, they should normally be configured to run already. + + Second, for the obvious reason dynamic update requires that the underlying + data source storing the zone data be writable. In the current + implementation this means the zone must be stored in an SQLite3-based data + source. Also, right now, the b10-ddns component configures itself with the + data source referring to the "database_file" configuration parameter of + b10-auth. So this information must be configured correctly before starting + b10-ddns. + + Note + + The way to configure data sources is now being revised. Configuration on + the data source for DDNS will very likely be changed in a backward + incompatible manner in a near future version. + + In general, if something goes wrong regarding the dependency described + above, b10-ddns will log the related event at the warning or error level. + It's advisable to check the log message when you first enable DDNS or if + it doesn't work as you expect to see if there's any warning or error log + message. + + Next, to enable the DDNS service, b10-ddns needs to be explicitly + configured to run. It can be done by using the bindctl utility. For + example: + + > config add Boss/components b10-ddns + > config set Boss/components/b10-ddns/address DDNS + > config set Boss/components/b10-ddns/kind dispensable + > config commit + + Note + + In theory "kind" could be omitted because "dispensable" is its default. + But there's some peculiar behavior (which should be a bug and should be + fixed eventually; see Trac ticket #2064) with bindctl and you'll still + need to specify that explicitly. Likewise, "address" may look unnecessary + because b10-ddns would start and work without specifying it. But for it to + shut down gracefully this parameter should also be specified. + +11.2. 
Access Control + + By default b10-ddns rejects any update requests from any clients by + returning a response with an RCODE of REFUSED. To allow updates to take + effect, an access control rule (called update ACL) with a policy allowing + updates must explicitly be configured. Update ACL must be configured per + zone basis in the "zones" configuration parameter of b10-ddns. This is a + list of per-zone configurations regarding DDNS. Each list element consists + of the following parameters: + + origin + The zone's origin name + + class + The RR class of the zone (normally "IN", and in that case can be + omitted in configuration) + + update_acl + List of access control rules (ACL) for the zone + + The syntax of the ACL is the same as ACLs for other components. Specific + examples are given below. + + In general, an update ACL rule that allows an update request should be + configured with a TSIG key. This is an example update ACL that allows + updates to the zone named "example.org" of RR class "IN" from clients that + send requests signed with a TSIG whose key name is "key.example.org" (and + refuses all others): + + > config add DDNS/zones + > config set DDNS/zones[0]/origin example.org + > config set DDNS/zones[0]/class IN + (Note: "class" can be omitted) + > config add DDNS/zones[0]/update_acl {"action": "ACCEPT", "key": "key.example.org"} + > config commit + + The TSIG key must be configured system wide (see Chapter 10, Outbound Zone + Transfers.) + + Multiple rules can be specified in the ACL, and an ACL rule can consist of + multiple constraints, such as a combination of IP address and TSIG. The + following configuration sequence will add a new rule to the ACL created in + the above example. This additional rule allows update requests sent from a + client using TSIG key name of "key.example" (different from the key used + in the previous example) and has an IPv6 address of ::1. 
+ + > config add DDNS/zones[0]/update_acl {"action": "ACCEPT", "from": "::1", "key": "key.example"} + > config show DDNS/zones[0]/update_acl + DDNS/zones[0]/update_acl[0] {"action": "ACCEPT", "key": "key.example.org"} any (modified) + DDNS/zones[0]/update_acl[1] {"action": "ACCEPT", "from": "::1", "key": "key.example"} any (modified) + > config commit + + (Note the "add" in the first line. Before this sequence, we have had only + one entry in zones[0]/update_acl. The "add" command with a value (rule) + adds a new entry and sets it to the given rule. Due to a limitation of the + current implementation, it doesn't work if you first try to just add a new + entry and then set it to a given rule). + + Note + + The b10-ddns component accepts an ACL rule that just allows updates from a + specific IP address (i.e., without requiring TSIG), but this is highly + discouraged (remember that requests can be made over UDP and spoofing the + source address of a UDP packet is often pretty easy). Unless you know what + you are doing and that you can accept its consequence, any update ACL rule + that allows updates should have a TSIG key in its constraints. + + The ACL rules will be checked in the listed order, and the first matching + one will apply. If none of the rules matches, the default rule will apply, + which is rejecting any requests in the case of b10-ddns. + + Other actions than "ACCEPT", namely "REJECT" and "DROP", can be used, too. + See Chapter 12, Recursive Name Server about their effects. + + Currently update ACL can only control updates on a per-zone basis; it's + not possible to specify access control with higher granularity such as for + particular domain names or specific types of RRs. + + Note + + Contrary to what RFC 2136 (literally) specifies, b10-ddns checks the + update ACL before checking the prerequisites of the update request. This + is a deliberate implementation decision. 
This counter-intuitive + specification has been repeatedly discussed among implementers and in the + IETF, and it is now widely agreed that it does not make sense to strictly + follow that part of the RFC. One known specific bad result of following + the RFC is that it could leak information about which name or record + exists or does not exist in the zone as a result of prerequisite checks + even if a zone is somehow configured to reject normal queries from + arbitrary clients. There have been other troubles that could have been + avoided if the ACL could be checked before the prerequisite check. + +11.3. Miscellaneous Operational Issues + + Unlike BIND 9, BIND 10 currently does not support automatic resigning of a + DNSSEC-signed zone when it's updated via DDNS. It could be possible to + resign the updated zone afterwards or make sure the update request also + updates related DNSSEC records, but that will be a pretty error-prone + operation. In general, it's not advisable to allow DDNS for a signed zone + at this moment. + + Also unlike BIND 9, it's currently not possible to "freeze" a zone + temporarily in order to suspend DDNS while you manually update the zone. + If you need to make manual updates to a dynamic zone, you'll need to + temporarily reject any updates to the zone via the update ACLs. + + Dynamic updates are only applicable to primary zones. In order to avoid + updating secondary zones via DDNS requests, b10-ddns refers to the + "secondary_zones" configuration of b10-zonemgr. Zones listed in + "secondary_zones" will never be updated via DDNS regardless of the update + ACL configuration; b10-ddns will return a response with an RCODE of + NOTAUTH as specified in RFC 2136. 
If you have a "conceptual" secondary + zone whose content is a copy of some external source but is not updated + via the standard zone transfers and therefore not listed in + "secondary_zones", be careful not to allow DDNS for the zone; it would be + quite likely to lead to inconsistent state between different servers. + Normally this should not be a problem because the default update ACL + rejects any update requests, but you may want to take an extra care about + the configuration if you have such type of secondary zones. + + The difference of two versions of a zone, before and after a DDNS + transaction, is automatically recorded in the underlying data source, and + can be retrieved in the form of outbound IXFR. This is done automatically; + it does not require specific configuration to make this possible. + +Chapter 12. Recursive Name Server + + Table of Contents + + 12.1. Access Control + + 12.2. Forwarding The b10-resolver process is started by bind10. @@ -1083,26 +1377,26 @@ Chapter 11. Recursive Name Server > config set Resolver/listen_on[2]/port 53 > config commit - (Replace the “2†as needed; run “config show Resolver/listen_on†if + (Replace the "2" as needed; run "config show Resolver/listen_on" if needed.) -11.1. Access Control +12.1. Access Control By default, the b10-resolver daemon only accepts DNS queries from the localhost (127.0.0.1 and ::1). The Resolver/query_acl configuration may be used to reject, drop, or allow specific IPs or networks. This configuration list is first match. - The configuration's action item may be set to “ACCEPT†to allow the - incoming query, “REJECT†to respond with a DNS REFUSED return code, or - “DROP†to ignore the query without any response (such as a blackhole). For + The configuration's action item may be set to "ACCEPT" to allow the + incoming query, "REJECT" to respond with a DNS REFUSED return code, or + "DROP" to ignore the query without any response (such as a blackhole). 
For more information, see the respective debugging messages: RESOLVER_QUERY_ACCEPTED, RESOLVER_QUERY_REJECTED, and RESOLVER_QUERY_DROPPED. The required configuration's from item is set to an IPv4 or IPv6 address, addresses with an network mask, or to the special lowercase keywords - “any6†(for any IPv6 address) or “any4†(for any IPv4 address). + "any6" (for any IPv6 address) or "any4" (for any IPv4 address). For example to allow the 192.168.1.0/24 network to use your recursive name server, at the bindctl prompt run: @@ -1112,14 +1406,14 @@ Chapter 11. Recursive Name Server > config set Resolver/query_acl[2]/from "192.168.1.0/24" > config commit - (Replace the “2†as needed; run “config show Resolver/query_acl†if + (Replace the "2" as needed; run "config show Resolver/query_acl" if needed.) Note This prototype access control configuration syntax may be changed. -11.2. Forwarding +12.2. Forwarding To enable forwarding, the upstream address and port must be configured to forward queries to, such as: @@ -1135,17 +1429,17 @@ Chapter 11. Recursive Name Server > config set Resolver/forward_addresses [] > config commit -Chapter 12. DHCPv4 Server +Chapter 13. DHCPv4 Server Table of Contents - 12.1. DHCPv4 Server Usage + 13.1. DHCPv4 Server Usage - 12.2. DHCPv4 Server Configuration + 13.2. DHCPv4 Server Configuration - 12.3. Supported standards + 13.3. Supported standards - 12.4. DHCPv4 Server Limitations + 13.4. DHCPv4 Server Limitations Dynamic Host Configuration Protocol for IPv4 (DHCP or DHCPv4) and Dynamic Host Configuration Protocol for IPv6 (DHCPv6) are protocols that allow one @@ -1155,7 +1449,7 @@ Chapter 12. DHCPv4 Server clients. Even though principles of both DHCPv4 and DHCPv6 are somewhat similar, these are two radically different protocols. BIND10 offers server implementations for both DHCPv4 and DHCPv6. This chapter is about DHCP for - IPv4. For a description of the DHCPv6 server, see Chapter 13, DHCPv6 + IPv4. 
For a description of the DHCPv6 server, see Chapter 14, DHCPv6 Server. The DHCPv4 server component is currently under intense development. You @@ -1163,7 +1457,7 @@ Chapter 12. DHCPv4 Server developers mailing list. The DHCPv4 and DHCPv6 components in BIND10 architecture are internally - code named “Keaâ€. + code named "Kea". Note @@ -1171,17 +1465,17 @@ Chapter 12. DHCPv4 Server servers. That means that while they are capable of performing DHCP configuration, they are not fully functional yet. In particular, neither has functional lease databases. This means that they will assign the same, - fixed, hardcoded addresses to any client that will ask. See Section 12.4, - “DHCPv4 Server Limitations†and Section 13.4, “DHCPv6 Server Limitations†+ fixed, hardcoded addresses to any client that will ask. See Section 13.4, + "DHCPv4 Server Limitations" and Section 14.4, "DHCPv6 Server Limitations" for detailed description. -12.1. DHCPv4 Server Usage +13.1. DHCPv4 Server Usage BIND10 provides the DHCPv4 server component since December 2011. It is a skeleton server and can be described as an early prototype that is not fully functional yet. It is mature enough to conduct first tests in lab - environment, but it has significant limitations. See Section 12.4, “DHCPv4 - Server Limitations†for details. + environment, but it has significant limitations. See Section 13.4, "DHCPv4 + Server Limitations" for details. The DHCPv4 server is implemented as b10-dhcp4 daemon. As it is not configurable yet, it is fully autonomous, that is it does not interact @@ -1207,7 +1501,7 @@ Chapter 12. DHCPv4 Server started directly, but rather via bind10. Please be aware of this planned change. -12.2. DHCPv4 Server Configuration +13.2. DHCPv4 Server Configuration The DHCPv4 server does not have a lease database implemented yet nor any support for configuration, so every time the same set of configuration @@ -1228,82 +1522,82 @@ Chapter 12. 
DHCPv4 Server Lease database and configuration support is planned for 2012. -12.3. Supported standards +13.3. Supported standards The following standards and draft standards are currently supported: - o RFC2131: Supported messages are DISCOVER, OFFER, REQUEST, and ACK. - o RFC2132: Supported options are: PAD (0), END(255), Message Type(53), + o RFC2131: Supported messages are DISCOVER, OFFER, REQUEST, and ACK. + o RFC2132: Supported options are: PAD (0), END(255), Message Type(53), DHCP Server Identifier (54), Domain Name (15), DNS Servers (6), IP Address Lease Time (51), Subnet mask (1), and Routers (3). -12.4. DHCPv4 Server Limitations +13.4. DHCPv4 Server Limitations These are the current limitations of the DHCPv4 server software. Most of them are reflections of the early stage of development and should be - treated as “not implemented yetâ€, rather than actual limitations. + treated as "not implemented yet", rather than actual limitations. - o During initial IPv4 node configuration, the server is expected to send + o During initial IPv4 node configuration, the server is expected to send packets to a node that does not have IPv4 address assigned yet. The server requires certain tricks (or hacks) to transmit such packets. This is not implemented yet, therefore DHCPv4 server supports relayed traffic only (that is, normal point to point communication). - o b10-dhcp4 provides a single, fixed, hardcoded lease to any client that + o b10-dhcp4 provides a single, fixed, hardcoded lease to any client that asks. There is no lease manager implemented. If two clients request addresses, they will both get the same fixed address. - o b10-dhcp4 does not support any configuration mechanisms yet. The whole + o b10-dhcp4 does not support any configuration mechanisms yet. The whole configuration is currently hardcoded. The only way to tweak - configuration is to directly modify source code. See see Section 12.2, - “DHCPv4 Server Configuration†for details. 
- o Upon start, the server will open sockets on all interfaces that are + configuration is to directly modify source code. See Section 13.2, + "DHCPv4 Server Configuration" for details. + o Upon start, the server will open sockets on all interfaces that are not loopback, are up and running and have IPv4 address. Support for multiple interfaces is not coded in reception routines yet, so if you are running this code on a machine that has many interfaces and b10-dhcp4 happens to listen on wrong interface, the easiest way to work around this problem is to turn down other interfaces. This limitation will be fixed shortly. - o PRL (Parameter Request List, a list of options requested by a client) + o PRL (Parameter Request List, a list of options requested by a client) is currently ignored and server assigns DNS SERVER and DOMAIN NAME options. - o b10-dhcp4 does not support BOOTP. That is a design choice. This + o b10-dhcp4 does not support BOOTP. That is a design choice. This limitation is permanent. If you have legacy nodes that can't use DHCP and require BOOTP support, please use latest version of ISC DHCP http://www.isc.org/software/dhcp. - o Interface detection is currently working on Linux only. See - Section 14.1, “Interface detection†for details. - o b10-dhcp4 does not verify that assigned address is unused. According + o Interface detection is currently working on Linux only. See + Section 15.1, "Interface detection" for details. + o b10-dhcp4 does not verify that assigned address is unused. According to RFC2131, the allocating server should verify that address is no used by sending ICMP echo request. - o Address renewal (RENEW), rebinding (REBIND), confirmation (CONFIRM), + o Address renewal (RENEW), rebinding (REBIND), confirmation (CONFIRM), duplication report (DECLINE) and release (RELEASE) are not supported yet. - o DNS Update is not supported yet. 
- o -v (verbose) command line option is currently the default, and cannot + o DNS Update is not supported yet. + o -v (verbose) command line option is currently the default, and cannot be disabled. -Chapter 13. DHCPv6 Server +Chapter 14. DHCPv6 Server Table of Contents - 13.1. DHCPv6 Server Usage + 14.1. DHCPv6 Server Usage - 13.2. DHCPv6 Server Configuration + 14.2. DHCPv6 Server Configuration - 13.3. Supported DHCPv6 Standards + 14.3. Supported DHCPv6 Standards - 13.4. DHCPv6 Server Limitations + 14.4. DHCPv6 Server Limitations Dynamic Host Configuration Protocol for IPv6 (DHCPv6) is specified in RFC3315. BIND10 provides DHCPv6 server implementation that is described in this chapter. For a description of the DHCPv4 server implementation, see - Chapter 12, DHCPv4 Server. + Chapter 13, DHCPv4 Server. The DHCPv6 server component is currently under intense development. You may want to check out BIND10 DHCP (Kea) wiki and recent posts on BIND10 developers mailing list. The DHCPv4 and DHCPv6 components in BIND10 architecture are internally - code named “Keaâ€. + code named "Kea". Note @@ -1311,17 +1605,17 @@ Chapter 13. DHCPv6 Server servers. That means that while they are capable of performing DHCP configuration, they are not fully functional yet. In particular, neither has functional lease databases. This means that they will assign the same, - fixed, hardcoded addresses to any client that will ask. See Section 12.4, - “DHCPv4 Server Limitations†and Section 13.4, “DHCPv6 Server Limitations†+ fixed, hardcoded addresses to any client that will ask. See Section 13.4, + "DHCPv4 Server Limitations" and Section 14.4, "DHCPv6 Server Limitations" for detailed description. -13.1. DHCPv6 Server Usage +14.1. DHCPv6 Server Usage BIND10 provides the DHCPv6 server component since September 2011. It is a skeleton server and can be described as an early prototype that is not fully functional yet. 
It is mature enough to conduct first tests in lab - environment, but it has significant limitations. See Section 13.4, “DHCPv6 - Server Limitations†for details. + environment, but it has significant limitations. See Section 14.4, "DHCPv6 + Server Limitations" for details. The DHCPv6 server is implemented as b10-dhcp6 daemon. As it is not configurable yet, it is fully autonomous, that is it does not interact @@ -1347,7 +1641,7 @@ Chapter 13. DHCPv6 Server started directly, but rather via bind10. Please be aware of this planned change. -13.2. DHCPv6 Server Configuration +14.2. DHCPv6 Server Configuration The DHCPv6 server does not have lease database implemented yet or any support for configuration, so every time the same set of configuration @@ -1367,56 +1661,56 @@ Chapter 13. DHCPv6 Server Lease database and configuration support is planned for 2012. -13.3. Supported DHCPv6 Standards +14.3. Supported DHCPv6 Standards The following standards and draft standards are currently supported: - o RFC3315: Supported messages are SOLICIT, ADVERTISE, REQUEST, and + o RFC3315: Supported messages are SOLICIT, ADVERTISE, REQUEST, and REPLY. Supported options are SERVER_ID, CLIENT_ID, IA_NA, and IAADDRESS. - o RFC3646: Supported option is DNS_SERVERS. + o RFC3646: Supported option is DNS_SERVERS. -13.4. DHCPv6 Server Limitations +14.4. DHCPv6 Server Limitations These are the current limitations of the DHCPv6 server software. Most of them are reflections of the early stage of development and should be - treated as “not implemented yetâ€, rather than actual limitations. + treated as "not implemented yet", rather than actual limitations. - o Relayed traffic is not supported. - o b10-dhcp6 provides a single, fixed, hardcoded lease to any client that + o Relayed traffic is not supported. + o b10-dhcp6 provides a single, fixed, hardcoded lease to any client that asks. There is no lease manager implemented. If two clients request addresses, they will both get the same fixed address. 
- o b10-dhcp6 does not support any configuration mechanisms yet. The whole + o b10-dhcp6 does not support any configuration mechanisms yet. The whole configuration is currently hardcoded. The only way to tweak - configuration is to directly modify source code. See see Section 13.2, - “DHCPv6 Server Configuration†for details. - o Upon start, the server will open sockets on all interfaces that are + configuration is to directly modify source code. See Section 14.2, + "DHCPv6 Server Configuration" for details. + o Upon start, the server will open sockets on all interfaces that are not loopback, are up, running and are multicast capable and have IPv6 address. Support for multiple interfaces is not coded in reception routines yet, so if you are running this code on a machine that has many interfaces and b10-dhcp6 happens to listen on wrong interface, the easiest way to work around this problem is to turn down other interfaces. This limitation will be fixed shortly. - o ORO (Option Request Option, a list of options requested by a client) + o ORO (Option Request Option, a list of options requested by a client) is currently ignored and server assigns DNS SERVER option. - o Temporary addresses are not supported yet. - o Prefix delegation is not supported yet. - o Address renewal (RENEW), rebinding (REBIND), confirmation (CONFIRM), + o Temporary addresses are not supported yet. + o Prefix delegation is not supported yet. + o Address renewal (RENEW), rebinding (REBIND), confirmation (CONFIRM), duplication report (DECLINE) and release (RELEASE) are not supported yet. - o DNS Update is not supported yet. - o Interface detection is currently working on Linux only. See - Section 14.1, “Interface detection†for details. - o -v (verbose) command line option is currently the default, and cannot + o DNS Update is not supported yet. + o Interface detection is currently working on Linux only. See + Section 15.1, "Interface detection" for details. 
+ o -v (verbose) command line option is currently the default, and cannot be disabled. -Chapter 14. libdhcp++ library +Chapter 15. libdhcp++ library Table of Contents - 14.1. Interface detection + 15.1. Interface detection - 14.2. DHCPv4/DHCPv6 packet handling + 15.2. DHCPv4/DHCPv6 packet handling libdhcp++ is a common library written in C++ that handles many DHCP-related tasks, like DHCPv4 and DHCPv6 packets parsing, manipulation @@ -1428,7 +1722,7 @@ Chapter 14. libdhcp++ library is designed to be portable, universal library useful for any kind of DHCP-related software. -14.1. Interface detection +15.1. Interface detection Both DHCPv4 and DHCPv6 components share network interface detection routines. Interface detection is currently only supported on Linux @@ -1450,11 +1744,11 @@ Chapter 14. libdhcp++ library # For DHCPv4, please use following format: #eth0 192.0.2.5 -14.2. DHCPv4/DHCPv6 packet handling +15.2. DHCPv4/DHCPv6 packet handling TODO: Describe packet handling here, with pointers to wiki -Chapter 15. Statistics +Chapter 16. Statistics The b10-stats process is started by bind10. It periodically collects statistics data from various modules and aggregates it. @@ -1486,27 +1780,27 @@ Chapter 15. Statistics } -Chapter 16. Logging +Chapter 17. Logging Table of Contents - 16.1. Logging configuration + 17.1. Logging configuration - 16.1.1. Loggers + 17.1.1. Loggers - 16.1.2. Output Options + 17.1.2. Output Options - 16.1.3. Example session + 17.1.3. Example session - 16.2. Logging Message Format + 17.2. Logging Message Format -16.1. Logging configuration +17.1. Logging configuration The logging system in BIND 10 is configured through the Logging module. All BIND 10 modules will look at the configuration in Logging to see what should be logged and to where. - 16.1.1. Loggers + 17.1.1. Loggers Within BIND 10, a message is logged through a component called a "logger". 
Different parts of BIND 10 log messages through different loggers, and @@ -1519,78 +1813,78 @@ Chapter 16. Logging (the component that is generating the messages), the severity (what to log), and the output_options (where to log). - 16.1.1.1. name (string) + 17.1.1.1. name (string) Each logger in the system has a name, the name being that of the component using it to log messages. For instance, if you want to configure logging - for the resolver module, you add an entry for a logger named “Resolverâ€. + for the resolver module, you add an entry for a logger named "Resolver". This configuration will then be used by the loggers in the Resolver module, and all the libraries used by it. If you want to specify logging for one specific library within the module, you set the name to module.library. For example, the logger used by the - nameserver address store component has the full name of “Resolver.nsasâ€. + nameserver address store component has the full name of "Resolver.nsas". If there is no entry in Logging for a particular library, it will use the configuration given for the module. To illustrate this, suppose you want the cache library to log messages of severity DEBUG, and the rest of the resolver code to log messages of severity INFO. To achieve this you specify two loggers, one with the name - “Resolver†and severity INFO, and one with the name “Resolver.cache†with + "Resolver" and severity INFO, and one with the name "Resolver.cache" with severity DEBUG. As there are no entries for other libraries (e.g. the - nsas), they will use the configuration for the module (“Resolverâ€), so + nsas), they will use the configuration for the module ("Resolver"), so giving the desired behavior. - One special case is that of a module name of “*†(asterisks), which is + One special case is that of a module name of "*" (asterisks), which is interpreted as any module. 
You can set global logging options by using this, including setting the logging configuration for a library that is - used by multiple modules (e.g. “*.config†specifies the configuration + used by multiple modules (e.g. "*.config" specifies the configuration library code in whatever module is using it). If there are multiple logger specifications in the configuration that might match a particular logger, the specification with the more specific logger name takes precedence. For example, if there are entries for for - both “*†and “Resolverâ€, the resolver module — and all libraries it uses — - will log messages according to the configuration in the second entry - (“Resolverâ€). All other modules will use the configuration of the first - entry (“*â€). If there was also a configuration entry for “Resolver.cacheâ€, + both "*" and "Resolver", the resolver module -- and all libraries it uses + -- will log messages according to the configuration in the second entry + ("Resolver"). All other modules will use the configuration of the first + entry ("*"). If there was also a configuration entry for "Resolver.cache", the cache library within the resolver would use that in preference to the - entry for “Resolverâ€. + entry for "Resolver". One final note about the naming. When specifying the module name within a logger, use the name of the module as specified in bindctl, e.g. - “Resolver†for the resolver module, “Xfrout†for the xfrout module, etc. + "Resolver" for the resolver module, "Xfrout" for the xfrout module, etc. When the message is logged, the message will include the name of the logger generating the message, but with the module name replaced by the name of the process implementing the module (so for example, a message - generated by the “Auth.cache†logger will appear in the output with a - logger name of “b10-auth.cacheâ€). + generated by the "Auth.cache" logger will appear in the output with a + logger name of "b10-auth.cache"). - 16.1.1.2. 
severity (string) + 17.1.1.2. severity (string) This specifies the category of messages logged. Each message is logged with an associated severity which may be one of the following (in descending order of severity): - o FATAL - o ERROR - o WARN - o INFO - o DEBUG + o FATAL + o ERROR + o WARN + o INFO + o DEBUG When the severity of a logger is set to one of these values, it will only log messages of that severity, and the severities above it. The severity may also be set to NONE, in which case all messages from that logger are inhibited. - 16.1.1.3. output_options (list) + 17.1.1.3. output_options (list) Each logger can have zero or more output_options. These specify where log messages are sent to. These are explained in detail below. The other options for a logger are: - 16.1.1.4. debuglevel (integer) + 17.1.1.4. debuglevel (integer) When a logger's severity is set to DEBUG, this value specifies what debug messages should be printed. It ranges from 0 (least verbose) to 99 (most @@ -1598,80 +1892,80 @@ Chapter 16. Logging If severity for the logger is not DEBUG, this value is ignored. - 16.1.1.5. additive (true or false) + 17.1.1.5. additive (true or false) If this is true, the output_options from the parent will be used. For - example, if there are two loggers configured; “Resolver†and - “Resolver.cacheâ€, and additive is true in the second, it will write the - log messages not only to the destinations specified for “Resolver.cacheâ€, + example, if there are two loggers configured; "Resolver" and + "Resolver.cache", and additive is true in the second, it will write the + log messages not only to the destinations specified for "Resolver.cache", but also to the destinations as specified in the output_options in the - logger named “Resolverâ€. + logger named "Resolver". - 16.1.2. Output Options + 17.1.2. Output Options The main settings for an output option are the destination and a value called output, the meaning of which depends on the destination that is set. 
- 16.1.2.1. destination (string) + 17.1.2.1. destination (string) The destination is the type of output. It can be one of: - o console - o file - o syslog + o console + o file + o syslog - 16.1.2.2. output (string) + 17.1.2.2. output (string) Depending on what is set as the output destination, this value is interpreted as follows: - destination is “console†+ destination is "console" - The value of output must be one of “stdout†(messages printed to - standard output) or “stderr†(messages printed to standard error). + The value of output must be one of "stdout" (messages printed to + standard output) or "stderr" (messages printed to standard error). - Note: if output is set to “stderr†and a lot of messages are + Note: if output is set to "stderr" and a lot of messages are produced in a short time (e.g. if the logging level is set to DEBUG), you may occasionally see some messages jumbled up together. This is due to a combination of the way that messages are written to the screen and the unbuffered nature of the standard error stream. If this occurs, it is recommended that - output be set to “stdoutâ€. + output be set to "stdout". - destination is “file†+ destination is "file" The value of output is interpreted as a file name; log messages will be appended to this file. - destination is “syslog†+ destination is "syslog" The value of output is interpreted as the syslog facility (e.g. local0) that should be used for log messages. The other options for output_options are: - 16.1.2.2.1. flush (true of false) + 17.1.2.2.1. flush (true or false) Flush buffers after each log message. Doing this will reduce performance but will ensure that if the program terminates abnormally, all messages up to the point of termination are output. - 16.1.2.2.2. maxsize (integer) + 17.1.2.2.2. maxsize (integer) Only relevant when destination is file, this is maximum file size of output files in bytes. When the maximum size is reached, the file is renamed and a new file opened. 
(For example, a ".1" is appended to the - name — if a ".1" file exists, it is renamed ".2", etc.) + name -- if a ".1" file exists, it is renamed ".2", etc.) If this is 0, no maximum file size is used. - 16.1.2.2.3. maxver (integer) + 17.1.2.2.3. maxver (integer) Maximum number of old log files to keep around when rolling the output - file. Only relevant when destination is “fileâ€. + file. Only relevant when destination is "file". - 16.1.3. Example session + 17.1.3. Example session In this example we want to set the global logging to write to the file /var/log/my_bind10.log, at severity WARN. We want the authoritative server @@ -1770,9 +2064,9 @@ Chapter 16. Logging > config remove Logging/loggers[1] > config commit - And every module will now be using the values from the logger named “*â€. + And every module will now be using the values from the logger named "*". -16.2. Logging Message Format +17.2. Logging Message Format Each message written by BIND 10 to the configured logging destinations comprises a number of components that identify the origin of the message diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml index d51bb0dd11..1bdc0053f5 100644 --- a/doc/guide/bind10-guide.xml +++ b/doc/guide/bind10-guide.xml @@ -87,9 +87,10 @@

    Supported Platforms - BIND 10 builds have been tested on Debian GNU/Linux 5 and unstable, - Ubuntu 9.10, NetBSD 5, Solaris 10, FreeBSD 7 and 8, CentOS - Linux 5.3, and MacOS 10.6. + BIND 10 builds have been tested on (in no particular order) + Debian GNU/Linux 5 and unstable, Ubuntu 9.10, NetBSD 5, + Solaris 10 and 11, FreeBSD 7 and 8, CentOS Linux 5.3, + MacOS 10.6 and 10.7, and OpenBSD 5.1. It has been tested on Sparc, i386, and amd64 hardware platforms. @@ -127,11 +128,13 @@ - The b10-xfrin, b10-xfrout, - and b10-zonemgr components require the - libpython3 library and the Python _sqlite3.so module - (which is included with Python). - The Python module needs to be built for the corresponding Python 3. + The b10-ddns, b10-xfrin, + b10-xfrout, and b10-zonemgr + components require the libpython3 library and the Python + _sqlite3.so module (which is included with Python). + The b10-stats-httpd component uses the + Python pyexpat.so module. + The Python modules need to be built for the corresponding Python 3. @@ -194,6 +197,16 @@ + + + b10-ddns — + Dynamic DNS update service. + This process is used to handle incoming DNS update + requests to allow granted clients to update zones + for which BIND 10 is serving as a primary server. + + + b10-msgq — @@ -876,7 +889,7 @@ as a dependency earlier --> message bus. The special components already know their address, but the usual ones don't. The address is by convention the thing after b10-, with - the first letter capital (eg. b10-stats + the first letter capitalized (eg. b10-stats would have Stats as its address). @@ -1321,9 +1334,10 @@ This may be a temporary setting until then. class to optionally select the class (it defaults to IN); and - zones to define the - file path name and the - origin (default domain). + zones to define + the file path name, + the filetype (e.g., sqlite3), + and the origin (default domain). By default, this is empty. @@ -1333,7 +1347,8 @@ This may be a temporary setting until then. 
Only the IN class is supported at this time. By default, the memory data source is disabled. Also, currently the zone file must be canonical such as - generated by named-compilezone -D. + generated by named-compilezone -D, or + must be an SQLite3 database. @@ -1350,6 +1365,24 @@ This may be a temporary setting until then. and port number. By default, b10-auth listens on port 53 on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses. + + + The default configuration is currently not appropriate for a multi-homed host. + In case you have multiple public IP addresses, it is possible the + query UDP packet comes through one interface and the answer goes out + through another. The answer will probably be dropped by the client, as it + has a different source address than the one it sent the query to. The + client would fallback on TCP after several attempts, which works + well in this situation, but is clearly not ideal. + + + There are plans to solve the problem such that the server handles + it by itself. But until it is actually implemented, it is recommended to + alter the configuration — remove the wildcard addresses and list all + addresses explicitly. Then the server will answer on the same + interface the request came on, preserving the correct address. + + @@ -1478,6 +1511,39 @@ This may be a temporary setting until then. after it is loaded. +
    + +
    + In-memory Data Source With SQLite3 Backend + + + + The following commands to bindctl + provide an example of configuring an in-memory data + source containing the example.org zone + with a SQLite3 backend file named example.org.sqlite3: + + + + > config add Auth/datasources +> config set Auth/datasources[1]/type "" +> config add Auth/datasources[1]/zones +> config set Auth/datasources[1]/zones[0]/origin "" +> config set Auth/datasources[1]/zones[0]/file "" +> config set Auth/datasources[1]/zones[0]/filetype "" +> config commit + + The authoritative server will begin serving it immediately + after it is loaded. + + +
    + +
    + Reloading an In-memory Data Source + Use the Auth loadzone command in bindctl to reload a changed master @@ -1496,6 +1562,10 @@ This may be a temporary setting until then. --> +
    +
    + Disabling In-memory Data Sources + By default, the memory data source is disabled; it must be configured explicitly. To disable all the in-memory zones, @@ -1628,12 +1698,6 @@ TODO - - In the current development release of BIND 10, incoming zone - transfers are only available for SQLite3-based data sources, - that is, they don't work for an in-memory data source. - -
    Configuration for Incoming Zone Transfers @@ -1753,6 +1817,26 @@ what if a NOTIFY is sent?
    +
    + Incoming Transfers with In-memory Datasource + + + In the case of an incoming zone transfer, the received zone is + first stored in the corresponding BIND 10 datasource. In + case the secondary zone is served by an in-memory datasource + with an SQLite3 backend, b10-auth is + automatically sent a loadzone command to + reload the corresponding zone into memory from the backend. + + + + The administrator doesn't have to do anything for + b10-auth to serve the new version of the + zone, except for the configuration such as the one described in + . + +
    + @@ -1760,7 +1844,6 @@ what if a NOTIFY is sent? Outbound Zone Transfers - The b10-xfrout process is started by bind10. @@ -1836,6 +1919,325 @@ what is XfroutClient xfr_client?? + + Dynamic DNS Update + + + BIND 10 supports the server side of the Dynamic DNS Update + (DDNS) protocol as defined in RFC 2136. + This service is provided by the b10-ddns + component, which is started by the bind10 + process if configured so. + + + + When the b10-auth authoritative DNS server + receives an UPDATE request, it internally forwards the request + to b10-ddns, which handles the rest of + request processing. + When the processing is completed b10-ddns + will send a response to the client with the RCODE set to the + value as specified in RFC 2136 (NOERROR for successful update, + REFUSED if rejected due to ACL check, etc). + If the zone has been changed as a result, it will internally + notify b10-xfrout so that other secondary + servers will be notified via the DNS notify protocol. + In addition, if b10-auth serves the updated + zone from its in-memory cache (as described in + ), + b10-ddns will also + notify b10-auth so that b10-auth + will re-cache the updated zone content. + + + + The b10-ddns component supports requests over + both UDP and TCP, and both IPv6 and IPv4; for TCP requests, + however, it terminates the TCP connection immediately after + each single request has been processed. Clients cannot reuse the + same TCP connection for multiple requests. (This is a current + implementation limitation of b10-ddns. + While RFC 2136 doesn't specify anything about such reuse of TCP + connection, there is no reason for disallowing it as RFC 1035 + generally allows multiple requests sent over a single TCP + connection. BIND 9 supports such reuse.) + + + + As of this writing b10-ddns does not support + update forwarding for secondary zones. + If it receives an update request for a secondary zone, it will + immediately return a response with an RCODE of NOTIMP. 
+ + For feature completeness update forwarding should be + eventually supported. But right now it's considered a lower + priority task and there is no specific plan of implementing + this feature. + + + + +
    + Enabling Dynamic Update + + First off, it must be made sure that a few components on which + b10-ddns depends are configured to run, + which are b10-auth + and b10-zonemgr. + In addition, b10-xfrout should also be + configured to run; otherwise the notification after an update + (see above) will fail with a timeout, suspending the DDNS + service while b10-ddns waits for the + response (see the description of the DDNS_UPDATE_NOTIFY_FAIL + log message for further details). + If BIND 10 is already configured to provide authoritative DNS + service they should normally be configured to run already. + + + + Second, for the obvious reason dynamic update requires that the + underlying data source storing the zone data be writable. + In the current implementation this means the zone must be stored + in an SQLite3-based data source. + Also, right now, the b10-ddns component + configures itself with the data source referring to the + database_file configuration parameter of + b10-auth. + So this information must be configured correctly before starting + b10-ddns. + + + The way to configure data sources is now being revised. + Configuration on the data source for DDNS will be very + likely to be changed in a backward incompatible manner in + a near future version. + + + + + In general, if something goes wrong regarding the dependency + described above, b10-ddns will log the + related event at the warning or error level. + It's advisable to check the log message when you first enable + DDNS or if it doesn't work as you expect to see if there's any + warning or error log message. + + + + Next, to enable the DDNS service, b10-ddns + needs to be explicitly configured to run. + It can be done by using the bindctl + utility. 
For example: + +> config add Boss/components b10-ddns +> config set Boss/components/b10-ddns/address DDNS +> config set Boss/components/b10-ddns/kind dispensable +> config commit + + + In theory "kind" could be omitted because "dispensable" is its + default. But there's some peculiar behavior (which should + be a bug and should be fixed eventually; see Trac ticket + #2064) with bindctl and you'll still need to specify that explicitly. + Likewise, "address" may look unnecessary because + b10-ddns would start and work without + specifying it. But for it to shutdown gracefully this + parameter should also be specified. + + +
    + +
    + Access Control + + By default b10-ddns rejects any update + requests from any clients by returning a response with an RCODE + of REFUSED. + To allow updates to take effect, an access control rule + (called update ACL) with a policy allowing updates must explicitly be + configured. + Update ACL must be configured per zone basis in the + zones configuration parameter of + b10-ddns. + This is a list of per-zone configurations regarding DDNS. + Each list element consists of the following parameters: + + + origin + + The zone's origin name + + + + class + + The RR class of the zone + (normally IN, and in that case + can be omitted in configuration) + + + + update_acl + + List of access control rules (ACL) for the zone + + + + The syntax of the ACL is the same as ACLs for other + components. + Specific examples are given below. + + + + In general, an update ACL rule that allows an update request + should be configured with a TSIG key. + This is an example update ACL that allows updates to the zone + named example.org of RR class IN + from clients that send requests signed with a TSIG whose + key name is "key.example.org" (and refuses all others): + +> config add DDNS/zones +> config set DDNS/zones[0]/origin example.org +> config set DDNS/zones[0]/class IN +(Note: "class" can be omitted) +> config add DDNS/zones[0]/update_acl {"action": "ACCEPT", "key": "key.example.org"} +> config commit + + The TSIG key must be configured system wide + (see .) + + + + Multiple rules can be specified in the ACL, and an ACL rule + can consist of multiple constraints, such as a combination of + IP address and TSIG. + The following configuration sequence will add a new rule to + the ACL created in the above example. This additional rule + allows update requests sent from a client + using TSIG key name of "key.example" (different from the + key used in the previous example) and has an IPv6 address of ::1. 
+ +> config add DDNS/zones[0]/update_acl {"action": "ACCEPT", "from": "::1", "key": "key.example"} +> config show DDNS/zones[0]/update_acl +DDNS/zones[0]/update_acl[0] {"action": "ACCEPT", "key": "key.example.org"} any (modified) +DDNS/zones[0]/update_acl[1] {"action": "ACCEPT", "from": "::1", "key": "key.example"} any (modified) +> config commit + + (Note the "add" in the first line. Before this sequence, we + have had only entry in zones[0]/update_acl. The "add" command + with a value (rule) adds a new entry and sets it to the given rule. + Due to a limitation of the current implementation, it doesn't + work if you first try to just add a new entry and then set it to + a given rule). + + + + The b10-ddns component accepts an ACL + rule that just allows updates from a specific IP address + (i.e., without requiring TSIG), but this is highly + discouraged (remember that requests can be made over UDP and + spoofing the source address of a UDP packet is often pretty + easy). + Unless you know what you are doing and that you can accept + its consequence, any update ACL rule that allows updates + should have a TSIG key in its constraints. + + + + The ACL rules will be checked in the listed order, and the + first matching one will apply. + If none of the rules matches, the default rule will apply, + which is rejecting any requests in the case of + b10-ddns. + + + + Other actions than "ACCEPT", namely "REJECT" and "DROP", can be + used, too. + See about their effects. + + + + Currently update ACL can only control updates per zone basis; + it's not possible to specify access control with higher + granularity such as for particular domain names or specific + types of RRs. + + + + + Contrary to what RFC 2136 (literally) specifies, + b10-ddns checks the update ACL before + checking the prerequisites of the update request. + This is a deliberate implementation decision. 
+ This counter-intuitive specification has been repeatedly + discussed among implementers and in the IETF, and it is now + widely agreed that it does not make sense to strictly follow + that part of the RFC. + One known specific bad result of following the RFC is that it + could leak information about which name or record exists or does not + exist in the zone as a result of prerequisite checks even if a + zone is somehow configured to reject normal queries from + arbitrary clients. + There have been other troubles that could have been avoided if + the ACL could be checked before the prerequisite check. +
    + +
    + Miscellaneous Operational Issues + + Unlike BIND 9, BIND 10 currently does not support automatic + resigning of DNSSEC-signed zone when it's updated via DDNS. + It could be possible to resign the updated zone afterwards + or make sure the update request also updates related DNSSEC + records, but that will be pretty error-prone operation. + In general, it's not advisable to allow DDNS for a signed zone + at this moment. + + + + Also unlike BIND 9, it's currently not possible + to freeze a zone temporarily in order to + suspend DDNS while you manually update the zone. + If you need to make manual updates to a dynamic zone, + you'll need to temporarily reject any updates to the zone via + the update ACLs. + + + + Dynamic updates are only applicable to primary zones. + In order to avoid updating secondary zones via DDNS requests, + b10-ddns refers to the + secondary_zones configuration of + b10-zonemgr. Zones listed in + secondary_zones will never be updated via DDNS + regardless of the update ACL configuration; + b10-ddns will return a response with an + RCODE of NOTAUTH as specified in RFC 2136. + If you have a "conceptual" secondary zone whose content is a + copy of some external source but is not updated via the + standard zone transfers and therefore not listed in + secondary_zones, be careful not to allow DDNS + for the zone; it would be quite likely to lead to inconsistent + state between different servers. + Normally this should not be a problem because the default + update ACL rejects any update requests, but you may want to + take an extra care about the configuration if you have such + type of secondary zones. + + + The difference of two versions of a zone, before and after a + DDNS transaction, is automatically recorded in the underlying + data source, and can be retrieved in the form of outbound + IXFR. + This is done automatically; it does not require specific + configuration to make this possible. + +
    +
    + Recursive Name Server diff --git a/doc/guide/bind10-messages.html b/doc/guide/bind10-messages.html index d3bcb7c710..456aec4f05 100644 --- a/doc/guide/bind10-messages.html +++ b/doc/guide/bind10-messages.html @@ -1,10 +1,10 @@ -BIND 10 Messages Manual

    BIND 10 Messages Manual

    This is the messages manual for BIND 10 version - 20120127.

    Abstract

    BIND 10 is a Domain Name System (DNS) suite managed by +BIND 10 Messages Manual

    BIND 10 Messages Manual

    This is the messages manual for BIND 10 version + 20120405.

    Abstract

    BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers.

    - This is the messages manual for BIND 10 version 20120127. + This is the messages manual for BIND 10 version 20120405. The most up-to-date version of this document, along with other documents for BIND 10, can be found at http://bind10.isc.org/docs. @@ -131,6 +131,19 @@ discovered that the memory data source is disabled for the given class.

    AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1

    This is a debug message reporting that the authoritative server has discovered that the memory data source is enabled for the given class. +

    AUTH_MESSAGE_FORWARD_ERROR failed to forward %1 request from %2: %3

+The authoritative server tried to forward some type of DNS request +message to a separate process (e.g., forwarding dynamic update +requests to b10-ddns) to handle it, but it failed. The authoritative +server returns SERVFAIL to the client on behalf of the separate +process. The error could be configuration mismatch between b10-auth +and the recipient component, or it may be because the requests are +coming too fast and the recipient process cannot keep up with the +rate, or some system level failure. In either case this means the +BIND 10 system is not working as expected, so the administrator should +look into the cause and address the issue. The log message includes +the client's address (and port), and the error message sent from the +lower layer that detects the failure.

    AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY

    This debug message is logged by the authoritative server when it receives a NOTIFY packet that contains zero or more than one question. (A valid @@ -183,6 +196,17 @@ This is a debug message issued when the authoritative server has received a command from the statistics module to send it data. The 'sendstats' command is handled differently to other commands, which is why the debug message associated with it has its own code. +

    AUTH_RESPONSE_FAILURE exception while building response to query: %1

    +This is a debug message, generated by the authoritative server when an +attempt to create a response to a received DNS packet has failed. The +reason for the failure is given in the log message. A SERVFAIL response +is sent back. The most likely cause of this is an error in the data +source implementation; it is either creating bad responses or raising +exceptions itself. +

    AUTH_RESPONSE_FAILURE_UNKNOWN unknown exception while building response to query

    +This debug message is similar to AUTH_RESPONSE_FAILURE, but further +details about the error are unknown, because it was signaled by something +which is not an exception. This is definitely a bug.

    AUTH_RESPONSE_RECEIVED received response message, ignoring

    This is a debug message, this is output if the authoritative server receives a DNS packet with the QR bit set, i.e. a DNS response. The @@ -275,7 +299,7 @@ NOTIFY request will not be honored. The boss process is starting up and will now check if the message bus daemon is already running. If so, it will not be able to start, as it needs a dedicated message bus. -

    BIND10_COMPONENT_FAILED component %1 (pid %2) failed with %3 exit status

    +

    BIND10_COMPONENT_FAILED component %1 (pid %2) failed: %3

    The process terminated, but the bind10 boss didn't expect it to, which means it must have failed.

    BIND10_COMPONENT_RESTART component %1 is about to restart

    @@ -384,6 +408,10 @@ so BIND 10 will now shut down. The specific error is printed. The boss module is sending a SIGKILL signal to the given process.

    BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)

    The boss module is sending a SIGTERM signal to the given process. +

    BIND10_SETGID setting GID to %1

    +The boss switches the process group ID to the given value. This happens +when BIND 10 starts with the -u option, and the group ID will be set to +that of the specified user.

    BIND10_SETUID setting UID to %1

    The boss switches the user it runs as to the given UID.

    BIND10_SHUTDOWN stopping the server

    @@ -585,7 +613,7 @@ same RRset, but from more trusted source, so the old one is kept and new one ignored.

    CACHE_RRSET_UPDATE updating RRset %1/%2/%3 in the cache

    Debug message. The RRset is updating its data with this given RRset. -

    CC_ASYNC_READ_FAILED asynchronous read failed

    +

    CC_ASYNC_READ_FAILED asynchronous read failed (error code = %1)

    This marks a low level error, we tried to read data from the message queue daemon asynchronously, but the ASIO library returned an error.

    CC_CONN_ERROR error connecting to message queue (%1)

    @@ -660,6 +688,11 @@ all messages must contain at least the envelope. An older version of the configuration database has been found, from which there was an automatic upgrade path to the current version. These changes are now applied, and no action from the administrator is necessary. +

    CFGMGR_BACKED_UP_CONFIG_FILE Config file %1 was removed; a backup was made at %2

    +BIND 10 has been started with the command to clear the configuration +file. The existing file has been backed up (moved) to the given file +name. A new configuration file will be created in the original location +when necessary.

    CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE Unable to parse response from module %1: %2

    The configuration manager sent a configuration update to a module, but the module responded with an answer that could not be parsed. The answer @@ -669,6 +702,9 @@ assumed to have failed, and will not be stored.

    CFGMGR_CC_SESSION_ERROR Error connecting to command channel: %1

    The configuration manager daemon was unable to connect to the messaging system. The most likely cause is that msgq is not running. +

    CFGMGR_CONFIG_FILE Configuration manager starting with configuration file: %1

    +The configuration manager is starting, reading and saving the configuration +settings to the shown file.

    CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1

    There was a problem reading the persistent configuration data as stored on disk. The file may be corrupted, or it is of a version from where @@ -870,6 +906,33 @@ means no limit. The datasource tried to provide an NSEC proof that the named domain does not exist, but the database backend doesn't support DNSSEC. No proof is included in the answer as a result. +

    DATASRC_DATABASE_FINDNSEC3 Looking for NSEC3 for %1 in %2 mode

+Debug information. A search in a database data source for NSEC3 that +matches or covers the given name is being started. +

    DATASRC_DATABASE_FINDNSEC3_COVER found a covering NSEC3 for %1 at label count %2: %3

+Debug information. An NSEC3 that covers the given name is found and +being returned. The found NSEC3 RRset is also displayed. When the shown label +count is smaller than that of the given name, the matching NSEC3 is for a +superdomain of the given name (see DATASRC_DATABASE_FINDNSEC3_TRYHASH). +

    DATASRC_DATABASE_FINDNSEC3_MATCH found a matching NSEC3 for %1 at label count %2: %3

+Debug information. An NSEC3 that matches (possibly a superdomain of) +the given name is found and being returned. When the shown label +count is smaller than that of the given name, the matching NSEC3 is +for a superdomain of the given name (see DATASRC_DATABASE_FINDNSEC3_TRYHASH). +The found NSEC3 RRset is also displayed. +

    DATASRC_DATABASE_FINDNSEC3_TRYHASH looking for NSEC3 for %1 at label count %2 (hash %3)

+Debug information. In an attempt to find an NSEC3 for the given name, +(possibly a superdomain of) the name is hashed and searched for in the +NSEC3 name space. When the shown label count is smaller than that of the +shown name, the search tries the superdomain name that shares the shown +(higher) label count of the shown name (e.g., for +www.example.com. with shown label count of 3, example.com. is being +tried, as "." is 1 label long). +

    DATASRC_DATABASE_FINDNSEC3_TRYHASH_PREV looking for previous NSEC3 for %1 at label count %2 (hash %3)

    +Debug information. An exact match on hash (see +DATASRC_DATABASE_FINDNSEC3_TRYHASH) was unsuccessful. We get the previous hash +to that one instead.

    DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3/%4

    Debug information. The database data source is looking up records with the given name and type in the database. @@ -912,7 +975,7 @@ name and class, but not for the given type. A search in the database for RRs for the specified name, type and class has located RRs that match the name and class but not the type. DNSSEC information has been requested and returned. -

    DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %5

    +

    DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2

    The data returned by the database backend contained data for the given domain name, and it either matches the type or has a relevant type. The RRset that is returned is printed. @@ -925,10 +988,12 @@ While iterating through the zone, the program reached end of the data. While iterating through the zone, the program extracted next RRset from it. The name and RRtype of the RRset is indicated in the message.

    DATASRC_DATABASE_ITERATE_TTL_MISMATCH TTL values differ for RRs of %1/%2/%3, setting to %4

    -While iterating through the zone, the time to live for RRs of the given RRset -were found to be different. This isn't allowed on the wire and is considered -an error, so we set it to the lowest value we found (but we don't modify the -database). The data in database should be checked and fixed. +While iterating through the zone, the time to live for RRs of the +given RRset were found to be different. Since an RRset cannot have +multiple TTLs, we set it to the lowest value we found (but we don't +modify the database). This is what the client would do when such RRs +were given in a DNS response according to RFC2181. The data in +database should be checked and fixed.

    DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5

    This is a debug message indicating that the program (successfully) reaches the end of sequences of a zone's differences. The zone's name @@ -1009,7 +1074,7 @@ The given wildcard matches the name being sough but it as an empty nonterminal (e.g. there's nothing at *.example.com but something like subdomain.*.example.org, do exist: so *.example.org exists in the namespace but has no RRs assopciated with it). This will produce NXRRSET. -

    DATASRC_DATABASE_WILDCARD_MATCH search in datasource %1 resulted in wildcard match at %5 with RRset %6

    +

    DATASRC_DATABASE_WILDCARD_MATCH search in datasource %1 resulted in wildcard match at %2 with RRset %3

    The database doesn't contain directly matching name. When searching for a wildcard match, a wildcard record matching the name and type of the query was found. The data at this point is returned. @@ -1259,8 +1324,10 @@ not have any DS record. This indicates problem with the provided data. An attempt to add a NSEC3 record into the message failed, because the zone does not have any DS record. This indicates problem with the provided data.

    DATASRC_QUERY_NO_ZONE no zone containing '%1' in class '%2'

    -Lookup of domain failed because the data have no zone that contain the -domain. Maybe someone sent a query to the wrong server for some reason. +Debug information. Lookup of domain failed because the datasource +has no zone that contains the domain. Maybe someone sent a query +to the wrong server for some reason. This may also happen when +looking in the datasource for addresses for NS records.

    DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class

    Debug information. A sure query is being processed now.

    DATASRC_QUERY_PROVE_NX_FAIL unable to prove nonexistence of '%1'

    @@ -1307,6 +1374,16 @@ While processing a wildcard, a referral was met. But it wasn't possible to get enough information for it. The code is 1 for error, 2 for not implemented.

    DATASRC_SQLITE_CLOSE closing SQLite database

    Debug information. The SQLite data source is closing the database file. +

    DATASRC_SQLITE_COMPATIBLE_VERSION database schema V%1.%2 not up to date (expecting V%3.%4) but is compatible

    +The version of the SQLite3 database schema used to hold the zone data +is not the latest one - the current version of BIND 10 was written +with a later schema version in mind. However, the database is +compatible with the current version of BIND 10, and BIND 10 will run +without any problems. +

    +Consult the release notes for your version of BIND 10. Depending on +the changes made to the database schema, it is possible that improved +performance could result if the database were upgraded.

    DATASRC_SQLITE_CONNCLOSE Closing sqlite database

    The database file is no longer needed and is being closed.

    DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'

    @@ -1356,6 +1433,13 @@ source.

    DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'

    The SQLite data source was asked to provide a NSEC3 record for given zone. But it doesn't contain that zone. +

    DATASRC_SQLITE_INCOMPATIBLE_VERSION database schema V%1.%2 incompatible with version (V%3.%4) expected

    +The version of the SQLite3 database schema used to hold the zone data +is incompatible with the version expected by BIND 10. As a result, +BIND 10 is unable to run using the database file as the data source. +

    +The database should be updated using the means described in the BIND +10 documentation.

    DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized

    A wrapper object to hold database connection is being initialized.

    DATASRC_SQLITE_OPEN opening SQLite database '%1'

    @@ -1389,11 +1473,85 @@ data source.

    DATASRC_UNEXPECTED_QUERY_STATE unexpected query state

    This indicates a programming error. An internal task of unknown type was generated. +

    DBUTIL_BACKUP created backup of %1 in %2

+A backup for the given database file was created. The names of the original +file and the backup are given in the output message. +

    DBUTIL_CHECK_ERROR unable to check database version: %1

    +There was an error while trying to check the current version of the database +schema. The error is shown in the message. +

    DBUTIL_CHECK_NOCONFIRM --noconfirm is not compatible with --check

    +b10-dbutil was called with --check and --noconfirm. --noconfirm only has +meaning with --upgrade, so this is considered an error. +

    DBUTIL_CHECK_OK this is the latest version of the database schema. No upgrade is required

    +The database schema version has been checked, and is up to date. +No action is required. +

    DBUTIL_CHECK_UPGRADE_NEEDED re-run this program with the --upgrade switch to upgrade

    +The database schema version is not up to date, and an update is required. +Please run the dbutil tool again, with the --upgrade argument. +

    DBUTIL_COMMAND_NONE must select one of --check or --upgrade

    +b10-dbutil was called with neither --check nor --upgrade. One action must be +provided. +

    DBUTIL_COMMAND_UPGRADE_CHECK --upgrade is not compatible with --check

    +b10-dbutil was called with both the commands --upgrade and --check. Only one +action can be performed at a time. +

    DBUTIL_DATABASE_MAY_BE_CORRUPT database file %1 may be corrupt, restore it from backup (%2)

    +The upgrade failed while it was in progress; the database may now be in an +inconsistent state, and it is advised to restore it from the backup that was +created when b10-dbutil started. +

    DBUTIL_EXECUTE Executing SQL statement: %1

    +Debug message; the given SQL statement is executed +

    DBUTIL_FILE Database file: %1

    +The database file that is being checked. +

    DBUTIL_NO_FILE must supply name of the database file to upgrade

    +b10-dbutil was called without a database file. Currently, it cannot find this +file on its own, and it must be provided. +

    DBUTIL_STATEMENT_ERROR failed to execute %1: %2

    +The given database statement failed to execute. The error is shown in the +message. +

    DBUTIL_TOO_MANY_ARGUMENTS too many arguments to the command, maximum of one expected

    +There were too many command-line arguments to b10-dbutil +

    DBUTIL_UPGRADE_CANCELED upgrade canceled; database has not been changed

    +The user aborted the upgrade, and b10-dbutil will now exit. +

    DBUTIL_UPGRADE_DBUTIL please get the latest version of b10-dbutil and re-run

    +A database schema was found that was newer than this version of dbutil, which +is apparently out of date and should be upgraded itself. +

    DBUTIL_UPGRADE_FAILED upgrade failed: %1

    +While the upgrade was in progress, an unexpected error occurred. The error +is shown in the message. +

    DBUTIL_UPGRADE_NOT_ATTEMPTED database upgrade was not attempted

    +Due to the earlier failure, the database schema upgrade was not attempted, +and b10-dbutil will now exit. +

    DBUTIL_UPGRADE_NOT_NEEDED database already at latest version, no upgrade necessary

    +b10-dbutil was told to upgrade the database schema, but it is already at the +latest version. +

    DBUTIL_UPGRADE_NOT_POSSIBLE database at a later version than this utility can support

    +b10-dbutil was told to upgrade the database schema, but it is at a higher +version than this tool currently supports. Please update b10-dbutil and try +again. +

    DBUTIL_UPGRADE_PREPARATION_FAILED upgrade preparation failed: %1

    +An unexpected error occurred while b10-dbutil was preparing to upgrade the +database schema. The error is shown in the message +

    DBUTIL_UPGRADE_SUCCESFUL database upgrade successfully completed

    +The database schema update was completed successfully. +

    DBUTIL_UPGRADING upgrading database from %1 to %2

    +An upgrade is in progress, the versions of the current upgrade action are shown. +

    DBUTIL_VERSION_CURRENT database version %1

    +The current version of the database schema. +

    DBUTIL_VERSION_HIGH database is at a later version (%1) than this program can cope with (%2)

    +The database schema is at a higher version than b10-dbutil knows about. +

    DBUTIL_VERSION_LOW database version %1, latest version is %2.

    +The database schema is not up to date, the current version and the latest +version are in the message.

    DDNS_ACCEPT_FAILURE error accepting a connection: %1

    There was a low-level error when we tried to accept an incoming connection (probably coming from b10-auth). We continue serving on whatever other connections we already have, but this connection is dropped. The reason is logged. +

    DDNS_AUTH_DBFILE_UPDATE updated auth DB file to %1

    +b10-ddns was notified of updates to the SQLite3 DB file that b10-auth +uses for the underlying data source and on which b10-ddns needs to +make updates. b10-ddns then updated its internal setup so further +updates would be made on the new DB.

    DDNS_CC_SESSION_ERROR error reading from cc channel: %1

    There was a problem reading from the command and control channel. The most likely cause is that the msgq process is not running. @@ -1404,12 +1562,41 @@ configuration manager b10-cfgmgr is not running.

    DDNS_CONFIG_ERROR error found in configuration data: %1

    The ddns process encountered an error when installing the configuration at startup time. Details of the error are included in the log message. +

    DDNS_CONFIG_HANDLER_ERROR failed to update ddns configuration: %1

    +An update to b10-ddns configuration was delivered but an error was +found while applying them. None of the delivered updates were applied +to the running b10-ddns system, and the server will keep running with +the existing configuration. If this happened in the initial +configuration setup, the server will be running with the default +configurations.

    DDNS_DROP_CONN dropping connection on file descriptor %1 because of error %2

    There was an error on a connection with the b10-auth server (or whatever connects to the ddns daemon). This might be OK, for example when the authoritative server shuts down, the connection would get closed. It also can mean the system is busy and can't keep up or that the other side got confused and sent bad data. +

    DDNS_GET_REMOTE_CONFIG_FAIL failed to get %1 module configuration %2 times: %3

    +b10-ddns tried to get configuration of some remote modules for its +operation, but it failed. The most likely cause of this is that the +remote module has not fully started up and b10-ddns couldn't get the +configuration in a timely fashion. b10-ddns attempts to retry it a +few times, imposing a short delay, hoping it eventually succeeds if +it's just a timing issue. The number of total failed attempts is also +logged. If it reaches an internal threshold b10-ddns considers it a +fatal error and terminates. Even in that case, if b10-ddns is +configured as a "dispensable" component (which is the default), the +parent bind10 process will restart it, and there will be another +chance of getting the remote configuration successfully. These are +not the optimal behavior, but it's believed to be sufficient in +practice (there would normally be no failure in the first place). If +it really causes an operational trouble other than having a few of +these log messages, please submit a bug report; there can be several +ways to make it more sophisticated. Another, less likely reason for +having this error is because the remote modules are not actually +configured to run. If that's the case fixing the configuration should +solve the problem - either by making sure the remote module will run +or by not running b10-ddns (without these remote modules b10-ddns is +not functional, so there's no point in running it in this case).

    DDNS_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1

    There was a problem in the lower level module handling configuration and control commands. This could happen for various reasons, but the most likely @@ -1421,12 +1608,80 @@ Debug message. We received a connection and we are going to start handling requests from it. The file descriptor number and the address where the request comes from is logged. The connection is over a unix domain socket and is likely coming from a b10-auth process. +

    DDNS_RECEIVED_AUTH_UPDATE received configuration updates from auth server

    +b10-ddns is notified of updates to b10-auth configuration +(including a report of the initial configuration) that b10-ddns might +be interested in.

    DDNS_RECEIVED_SHUTDOWN_COMMAND shutdown command received

    The ddns process received a shutdown command from the command channel and will now shut down. -

    DDNS_RUNNING ddns server is running and listening for updates

    -The ddns process has successfully started and is now ready to receive commands -and updates. +

    DDNS_RECEIVED_ZONEMGR_UPDATE received configuration updates from zonemgr

    +b10-ddns is notified of updates to b10-zonemgr's configuration +(including a report of the initial configuration). It may possibly +contain changes to the secondary zones, in which case b10-ddns will +update its internal copy of that configuration. +

    DDNS_REQUEST_PARSE_FAIL failed to parse update request: %1

    +b10-ddns received an update request via b10-auth, but the received +data failed to pass minimum validation: it was either broken wire +format data for a valid DNS message (e.g. it's shorter than the +fixed-length header), or the opcode is not update, or TSIG is included +in the request but it fails to validate. Since b10-auth should have +performed this level of checks, such an error shouldn't be detected at +this stage and should rather be considered an internal bug. This +event is therefore logged at the error level, and the request is +simply dropped. Additional information of the error is also logged. +

    DDNS_REQUEST_TCP_QUOTA reject TCP update client %1 (%2 running)

    +b10-ddns received a new update request from a client over TCP, but +the number of TCP clients being handled by the server already reached +the configured quota, so the latest client was rejected by closing +the connection. The administrator may want to check the status of +b10-ddns, and if this happens even if the server is not very busy, +the quota may have to be increased. Or, if it's more likely to be +malicious or simply bogus clients that somehow keep the TCP connection +open for a long period, maybe they should be rejected with an +appropriate ACL configuration or some lower layer filtering. The +number of existing TCP clients is shown in the log, which should be +identical to the current quota. +

    DDNS_RESPONSE_SOCKET_ERROR failed to send update response to %1: %2

    +Network I/O error happens in sending an update response. The +client's address that caused the error and error details are also +logged. +

    DDNS_RESPONSE_TCP_SOCKET_ERROR failed to complete sending update response to %1 over TCP

    +b10-ddns had tried to send an update response over TCP, and it hadn't +been completed at that time, and a followup attempt to complete the +send operation failed due to some network I/O error. While a network +error can happen any time, this event is quite unexpected for two +reasons. First, since the size of a response to an update request +should be generally small, it's unlikely that the initial attempt +didn't fail but wasn't completed. Second, since the first attempt +succeeded and the TCP connection had been established in the first +place, it's more likely for the subsequent attempt to succeed. In any +case, there may be nothing that can be done to fix it at the server +side, but the administrator may want to check the general reachability +with the client address. +

    DDNS_SECONDARY_ZONES_UPDATE updated secondary zone list (%1 zones are listed)

    +b10-ddns has successfully updated the internal copy of secondary zones +obtained from b10-zonemgr, based on the latest update to zonemgr's +configuration. The number of newly configured (unique) secondary +zones is logged. +

    DDNS_SECONDARY_ZONES_UPDATE_FAIL failed to update secondary zone list: %1

    +An error message. b10-ddns was notified of updates to a list of +secondary zones from b10-zonemgr and tried to update its own internal +copy of the list, but it failed. This can happen if the configuration +contains an error, and b10-zonemgr should also reject that update. +Unfortunately, in the current implementation there is no way to ensure +that both zonemgr and ddns have consistent information when an update +contains an error; further, as of this writing zonemgr has a bug that +it could partially update the list of secondary zones if part of the +list has an error (see Trac ticket #2038). b10-ddns still keeps +running with the previous configuration, but it's strongly advisable +to check log messages from zonemgr, and if it indicates there can be +inconsistent state, it's better to restart the entire BIND 10 system +(just restarting b10-ddns wouldn't be enough, because zonemgr can have +partially updated configuration due to bug #2038). The log message +contains an error description, but it's intentionally kept simple as +it's primarily a matter of zonemgr. To know the details of the error, +log messages of zonemgr should be consulted.

    DDNS_SESSION session arrived on file descriptor %1

    A debug message, informing there's some activity on the given file descriptor. It will be either a request or the file descriptor will be closed. See @@ -1435,6 +1690,9 @@ following log messages to see what of it. The ddns process is shutting down. It will no longer listen for new commands or updates. Any command or update that is being addressed at this moment will be completed, after which the process will exit. +

    DDNS_STARTED ddns server is running and listening for updates

    +The ddns process has successfully started and is now ready to receive commands +and updates.

    DDNS_STOPPED ddns server has stopped

    The ddns process has successfully stopped and is no longer listening for or handling commands or updates, and will now exit. @@ -1445,6 +1703,212 @@ process will now shut down. The b10-ddns process encountered an uncaught exception and will now shut down. This is indicative of a programming error and should not happen under normal circumstances. The exception type and message are printed. +

    DDNS_UPDATE_NOTIFY notified %1 of updates to %2

    +Debug message. b10-ddns has made updates to a zone based on an update +request and has successfully notified an external module of the updates. +The notified module will use that information for updating its own +state or any necessary protocol action such as zone reloading or sending +notify messages to secondary servers. +

    DDNS_UPDATE_NOTIFY_FAIL failed to notify %1 of updates to %2: %3

    +b10-ddns has made updates to a zone based on an update request and +tried to notify an external component of the updates, but the +notification failed. One possible cause of this is that the external +component is not really running and it times out in waiting for the +response, although it will be less likely to happen in practice +because these components will normally be configured to run when the +server provides the authoritative DNS service; ddns is rather optional +among them. If this happens, however, it will suspend b10-ddns for a +few seconds during which it cannot handle new requests (some may be +delayed, some may be dropped, depending on the volume of the incoming +requests). This is obviously bad, and if this error happens due to +this reason, the administrator should make sure the component in +question is configured to run. For a longer term, b10-ddns +should be more robust about this case such as by making this +notification asynchronous and/or detecting the existence of the +external components to avoid hopeless notification in the first place. +Severity of this error for the receiving components depends on the +type of the component. If it's b10-xfrout, this means DNS notify +messages won't be sent to secondary servers of the zone. It's +suboptimal, but not necessarily critical as the secondary servers will +try to check the zone's status periodically. If it's b10-auth and the +notification was needed to have it reload the corresponding zone, it's +more serious because b10-auth won't be able to serve the new version +of the zone unless some explicit recovery action is taken. So the +administrator needs to examine this message and take an appropriate +action. In either case, this notification is generally expected to +succeed; so the fact it fails itself means there's something wrong in +the BIND 10 system, and it would be advisable to check other log +messages. +

    LIBDDNS_DATASRC_ERROR update client %1 failed due to data source error: %2

    +An update attempt failed due to some error in the corresponding data +source. This is generally an unexpected event, but can still happen +for various reasons such as DB lock contention or a failure of the +backend DB server. The cause of the error is also logged. It's +advisable to check the message, and, if necessary, take an appropriate +action (e.g., restarting the DB server if it dies). If this message +is logged the data source isn't modified due to the +corresponding update request. When used by the b10-ddns, the server +will return a response with an RCODE of SERVFAIL. +

    LIBDDNS_PREREQ_FORMERR update client %1 for zone %2: Format error in prerequisite (%3). Non-zero TTL.

    +The prerequisite with the given name, class and type is not well-formed. +The specific prerequisite is shown. In this case, it has a non-zero TTL value. +A FORMERR error response is sent to the client. +

    LIBDDNS_PREREQ_FORMERR_ANY update client %1 for zone %2: Format error in prerequisite (%3). Non-zero TTL or rdata found.

    +The prerequisite with the given name, class and type is not well-formed. +The specific prerequisite is shown. In this case, it either has a non-zero +TTL value, or has rdata fields. A FORMERR error response is sent to the client. +

    LIBDDNS_PREREQ_FORMERR_CLASS update client %1 for zone %2: Format error in prerequisite (%3). Bad class.

    +The prerequisite with the given name, class and type is not well-formed. +The specific prerequisite is shown. In this case, the class of the +prerequisite should either match the class of the zone in the Zone Section, +or it should be ANY or NONE, and it is not. A FORMERR error response is sent +to the client. +

    LIBDDNS_PREREQ_FORMERR_NONE update client %1 for zone %2: Format error in prerequisite (%3). Non-zero TTL or rdata found.

    +The prerequisite with the given name, class and type is not well-formed. +The specific prerequisite is shown. In this case, it either has a non-zero +TTL value, or has rdata fields. A FORMERR error response is sent to the client. +

    LIBDDNS_PREREQ_NAME_IN_USE_FAILED update client %1 for zone %2: 'Name is in use' prerequisite not satisfied (%3), rcode: %4

    +A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that +was not satisfied is shown. The client is sent an error response with the +given rcode. +In this case, the specific prerequisite is 'Name is in use'. From RFC2136: +Name is in use. At least one RR with a specified NAME (in +the zone and class specified by the Zone Section) must exist. +Note that this prerequisite is NOT satisfied by empty +nonterminals. +

    LIBDDNS_PREREQ_NAME_NOT_IN_USE_FAILED update client %1 for zone %2: 'Name is not in use' (%3) prerequisite not satisfied, rcode: %4

    +A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that +was not satisfied is shown. The client is sent an error response with the +given rcode. +In this case, the specific prerequisite is 'Name is not in use'. +From RFC2136: +Name is not in use. No RR of any type is owned by a +specified NAME. Note that this prerequisite IS satisfied by +empty nonterminals. +

    LIBDDNS_PREREQ_NOTZONE update client %1 for zone %2: prerequisite not in zone (%3)

    +A DDNS UPDATE prerequisite has a name that does not appear to be inside +the zone specified in the Zone section of the UPDATE message. +The specific prerequisite is shown. A NOTZONE error response is sent to +the client. +

    LIBDDNS_PREREQ_RRSET_DOES_NOT_EXIST_FAILED update client %1 for zone %2: 'RRset does not exist' (%3) prerequisite not satisfied, rcode: %4

    +A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that +was not satisfied is shown. The client is sent an error response with the +given rcode. +In this case, the specific prerequisite is 'RRset does not exist'. +From RFC2136: +RRset does not exist. No RRs with a specified NAME and TYPE +(in the zone and class denoted by the Zone Section) can exist. +

    LIBDDNS_PREREQ_RRSET_EXISTS_FAILED update client %1 for zone %2: 'RRset exists (value independent)' (%3) prerequisite not satisfied, rcode: %4

    +A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that +was not satisfied is shown. The client is sent an error response with the +given rcode. +In this case, the specific prerequisite is 'RRset exists (value independent)'. +From RFC2136: +RRset exists (value independent). At least one RR with a +specified NAME and TYPE (in the zone and class specified by +the Zone Section) must exist. +

    LIBDDNS_PREREQ_RRSET_EXISTS_VAL_FAILED update client %1 for zone %2: 'RRset exists (value dependent)' (%3) prerequisite not satisfied, rcode: %4

    +A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that +was not satisfied is shown. The client is sent an error response with the +given rcode. +In this case, the specific prerequisite is 'RRset exists (value dependent)'. +From RFC2136: +RRset exists (value dependent). A set of RRs with a +specified NAME and TYPE exists and has the same members +with the same RDATAs as the RRset specified here in this +Section. +

    LIBDDNS_UPDATE_ADD_BAD_TYPE update client %1 for zone %2: update addition RR bad type: %3

    +The Update section of a DDNS update message contains a statement +that tries to add a record of an invalid type. Most likely the +record has an RRType that is considered a 'meta' type, which +cannot be zone content data. The specific record is shown. +A FORMERR response is sent back to the client. +

    LIBDDNS_UPDATE_APPROVED update client %1 for zone %2 approved

    +Debug message. An update request was approved in terms of the zone's +update ACL. +

    LIBDDNS_UPDATE_BAD_CLASS update client %1 for zone %2: bad class in update RR: %3

    +The Update section of a DDNS update message contains an RRset with +a bad class. The class of the update RRset must be either the same +as the class in the Zone Section, ANY, or NONE. +A FORMERR response is sent back to the client. +

    LIBDDNS_UPDATE_DATASRC_ERROR error in datasource during DDNS update: %1

    +An error occurred while committing the DDNS update changes to the +datasource. The specific error is printed. A SERVFAIL response is sent +back to the client. +

    LIBDDNS_UPDATE_DELETE_BAD_TYPE update client %1 for zone %2: update deletion RR bad type: %3

    +The Update section of a DDNS update message contains a statement +that tries to delete an rrset of an invalid type. Most likely the +record has an RRType that is considered a 'meta' type, which +cannot be zone content data. The specific record is shown. +A FORMERR response is sent back to the client. +

    LIBDDNS_UPDATE_DELETE_NONZERO_TTL update client %1 for zone %2: update deletion RR has non-zero TTL: %3

    +The Update section of a DDNS update message contains a 'delete rrset' +statement with a non-zero TTL. This is not allowed by the protocol. +A FORMERR response is sent back to the client. +

    LIBDDNS_UPDATE_DELETE_RRSET_NOT_EMPTY update client %1 for zone %2: update deletion RR contains data %3

    +The Update section of a DDNS update message contains a 'delete rrset' +statement with a non-empty RRset. This is not allowed by the protocol. +A FORMERR response is sent back to the client. +

    LIBDDNS_UPDATE_DELETE_RR_BAD_TYPE update client %1 for zone %2: update deletion RR bad type: %3

    +The Update section of a DDNS update message contains a statement +that tries to delete one or more rrs of an invalid type. Most +likely the records have an RRType that is considered a 'meta' +type, which cannot be zone content data. The specific record is +shown. A FORMERR response is sent back to the client. +

    LIBDDNS_UPDATE_DELETE_RR_NONZERO_TTL update client %1 for zone %2: update deletion RR has non-zero TTL: %3

    +The Update section of a DDNS update message contains a 'delete rrs' +statement with a non-zero TTL. This is not allowed by the protocol. +A FORMERR response is sent back to the client. +

    LIBDDNS_UPDATE_DENIED update client %1 for zone %2 denied

    +Informational message. An update request was denied because it was +rejected by the zone's update ACL. When this library is used by +b10-ddns, the server will respond to the request with an RCODE of +REFUSED as described in Section 3.3 of RFC2136. +

    LIBDDNS_UPDATE_DROPPED update client %1 for zone %2 dropped

    +Informational message. An update request was denied because it was +rejected by the zone's update ACL. When this library is used by +b10-ddns, the server will then completely ignore the request; no +response will be sent. +

    LIBDDNS_UPDATE_ERROR update client %1 for zone %2: %3

    +Debug message. An error is found in processing a dynamic update +request. This log message is used for general errors that are not +normally expected to happen. So, in general, it would mean some +problem in the client implementation or an interoperability issue +with this implementation. The client's address, the zone name and +class, and description of the error are logged. +

    LIBDDNS_UPDATE_FORWARD_FAIL update client %1 for zone %2: update forwarding not supported

    +Debug message. An update request is sent to a secondary server. This +is not necessarily invalid, but this implementation does not yet +support update forwarding as specified in Section 6 of RFC2136 and it +will simply return a response with an RCODE of NOTIMP to the client. +The client's address and the zone name/class are logged. +

    LIBDDNS_UPDATE_NOTAUTH update client %1 for zone %2: not authoritative for update zone

    +Debug message. An update request was received for a zone for which +the receiving server doesn't have authority. In theory this is an +unexpected event, but there are client implementations that could send +update requests carelessly, so it may not necessarily be so uncommon +in practice. If possible, you may want to check the implementation or +configuration of those clients to suppress the requests. As specified +in Section 3.1 of RFC2136, the receiving server will return a response +with an RCODE of NOTAUTH. +

    LIBDDNS_UPDATE_NOTZONE update client %1 for zone %2: update RR out of zone %3

    +A DDNS UPDATE record has a name that does not appear to be inside +the zone specified in the Zone section of the UPDATE message. +The specific update record is shown. A NOTZONE error response is +sent to the client. +

    LIBDDNS_UPDATE_PREREQUISITE_FAILED prerequisite failed in update client %1 for zone %2: result code %3

    +The handling of the prerequisite section (RFC2136 Section 3.2) found +that one of the prerequisites was not satisfied. The result code +should give more information on what prerequisite type failed. +If the result code is FORMERR, the prerequisite section was not well-formed. +An error response with the given result code is sent back to the client. +

    LIBDDNS_UPDATE_UNCAUGHT_EXCEPTION update client %1 for zone %2: uncaught exception while processing update section: %3

    +An uncaught exception was encountered while processing the Update +section of a DDNS message. The specific exception is shown in the log message. +To make sure DDNS service is not interrupted, this problem is caught instead +of re-raised; the update is aborted, and a SERVFAIL is sent back to the client. +This is most probably a bug in the DDNS code, but *could* be caused by +the data source.

    LIBXFRIN_DIFFERENT_TTL multiple data with different TTLs (%1, %2) on %3/%4/%5. Adjusting %2 -> %1.

    The xfrin module received an update containing multiple rdata changes for the same RRset. But the TTLs of these don't match each other. As we combine them @@ -1502,6 +1966,8 @@ the reason given. An invalid message identification (ID) has been found during the read of a message file. Message IDs should comprise only alphanumeric characters and the underscore, and should not start with a digit. +

    LOG_LOCK_TEST_MESSAGE this is a test message.

    +This is a log message used in testing.

    LOG_NAMESPACE_EXTRA_ARGS line %1: $NAMESPACE directive has too many arguments

    The $NAMESPACE directive in a message file takes a single argument, a namespace in which all the generated symbol names are placed. This error @@ -1703,6 +2169,30 @@ an answer with a different given type and class.

    This message indicates an internal error in the NSAS. Please raise a bug report. +

    PYSERVER_COMMON_AUTH_CONFIG_NAME_PARSER_ERROR Invalid name when parsing Auth configuration: %1

    +There was an invalid name when parsing Auth configuration. +

    PYSERVER_COMMON_AUTH_CONFIG_RRCLASS_ERROR Invalid RRClass when parsing Auth configuration: %1

    +There was an invalid RR class when parsing Auth configuration. +

    PYSERVER_COMMON_DNS_TCP_SEND_DONE completed sending TCP message to %1 (%2 bytes in total)

    +Debug message. A complete DNS message has been successfully +transmitted over a TCP connection, possibly after multiple send +operations. The destination address and the total size of the message +(including the 2-byte length field) are shown in the log message. +

    PYSERVER_COMMON_DNS_TCP_SEND_ERROR failed to send TCP message to %1 (%2/%3 bytes sent): %4

    +A DNS message has been attempted to be sent out over a TCP connection, +but it failed due to some network error. Although it's not expected +to happen too often, it can still happen for various reasons. The +administrator may want to examine the cause of the failure, which is +included in the log message, to see if it requires some action to +be taken at the server side. When this message is logged, the +corresponding TCP connection was closed immediately after the error +was detected. +

    PYSERVER_COMMON_DNS_TCP_SEND_PENDING sent part TCP message to %1 (up to %2/%3 bytes)

    +Debug message. A part of DNS message has been transmitted over a TCP +connection, and it's suspended because further attempt would block. +The destination address and the total size of the message that has +been transmitted so far (including the 2-byte length field) are shown +in the log message.

    PYSERVER_COMMON_TSIG_KEYRING_DEINIT Deinitializing global TSIG keyring

    A debug message noting that the global TSIG keyring is being removed from memory. Most programs don't do that, they just exit, which is OK. @@ -2064,7 +2554,7 @@ resolver. It is output during startup and may appear multiple times, once for each root server address.

    RESOLVER_SHUTDOWN resolver shutdown complete

    This informational message is output when the resolver has shut down. -

    RESOLVER_SHUTDOWN (1) asked to shut down, doing so

    +

    RESOLVER_SHUTDOWN_RECEIVED received command to shut down

    A debug message noting that the server was asked to terminate and is complying to the request.

    RESOLVER_STARTED resolver started

    @@ -2270,6 +2760,10 @@ is unknown in the implementation. The most likely cause is an installation problem, where the specification file stats.spec is from a different version of BIND 10 than the stats module itself. Please check your installation. +

    XFRIN_AUTH_LOADZONE sending Auth loadzone for origin=%1, class=%2, datasrc=%3

    +There was a successful zone transfer, and the zone is served by b10-auth +in the in-memory data source using sqlite3 as a backend. We send the +"loadzone" command for the zone to b10-auth.

    XFRIN_AXFR_INCONSISTENT_SOA AXFR SOAs are inconsistent for %1: %2 expected, %3 received

    The serial fields of the first and last SOAs of AXFR (including AXFR-style IXFR) are not the same. According to RFC 5936 these two SOAs must be the @@ -2317,6 +2811,30 @@ is not equal to the requested SOA serial.

    XFRIN_IMPORT_DNS error importing python DNS module: %1

    There was an error importing the python DNS module pydnspp. The most likely cause is a PYTHONPATH problem. +

    XFRIN_IXFR_TRANSFER_SUCCESS incremental IXFR transfer of zone %1 succeeded (messages: %2, changesets: %3, deletions: %4, additions: %5, bytes: %6, run time: %7 seconds, %8 bytes/second)

    +The IXFR transfer for the given zone was successful. +The provided information contains the following values: +

    +messages: Number of overhead DNS messages in the transfer. +

    +changesets: Number of difference sequences. +

    +deletions: Number of Resource Records deleted by all the changesets combined, +including the SOA records. +

    +additions: Number of Resource Records added by all the changesets combined, +including the SOA records. +

    +bytes: Full size of the transfer data on the wire. +

    +run time: Time (in seconds) the complete ixfr took. +

    +bytes/second: Transfer speed. +

    +Note that there is no cross-checking of additions and deletions; if the same +RR gets added and deleted in multiple changesets, it is counted each time; +therefore, for each changeset, there should at least be 1 deletion and 1 +addition (the updated SOA record).

    XFRIN_IXFR_UPTODATE IXFR requested serial for %1 is %2, master has %3, not updating

    The first SOA record in an IXFR response indicates the zone's serial at the primary server is not newer than the client's. This is @@ -2330,6 +2848,9 @@ aborts the transfer just like a successful case. There was a problem sending a message to the xfrout module or the zone manager. This most likely means that the msgq daemon has quit or was killed. +

    XFRIN_MSGQ_SEND_ERROR_AUTH error while contacting %1

    +There was a problem sending a message to b10-auth. This most likely +means that the msgq daemon has quit or was killed.

    XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1

    There was a problem sending a message to the zone manager. This most likely means that the msgq daemon has quit or was killed. @@ -2343,11 +2864,26 @@ There was an internal command to retransfer the given zone, but the zone is not known to the system. This may indicate that the configuration for xfrin is incomplete, or there was a typographical error in the zone name in the configuration. -

    XFRIN_STARTING starting resolver with command line '%1'

    -An informational message, this is output when the resolver starts up. +

    XFRIN_STARTED xfrin started

    +This informational message is output by xfrin when all initialization +has been completed and it is entering its main loop.

    XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down

    There was a keyboard interrupt signal to stop the xfrin daemon. The daemon will now shut down. +

    XFRIN_TRANSFER_SUCCESS full %1 transfer of zone %2 succeeded (messages: %3, records: %4, bytes: %5, run time: %6 seconds, %7 bytes/second)

    +The AXFR transfer of the given zone was successful. +The provided information contains the following values: +

    +messages: Number of overhead DNS messages in the transfer +

    +records: Number of Resource Records in the full transfer, excluding the +final SOA record that marks the end of the AXFR. +

    +bytes: Full size of the transfer data on the wire. +

    +run time: Time (in seconds) the complete axfr took +

    +bytes/second: Transfer speed

    XFRIN_UNKNOWN_ERROR unknown error: %1

    An uncaught exception was raised while running the xfrin daemon. The exception message is printed in the log message. @@ -2389,8 +2925,6 @@ is recommended to check the primary server configuration.

    XFRIN_XFR_TRANSFER_STARTED %1 transfer of zone %2 started

    A connection to the master server has been made, the serial value in the SOA record has been checked, and a zone transfer has been started. -

    XFRIN_XFR_TRANSFER_SUCCESS %1 transfer of zone %2 succeeded

    -The XFR transfer of the given zone was successfully completed.

    XFRIN_ZONE_CREATED Zone %1 not found in the given data source, newly created

    On starting an xfrin session, it is identified that the zone to be transferred is not found in the data source. This can happen if a @@ -2509,11 +3043,15 @@ do not understand or support. The xfrout request will be ignored. In general, this should only occur for unexpected problems like memory allocation failures, as the query should already have been parsed by the b10-auth daemon, before it was passed here. -

    XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %2

    -There was an error processing a transfer request. The error is included -in the log message, but at this point no specific information other -than that could be given. This points to incomplete exception handling -in the code. +

    XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %1

    +There was an error in receiving a transfer request from b10-auth. +This is generally an unexpected event, but is possible when, for +example, b10-auth terminates in the middle of forwarding the request. +When this happens it's unlikely to be recoverable with the same +communication session with b10-auth, so b10-xfrout drops it and +waits for a new session. In any case, this error indicates that +there's something very wrong in the system, so it's advisable to check +the over all status of the BIND 10 system.

    XFROUT_QUERY_DROPPED %1 client %2: request to transfer %3 dropped

    The xfrout process silently dropped a request to transfer zone to given host. This is required by the ACLs. The %2 represents the IP @@ -2538,9 +3076,16 @@ The xfrout daemon received a shutdown command from the command channel and will now shut down.

    XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection

    There was an error receiving the file descriptor for the transfer -request. Normally, the request is received by b10-auth, and passed on -to the xfrout daemon, so it can answer directly. However, there was a -problem receiving this file descriptor. The request will be ignored. +request from b10-auth. There can be several reasons for this, but +the most likely cause is that b10-auth terminates for some reason +(maybe it's a bug of b10-auth, maybe it's an intentional restart by +the administrator), so depending on how this happens it may or may not +be a serious error. But in any case this is not expected to happen +frequently, and it's advisable to figure out how this happened if +this message is logged. Even if this error happens xfrout will reset +its internal state and will keep receiving further requests. So +if it's just a temporary restart of b10-auth the administrator does +not have to do anything.

    XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2

    The unix socket file xfrout needs for contact with the auth daemon already exists, and needs to be removed first, but there is a problem @@ -2557,6 +3102,9 @@ the xfrout daemon that a new xfrout request has arrived. This should be a result of rare local error such as memory allocation failure and shouldn't happen under normal conditions. The error is included in the log message. +

    XFROUT_STARTED xfrout started

    +This informational message is output by xfrout when all initialization +has been completed and it is entering its main loop.

    XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down

    There was a keyboard interrupt signal to stop the xfrout daemon. The daemon will now shut down. @@ -2673,6 +3221,9 @@ connecting to the command channel daemon. The most usual cause of this problem is that the daemon is not running.

    ZONEMGR_SHUTDOWN zone manager has shut down

    A debug message, output when the zone manager has shut down completely. +

    ZONEMGR_STARTED zonemgr started

    +This informational message is output by zonemgr when all initialization +has been completed and it is entering its main loop.

    ZONEMGR_STARTING zone manager starting

    A debug message output when the zone manager starts up.

    ZONEMGR_TIMER_THREAD_RUNNING trying to start timer thread but one is already running

    @@ -2683,9 +3234,11 @@ a problem with stopping a previous instance of the timer. Please submit a bug report.

    ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager

    An XFRIN operation has failed but the zone that was the subject of the -operation is not being managed by the zone manager. This may indicate -an error in the program (as the operation should not have been initiated -if this were the case). Please submit a bug report. +operation is not being managed by the zone manager. This can be either the +result of a bindctl command to transfer in a currently unknown (or mistyped) +zone, or, if this error appears without the administrator giving transfer +commands, it can indicate an error in the program, as it should not have +initiated transfers of unknown zones on its own.

    ZONEMGR_UNKNOWN_ZONE_NOTIFIED notified zone %1 (class %2) is not known to the zone manager

    A NOTIFY was received but the zone that was the subject of the operation is not being managed by the zone manager. This may indicate an error diff --git a/doc/guide/bind10-messages.xml b/doc/guide/bind10-messages.xml index 60f9665eb5..e0e192f5db 100644 --- a/doc/guide/bind10-messages.xml +++ b/doc/guide/bind10-messages.xml @@ -303,6 +303,24 @@ discovered that the memory data source is enabled for the given class. + +AUTH_MESSAGE_FORWARD_ERROR failed to forward %1 request from %2: %3 + +The authoritative server tried to forward some type DNS request +message to a separate process (e.g., forwarding dynamic update +requests to b10-ddns) to handle it, but it failed. The authoritative +server returns SERVFAIL to the client on behalf of the separate +process. The error could be configuration mismatch between b10-auth +and the recipient component, or it may be because the requests are +coming too fast and the receipient process cannot keep up with the +rate, or some system level failure. In either case this means the +BIND 10 system is not working as expected, so the administrator should +look into the cause and address the issue. The log message includes +the client's address (and port), and the error message sent from the +lower layer that detects the failure. + + + AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY @@ -882,6 +900,15 @@ The boss module is sending a SIGTERM signal to the given process. + +BIND10_SETGID setting GID to %1 + +The boss switches the process group ID to the given value. This happens +when BIND 10 starts with the -u option, and the group ID will be set to +that of the specified user. + + + BIND10_SETUID setting UID to %1 @@ -1399,7 +1426,7 @@ Debug message. The RRset is updating its data with this given RRset. 
-CC_ASYNC_READ_FAILED asynchronous read failed +CC_ASYNC_READ_FAILED asynchronous read failed (error code = %1) This marks a low level error, we tried to read data from the message queue daemon asynchronously, but the ASIO library returned an error. @@ -1588,6 +1615,16 @@ are now applied, and no action from the administrator is necessary. + +CFGMGR_BACKED_UP_CONFIG_FILE Config file %1 was removed; a backup was made at %2 + +BIND 10 has been started with the command to clear the configuration +file. The existing file has been backed up (moved) to the given file +name. A new configuration file will be created in the original location +when necessary. + + + CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE Unable to parse response from module %1: %2 @@ -1607,6 +1644,14 @@ system. The most likely cause is that msgq is not running. + +CFGMGR_CONFIG_FILE Configuration manager starting with configuration file: %1 + +The configuration manager is starting, reading and saving the configuration +settings to the shown file. + + + CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1 @@ -1639,15 +1684,6 @@ configuration is not stored. - -CFGMGR_RENAMED_CONFIG_FILE renamed configuration file %1 to %2, will create new %1 - -BIND 10 has been started with the command to clear the configuration file. -The existing file is backed up to the given file name, so that data is not -immediately lost if this was done by accident. - - - CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down @@ -2057,6 +2093,58 @@ in the answer as a result. + +DATASRC_DATABASE_FINDNSEC3 Looking for NSEC3 for %1 in %2 mode + +Debug information. A search in an database data source for NSEC3 that +matches or covers the given name is being started. + + + + +DATASRC_DATABASE_FINDNSEC3_COVER found a covering NSEC3 for %1 at label count %2: %3 + +Debug information. An NSEC3 that covers the given name is found and +being returned. The found NSEC3 RRset is also displayed. 
When the shown label
+count is smaller than that of the given name, the matching NSEC3 is for a
+superdomain of the given name (see DATASRC_DATABASE_FINDNSEC3_TRYHASH). The
+found NSEC3 RRset is also displayed.
+
+
+
+
+DATASRC_DATABASE_FINDNSEC3_MATCH found a matching NSEC3 for %1 at label count %2: %3
+
+Debug information. An NSEC3 that matches (a possibly superdomain of)
+the given name is found and being returned. When the shown label
+count is smaller than that of the given name, the matching NSEC3 is
+for a superdomain of the given name (see DATASRC_DATABASE_FINDNSEC3_TRYHASH).
+The found NSEC3 RRset is also displayed.
+
+
+
+
+DATASRC_DATABASE_FINDNSEC3_TRYHASH looking for NSEC3 for %1 at label count %2 (hash %3)
+
+Debug information. In an attempt to find an NSEC3 for the given name,
+(a possibly superdomain of) the name is hashed and searched for in the
+NSEC3 name space. When the shown label count is smaller than that of the
+shown name, the search tries the superdomain name that shares the shown
+(higher) label count of the shown name (e.g., for
+www.example.com. with shown label count of 3, example.com. is being
+tried, as "." is 1 label long).
+
+
+
+
+DATASRC_DATABASE_FINDNSEC3_TRYHASH_PREV looking for previous NSEC3 for %1 at label count %2 (hash %3)
+
+Debug information. An exact match on hash (see
+DATASRC_DATABASE_FINDNSEC3_TRYHASH) was unsuccessful. We get the previous hash
+to that one instead.
+
+
+
+
 DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3/%4 @@ -2155,7 +2243,7 @@ has been requested and returned. -DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %5 +DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2 The data returned by the database backend contained data for the given domain name, and it either matches the type or has a relevant type. The RRset that is @@ -2189,10 +2277,12 @@ The name and RRtype of the RRset is indicated in the message.
DATASRC_DATABASE_ITERATE_TTL_MISMATCH TTL values differ for RRs of %1/%2/%3, setting to %4 -While iterating through the zone, the time to live for RRs of the given RRset -were found to be different. This isn't allowed on the wire and is considered -an error, so we set it to the lowest value we found (but we don't modify the -database). The data in database should be checked and fixed. +While iterating through the zone, the time to live for RRs of the +given RRset were found to be different. Since an RRset cannot have +multiple TTLs, we set it to the lowest value we found (but we don't +modify the database). This is what the client would do when such RRs +were given in a DNS response according to RFC2181. The data in +database should be checked and fixed. @@ -2352,7 +2442,7 @@ namespace but has no RRs assopciated with it). This will produce NXRRSET. -DATASRC_DATABASE_WILDCARD_MATCH search in datasource %1 resulted in wildcard match at %5 with RRset %6 +DATASRC_DATABASE_WILDCARD_MATCH search in datasource %1 resulted in wildcard match at %2 with RRset %3 The database doesn't contain directly matching name. When searching for a wildcard match, a wildcard record matching the name and type of @@ -3096,6 +3186,21 @@ Debug information. The SQLite data source is closing the database file. + +DATASRC_SQLITE_COMPATIBLE_VERSION database schema V%1.%2 not up to date (expecting V%3.%4) but is compatible + +The version of the SQLite3 database schema used to hold the zone data +is not the latest one - the current version of BIND 10 was written +with a later schema version in mind. However, the database is +compatible with the current version of BIND 10, and BIND 10 will run +without any problems. + +Consult the release notes for your version of BIND 10. Depending on +the changes made to the database schema, it is possible that improved +performance could result if the database were upgraded. 
+ + + DATASRC_SQLITE_CONNCLOSE Closing sqlite database @@ -3235,6 +3340,18 @@ But it doesn't contain that zone. + +DATASRC_SQLITE_INCOMPATIBLE_VERSION database schema V%1.%2 incompatible with version (V%3.%4) expected + +The version of the SQLite3 database schema used to hold the zone data +is incompatible with the version expected by BIND 10. As a result, +BIND 10 is unable to run using the database file as the data source. + +The database should be updated using the means described in the BIND +10 documentation. + + + DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized @@ -3517,6 +3634,16 @@ is logged. + +DDNS_AUTH_DBFILE_UPDATE updated auth DB file to %1 + +b10-ddns was notified of updates to the SQLite3 DB file that b10-auth +uses for the underlying data source and on which b10-ddns needs to +make updates. b10-ddns then updated its internal setup so further +updates would be made on the new DB. + + + DDNS_CC_SESSION_ERROR error reading from cc channel: %1 @@ -3542,6 +3669,18 @@ startup time. Details of the error are included in the log message. + +DDNS_CONFIG_HANDLER_ERROR failed to update ddns configuration: %1 + +An update to b10-ddns configuration was delivered but an error was +found while applying them. None of the delivered updates were applied +to the running b10-ddns system, and the server will keep running with +the existing configuration. If this happened in the initial +configuration setup, the server will be running with the default +configurations. + + + DDNS_DROP_CONN dropping connection on file descriptor %1 because of error %2 @@ -3553,6 +3692,33 @@ confused and sent bad data. + +DDNS_GET_REMOTE_CONFIG_FAIL failed to get %1 module configuration %2 times: %3 + +b10-ddns tried to get configuration of some remote modules for its +operation, but it failed. The most likely cause of this is that the +remote module has not fully started up and b10-ddns couldn't get the +configuration in a timely fashion. 
b10-ddns attempts to retry it a +few times, imposing a short delay, hoping it eventually succeeds if +it's just a timing issue. The number of total failed attempts is also +logged. If it reaches an internal threshold b10-ddns considers it a +fatal error and terminates. Even in that case, if b10-ddns is +configured as a "dispensable" component (which is the default), the +parent bind10 process will restart it, and there will be another +chance of getting the remote configuration successfully. These are +not the optimal behavior, but it's believed to be sufficient in +practice (there would normally be no failure in the first place). If +it really causes an operational trouble other than having a few of +these log messages, please submit a bug report; there can be several +ways to make it more sophisticated. Another, less likely reason for +having this error is because the remote modules are not actually +configured to run. If that's the case fixing the configuration should +solve the problem - either by making sure the remote module will run +or by not running b10-ddns (without these remote modules b10-ddns is +not functional, so there's no point in running it in this case). + + + DDNS_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1 @@ -3574,6 +3740,15 @@ coming from a b10-auth process. + +DDNS_RECEIVED_AUTH_UPDATE received configuration updates from auth server + +b10-ddns is notified of updates to b10-auth configuration +(including a report of the initial configuration) that b10-ddns might +be interested in. + + + DDNS_RECEIVED_SHUTDOWN_COMMAND shutdown command received @@ -3582,11 +3757,105 @@ and will now shut down. - -DDNS_RUNNING ddns server is running and listening for updates + +DDNS_RECEIVED_ZONEMGR_UPDATE received configuration updates from zonemgr -The ddns process has successfully started and is now ready to receive commands -and updates. 
+b10-ddns is notified of updates to b10-zonemgr's configuration +(including a report of the initial configuration). It may possibly +contain changes to the secondary zones, in which case b10-ddns will +update its internal copy of that configuration. + + + + +DDNS_REQUEST_PARSE_FAIL failed to parse update request: %1 + +b10-ddns received an update request via b10-auth, but the received +data failed to pass minimum validation: it was either broken wire +format data for a valid DNS message (e.g. it's shorter than the +fixed-length header), or the opcode is not update, or TSIG is included +in the request but it fails to validate. Since b10-auth should have +performed this level of checks, such an error shouldn't be detected at +this stage and should rather be considered an internal bug. This +event is therefore logged at the error level, and the request is +simply dropped. Additional information of the error is also logged. + + + + +DDNS_REQUEST_TCP_QUOTA reject TCP update client %1 (%2 running) + +b10-ddns received a new update request from a client over TCP, but +the number of TCP clients being handled by the server already reached +the configured quota, so the latest client was rejected by closing +the connection. The administrator may want to check the status of +b10-ddns, and if this happens even if the server is not very busy, +the quota may have to be increased. Or, if it's more likely to be +malicious or simply bogus clients that somehow keep the TCP connection +open for a long period, maybe they should be rejected with an +appropriate ACL configuration or some lower layer filtering. The +number of existing TCP clients are shown in the log, which should be +identical to the current quota. + + + + +DDNS_RESPONSE_SOCKET_ERROR failed to send update response to %1: %2 + +Network I/O error happens in sending an update response. The +client's address that caused the error and error details are also +logged. 
+ + + + +DDNS_RESPONSE_TCP_SOCKET_ERROR failed to complete sending update response to %1 over TCP + +b10-ddns had tried to send an update response over TCP, and it hadn't +been completed at that time, and a followup attempt to complete the +send operation failed due to some network I/O error. While a network +error can happen any time, this event is quite unexpected for two +reasons. First, since the size of a response to an update request +should be generally small, it's unlikely that the initial attempt +didn't fail but wasn't completed. Second, since the first attempt +succeeded and the TCP connection had been established in the first +place, it's more likely for the subsequent attempt to succeed. In any +case, there may not be able to do anything to fix it at the server +side, but the administrator may want to check the general reachability +with the client address. + + + + +DDNS_SECONDARY_ZONES_UPDATE updated secondary zone list (%1 zones are listed) + +b10-ddns has successfully updated the internal copy of secondary zones +obtained from b10-zonemgr, based on a latest update to zonemgr's +configuration. The number of newly configured (unique) secondary +zones is logged. + + + + +DDNS_SECONDARY_ZONES_UPDATE_FAIL failed to update secondary zone list: %1 + +An error message. b10-ddns was notified of updates to a list of +secondary zones from b10-zonemgr and tried to update its own internal +copy of the list, but it failed. This can happen if the configuration +contains an error, and b10-zonemgr should also reject that update. +Unfortunately, in the current implementation there is no way to ensure +that both zonemgr and ddns have consistent information when an update +contains an error; further, as of this writing zonemgr has a bug that +it could partially update the list of secondary zones if part of the +list has an error (see Trac ticket #2038). 
b10-ddns still keeps +running with the previous configuration, but it's strongly advisable +to check log messages from zonemgr, and if it indicates there can be +inconsistent state, it's better to restart the entire BIND 10 system +(just restarting b10-ddns wouldn't be enough, because zonemgr can have +partially updated configuration due to bug #2038). The log message +contains an error description, but it's intentionally kept simple as +it's primarily a matter of zonemgr. To know the details of the error, +log messages of zonemgr should be consulted. @@ -3608,6 +3877,14 @@ be completed, after which the process will exit. + +DDNS_STARTED ddns server is running and listening for updates + +The ddns process has successfully started and is now ready to receive commands +and updates. + + + DDNS_STOPPED ddns server has stopped @@ -3633,6 +3910,362 @@ normal circumstances. The exception type and message are printed. + +DDNS_UPDATE_NOTIFY notified %1 of updates to %2 + +Debug message. b10-ddns has made updates to a zone based on an update +request and has successfully notified an external module of the updates. +The notified module will use that information for updating its own +state or any necessary protocol action such as zone reloading or sending +notify messages to secondary servers. + + + + +DDNS_UPDATE_NOTIFY_FAIL failed to notify %1 of updates to %2: %3 + +b10-ddns has made updates to a zone based on an update request and +tried to notify an external component of the updates, but the +notification fails. One possible cause of this is that the external +component is not really running and it times out in waiting for the +response, although it will be less likely to happen in practice +because these components will normally be configured to run when the +server provides the authoritative DNS service; ddns is rather optional +among them. 
If this happens, however, it will suspend b10-ddns for a
+few seconds during which it cannot handle new requests (some may be
+delayed, some may be dropped, depending on the volume of the incoming
+requests). This is obviously bad, and if this error happens due to
+this reason, the administrator should make sure the component in
+question is configured to run. For a longer term, b10-ddns
+should be more robust about this case such as by making this
+notification asynchronous and/or detecting the existence of the
+external components to avoid hopeless notification in the first place.
+Severity of this error for the receiving components depends on the
+type of the component. If it's b10-xfrout, this means DNS notify
+messages won't be sent to secondary servers of the zone. It's
+suboptimal, but not necessarily critical as the secondary servers will
+try to check the zone's status periodically. If it's b10-auth and the
+notification was needed to have it reload the corresponding zone, it's
+more serious because b10-auth won't be able to serve the new version
+of the zone unless some explicit recovery action is taken. So the
+administrator needs to examine this message and take an appropriate
+action. In either case, this notification is generally expected to
+succeed; so the fact it fails itself means there's something wrong in
+the BIND 10 system, and it would be advisable to check other log
+messages.
+
+
+
+
+LIBDDNS_DATASRC_ERROR update client %1 failed due to data source error: %2
+
+An update attempt failed due to some error in the corresponding data
+source. This is generally an unexpected event, but can still happen
+for various reasons such as DB lock contention or a failure of the
+backend DB server. The cause of the error is also logged. It's
+advisable to check the message, and, if necessary, take an appropriate
+action (e.g., restarting the DB server if it dies).
If this message +is logged the data source isn't modified due to the +corresponding update request. When used by the b10-ddns, the server +will return a response with an RCODE of SERVFAIL. + + + + +LIBDDNS_PREREQ_FORMERR update client %1 for zone %2: Format error in prerequisite (%3). Non-zero TTL. + +The prerequisite with the given name, class and type is not well-formed. +The specific prerequisite is shown. In this case, it has a non-zero TTL value. +A FORMERR error response is sent to the client. + + + + +LIBDDNS_PREREQ_FORMERR_ANY update client %1 for zone %2: Format error in prerequisite (%3). Non-zero TTL or rdata found. + +The prerequisite with the given name, class and type is not well-formed. +The specific prerequisite is shown. In this case, it either has a non-zero +TTL value, or has rdata fields. A FORMERR error response is sent to the client. + + + + +LIBDDNS_PREREQ_FORMERR_CLASS update client %1 for zone %2: Format error in prerequisite (%3). Bad class. + +The prerequisite with the given name, class and type is not well-formed. +The specific prerequisite is shown. In this case, the class of the +prerequisite should either match the class of the zone in the Zone Section, +or it should be ANY or NONE, and it is not. A FORMERR error response is sent +to the client. + + + + +LIBDDNS_PREREQ_FORMERR_NONE update client %1 for zone %2: Format error in prerequisite (%3). Non-zero TTL or rdata found. + +The prerequisite with the given name, class and type is not well-formed. +The specific prerequisite is shown. In this case, it either has a non-zero +TTL value, or has rdata fields. A FORMERR error response is sent to the client. + + + + +LIBDDNS_PREREQ_NAME_IN_USE_FAILED update client %1 for zone %2: 'Name is in use' prerequisite not satisfied (%3), rcode: %4 + +A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that +was not satisfied is shown. The client is sent an error response with the +given rcode. 
+In this case, the specific prerequisite is 'Name is in use'. From RFC2136: +Name is in use. At least one RR with a specified NAME (in +the zone and class specified by the Zone Section) must exist. +Note that this prerequisite is NOT satisfied by empty +nonterminals. + + + + +LIBDDNS_PREREQ_NAME_NOT_IN_USE_FAILED update client %1 for zone %2: 'Name is not in use' (%3) prerequisite not satisfied, rcode: %4 + +A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that +was not satisfied is shown. The client is sent an error response with the +given rcode. +In this case, the specific prerequisite is 'Name is not in use'. +From RFC2136: +Name is not in use. No RR of any type is owned by a +specified NAME. Note that this prerequisite IS satisfied by +empty nonterminals. + + + + +LIBDDNS_PREREQ_NOTZONE update client %1 for zone %2: prerequisite not in zone (%3) + +A DDNS UPDATE prerequisite has a name that does not appear to be inside +the zone specified in the Zone section of the UPDATE message. +The specific prerequisite is shown. A NOTZONE error response is sent to +the client. + + + + +LIBDDNS_PREREQ_RRSET_DOES_NOT_EXIST_FAILED update client %1 for zone %2: 'RRset does not exist' (%3) prerequisite not satisfied, rcode: %4 + +A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that +was not satisfied is shown. The client is sent an error response with the +given rcode. +In this case, the specific prerequisite is 'RRset does not exist'. +From RFC2136: +RRset does not exist. No RRs with a specified NAME and TYPE +(in the zone and class denoted by the Zone Section) can exist. + + + + +LIBDDNS_PREREQ_RRSET_EXISTS_FAILED update client %1 for zone %2: 'RRset exists (value independent)' (%3) prerequisite not satisfied, rcode: %4 + +A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that +was not satisfied is shown. The client is sent an error response with the +given rcode. 
+In this case, the specific prerequisite is 'RRset exists (value independent)'.
+From RFC2136:
+RRset exists (value independent). At least one RR with a
+specified NAME and TYPE (in the zone and class specified by
+the Zone Section) must exist.
+
+
+
+
+LIBDDNS_PREREQ_RRSET_EXISTS_VAL_FAILED update client %1 for zone %2: 'RRset exists (value dependent)' (%3) prerequisite not satisfied, rcode: %4
+
+A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that
+was not satisfied is shown. The client is sent an error response with the
+given rcode.
+In this case, the specific prerequisite is 'RRset exists (value dependent)'.
+From RFC2136:
+RRset exists (value dependent). A set of RRs with a
+specified NAME and TYPE exists and has the same members
+with the same RDATAs as the RRset specified here in this
+Section.
+
+
+
+
+LIBDDNS_UPDATE_ADD_BAD_TYPE update client %1 for zone %2: update addition RR bad type: %3
+
+The Update section of a DDNS update message contains a statement
+that tries to add a record of an invalid type. Most likely the
+record has an RRType that is considered a 'meta' type, which
+cannot be zone content data. The specific record is shown.
+A FORMERR response is sent back to the client.
+
+
+
+
+LIBDDNS_UPDATE_APPROVED update client %1 for zone %2 approved
+
+Debug message. An update request was approved in terms of the zone's
+update ACL.
+
+
+
+
+LIBDDNS_UPDATE_BAD_CLASS update client %1 for zone %2: bad class in update RR: %3
+
+The Update section of a DDNS update message contains an RRset with
+a bad class. The class of the update RRset must be either the same
+as the class in the Zone Section, ANY, or NONE.
+A FORMERR response is sent back to the client.
+
+
+
+
+LIBDDNS_UPDATE_DATASRC_ERROR error in datasource during DDNS update: %1
+
+An error occurred while committing the DDNS update changes to the
+datasource. The specific error is printed. A SERVFAIL response is sent
+back to the client.
+ + + + +LIBDDNS_UPDATE_DELETE_BAD_TYPE update client %1 for zone %2: update deletion RR bad type: %3 + +The Update section of a DDNS update message contains a statement +that tries to delete an rrset of an invalid type. Most likely the +record has an RRType that is considered a 'meta' type, which +cannot be zone content data. The specific record is shown. +A FORMERR response is sent back to the client. + + + + +LIBDDNS_UPDATE_DELETE_NONZERO_TTL update client %1 for zone %2: update deletion RR has non-zero TTL: %3 + +The Update section of a DDNS update message contains a 'delete rrset' +statement with a non-zero TTL. This is not allowed by the protocol. +A FORMERR response is sent back to the client. + + + + +LIBDDNS_UPDATE_DELETE_RRSET_NOT_EMPTY update client %1 for zone %2: update deletion RR contains data %3 + +The Update section of a DDNS update message contains a 'delete rrset' +statement with a non-empty RRset. This is not allowed by the protocol. +A FORMERR response is sent back to the client. + + + + +LIBDDNS_UPDATE_DELETE_RR_BAD_TYPE update client %1 for zone %2: update deletion RR bad type: %3 + +The Update section of a DDNS update message contains a statement +that tries to delete one or more rrs of an invalid type. Most +likely the records have an RRType that is considered a 'meta' +type, which cannot be zone content data. The specific record is +shown. A FORMERR response is sent back to the client. + + + + +LIBDDNS_UPDATE_DELETE_RR_NONZERO_TTL update client %1 for zone %2: update deletion RR has non-zero TTL: %3 + +The Update section of a DDNS update message contains a 'delete rrs' +statement with a non-zero TTL. This is not allowed by the protocol. +A FORMERR response is sent back to the client. + + + + +LIBDDNS_UPDATE_DENIED update client %1 for zone %2 denied + +Informational message. An update request was denied because it was +rejected by the zone's update ACL. 
When this library is used by +b10-ddns, the server will respond to the request with an RCODE of +REFUSED as described in Section 3.3 of RFC2136. + + + + +LIBDDNS_UPDATE_DROPPED update client %1 for zone %2 dropped + +Informational message. An update request was denied because it was +rejected by the zone's update ACL. When this library is used by +b10-ddns, the server will then completely ignore the request; no +response will be sent. + + + + +LIBDDNS_UPDATE_ERROR update client %1 for zone %2: %3 + +Debug message. An error is found in processing a dynamic update +request. This log message is used for general errors that are not +normally expected to happen. So, in general, it would mean some +problem in the client implementation or an interoperability issue +with this implementation. The client's address, the zone name and +class, and description of the error are logged. + + + + +LIBDDNS_UPDATE_FORWARD_FAIL update client %1 for zone %2: update forwarding not supported + +Debug message. An update request is sent to a secondary server. This +is not necessarily invalid, but this implementation does not yet +support update forwarding as specified in Section 6 of RFC2136 and it +will simply return a response with an RCODE of NOTIMP to the client. +The client's address and the zone name/class are logged. + + + + +LIBDDNS_UPDATE_NOTAUTH update client %1 for zone %2: not authoritative for update zone + +Debug message. An update request was received for a zone for which +the receiving server doesn't have authority. In theory this is an +unexpected event, but there are client implementations that could send +update requests carelessly, so it may not necessarily be so uncommon +in practice. If possible, you may want to check the implementation or +configuration of those clients to suppress the requests. As specified +in Section 3.1 of RFC2136, the receiving server will return a response +with an RCODE of NOTAUTH. 
+ + + + +LIBDDNS_UPDATE_NOTZONE update client %1 for zone %2: update RR out of zone %3 + +A DDNS UPDATE record has a name that does not appear to be inside +the zone specified in the Zone section of the UPDATE message. +The specific update record is shown. A NOTZONE error response is +sent to the client. + + + + +LIBDDNS_UPDATE_PREREQUISITE_FAILED prerequisite failed in update client %1 for zone %2: result code %3 + +The handling of the prerequisite section (RFC2136 Section 3.2) found +that one of the prerequisites was not satisfied. The result code +should give more information on what prerequisite type failed. +If the result code is FORMERR, the prerequisite section was not well-formed. +An error response with the given result code is sent back to the client. + + + + +LIBDDNS_UPDATE_UNCAUGHT_EXCEPTION update client %1 for zone %2: uncaught exception while processing update section: %3 + +An uncaught exception was encountered while processing the Update +section of a DDNS message. The specific exception is shown in the log message. +To make sure DDNS service is not interrupted, this problem is caught instead +of reraised; The update is aborted, and a SERVFAIL is sent back to the client. +This is most probably a bug in the DDNS code, but *could* be caused by +the data source. + + + LIBXFRIN_DIFFERENT_TTL multiple data with different TTLs (%1, %2) on %3/%4/%5. Adjusting %2 -> %1. @@ -3750,6 +4383,13 @@ and the underscore, and should not start with a digit. + +LOG_LOCK_TEST_MESSAGE this is a test message. + +This is a log message used in testing. + + + LOG_NAMESPACE_EXTRA_ARGS line %1: $NAMESPACE directive has too many arguments @@ -4141,6 +4781,55 @@ bug report. + +PYSERVER_COMMON_AUTH_CONFIG_NAME_PARSER_ERROR Invalid name when parsing Auth configuration: %1 + +There was an invalid name when parsing Auth configuration. 
+ + + + +PYSERVER_COMMON_AUTH_CONFIG_RRCLASS_ERROR Invalid RRClass when parsing Auth configuration: %1 + +There was an invalid RR class when parsing Auth configuration. + + + + +PYSERVER_COMMON_DNS_TCP_SEND_DONE completed sending TCP message to %1 (%2 bytes in total) + +Debug message. A complete DNS message has been successfully +transmitted over a TCP connection, possibly after multiple send +operations. The destination address and the total size of the message +(including the 2-byte length field) are shown in the log message. + + + + +PYSERVER_COMMON_DNS_TCP_SEND_ERROR failed to send TCP message to %1 (%2/%3 bytes sent): %4 + +A DNS message has been attempted to be sent out over a TCP connection, +but it failed due to some network error. Although it's not expected +to happen too often, it can still happen for various reasons. The +administrator may want to examine the cause of the failure, which is +included in the log message, to see if it requires some action to +be taken at the server side. When this message is logged, the +corresponding TCP connection was closed immediately after the error +was detected. + + + + +PYSERVER_COMMON_DNS_TCP_SEND_PENDING sent part TCP message to %1 (up to %2/%3 bytes) + +Debug message. A part of DNS message has been transmitted over a TCP +connection, and it's suspended because further attempt would block. +The destination address and the total size of the message that has +been transmitted so far (including the 2-byte length field) are shown +in the log message. + + + PYSERVER_COMMON_TSIG_KEYRING_DEINIT Deinitializing global TSIG keyring @@ -5388,6 +6077,15 @@ Please check your installation. + +XFRIN_AUTH_LOADZONE sending Auth loadzone for origin=%1, class=%2, datasrc=%3 + +There was a successful zone transfer, and the zone is served by b10-auth +in the in-memory data source using sqlite3 as a backend. We send the +"loadzone" command for the zone to b10-auth. 
+ + + XFRIN_AXFR_INCONSISTENT_SOA AXFR SOAs are inconsistent for %1: %2 expected, %3 received @@ -5542,6 +6240,14 @@ was killed. + +XFRIN_MSGQ_SEND_ERROR_AUTH error while contacting %1 + +There was a problem sending a message to b10-auth. This most likely +means that the msgq daemon has quit or was killed. + + + XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1 @@ -5570,10 +6276,11 @@ zone name in the configuration. - -XFRIN_STARTING starting resolver with command line '%1' + +XFRIN_STARTED xfrin started -An informational message, this is output when the resolver starts up. +This informational message is output by xfrin when all initialization +has been completed and it is entering its main loop. @@ -5909,12 +6616,16 @@ parsed by the b10-auth daemon, before it was passed here. -XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %2 +XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %1 -There was an error processing a transfer request. The error is included -in the log message, but at this point no specific information other -than that could be given. This points to incomplete exception handling -in the code. +There was an error in receiving a transfer request from b10-auth. +This is generally an unexpected event, but is possible when, for +example, b10-auth terminates in the middle of forwarding the request. +When this happens it's unlikely to be recoverable with the same +communication session with b10-auth, so b10-xfrout drops it and +waits for a new session. In any case, this error indicates that +there's something very wrong in the system, so it's advisable to check +the over all status of the BIND 10 system. @@ -5964,9 +6675,16 @@ and will now shut down. XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection There was an error receiving the file descriptor for the transfer -request. 
Normally, the request is received by b10-auth, and passed on -to the xfrout daemon, so it can answer directly. However, there was a -problem receiving this file descriptor. The request will be ignored. +request from b10-auth. There can be several reasons for this, but +the most likely cause is that b10-auth terminates for some reason +(maybe it's a bug of b10-auth, maybe it's an intentional restart by +the administrator), so depending on how this happens it may or may not +be a serious error. But in any case this is not expected to happen +frequently, and it's advisable to figure out how this happened if +this message is logged. Even if this error happens xfrout will reset +its internal state and will keep receiving further requests. So +if it's just a temporary restart of b10-auth the administrator does +not have to do anything. @@ -6001,6 +6719,14 @@ log message. + +XFROUT_STARTED xfrout started + +This informational message is output by xfrout when all initialization +has been completed and it is entering its main loop. + + + XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down @@ -6257,6 +6983,14 @@ A debug message, output when the zone manager has shut down completely. + +ZONEMGR_STARTED zonemgr started + +This informational message is output by zonemgr when all initialization +has been completed and it is entering its main loop. 
+ + + ZONEMGR_STARTING zone manager starting diff --git a/src/Makefile.am b/src/Makefile.am index ca4a702f4e..395553c6fa 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -1 +1,6 @@ SUBDIRS = lib bin + +EXTRA_DIST = \ + cppcheck-suppress.lst \ + valgrind-suppressions \ + valgrind-suppressions.revisit diff --git a/src/bin/auth/Makefile.am b/src/bin/auth/Makefile.am index 66abbe2db6..34b515543f 100644 --- a/src/bin/auth/Makefile.am +++ b/src/bin/auth/Makefile.am @@ -51,9 +51,9 @@ b10_auth_SOURCES += statistics.cc statistics.h b10_auth_SOURCES += main.cc # This is a temporary workaround for #1206, where the InMemoryClient has been # moved to an ldopened library. We could add that library to LDADD, but that -# is nonportable. When #1207 is done this becomes moot anyway, and the -# specific workaround is not needed anymore, so we can then remove this -# line again. +# is nonportable. This should've been moot after #1207, but there is still +# one dependency; the in-memory-specific zone loader call is still in +# auth. 
b10_auth_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc nodist_b10_auth_SOURCES = auth_messages.h auth_messages.cc @@ -62,6 +62,7 @@ EXTRA_DIST += auth_messages.mes b10_auth_LDADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la b10_auth_LDADD += $(top_builddir)/src/lib/dns/libdns++.la b10_auth_LDADD += $(top_builddir)/src/lib/util/libutil.la +b10_auth_LDADD += $(top_builddir)/src/lib/util/io/libutil_io.la b10_auth_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la b10_auth_LDADD += $(top_builddir)/src/lib/cc/libcc.la b10_auth_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la diff --git a/src/bin/auth/auth_config.cc b/src/bin/auth/auth_config.cc index 3a04dc8a6b..c85a4eeccb 100644 --- a/src/bin/auth/auth_config.cc +++ b/src/bin/auth/auth_config.cc @@ -43,22 +43,19 @@ using namespace isc::datasrc; using namespace isc::server_common::portconfig; namespace { -// Forward declaration -AuthConfigParser* -createAuthConfigParser(AuthSrv& server, const std::string& config_id, - bool internal); - /// A derived \c AuthConfigParser class for the "datasources" configuration /// identifier. 
class DatasourcesConfig : public AuthConfigParser { public: - DatasourcesConfig(AuthSrv& server) : server_(server) {} + DatasourcesConfig(AuthSrv& server) : server_(server) + {} virtual void build(ConstElementPtr config_value); virtual void commit(); private: AuthSrv& server_; vector > datasources_; set configured_sources_; + vector > clients_; }; /// A derived \c AuthConfigParser for the version value @@ -86,14 +83,40 @@ DatasourcesConfig::build(ConstElementPtr config_value) { isc_throw(AuthConfigError, "Data source type '" << datasrc_type->stringValue() << "' already configured"); } - - boost::shared_ptr datasrc_config = - boost::shared_ptr( - createAuthConfigParser(server_, string("datasources/") + - datasrc_type->stringValue(), - true)); - datasrc_config->build(datasrc_elem); - datasources_.push_back(datasrc_config); + + // Apart from that it's not really easy to get at the default + // class value for the class here, it should probably really + // be a property of the instantiated data source. For now + // use hardcoded default IN. + const RRClass rrclass = + datasrc_elem->contains("class") ? + RRClass(datasrc_elem->get("class")->stringValue()) : RRClass::IN(); + + // Right now, we only support the in-memory data source for the + // RR class of IN. We reject other cases explicitly by hardcoded + // checks. This will soon be generalized, at which point these + // checks will also have to be cleaned up. + if (rrclass != RRClass::IN()) { + isc_throw(isc::InvalidParameter, "Unsupported data source class: " + << rrclass); + } + if (datasrc_type->stringValue() != "memory") { + isc_throw(AuthConfigError, "Unsupported data source type: " + << datasrc_type->stringValue()); + } + + // Create a new client for the specified data source and store it + // in the local vector. For now, we always build a new client + // from the scratch, and replace any existing ones with the new ones. 
+ // We might eventually want to optimize building zones (in case of + // reloading) by selectively loading fresh zones for data source + // where zone loading is expensive (such as in-memory). + clients_.push_back( + pair( + rrclass, + DataSourceClientContainerPtr(new DataSourceClientContainer( + datasrc_type->stringValue(), + datasrc_elem)))); configured_sources_.insert(datasrc_type->stringValue()); } @@ -101,122 +124,19 @@ DatasourcesConfig::build(ConstElementPtr config_value) { void DatasourcesConfig::commit() { - // XXX a short term workaround: clear all data sources and then reset - // to new ones so that we can remove data sources that don't exist in - // the new configuration and have been used in the server. - // This could be inefficient and requires knowledge about - // server implementation details, and isn't scalable wrt the number of - // data source types, and should eventually be improved. - // Currently memory data source for class IN is the only possibility. - server_.setInMemoryClient(RRClass::IN(), AuthSrv::InMemoryClientPtr()); - - BOOST_FOREACH(boost::shared_ptr datasrc_config, - datasources_) { - datasrc_config->commit(); - } -} - -/// A derived \c AuthConfigParser class for the memory type datasource -/// configuration. It does not correspond to the configuration syntax; -/// it's instantiated for internal use. -class MemoryDatasourceConfig : public AuthConfigParser { -public: - MemoryDatasourceConfig(AuthSrv& server) : - server_(server), - rrclass_(0) // XXX: dummy initial value - {} - virtual void build(ConstElementPtr config_value); - virtual void commit() { - server_.setInMemoryClient(rrclass_, memory_client_); - } -private: - AuthSrv& server_; - RRClass rrclass_; - AuthSrv::InMemoryClientPtr memory_client_; -}; - -void -MemoryDatasourceConfig::build(ConstElementPtr config_value) { - // XXX: apparently we cannot retrieve the default RR class from the - // module spec. As a temporary workaround we hardcode the default value. 
- ConstElementPtr rrclass_elem = config_value->get("class"); - rrclass_ = RRClass(rrclass_elem ? rrclass_elem->stringValue() : "IN"); - - // We'd eventually optimize building zones (in case of reloading) by - // selectively loading fresh zones. Right now we simply check the - // RR class is supported by the server implementation. - server_.getInMemoryClient(rrclass_); - memory_client_ = AuthSrv::InMemoryClientPtr(new InMemoryClient()); - - ConstElementPtr zones_config = config_value->get("zones"); - if (!zones_config) { - // XXX: Like the RR class, we cannot retrieve the default value here, - // so we assume an empty zone list in this case. - return; - } - - BOOST_FOREACH(ConstElementPtr zone_config, zones_config->listValue()) { - ConstElementPtr origin = zone_config->get("origin"); - const string origin_txt = origin ? origin->stringValue() : ""; - if (origin_txt.empty()) { - isc_throw(AuthConfigError, "Missing zone origin"); - } - ConstElementPtr file = zone_config->get("file"); - const string file_txt = file ? file->stringValue() : ""; - if (file_txt.empty()) { - isc_throw(AuthConfigError, "Missing zone file for zone: " - << origin_txt); - } - - // We support the traditional text type and SQLite3 backend. For the - // latter we create a client for the underlying SQLite3 data source, - // and build the in-memory zone using an iterator of the underlying - // zone. - ConstElementPtr filetype = zone_config->get("filetype"); - const string filetype_txt = filetype ? filetype->stringValue() : - "text"; - boost::scoped_ptr container; - if (filetype_txt == "sqlite3") { - container.reset(new DataSourceClientContainer( - "sqlite3", - Element::fromJSON("{\"database_file\": \"" + - file_txt + "\"}"))); - } else if (filetype_txt != "text") { - isc_throw(AuthConfigError, "Invalid filetype for zone " - << origin_txt << ": " << filetype_txt); - } - - // Note: we don't want to have such small try-catch blocks for each - // specific error. 
We may eventually want to introduce some unified - // error handling framework as we have more configuration parameters. - // See bug #1627 for the relevant discussion. - InMemoryZoneFinder* imzf = NULL; - try { - imzf = new InMemoryZoneFinder(rrclass_, Name(origin_txt)); - } catch (const isc::dns::NameParserException& ex) { - isc_throw(AuthConfigError, "unable to parse zone's origin: " << - ex.what()); - } - - boost::shared_ptr zone_finder(imzf); - const result::Result result = memory_client_->addZone(zone_finder); - if (result == result::EXIST) { - isc_throw(AuthConfigError, "zone "<< origin->str() - << " already exists"); - } - - /* - * TODO: Once we have better reloading of configuration (something - * else than throwing everything away and loading it again), we will - * need the load method to be split into some kind of build and - * commit/abort parts. - */ - if (filetype_txt == "text") { - zone_finder->load(file_txt); - } else { - zone_finder->load(*container->getInstance().getIterator( - Name(origin_txt))); - } + // As noted in build(), the current implementation only supports the + // in-memory data source for class IN, and build() should have ensured + // it. So, depending on the vector is empty or not, we either clear + // or install an in-memory data source for the server. + // + // When we generalize it, we'll somehow install all data source clients + // built in the vector, clearing deleted ones from the server. + if (clients_.empty()) { + server_.setInMemoryClient(RRClass::IN(), + DataSourceClientContainerPtr()); + } else { + server_.setInMemoryClient(clients_.front().first, + clients_.front().second); } } @@ -314,13 +234,10 @@ private: */ AddrListPtr rollbackAddresses_; }; +} // end of unnamed namespace -// This is a generalized version of create function that can create -// an AuthConfigParser object for "internal" use. 
AuthConfigParser* -createAuthConfigParser(AuthSrv& server, const std::string& config_id, - bool internal) -{ +createAuthConfigParser(AuthSrv& server, const std::string& config_id) { // For the initial implementation we use a naive if-else blocks for // simplicity. In future we'll probably generalize it using map-like // data structure, and may even provide external register interface so @@ -329,8 +246,6 @@ createAuthConfigParser(AuthSrv& server, const std::string& config_id, return (new DatasourcesConfig(server)); } else if (config_id == "statistics-interval") { return (new StatisticsIntervalConfig(server)); - } else if (internal && config_id == "datasources/memory") { - return (new MemoryDatasourceConfig(server)); } else if (config_id == "listen_on") { return (new ListenAddressConfig(server)); } else if (config_id == "_commit_throw") { @@ -351,12 +266,6 @@ createAuthConfigParser(AuthSrv& server, const std::string& config_id, config_id); } } -} // end of unnamed namespace - -AuthConfigParser* -createAuthConfigParser(AuthSrv& server, const std::string& config_id) { - return (createAuthConfigParser(server, config_id, false)); -} void configureAuthServer(AuthSrv& server, ConstElementPtr config_set) { diff --git a/src/bin/auth/auth_messages.mes b/src/bin/auth/auth_messages.mes index b18feb13e6..9ac2c499ef 100644 --- a/src/bin/auth/auth_messages.mes +++ b/src/bin/auth/auth_messages.mes @@ -96,6 +96,20 @@ discovered that the memory data source is disabled for the given class. This is a debug message reporting that the authoritative server has discovered that the memory data source is enabled for the given class. +% AUTH_MESSAGE_FORWARD_ERROR failed to forward %1 request from %2: %3 +The authoritative server tried to forward some type DNS request +message to a separate process (e.g., forwarding dynamic update +requests to b10-ddns) to handle it, but it failed. The authoritative +server returns SERVFAIL to the client on behalf of the separate +process. 
The error could be configuration mismatch between b10-auth +and the recipient component, or it may be because the requests are +coming too fast and the receipient process cannot keep up with the +rate, or some system level failure. In either case this means the +BIND 10 system is not working as expected, so the administrator should +look into the cause and address the issue. The log message includes +the client's address (and port), and the error message sent from the +lower layer that detects the failure. + % AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY This debug message is logged by the authoritative server when it receives a NOTIFY packet that contains zero or more than one question. (A valid diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc index 9f5642ed34..2a47c38822 100644 --- a/src/bin/auth/auth_srv.cc +++ b/src/bin/auth/auth_srv.cc @@ -14,18 +14,10 @@ #include -#include -#include - -#include -#include -#include -#include -#include - -#include +#include #include +#include #include @@ -64,6 +56,18 @@ #include #include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + using namespace std; using namespace isc; @@ -71,6 +75,7 @@ using namespace isc::cc; using namespace isc::datasrc; using namespace isc::dns; using namespace isc::util; +using namespace isc::util::io; using namespace isc::auth; using namespace isc::dns::rdata; using namespace isc::data; @@ -107,6 +112,107 @@ public: private: MessageRenderer& renderer_; }; + +// A helper container of socket session forwarder. +// +// This class provides a simple wrapper interface to SocketSessionForwarder +// so that the caller doesn't have to worry about connection management, +// exception handling or parameter building. +// +// It internally maintains whether the underlying forwarder establishes a +// connection to the receiver. 
On a forwarding request, if the connection +// hasn't been established yet, it automatically opens a new one, then +// pushes the session over it. It also closes the connection on destruction, +// or a non-recoverable error happens, automatically. So the only thing +// the application has to do is to create this object and push any session +// to be forwarded. +class SocketSessionForwarderHolder { +public: + /// \brief The constructor. + /// + /// \param message_name Any string that can identify the type of messages + /// to be forwarded via this session. It will be only used as part of + /// log message, so it can be anything, but in practice something like + /// "update" or "xfr" is expected. + /// \param forwarder The underlying socket session forwarder. + SocketSessionForwarderHolder(const string& message_name, + BaseSocketSessionForwarder& forwarder) : + message_name_(message_name), forwarder_(forwarder), connected_(false) + {} + + ~SocketSessionForwarderHolder() { + if (connected_) { + forwarder_.close(); + } + } + + /// \brief Push a socket session corresponding to given IOMessage. + /// + /// If the connection with the receiver process hasn't been established, + /// it automatically establishes one, then push the session over it. + /// + /// If either connect or push fails, the underlying forwarder object should + /// throw an exception. This method logs the event, and propagates the + /// exception to the caller, which will eventually result in SERVFAIL. + /// The connection, if established, is automatically closed, so the next + /// forward request will trigger reopening a new connection. + /// + /// \note: Right now, there's no API to retrieve the local address from + /// the IOMessage. Until it's added, we pass the remote address as + /// local. + /// + /// \param io_message The request message to be forwarded as a socket + /// session. It will be converted to the parameters that the underlying + /// SocketSessionForwarder expects. 
+ void push(const IOMessage& io_message) { + const IOEndpoint& remote_ep = io_message.getRemoteEndpoint(); + const int protocol = remote_ep.getProtocol(); + const int sock_type = getSocketType(protocol); + try { + connect(); + forwarder_.push(io_message.getSocket().getNative(), + remote_ep.getFamily(), sock_type, protocol, + remote_ep.getSockAddr(), remote_ep.getSockAddr(), + io_message.getData(), io_message.getDataSize()); + } catch (const SocketSessionError& ex) { + LOG_ERROR(auth_logger, AUTH_MESSAGE_FORWARD_ERROR). + arg(message_name_).arg(remote_ep).arg(ex.what()); + close(); + throw; + } + } + +private: + const string message_name_; + BaseSocketSessionForwarder& forwarder_; + bool connected_; + + void connect() { + if (!connected_) { + forwarder_.connectToReceiver(); + connected_ = true; + } + } + + void close() { + if (connected_) { + forwarder_.close(); + connected_ = false; + } + } + + static int getSocketType(int protocol) { + switch (protocol) { + case IPPROTO_UDP: + return (SOCK_DGRAM); + case IPPROTO_TCP: + return (SOCK_STREAM); + default: + isc_throw(isc::InvalidParameter, + "Unexpected socket address family: " << protocol); + } + } +}; } class AuthSrvImpl { @@ -115,7 +221,8 @@ private: AuthSrvImpl(const AuthSrvImpl& source); AuthSrvImpl& operator=(const AuthSrvImpl& source); public: - AuthSrvImpl(const bool use_cache, AbstractXfroutClient& xfrout_client); + AuthSrvImpl(const bool use_cache, AbstractXfroutClient& xfrout_client, + BaseSocketSessionForwarder& ddns_forwarder); ~AuthSrvImpl(); isc::data::ConstElementPtr setDbFile(isc::data::ConstElementPtr config); @@ -128,6 +235,7 @@ public: bool processNotify(const IOMessage& io_message, Message& message, OutputBuffer& buffer, auto_ptr tsig_context); + bool processUpdate(const IOMessage& io_message); IOService io_service_; @@ -141,7 +249,7 @@ public: /// In-memory data source. Currently class IN only for simplicity. 
const RRClass memory_client_class_; - AuthSrv::InMemoryClientPtr memory_client_; + isc::datasrc::DataSourceClientContainerPtr memory_client_container_; /// Hot spot cache isc::datasrc::HotCache cache_; @@ -189,6 +297,9 @@ private: bool xfrout_connected_; AbstractXfroutClient& xfrout_client_; + // Socket session forwarder for dynamic update requests + SocketSessionForwarderHolder ddns_forwarder_; + /// Increment query counter void incCounter(const int protocol); @@ -199,7 +310,8 @@ private: }; AuthSrvImpl::AuthSrvImpl(const bool use_cache, - AbstractXfroutClient& xfrout_client) : + AbstractXfroutClient& xfrout_client, + BaseSocketSessionForwarder& ddns_forwarder) : config_session_(NULL), xfrin_session_(NULL), memory_client_class_(RRClass::IN()), @@ -207,7 +319,8 @@ AuthSrvImpl::AuthSrvImpl(const bool use_cache, counters_(), keyring_(NULL), xfrout_connected_(false), - xfrout_client_(xfrout_client) + xfrout_client_(xfrout_client), + ddns_forwarder_("update", ddns_forwarder) { // cur_datasrc_ is automatically initialized by the default constructor, // effectively being an empty (sqlite) data source. once ccsession is up @@ -277,9 +390,11 @@ private: AuthSrv* server_; }; -AuthSrv::AuthSrv(const bool use_cache, AbstractXfroutClient& xfrout_client) +AuthSrv::AuthSrv(const bool use_cache, + isc::xfr::AbstractXfroutClient& xfrout_client, + isc::util::io::BaseSocketSessionForwarder& ddns_forwarder) { - impl_ = new AuthSrvImpl(use_cache, xfrout_client); + impl_ = new AuthSrvImpl(use_cache, xfrout_client, ddns_forwarder); checkin_ = new ConfigChecker(this); dns_lookup_ = new MessageLookup(this); dns_answer_ = new MessageAnswer(this); @@ -389,34 +504,46 @@ AuthSrv::getConfigSession() const { return (impl_->config_session_); } -AuthSrv::InMemoryClientPtr -AuthSrv::getInMemoryClient(const RRClass& rrclass) { - // XXX: for simplicity, we only support the IN class right now. 
+isc::datasrc::DataSourceClientContainerPtr +AuthSrv::getInMemoryClientContainer(const RRClass& rrclass) { if (rrclass != impl_->memory_client_class_) { isc_throw(InvalidParameter, "Memory data source is not supported for RR class " << rrclass); } - return (impl_->memory_client_); + return (impl_->memory_client_container_); +} + +isc::datasrc::DataSourceClient* +AuthSrv::getInMemoryClient(const RRClass& rrclass) { + if (hasInMemoryClient()) { + return (&getInMemoryClientContainer(rrclass)->getInstance()); + } else { + return (NULL); + } +} + +bool +AuthSrv::hasInMemoryClient() const { + return (impl_->memory_client_container_); } void AuthSrv::setInMemoryClient(const isc::dns::RRClass& rrclass, - InMemoryClientPtr memory_client) + DataSourceClientContainerPtr memory_client) { - // XXX: see above if (rrclass != impl_->memory_client_class_) { isc_throw(InvalidParameter, "Memory data source is not supported for RR class " << rrclass); - } else if (!impl_->memory_client_ && memory_client) { + } else if (!impl_->memory_client_container_ && memory_client) { LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_ENABLED) .arg(rrclass); - } else if (impl_->memory_client_ && !memory_client) { + } else if (impl_->memory_client_container_ && !memory_client) { LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_DISABLED) .arg(rrclass); } - impl_->memory_client_ = memory_client; + impl_->memory_client_container_ = memory_client; } uint32_t @@ -515,16 +642,19 @@ AuthSrv::processMessage(const IOMessage& io_message, Message& message, return; } + const Opcode opcode = message.getOpcode(); bool send_answer = true; try { // update per opcode statistics counter. This can only be reliable // after TSIG check succeeds. 
impl_->counters_.inc(message.getOpcode()); - if (message.getOpcode() == Opcode::NOTIFY()) { + if (opcode == Opcode::NOTIFY()) { send_answer = impl_->processNotify(io_message, message, buffer, tsig_context); - } else if (message.getOpcode() != Opcode::QUERY()) { + } else if (opcode == Opcode::UPDATE()) { + send_answer = impl_->processUpdate(io_message); + } else if (opcode != Opcode::QUERY()) { LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_UNSUPPORTED_OPCODE) .arg(message.getOpcode().toText()); makeErrorMessage(impl_->renderer_, message, buffer, @@ -534,7 +664,7 @@ AuthSrv::processMessage(const IOMessage& io_message, Message& message, Rcode::FORMERR(), tsig_context); } else { ConstQuestionPtr question = *message.beginQuestion(); - const RRType &qtype = question->getType(); + const RRType& qtype = question->getType(); if (qtype == RRType::AXFR()) { send_answer = impl_->processXfrQuery(io_message, message, buffer, tsig_context); @@ -585,10 +715,12 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, Message& message, // If a memory data source is configured call the separate // Query::process() const ConstQuestionPtr question = *message.beginQuestion(); - if (memory_client_ && memory_client_class_ == question->getClass()) { + if (memory_client_container_ && + memory_client_class_ == question->getClass()) { const RRType& qtype = question->getType(); const Name& qname = question->getName(); - query_.process(*memory_client_, qname, qtype, message, dnssec_ok); + query_.process(memory_client_container_->getInstance(), + qname, qtype, message, dnssec_ok); } else { datasrc::Query query(message, cache_, dnssec_ok); data_sources_.doQuery(query); @@ -740,6 +872,15 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, Message& message, return (true); } +bool +AuthSrvImpl::processUpdate(const IOMessage& io_message) { + // Push the update request to a separate process via the forwarder. 
+ // On successful push, the request shouldn't be responded from b10-auth, + // so we return false. + ddns_forwarder_.push(io_message); + return (false); +} + void AuthSrvImpl::incCounter(const int protocol) { // Increment query counter. diff --git a/src/bin/auth/auth_srv.h b/src/bin/auth/auth_srv.h index 3be711bd0c..18d750331b 100644 --- a/src/bin/auth/auth_srv.h +++ b/src/bin/auth/auth_srv.h @@ -17,12 +17,9 @@ #include -// For InMemoryClientPtr below. This should be a temporary definition until -// we reorganize the data source framework. -#include - #include #include +#include #include #include #include @@ -40,6 +37,11 @@ #include namespace isc { +namespace util { +namespace io { +class BaseSocketSessionForwarder; +} +} namespace datasrc { class InMemoryClient; } @@ -96,7 +98,8 @@ public: /// but can refer to a local mock object for testing (or other /// experimental) purposes. AuthSrv(const bool use_cache, - isc::xfr::AbstractXfroutClient& xfrout_client); + isc::xfr::AbstractXfroutClient& xfrout_client, + isc::util::io::BaseSocketSessionForwarder& ddns_forwarder); ~AuthSrv(); //@} @@ -235,19 +238,14 @@ public: /// void setXfrinSession(isc::cc::AbstractSession* xfrin_session); - /// A shared pointer type for \c InMemoryClient. - /// - /// This is defined inside the \c AuthSrv class as it's supposed to be - /// a short term interface until we integrate the in-memory and other - /// data source frameworks. - typedef boost::shared_ptr InMemoryClientPtr; - - /// An immutable shared pointer type for \c InMemoryClient. - typedef boost::shared_ptr - ConstInMemoryClientPtr; - /// Returns the in-memory data source configured for the \c AuthSrv, - /// if any. + /// if any, as a pointer. + /// + /// This is mostly a convenience function around + /// \c getInMemoryClientContainer, which saves the caller the step + /// of having to call getInstance(). + /// The pointer is of course only valid as long as the container + /// exists. 
/// /// The in-memory data source is configured per RR class. However, /// the data source may not be available for all RR classes. @@ -262,24 +260,48 @@ public: /// \param rrclass The RR class of the requested in-memory data source. /// \return A pointer to the in-memory data source, if configured; /// otherwise NULL. - InMemoryClientPtr getInMemoryClient(const isc::dns::RRClass& rrclass); + isc::datasrc::DataSourceClient* getInMemoryClient( + const isc::dns::RRClass& rrclass); + + /// Returns the DataSourceClientContainer of the in-memory datasource + /// + /// \exception InvalidParameter if the given class does not match + /// the one in the memory data source, or if the memory + /// datasource has not been set (callers can check with + /// \c hasMemoryDataSource()) + /// + /// \param rrclass The RR class of the requested in-memory data source. + /// \return A shared pointer to the in-memory data source, if configured; + /// otherwise an empty shared pointer. + isc::datasrc::DataSourceClientContainerPtr getInMemoryClientContainer( + const isc::dns::RRClass& rrclass); + + /// Checks if the in-memory data source has been set. + /// + /// Right now, only one datasource at a time is effectively supported. + /// This is a helper method to check whether it is the in-memory one. + /// This is mostly useful for current testing, and is expected to be + /// removed (or changed in behaviour) soon, when the general + /// multi-data-source framework is completed. + /// + /// \return True if the in-memory datasource has been set. + bool hasInMemoryClient() const; /// Sets or replaces the in-memory data source of the specified RR class. /// - /// As noted in \c getInMemoryClient(), some RR classes may not be - /// supported, in which case an exception of class \c InvalidParameter - /// will be thrown. + /// Some RR classes may not be supported, in which case an exception + /// of class \c InvalidParameter will be thrown. /// This method never throws an exception otherwise. 
/// /// If there is already an in memory data source configured, it will be /// replaced with the newly specified one. - /// \c memory_datasrc can be NULL, in which case it will (re)disable the - /// in-memory data source. + /// \c memory_client can be an empty shared pointer, in which case it + /// will (re)disable the in-memory data source. /// /// \param rrclass The RR class of the in-memory data source to be set. /// \param memory_client A (shared) pointer to \c InMemoryClient to be set. void setInMemoryClient(const isc::dns::RRClass& rrclass, - InMemoryClientPtr memory_client); + isc::datasrc::DataSourceClientContainerPtr memory_client); /// \brief Set the communication session with Statistics. /// diff --git a/src/bin/auth/b10-auth.8 b/src/bin/auth/b10-auth.8 index a5ef4fbc08..accc214af3 100644 --- a/src/bin/auth/b10-auth.8 +++ b/src/bin/auth/b10-auth.8 @@ -2,12 +2,12 @@ .\" Title: b10-auth .\" Author: [FIXME: author] [see http://docbook.sf.net/el/author] .\" Generator: DocBook XSL Stylesheets v1.75.2 -.\" Date: March 28, 2012 +.\" Date: June 20, 2012 .\" Manual: BIND10 .\" Source: BIND10 .\" Language: English .\" -.TH "B10\-AUTH" "8" "March 28, 2012" "BIND10" "BIND10" +.TH "B10\-AUTH" "8" "June 20, 2012" "BIND10" "BIND10" .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- @@ -49,7 +49,7 @@ Do not cache answers in memory\&. The default is to use the cache for faster res .PP \fB\-v\fR .RS 4 -Enabled verbose mode\&. This enables diagnostic messages to STDERR\&. +Enable verbose logging mode\&. This enables logging of diagnostic messages at the maximum debug level\&. .RE .SH "CONFIGURATION AND COMMANDS" .PP @@ -72,9 +72,21 @@ to optionally select the class (it defaults to \fIzones\fR to define the \fIfile\fR -path name and the +path name, \fIorigin\fR -(default domain)\&. By default, this is empty\&. 
+(default domain), and optional +\fIfiletype\fR\&. By default, +\fIzones\fR +is empty\&. For the in\-memory data source (i\&.e\&., the +\fItype\fR +is +\(lqmemory\(rq), the optional +\fIfiletype\fR +configuration item for +\fIzones\fR +can be specified so the in\-memory zone data can be built from another data source that is based on a database backend (in practice with current implementation, it would be an SQLite3 database file for the SQLite3 data source)\&. See the +BIND 10 Guide +for configuration details\&. .if n \{\ .sp .\} @@ -88,7 +100,7 @@ path name and the .ps -1 .br .sp -In this development version, currently this is only used for the memory data source\&. Only the IN class is supported at this time\&. By default, the memory data source is disabled\&. Also, currently the zone file must be canonical such as generated by \fBnamed\-compilezone \-D\fR\&. +Only the IN class is supported at this time\&. By default, the memory data source is disabled\&. Also, currently the zone file must be canonical such as generated by \fBnamed\-compilezone \-D\fR\&. .sp .5v .RE .PP diff --git a/src/bin/auth/b10-auth.xml b/src/bin/auth/b10-auth.xml index 7f3a492dba..44c036f040 100644 --- a/src/bin/auth/b10-auth.xml +++ b/src/bin/auth/b10-auth.xml @@ -20,7 +20,7 @@ - March 28, 2012 + June 20, 2012 @@ -94,8 +94,8 @@ - Enabled verbose mode. This enables diagnostic messages to - STDERR. + Enable verbose logging mode. This enables logging of + diagnostic messages at the maximum debug level. @@ -125,14 +125,21 @@ (it defaults to IN); and zones to define the - file path name and the - origin (default domain). - - By default, this is empty. + file path name, + origin (default domain), and optional + filetype. + By default, zones is empty. 
+ For the in-memory data source (i.e., the type + is memory), the optional filetype + configuration item for zones can be + specified so the in-memory zone data can be built from another + data source that is based on a database backend (in practice + with current implementation, it would be an SQLite3 database + file for the SQLite3 data source). + See the BIND 10 Guide for configuration + details. - In this development version, currently this is only used for the - memory data source. Only the IN class is supported at this time. By default, the memory data source is disabled. Also, currently the zone file must be canonical such as diff --git a/src/bin/auth/benchmarks/query_bench.cc b/src/bin/auth/benchmarks/query_bench.cc index aa238c0886..2e705e4f2b 100644 --- a/src/bin/auth/benchmarks/query_bench.cc +++ b/src/bin/auth/benchmarks/query_bench.cc @@ -31,9 +31,10 @@ #include #include - #include +#include + #include #include #include @@ -48,6 +49,7 @@ using namespace isc::auth; using namespace isc::dns; using namespace isc::log; using namespace isc::util; +using namespace isc::util::unittests; using namespace isc::xfr; using namespace isc::bench; using namespace isc::asiodns; @@ -78,7 +80,7 @@ protected: QueryBenchMark(const bool enable_cache, const BenchQueries& queries, Message& query_message, OutputBuffer& buffer) : - server_(new AuthSrv(enable_cache, xfrout_client)), + server_(new AuthSrv(enable_cache, xfrout_client, ddns_forwarder)), queries_(queries), query_message_(query_message), buffer_(buffer), @@ -103,6 +105,8 @@ public: return (queries_.size()); } +private: + MockSocketSessionForwarder ddns_forwarder; protected: AuthSrvPtr server_; private: diff --git a/src/bin/auth/command.cc b/src/bin/auth/command.cc index 055c73a362..750ea28309 100644 --- a/src/bin/auth/command.cc +++ b/src/bin/auth/command.cc @@ -210,8 +210,8 @@ private: const RRClass zone_class = class_elem ? 
RRClass(class_elem->stringValue()) : RRClass::IN(); - AuthSrv::InMemoryClientPtr datasrc(server. - getInMemoryClient(zone_class)); + isc::datasrc::DataSourceClient* datasrc( + server.getInMemoryClient(zone_class)); if (datasrc == NULL) { isc_throw(AuthCommandError, "Memory data source is disabled"); } @@ -223,13 +223,16 @@ private: const Name origin = Name(origin_elem->stringValue()); // Get the current zone - const InMemoryClient::FindResult result = datasrc->findZone(origin); + const DataSourceClient::FindResult result = datasrc->findZone(origin); if (result.code != result::SUCCESS) { isc_throw(AuthCommandError, "Zone " << origin << " is not found in data source"); } - old_zone_finder_ = boost::dynamic_pointer_cast( + // It would appear that dynamic_cast does not work on all systems; + // it seems to confuse the RTTI system, resulting in NULL return + // values. So we use the more dangerous static_pointer_cast here. + old_zone_finder_ = boost::static_pointer_cast( result.zone_finder); return (true); diff --git a/src/bin/auth/common.cc b/src/bin/auth/common.cc index 1602a1a3a4..2c21895b4a 100644 --- a/src/bin/auth/common.cc +++ b/src/bin/auth/common.cc @@ -33,7 +33,25 @@ getXfroutSocketPath() { if (getenv("BIND10_XFROUT_SOCKET_FILE") != NULL) { return (getenv("BIND10_XFROUT_SOCKET_FILE")); } else { - return (UNIX_SOCKET_FILE); + return (UNIX_XFROUT_SOCKET_FILE); + } + } +} + +string +getDDNSSocketPath() { + if (getenv("B10_FROM_BUILD") != NULL) { + if (getenv("B10_FROM_SOURCE_LOCALSTATEDIR") != NULL) { + return (string(getenv("B10_FROM_SOURCE_LOCALSTATEDIR")) + + "/ddns_socket"); + } else { + return (string(getenv("B10_FROM_BUILD")) + "/ddns_socket"); + } + } else { + if (getenv("BIND10_DDNS_SOCKET_FILE") != NULL) { + return (getenv("BIND10_DDNS_SOCKET_FILE")); + } else { + return (UNIX_DDNS_SOCKET_FILE); } } } diff --git a/src/bin/auth/common.h b/src/bin/auth/common.h index cf71214a5c..9a1942c924 100644 --- a/src/bin/auth/common.h +++ b/src/bin/auth/common.h @@ 
-38,6 +38,20 @@ public: /// The logic should be the same as in b10-xfrout, so they find each other. std::string getXfroutSocketPath(); +/// \brief Get the path of socket to talk to ddns +/// +/// It takes some environment variables into account (B10_FROM_BUILD, +/// B10_FROM_SOURCE_LOCALSTATEDIR and BIND10_DDNS_SOCKET_FILE). It +/// also considers the installation prefix. +/// +/// The logic should be the same as in b10-ddns, so they find each other. +/// +/// Note: eventually we should find a better way so that we don't have to +/// repeat the same magic value (and how to tweak it with some magic +/// environment variable) twice, at which point this function may be able +/// to be deprecated. +std::string getDDNSSocketPath(); + /// \brief The name used when identifieng the process /// /// This is currently b10-auth, but it can be changed easily in one place. diff --git a/src/bin/auth/main.cc b/src/bin/auth/main.cc index fc2f7512dc..cf4f52e492 100644 --- a/src/bin/auth/main.cc +++ b/src/bin/auth/main.cc @@ -28,6 +28,7 @@ #include #include +#include #include #include @@ -60,6 +61,7 @@ using namespace isc::data; using namespace isc::dns; using namespace isc::log; using namespace isc::util; +using namespace isc::util::io; using namespace isc::xfr; namespace { @@ -85,7 +87,7 @@ usage() { cerr << "Usage: b10-auth [-u user] [-nv]" << endl; cerr << "\t-n: do not cache answers in memory" << endl; - cerr << "\t-v: verbose output" << endl; + cerr << "\t-v: verbose logging (debug-level)" << endl; exit(1); } @@ -130,6 +132,7 @@ main(int argc, char* argv[]) { bool statistics_session_established = false; // XXX (see Trac #287) ModuleCCSession* config_session = NULL; XfroutClient xfrout_client(getXfroutSocketPath()); + SocketSessionForwarder ddns_forwarder(getDDNSSocketPath()); try { string specfile; if (getenv("B10_FROM_BUILD")) { @@ -139,7 +142,7 @@ main(int argc, char* argv[]) { specfile = string(AUTH_SPECFILE_LOCATION); } - auth_server = new AuthSrv(cache, xfrout_client); + 
auth_server = new AuthSrv(cache, xfrout_client, ddns_forwarder); LOG_INFO(auth_logger, AUTH_SERVER_CREATED); SimpleCallback* checkin = auth_server->getCheckinProvider(); diff --git a/src/bin/auth/spec_config.h.pre.in b/src/bin/auth/spec_config.h.pre.in index 1b1df19b0e..586ea7c984 100644 --- a/src/bin/auth/spec_config.h.pre.in +++ b/src/bin/auth/spec_config.h.pre.in @@ -13,4 +13,5 @@ // PERFORMANCE OF THIS SOFTWARE. #define AUTH_SPECFILE_LOCATION "@prefix@/share/@PACKAGE@/auth.spec" -#define UNIX_SOCKET_FILE "@@LOCALSTATEDIR@@/@PACKAGE@/auth_xfrout_conn" +#define UNIX_XFROUT_SOCKET_FILE "@@LOCALSTATEDIR@@/@PACKAGE@/auth_xfrout_conn" +#define UNIX_DDNS_SOCKET_FILE "@@LOCALSTATEDIR@@/@PACKAGE@/ddns_socket" diff --git a/src/bin/auth/tests/Makefile.am b/src/bin/auth/tests/Makefile.am index f9fac2fdac..84fd5fae9c 100644 --- a/src/bin/auth/tests/Makefile.am +++ b/src/bin/auth/tests/Makefile.am @@ -19,6 +19,9 @@ endif CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + # Do not define global tests, use check-local so # environment can be set (needed for dynamic loading) TESTS = @@ -44,9 +47,9 @@ run_unittests_SOURCES += statistics_unittest.cc run_unittests_SOURCES += run_unittests.cc # This is a temporary workaround for #1206, where the InMemoryClient has been # moved to an ldopened library. We could add that library to LDADD, but that -# is nonportable. When #1207 is done this becomes moot anyway, and the -# specific workaround is not needed anymore, so we can then remove this -# line again. +# is nonportable. This should've been moot after #1207, but there is still +# one dependency; the in-memory-specific zone loader call is still in +# auth. 
run_unittests_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc @@ -54,9 +57,7 @@ nodist_run_unittests_SOURCES = ../auth_messages.h ../auth_messages.cc run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES) run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS) -run_unittests_LDADD = $(GTEST_LDADD) -run_unittests_LDADD += $(SQLITE_LIBS) -run_unittests_LDADD += $(top_builddir)/src/lib/testutils/libtestutils.la +run_unittests_LDADD = $(top_builddir)/src/lib/testutils/libtestutils.la run_unittests_LDADD += $(top_builddir)/src/lib/datasrc/libdatasrc.la run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la @@ -71,6 +72,8 @@ run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la run_unittests_LDADD += $(top_builddir)/src/lib/statistics/libstatistics.la +run_unittests_LDADD += $(GTEST_LDADD) +run_unittests_LDADD += $(SQLITE_LIBS) check-local: B10_FROM_BUILD=${abs_top_builddir} ./run_unittests diff --git a/src/bin/auth/tests/auth_srv_unittest.cc b/src/bin/auth/tests/auth_srv_unittest.cc index a8815da971..0a41f5d574 100644 --- a/src/bin/auth/tests/auth_srv_unittest.cc +++ b/src/bin/auth/tests/auth_srv_unittest.cc @@ -14,11 +14,7 @@ #include -#include - -#include - -#include +#include #include #include @@ -38,6 +34,7 @@ #include #include +#include #include #include #include @@ -45,10 +42,24 @@ #include #include +#include + +#include +#include +#include + +#include + +#include +#include +#include + using namespace std; using namespace isc::cc; using namespace isc::dns; using namespace isc::util; +using namespace isc::util::io::internal; +using namespace isc::util::unittests; using namespace isc::dns::rdata; using namespace isc::data; using namespace isc::xfr; @@ -57,6 +68,7 @@ using namespace isc::asiolink; using namespace 
isc::testutils; using namespace isc::server_common::portconfig; using isc::UnitTestUtil; +using boost::scoped_ptr; namespace { const char* const CONFIG_TESTDB = @@ -77,7 +89,7 @@ class AuthSrvTest : public SrvTestBase { protected: AuthSrvTest() : dnss_(), - server(true, xfrout), + server(true, xfrout, ddns_forwarder), rrclass(RRClass::IN()), // The empty string is expected value of the parameter of // requestSocket, not the app_name (there's no fallback, it checks @@ -89,6 +101,13 @@ protected: server.setStatisticsSession(&statistics_session); } + ~AuthSrvTest() { + // Clear the message now; depending on the RTTI implementation, + // type information may be lost if the message is cleared + // automatically later, so as a precaution we do it now. + parse_message->clear(Message::PARSE); + } + virtual void processMessage() { // If processMessage has been called before, parse_message needs // to be reset. If it hasn't, there's no harm in doing so @@ -136,9 +155,30 @@ protected: opcode.getCode(), QR_FLAG, 1, 0, 0, 0); } + // Convenient shortcut of creating a simple request and having the + // server process it. 
+ void createAndSendRequest(RRType req_type, Opcode opcode = Opcode::QUERY(), + const Name& req_name = Name("example.com"), + RRClass req_class = RRClass::IN(), + int protocol = IPPROTO_UDP, + const char* const remote_address = + DEFAULT_REMOTE_ADDRESS, + uint16_t remote_port = DEFAULT_REMOTE_PORT) + { + UnitTestUtil::createRequestMessage(request_message, opcode, + default_qid, req_name, + req_class, req_type); + createRequestPacket(request_message, protocol, NULL, + remote_address, remote_port); + parse_message->clear(Message::PARSE); + server.processMessage(*io_message, *parse_message, *response_obuffer, + &dnsserv); + } + MockDNSService dnss_; MockSession statistics_session; MockXfroutClient xfrout; + MockSocketSessionForwarder ddns_forwarder; AuthSrv server; const RRClass rrclass; vector response_data; @@ -246,8 +286,8 @@ TEST_F(AuthSrvTest, iqueryViaDNSServer) { // Unsupported requests. Should result in NOTIMP. TEST_F(AuthSrvTest, unsupportedRequest) { unsupportedRequest(); - // unsupportedRequest tries 14 different opcodes - checkAllRcodeCountersZeroExcept(Rcode::NOTIMP(), 14); + // unsupportedRequest tries 13 different opcodes + checkAllRcodeCountersZeroExcept(Rcode::NOTIMP(), 13); } // Multiple questions. Should result in FORMERR. @@ -830,16 +870,23 @@ TEST_F(AuthSrvTest, updateConfigFail) { QR_FLAG | AA_FLAG, 1, 1, 1, 0); } -TEST_F(AuthSrvTest, updateWithInMemoryClient) { +TEST_F(AuthSrvTest, +#ifdef USE_STATIC_LINK + DISABLED_updateWithInMemoryClient +#else + updateWithInMemoryClient +#endif + ) +{ // Test configuring memory data source. Detailed test cases are covered // in the configuration tests. We only check the AuthSrv interface here. // By default memory data source isn't enabled - EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass)); + EXPECT_FALSE(server.hasInMemoryClient()); updateConfig(&server, "{\"datasources\": [{\"type\": \"memory\"}]}", true); // after successful configuration, we should have one (with empty zoneset). 
- ASSERT_NE(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass)); + EXPECT_TRUE(server.hasInMemoryClient()); EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount()); // The memory data source is empty, should return REFUSED rcode. @@ -851,13 +898,20 @@ TEST_F(AuthSrvTest, updateWithInMemoryClient) { opcode.getCode(), QR_FLAG, 1, 0, 0, 0); } -TEST_F(AuthSrvTest, queryWithInMemoryClientNoDNSSEC) { +TEST_F(AuthSrvTest, +#ifdef USE_STATIC_LINK + DISABLED_queryWithInMemoryClientNoDNSSEC +#else + queryWithInMemoryClientNoDNSSEC +#endif + ) +{ // In this example, we do simple check that query is handled from the // query handler class, and confirm it returns no error and a non empty // answer section. Detailed examination on the response content // for various types of queries are tested in the query tests. updateConfig(&server, CONFIG_INMEMORY_EXAMPLE, true); - ASSERT_NE(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass)); + EXPECT_TRUE(server.hasInMemoryClient()); EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount()); createDataFromFile("nsec3query_nodnssec_fromWire.wire"); @@ -869,12 +923,19 @@ TEST_F(AuthSrvTest, queryWithInMemoryClientNoDNSSEC) { opcode.getCode(), QR_FLAG | AA_FLAG, 1, 1, 2, 1); } -TEST_F(AuthSrvTest, queryWithInMemoryClientDNSSEC) { +TEST_F(AuthSrvTest, +#ifdef USE_STATIC_LINK + DISABLED_queryWithInMemoryClientDNSSEC +#else + queryWithInMemoryClientDNSSEC +#endif + ) +{ // Similar to the previous test, but the query has the DO bit on. // The response should contain RRSIGs, and should have more RRs than // the previous case. 
updateConfig(&server, CONFIG_INMEMORY_EXAMPLE, true); - ASSERT_NE(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass)); + EXPECT_TRUE(server.hasInMemoryClient()); EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount()); createDataFromFile("nsec3query_fromWire.wire"); @@ -886,7 +947,14 @@ TEST_F(AuthSrvTest, queryWithInMemoryClientDNSSEC) { opcode.getCode(), QR_FLAG | AA_FLAG, 1, 2, 3, 3); } -TEST_F(AuthSrvTest, chQueryWithInMemoryClient) { +TEST_F(AuthSrvTest, +#ifdef USE_STATIC_LINK + DISABLED_chQueryWithInMemoryClient +#else + chQueryWithInMemoryClient +#endif + ) +{ // Configure memory data source for class IN updateConfig(&server, "{\"datasources\": " "[{\"class\": \"IN\", \"type\": \"memory\"}]}", true); @@ -1108,7 +1176,7 @@ TEST_F(AuthSrvTest, processNormalQuery_reuseRenderer2) { // namespace { -/// A the possible methods to throw in, either in FakeInMemoryClient or +/// The possible methods to throw in, either in FakeClient or /// FakeZoneFinder enum ThrowWhen { THROW_NEVER, @@ -1132,10 +1200,10 @@ checkThrow(ThrowWhen method, ThrowWhen throw_at, bool isc_exception) { } } -/// \brief proxy class for the ZoneFinder returned by the InMemoryClient -/// proxied by FakeInMemoryClient +/// \brief proxy class for the ZoneFinder returned by the Client +/// proxied by FakeClient /// -/// See the documentation for FakeInMemoryClient for more information, +/// See the documentation for FakeClient for more information, /// all methods simply check whether they should throw, and if not, call /// their proxied equivalent. 
class FakeZoneFinder : public isc::datasrc::ZoneFinder { @@ -1196,11 +1264,6 @@ public: return (real_zone_finder_->findNSEC3(name, recursive)); } - virtual isc::dns::Name - findPreviousName(const isc::dns::Name& query) const { - return (real_zone_finder_->findPreviousName(query)); - } - private: isc::datasrc::ZoneFinderPtr real_zone_finder_; ThrowWhen throw_when_; @@ -1208,15 +1271,15 @@ private: ConstRRsetPtr fake_rrset_; }; -/// \brief Proxy InMemoryClient that can throw exceptions at specified times +/// \brief Proxy FakeClient that can throw exceptions at specified times /// -/// It is based on the memory client since that one is easy to override -/// (with setInMemoryClient) with the current design of AuthSrv. -class FakeInMemoryClient : public isc::datasrc::InMemoryClient { +/// Currently it is used as an 'InMemoryClient' using setInMemoryClient, +/// but it is in effect a general datasource client. +class FakeClient : public isc::datasrc::DataSourceClient { public: /// \brief Create a proxy memory client /// - /// \param real_client The real in-memory client to proxy + /// \param real_client The real (in-memory) client to proxy /// \param throw_when if set to any value other than never, that is /// the method that will throw an exception (either in this /// class or the related FakeZoneFinder) @@ -1224,10 +1287,10 @@ public: /// throw std::exception /// \param fake_rrset If non NULL, it will be used as an answer to /// find() for that name and type. 
- FakeInMemoryClient(AuthSrv::InMemoryClientPtr real_client, - ThrowWhen throw_when, bool isc_exception, - ConstRRsetPtr fake_rrset = ConstRRsetPtr()) : - real_client_(real_client), + FakeClient(isc::datasrc::DataSourceClientContainerPtr real_client, + ThrowWhen throw_when, bool isc_exception, + ConstRRsetPtr fake_rrset = ConstRRsetPtr()) : + real_client_ptr_(real_client), throw_when_(throw_when), isc_exception_(isc_exception), fake_rrset_(fake_rrset) @@ -1242,7 +1305,8 @@ public: virtual FindResult findZone(const isc::dns::Name& name) const { checkThrow(THROW_AT_FIND_ZONE, throw_when_, isc_exception_); - const FindResult result = real_client_->findZone(name); + const FindResult result = + real_client_ptr_->getInstance().findZone(name); return (FindResult(result.code, isc::datasrc::ZoneFinderPtr( new FakeZoneFinder(result.zone_finder, throw_when_, @@ -1250,28 +1314,74 @@ public: fake_rrset_)))); } + isc::datasrc::ZoneUpdaterPtr + getUpdater(const isc::dns::Name&, bool, bool) const { + isc_throw(isc::NotImplemented, + "Update attempt on in fake data source"); + } + std::pair + getJournalReader(const isc::dns::Name&, uint32_t, uint32_t) const { + isc_throw(isc::NotImplemented, "Journaling isn't supported for " + "fake data source"); + } private: - AuthSrv::InMemoryClientPtr real_client_; + const isc::datasrc::DataSourceClientContainerPtr real_client_ptr_; ThrowWhen throw_when_; bool isc_exception_; ConstRRsetPtr fake_rrset_; }; +class FakeContainer : public isc::datasrc::DataSourceClientContainer { +public: + /// \brief Creates a fake container for the given in-memory client + /// + /// The initializer creates a fresh instance of a memory datasource, + /// which is ignored for the rest (but we do not allow 'null' containers + /// atm, and this is only needed in these tests, this may be changed + /// if we generalize the container class a bit more) + /// + /// It will also create a FakeClient, with the given arguments, which + /// is actually used when the instance is 
requested. + FakeContainer(isc::datasrc::DataSourceClientContainerPtr real_client, + ThrowWhen throw_when, bool isc_exception, + ConstRRsetPtr fake_rrset = ConstRRsetPtr()) : + DataSourceClientContainer("memory", + Element::fromJSON("{\"type\": \"memory\"}")), + client_(new FakeClient(real_client, throw_when, isc_exception, + fake_rrset)) + {} + + isc::datasrc::DataSourceClient& getInstance() { + return (*client_); + } + +private: + const boost::scoped_ptr client_; +}; + } // end anonymous namespace for throwing proxy classes // Test for the tests // // Set the proxies to never throw, this should have the same result as // queryWithInMemoryClientNoDNSSEC, and serves to test the two proxy classes -TEST_F(AuthSrvTest, queryWithInMemoryClientProxy) { +TEST_F(AuthSrvTest, +#ifdef USE_STATIC_LINK + DISABLED_queryWithInMemoryClientProxy +#else + queryWithInMemoryClientProxy +#endif + ) +{ // Set real inmem client to proxy updateConfig(&server, CONFIG_INMEMORY_EXAMPLE, true); + EXPECT_TRUE(server.hasInMemoryClient()); - AuthSrv::InMemoryClientPtr fake_client( - new FakeInMemoryClient(server.getInMemoryClient(rrclass), - THROW_NEVER, false)); - ASSERT_NE(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass)); - server.setInMemoryClient(rrclass, fake_client); + isc::datasrc::DataSourceClientContainerPtr fake_client_container( + new FakeContainer(server.getInMemoryClientContainer(rrclass), + THROW_NEVER, false)); + server.setInMemoryClient(rrclass, fake_client_container); createDataFromFile("nsec3query_nodnssec_fromWire.wire"); server.processMessage(*io_message, *parse_message, *response_obuffer, @@ -1297,17 +1407,23 @@ setupThrow(AuthSrv* server, const char *config, ThrowWhen throw_when, // Set it to throw on findZone(), this should result in // SERVFAIL on any exception - AuthSrv::InMemoryClientPtr fake_client( - new FakeInMemoryClient( - server->getInMemoryClient(isc::dns::RRClass::IN()), + isc::datasrc::DataSourceClientContainerPtr fake_client_container( + new 
FakeContainer( + server->getInMemoryClientContainer(isc::dns::RRClass::IN()), throw_when, isc_exception, rrset)); - ASSERT_NE(AuthSrv::InMemoryClientPtr(), - server->getInMemoryClient(isc::dns::RRClass::IN())); - server->setInMemoryClient(isc::dns::RRClass::IN(), fake_client); + ASSERT_TRUE(server->hasInMemoryClient()); + server->setInMemoryClient(isc::dns::RRClass::IN(), fake_client_container); } -TEST_F(AuthSrvTest, queryWithThrowingProxyServfails) { +TEST_F(AuthSrvTest, +#ifdef USE_STATIC_LINK + DISABLED_queryWithThrowingProxyServfails +#else + queryWithThrowingProxyServfails +#endif + ) +{ // Test the common cases, all of which should simply return SERVFAIL // Use THROW_NEVER as end marker ThrowWhen throws[] = { THROW_AT_FIND_ZONE, @@ -1331,7 +1447,14 @@ TEST_F(AuthSrvTest, queryWithThrowingProxyServfails) { // Throw isc::Exception in getClass(). (Currently?) getClass is not called // in the processMessage path, so this should result in a normal answer -TEST_F(AuthSrvTest, queryWithInMemoryClientProxyGetClass) { +TEST_F(AuthSrvTest, +#ifdef USE_STATIC_LINK + DISABLED_queryWithInMemoryClientProxyGetClass +#else + queryWithInMemoryClientProxyGetClass +#endif + ) +{ createDataFromFile("nsec3query_nodnssec_fromWire.wire"); setupThrow(&server, CONFIG_INMEMORY_EXAMPLE, THROW_AT_GET_CLASS, true); @@ -1344,7 +1467,14 @@ TEST_F(AuthSrvTest, queryWithInMemoryClientProxyGetClass) { opcode.getCode(), QR_FLAG | AA_FLAG, 1, 1, 2, 1); } -TEST_F(AuthSrvTest, queryWithThrowingInToWire) { +TEST_F(AuthSrvTest, +#ifdef USE_STATIC_LINK + DISABLED_queryWithThrowingInToWire +#else + queryWithThrowingInToWire +#endif + ) +{ // Set up a faked data source. It will return an empty RRset for the // query. 
ConstRRsetPtr empty_rrset(new RRset(Name("foo.example"), @@ -1385,4 +1515,128 @@ TEST_F(AuthSrvTest, queryWithThrowingInToWire) { opcode.getCode(), QR_FLAG, 1, 0, 0, 0); } +// +// DDNS related tests +// + +// Helper subroutine to check if the given socket address has the expected +// address and port. It depends on specific output of getnameinfo() (while +// there can be multiple textual representation of the same address) but +// in practice it should be reliable. +void +checkAddrPort(const struct sockaddr& actual_sa, + const string& expected_addr, uint16_t expected_port) +{ + char hbuf[NI_MAXHOST], sbuf[NI_MAXSERV]; + const int error = getnameinfo(&actual_sa, getSALength(actual_sa), hbuf, + sizeof(hbuf), sbuf, sizeof(sbuf), + NI_NUMERICHOST | NI_NUMERICSERV); + if (error != 0) { + isc_throw(isc::Unexpected, "getnameinfo failed: " << + gai_strerror(error)); + } + EXPECT_EQ(expected_addr, hbuf); + EXPECT_EQ(boost::lexical_cast(expected_port), sbuf); +} + +TEST_F(AuthSrvTest, DDNSForward) { + EXPECT_FALSE(ddns_forwarder.isConnected()); + + // Repeat sending an update request 4 times, differing some network + // parameters: UDP/IPv4, TCP/IPv4, UDP/IPv6, TCP/IPv6, in this order. + // By doing that we can also confirm the forwarder connection will be + // established exactly once, and kept established. + for (size_t i = 0; i < 4; ++i) { + // Use different names for some different cases + const Name zone_name = Name(i < 2 ? "example.com" : "example.org"); + const socklen_t family = (i < 2) ? AF_INET : AF_INET6; + const char* const remote_addr = + (family == AF_INET) ? "192.0.2.1" : "2001:db8::1"; + const uint16_t remote_port = + (family == AF_INET) ? 53214 : 53216; + const int protocol = ((i % 2) == 0) ? 
IPPROTO_UDP : IPPROTO_TCP; + + createAndSendRequest(RRType::SOA(), Opcode::UPDATE(), zone_name, + RRClass::IN(), protocol, remote_addr, + remote_port); + EXPECT_FALSE(dnsserv.hasAnswer()); + EXPECT_TRUE(ddns_forwarder.isConnected()); + + // Examine the pushed data (note: currently "local end" has a dummy + // value equal to remote) + EXPECT_EQ(family, ddns_forwarder.getPushedFamily()); + const int expected_type = + (protocol == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM; + EXPECT_EQ(expected_type, ddns_forwarder.getPushedType()); + EXPECT_EQ(protocol, ddns_forwarder.getPushedProtocol()); + checkAddrPort(ddns_forwarder.getPushedRemoteend(), + remote_addr, remote_port); + checkAddrPort(ddns_forwarder.getPushedLocalend(), + remote_addr, remote_port); + EXPECT_EQ(io_message->getDataSize(), + ddns_forwarder.getPushedData().size()); + EXPECT_EQ(0, memcmp(io_message->getData(), + &ddns_forwarder.getPushedData()[0], + ddns_forwarder.getPushedData().size())); + } +} + +TEST_F(AuthSrvTest, DDNSForwardConnectFail) { + // make connect attempt fail. It should result in SERVFAIL. Note that + // the question (zone) section should be cleared for opcode of update. + ddns_forwarder.disableConnect(); + createAndSendRequest(RRType::SOA(), Opcode::UPDATE()); + EXPECT_TRUE(dnsserv.hasAnswer()); + headerCheck(*parse_message, default_qid, Rcode::SERVFAIL(), + Opcode::UPDATE().getCode(), QR_FLAG, 0, 0, 0, 0); + EXPECT_FALSE(ddns_forwarder.isConnected()); + + // Now make connect okay again. Despite the previous failure the new + // connection should now be established. + ddns_forwarder.enableConnect(); + createAndSendRequest(RRType::SOA(), Opcode::UPDATE()); + EXPECT_FALSE(dnsserv.hasAnswer()); + EXPECT_TRUE(ddns_forwarder.isConnected()); +} + +TEST_F(AuthSrvTest, DDNSForwardPushFail) { + // Make first request succeed, which will establish the connection. 
+ EXPECT_FALSE(ddns_forwarder.isConnected()); + createAndSendRequest(RRType::SOA(), Opcode::UPDATE()); + EXPECT_TRUE(ddns_forwarder.isConnected()); + + // make connect attempt fail. It should result in SERVFAIL. The + // connection should be closed. Use IPv6 address for varying log output. + ddns_forwarder.disablePush(); + createAndSendRequest(RRType::SOA(), Opcode::UPDATE(), Name("example.com"), + RRClass::IN(), IPPROTO_UDP, "2001:db8::2"); + EXPECT_TRUE(dnsserv.hasAnswer()); + headerCheck(*parse_message, default_qid, Rcode::SERVFAIL(), + Opcode::UPDATE().getCode(), QR_FLAG, 0, 0, 0, 0); + EXPECT_FALSE(ddns_forwarder.isConnected()); + + // Allow push again. Connection will be reopened, and the request will + // be forwarded successfully. + ddns_forwarder.enablePush(); + createAndSendRequest(RRType::SOA(), Opcode::UPDATE()); + EXPECT_FALSE(dnsserv.hasAnswer()); + EXPECT_TRUE(ddns_forwarder.isConnected()); +} + +TEST_F(AuthSrvTest, DDNSForwardClose) { + scoped_ptr tmp_server(new AuthSrv(true, xfrout, ddns_forwarder)); + UnitTestUtil::createRequestMessage(request_message, Opcode::UPDATE(), + default_qid, Name("example.com"), + RRClass::IN(), RRType::SOA()); + createRequestPacket(request_message, IPPROTO_UDP); + tmp_server->processMessage(*io_message, *parse_message, *response_obuffer, + &dnsserv); + EXPECT_FALSE(dnsserv.hasAnswer()); + EXPECT_TRUE(ddns_forwarder.isConnected()); + + // Destroy the server. The forwarder should close the connection. 
+ tmp_server.reset(); + EXPECT_FALSE(ddns_forwarder.isConnected()); +} + } diff --git a/src/bin/auth/tests/command_unittest.cc b/src/bin/auth/tests/command_unittest.cc index bcaf4b115c..ec00d11bec 100644 --- a/src/bin/auth/tests/command_unittest.cc +++ b/src/bin/auth/tests/command_unittest.cc @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -32,6 +33,7 @@ #include +#include #include #include @@ -51,6 +53,7 @@ using namespace isc::dns; using namespace isc::data; using namespace isc::datasrc; using namespace isc::config; +using namespace isc::util::unittests; using namespace isc::testutils; using namespace isc::auth::unittest; @@ -59,7 +62,7 @@ namespace { class AuthCommandTest : public ::testing::Test { protected: AuthCommandTest() : - server_(false, xfrout_), + server_(false, xfrout_, ddns_forwarder_), rcode_(-1), expect_rcode_(0), itimer_(server_.getIOService()) @@ -68,10 +71,11 @@ protected: } void checkAnswer(const int expected_code) { parseAnswer(rcode_, result_); - EXPECT_EQ(expected_code, rcode_); + EXPECT_EQ(expected_code, rcode_) << result_->str(); } MockSession statistics_session_; MockXfroutClient xfrout_; + MockSocketSessionForwarder ddns_forwarder_; AuthSrv server_; ConstElementPtr result_; // The shutdown command parameter @@ -233,7 +237,14 @@ newZoneChecks(AuthSrv& server) { find(Name("ns.test2.example"), RRType::AAAA())->code); } -TEST_F(AuthCommandTest, loadZone) { +TEST_F(AuthCommandTest, +#ifdef USE_STATIC_LINK + DISABLED_loadZone +#else + loadZone +#endif + ) +{ configureZones(server_); ASSERT_EQ(0, system(INSTALL_PROG " -c " TEST_DATA_DIR @@ -250,8 +261,6 @@ TEST_F(AuthCommandTest, loadZone) { newZoneChecks(server_); } -// This test uses dynamic load of a data source module, and won't work when -// statically linked. 
TEST_F(AuthCommandTest, #ifdef USE_STATIC_LINK DISABLED_loadZoneSQLite3 @@ -289,36 +298,57 @@ TEST_F(AuthCommandTest, " }" " ]" " }" - "]}")); + "]," + " \"database_file\": \"" + test_db + "\"" + "}")); module_session.setLocalConfig(map); server_.setConfigSession(&module_session); - // The loadzone command needs the zone to be already loaded, because - // it is used for reloading only - AuthSrv::InMemoryClientPtr dsrc(new InMemoryClient()); - dsrc->addZone(ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(), - Name("example.org")))); - server_.setInMemoryClient(RRClass::IN(), dsrc); + server_.updateConfig(map); + + // Check that the A record at www.example.org does not exist + ASSERT_TRUE(server_.hasInMemoryClient()); + EXPECT_EQ(ZoneFinder::NXDOMAIN, server_.getInMemoryClient(RRClass::IN())-> + findZone(Name("example.org")).zone_finder-> + find(Name("www.example.org"), RRType::A())->code); + + // Add the record to the underlying sqlite database, by loading + // it as a separate datasource, and updating it + ConstElementPtr sql_cfg = Element::fromJSON("{ \"type\": \"sqlite3\"," + "\"database_file\": \"" + + test_db + "\"}"); + DataSourceClientContainer sql_ds("sqlite3", sql_cfg); + ZoneUpdaterPtr sql_updater = + sql_ds.getInstance().getUpdater(Name("example.org"), false); + RRsetPtr rrset(new RRset(Name("www.example.org."), RRClass::IN(), + RRType::A(), RRTTL(60))); + rrset->addRdata(rdata::createRdata(rrset->getType(), + rrset->getClass(), + "192.0.2.1")); + sql_updater->addRRset(*rrset); + sql_updater->commit(); + + // This new record is in the database now, but should not be in the + // memory-datasource yet, so check again + EXPECT_EQ(ZoneFinder::NXDOMAIN, server_.getInMemoryClient(RRClass::IN())-> + findZone(Name("example.org")).zone_finder-> + find(Name("www.example.org"), RRType::A())->code); // Now send the command to reload it result_ = execAuthServerCommand(server_, "loadzone", - Element::fromJSON("{\"origin\": \"example.org\"}")); + Element::fromJSON( + 
"{\"origin\": \"example.org\"}")); checkAnswer(0); - // Get the zone and look if there are data in it (the original one was - // empty) - ASSERT_TRUE(server_.getInMemoryClient(RRClass::IN())); + // And now it should be present too. EXPECT_EQ(ZoneFinder::SUCCESS, server_.getInMemoryClient(RRClass::IN())-> findZone(Name("example.org")).zone_finder-> - find(Name("example.org"), RRType::SOA())->code); + find(Name("www.example.org"), RRType::A())->code); - // Some error cases. First, the zone has no configuration. - dsrc->addZone(ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(), - Name("example.com")))); + // Some error cases. First, the zone has no configuration. (note .com here) result_ = execAuthServerCommand(server_, "loadzone", Element::fromJSON("{\"origin\": \"example.com\"}")); checkAnswer(1); - // The previous zone is not hurt in any way EXPECT_EQ(ZoneFinder::SUCCESS, server_.getInMemoryClient(RRClass::IN())-> findZone(Name("example.org")).zone_finder-> @@ -326,7 +356,8 @@ TEST_F(AuthCommandTest, module_session.setLocalConfig(Element::fromJSON("{\"datasources\": []}")); result_ = execAuthServerCommand(server_, "loadzone", - Element::fromJSON("{\"origin\": \"example.org\"}")); + Element::fromJSON( + "{\"origin\": \"example.org\"}")); checkAnswer(1); // The previous zone is not hurt in any way @@ -373,7 +404,14 @@ TEST_F(AuthCommandTest, find(Name("example.org"), RRType::SOA())->code); } -TEST_F(AuthCommandTest, loadBrokenZone) { +TEST_F(AuthCommandTest, +#ifdef USE_STATIC_LINK + DISABLED_loadBrokenZone +#else + loadBrokenZone +#endif + ) +{ configureZones(server_); ASSERT_EQ(0, system(INSTALL_PROG " -c " TEST_DATA_DIR @@ -386,7 +424,14 @@ TEST_F(AuthCommandTest, loadBrokenZone) { zoneChecks(server_); // zone shouldn't be replaced } -TEST_F(AuthCommandTest, loadUnreadableZone) { +TEST_F(AuthCommandTest, +#ifdef USE_STATIC_LINK + DISABLED_loadUnreadableZone +#else + loadUnreadableZone +#endif + ) +{ configureZones(server_); // install the zone file as unreadable 
@@ -419,7 +464,14 @@ TEST_F(AuthCommandTest, loadSqlite3DataSrc) { checkAnswer(0); } -TEST_F(AuthCommandTest, loadZoneInvalidParams) { +TEST_F(AuthCommandTest, +#ifdef USE_STATIC_LINK + DISABLED_loadZoneInvalidParams +#else + loadZoneInvalidParams +#endif + ) +{ configureZones(server_); // null arg diff --git a/src/bin/auth/tests/common_unittest.cc b/src/bin/auth/tests/common_unittest.cc index 184988d179..b2d072af32 100644 --- a/src/bin/auth/tests/common_unittest.cc +++ b/src/bin/auth/tests/common_unittest.cc @@ -60,37 +60,63 @@ protected: EXPECT_EQ(0, setenv(name.c_str(), value.c_str(), 1)); } } - // Test getXfroutSocketPath under given environment - void testXfrout(const string& fromBuild, const string& localStateDir, - const string& socketFile, const string& expected) + // Test getter functions for a socket file path under given environment + void testSocketPath(const string& fromBuild, const string& localStateDir, + const string& socketFile, const string& env_name, + const string& expected, string (*actual_fn)()) { setEnv("B10_FROM_BUILD", fromBuild); setEnv("B10_FROM_SOURCE_LOCALSTATEDIR", localStateDir); - setEnv("BIND10_XFROUT_SOCKET_FILE", socketFile); - EXPECT_EQ(expected, getXfroutSocketPath()); + setEnv(env_name, socketFile); + EXPECT_EQ(expected, actual_fn()); } }; // Test that when we have no special environment, we get the default from prefix TEST_F(Paths, xfroutNoEnv) { - testXfrout("", "", "", UNIX_SOCKET_FILE); + testSocketPath("", "", "", "BIND10_XFROUT_SOCKET_FILE", + UNIX_XFROUT_SOCKET_FILE, getXfroutSocketPath); +} + +TEST_F(Paths, ddnsNoEnv) { + testSocketPath("", "", "", "BIND10_DDNS_SOCKET_FILE", + UNIX_DDNS_SOCKET_FILE, getDDNSSocketPath); } // Override by B10_FROM_BUILD TEST_F(Paths, xfroutFromBuild) { - testXfrout("/from/build", "", "/wrong/path", - "/from/build/auth_xfrout_conn"); + testSocketPath("/from/build", "", "/wrong/path", + "BIND10_XFROUT_SOCKET_FILE", "/from/build/auth_xfrout_conn", + getXfroutSocketPath); +} + +TEST_F(Paths, 
ddnsFromBuild) { + testSocketPath("/from/build", "", "/wrong/path", "BIND10_DDNS_SOCKET_FILE", + "/from/build/ddns_socket", getDDNSSocketPath); } // Override by B10_FROM_SOURCE_LOCALSTATEDIR TEST_F(Paths, xfroutLocalStatedir) { - testXfrout("/wrong/path", "/state/dir", "/wrong/path", - "/state/dir/auth_xfrout_conn"); + testSocketPath("/wrong/path", "/state/dir", "/wrong/path", + "BIND10_XFROUT_SOCKET_FILE", "/state/dir/auth_xfrout_conn", + getXfroutSocketPath); } -// Override by BIND10_XFROUT_SOCKET_FILE explicitly +TEST_F(Paths, ddnsLocalStatedir) { + testSocketPath("/wrong/path", "/state/dir", "/wrong/path", + "BIND10_DDNS_SOCKET_FILE", "/state/dir/ddns_socket", + getDDNSSocketPath); +} + +// Override by BIND10_xxx_SOCKET_FILE explicitly TEST_F(Paths, xfroutFromEnv) { - testXfrout("", "", "/the/path/to/file", "/the/path/to/file"); + testSocketPath("", "", "/the/path/to/file", "BIND10_XFROUT_SOCKET_FILE", + "/the/path/to/file", getXfroutSocketPath); +} + +TEST_F(Paths, ddnsFromEnv) { + testSocketPath("", "", "/the/path/to/file", "BIND10_DDNS_SOCKET_FILE", + "/the/path/to/file", getDDNSSocketPath); } } diff --git a/src/bin/auth/tests/config_unittest.cc b/src/bin/auth/tests/config_unittest.cc index d471a53c9a..e2d193ab5d 100644 --- a/src/bin/auth/tests/config_unittest.cc +++ b/src/bin/auth/tests/config_unittest.cc @@ -32,6 +32,7 @@ #include "datasrc_util.h" +#include #include #include #include @@ -44,6 +45,7 @@ using namespace isc::data; using namespace isc::datasrc; using namespace isc::asiodns; using namespace isc::auth::unittest; +using namespace isc::util::unittests; using namespace isc::testutils; namespace { @@ -52,7 +54,7 @@ protected: AuthConfigTest() : dnss_(), rrclass(RRClass::IN()), - server(true, xfrout), + server(true, xfrout, ddns_forwarder), // The empty string is expected value of the parameter of // requestSocket, not the app_name (there's no fallback, it checks // the empty string is passed). 
@@ -63,19 +65,27 @@ protected: MockDNSService dnss_; const RRClass rrclass; MockXfroutClient xfrout; + MockSocketSessionForwarder ddns_forwarder; AuthSrv server; isc::server_common::portconfig::AddressList address_store_; private: isc::testutils::TestSocketRequestor sock_requestor_; }; -TEST_F(AuthConfigTest, datasourceConfig) { +TEST_F(AuthConfigTest, +#ifdef USE_STATIC_LINK + DISABLED_datasourceConfig +#else + datasourceConfig +#endif + ) +{ // By default, we don't have any in-memory data source. - EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass)); + EXPECT_FALSE(server.hasInMemoryClient()); configureAuthServer(server, Element::fromJSON( "{\"datasources\": [{\"type\": \"memory\"}]}")); // after successful configuration, we should have one (with empty zoneset). - ASSERT_NE(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass)); + EXPECT_TRUE(server.hasInMemoryClient()); EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount()); } @@ -96,7 +106,7 @@ TEST_F(AuthConfigTest, versionConfig) { } TEST_F(AuthConfigTest, exceptionGuarantee) { - EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass)); + EXPECT_FALSE(server.hasInMemoryClient()); // This configuration contains an invalid item, which will trigger // an exception. 
EXPECT_THROW(configureAuthServer( @@ -106,7 +116,7 @@ TEST_F(AuthConfigTest, exceptionGuarantee) { " \"no_such_config_var\": 1}")), AuthConfigError); // The server state shouldn't change - EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass)); + EXPECT_FALSE(server.hasInMemoryClient()); } TEST_F(AuthConfigTest, exceptionConversion) { @@ -176,25 +186,46 @@ protected: TEST_F(MemoryDatasrcConfigTest, addZeroDataSrc) { parser->build(Element::fromJSON("[]")); parser->commit(); - EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass)); + EXPECT_FALSE(server.hasInMemoryClient()); } -TEST_F(MemoryDatasrcConfigTest, addEmpty) { +TEST_F(MemoryDatasrcConfigTest, +#ifdef USE_STATIC_LINK + DISABLED_addEmpty +#else + addEmpty +#endif + ) +{ // By default, we don't have any in-memory data source. - EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass)); + EXPECT_FALSE(server.hasInMemoryClient()); parser->build(Element::fromJSON("[{\"type\": \"memory\"}]")); parser->commit(); EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount()); } -TEST_F(MemoryDatasrcConfigTest, addZeroZone) { +TEST_F(MemoryDatasrcConfigTest, +#ifdef USE_STATIC_LINK + DISABLED_addZeroZone +#else + addZeroZone +#endif + ) +{ parser->build(Element::fromJSON("[{\"type\": \"memory\"," " \"zones\": []}]")); parser->commit(); EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount()); } -TEST_F(MemoryDatasrcConfigTest, addOneZone) { +TEST_F(MemoryDatasrcConfigTest, +#ifdef USE_STATIC_LINK + DISABLED_addOneZone +#else + addOneZone +#endif + ) +{ EXPECT_NO_THROW(parser->build(Element::fromJSON( "[{\"type\": \"memory\"," " \"zones\": [{\"origin\": \"example.com\"," @@ -245,7 +276,14 @@ TEST_F(MemoryDatasrcConfigTest, DataSourceError); } -TEST_F(MemoryDatasrcConfigTest, addOneWithFiletypeText) { +TEST_F(MemoryDatasrcConfigTest, +#ifdef USE_STATIC_LINK + DISABLED_addOneWithFiletypeText +#else + addOneWithFiletypeText +#endif + ) +{ // Explicitly 
specifying "text" is okay. parser->build(Element::fromJSON( "[{\"type\": \"memory\"," @@ -257,7 +295,14 @@ TEST_F(MemoryDatasrcConfigTest, addOneWithFiletypeText) { EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount()); } -TEST_F(MemoryDatasrcConfigTest, addMultiZones) { +TEST_F(MemoryDatasrcConfigTest, +#ifdef USE_STATIC_LINK + DISABLED_addMultiZones +#else + addMultiZones +#endif + ) +{ EXPECT_NO_THROW(parser->build(Element::fromJSON( "[{\"type\": \"memory\"," " \"zones\": [{\"origin\": \"example.com\"," @@ -273,7 +318,14 @@ TEST_F(MemoryDatasrcConfigTest, addMultiZones) { EXPECT_EQ(3, server.getInMemoryClient(rrclass)->getZoneCount()); } -TEST_F(MemoryDatasrcConfigTest, replace) { +TEST_F(MemoryDatasrcConfigTest, +#ifdef USE_STATIC_LINK + DISABLED_replace +#else + replace +#endif + ) +{ EXPECT_NO_THROW(parser->build(Element::fromJSON( "[{\"type\": \"memory\"," " \"zones\": [{\"origin\": \"example.com\"," @@ -304,7 +356,14 @@ TEST_F(MemoryDatasrcConfigTest, replace) { Name("example.com")).code); } -TEST_F(MemoryDatasrcConfigTest, exception) { +TEST_F(MemoryDatasrcConfigTest, +#ifdef USE_STATIC_LINK + DISABLED_exception +#else + exception +#endif + ) +{ // Load a zone EXPECT_NO_THROW(parser->build(Element::fromJSON( "[{\"type\": \"memory\"," @@ -328,7 +387,8 @@ TEST_F(MemoryDatasrcConfigTest, exception) { "/example.org.zone\"}," " {\"origin\": \"example.net\"," " \"file\": \"" TEST_DATA_DIR - "/nonexistent.zone\"}]}]")), isc::dns::MasterLoadError); + "/nonexistent.zone\"}]}]")), + isc::datasrc::DataSourceError); // As that one throwed exception, it is not expected from us to // commit it @@ -339,7 +399,14 @@ TEST_F(MemoryDatasrcConfigTest, exception) { Name("example.com")).code); } -TEST_F(MemoryDatasrcConfigTest, remove) { +TEST_F(MemoryDatasrcConfigTest, +#ifdef USE_STATIC_LINK + DISABLED_remove +#else + remove +#endif + ) +{ EXPECT_NO_THROW(parser->build(Element::fromJSON( "[{\"type\": \"memory\"," " \"zones\": [{\"origin\": \"example.com\"," @@ 
-352,7 +419,7 @@ TEST_F(MemoryDatasrcConfigTest, remove) { parser = createAuthConfigParser(server, "datasources"); EXPECT_NO_THROW(parser->build(Element::fromJSON("[]"))); EXPECT_NO_THROW(parser->commit()); - EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass)); + EXPECT_FALSE(server.hasInMemoryClient()); } TEST_F(MemoryDatasrcConfigTest, addDuplicateZones) { @@ -365,7 +432,7 @@ TEST_F(MemoryDatasrcConfigTest, addDuplicateZones) { " {\"origin\": \"example.com\"," " \"file\": \"" TEST_DATA_DIR "/example.com.zone\"}]}]")), - AuthConfigError); + DataSourceError); } TEST_F(MemoryDatasrcConfigTest, addBadZone) { @@ -374,35 +441,35 @@ TEST_F(MemoryDatasrcConfigTest, addBadZone) { Element::fromJSON( "[{\"type\": \"memory\"," " \"zones\": [{}]}]")), - AuthConfigError); + DataSourceError); // origin is missing EXPECT_THROW(parser->build( Element::fromJSON( "[{\"type\": \"memory\"," " \"zones\": [{\"file\": \"example.zone\"}]}]")), - AuthConfigError); + DataSourceError); // file is missing EXPECT_THROW(parser->build( Element::fromJSON( "[{\"type\": \"memory\"," " \"zones\": [{\"origin\": \"example.com\"}]}]")), - AuthConfigError); + DataSourceError); // missing zone file EXPECT_THROW(parser->build( Element::fromJSON( "[{\"type\": \"memory\"," " \"zones\": [{\"origin\": \"example.com\"}]}]")), - AuthConfigError); + DataSourceError); // bogus origin name EXPECT_THROW(parser->build(Element::fromJSON( "[{\"type\": \"memory\"," " \"zones\": [{\"origin\": \"example..com\"," " \"file\": \"example.zone\"}]}]")), - AuthConfigError); + DataSourceError); // bogus RR class name EXPECT_THROW(parser->build( @@ -423,7 +490,14 @@ TEST_F(MemoryDatasrcConfigTest, addBadZone) { isc::InvalidParameter); } -TEST_F(MemoryDatasrcConfigTest, badDatasrcType) { +TEST_F(MemoryDatasrcConfigTest, +#ifdef USE_STATIC_LINK + DISABLED_badDatasrcType +#else + badDatasrcType +#endif + ) +{ EXPECT_THROW(parser->build(Element::fromJSON("[{\"type\": \"badsrc\"}]")), AuthConfigError); 
EXPECT_THROW(parser->build(Element::fromJSON("[{\"notype\": \"memory\"}]")), diff --git a/src/bin/auth/tests/query_unittest.cc b/src/bin/auth/tests/query_unittest.cc index 63429aeb49..4c404e6cfb 100644 --- a/src/bin/auth/tests/query_unittest.cc +++ b/src/bin/auth/tests/query_unittest.cc @@ -425,10 +425,6 @@ public: // answers when DNSSEC is required. void setNSEC3Flag(bool on) { use_nsec3_ = on; } - virtual Name findPreviousName(const Name&) const { - isc_throw(isc::NotImplemented, "Mock doesn't support previous name"); - } - // This method allows tests to insert new record in the middle of the test. // // \param record_txt textual RR representation of RR (such as soa_txt, etc) diff --git a/src/bin/bind10/bind10.8 b/src/bin/bind10/bind10.8 index 2dafaab362..9e085e31cd 100644 --- a/src/bin/bind10/bind10.8 +++ b/src/bin/bind10/bind10.8 @@ -2,12 +2,12 @@ .\" Title: bind10 .\" Author: [see the "AUTHORS" section] .\" Generator: DocBook XSL Stylesheets v1.75.2 -.\" Date: March 1, 2012 +.\" Date: April 12, 2012 .\" Manual: BIND10 .\" Source: BIND10 .\" Language: English .\" -.TH "BIND10" "8" "March 1, 2012" "BIND10" "BIND10" +.TH "BIND10" "8" "April 12, 2012" "BIND10" "BIND10" .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- @@ -22,7 +22,7 @@ bind10 \- BIND 10 boss process .SH "SYNOPSIS" .HP \w'\fBbind10\fR\ 'u -\fBbind10\fR [\fB\-c\ \fR\fB\fIconfig\-filename\fR\fR] [\fB\-i\fR] [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fIdata_path\fR\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-w\ \fR\fB\fIwait_time\fR\fR] [\fB\-\-cmdctl\-port\fR\ \fIport\fR] [\fB\-\-config\-file\fR\ \fIconfig\-filename\fR] [\fB\-\-data\-path\fR\ \fIdirectory\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-no\-kill\fR] [\fB\-\-pid\-file\fR\ \fIfilename\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-user\ 
\fR\fB\fIuser\fR\fR] [\fB\-\-verbose\fR] [\fB\-\-wait\ \fR\fB\fIwait_time\fR\fR] +\fBbind10\fR [\fB\-c\ \fR\fB\fIconfig\-filename\fR\fR] [\fB\-i\fR] [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fIdata_path\fR\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-w\ \fR\fB\fIwait_time\fR\fR] [\fB\-\-clear\-config\fR] [\fB\-\-cmdctl\-port\fR\ \fIport\fR] [\fB\-\-config\-file\fR\ \fIconfig\-filename\fR] [\fB\-\-data\-path\fR\ \fIdirectory\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-no\-kill\fR] [\fB\-\-pid\-file\fR\ \fIfilename\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-verbose\fR] [\fB\-\-wait\ \fR\fB\fIwait_time\fR\fR] .SH "DESCRIPTION" .PP The @@ -38,6 +38,13 @@ The configuration filename to use\&. Can be either absolute or relative to data b10\-config\&.db\&. .RE .PP +\fB\-\-clear\-config\fR +.RS 4 +This will create a backup of the existing configuration file, remove it and start +b10\-cfgmgr(8) +with the default configuration\&. The name of the backup file can be found in the logs (\fICFGMGR_BACKED_UP_CONFIG_FILE\fR)\&. (It will append a number to the backup filename if a previous backup file exists\&.) 
+.RE +.PP \fB\-\-cmdctl\-port\fR \fIport\fR .RS 4 The @@ -130,18 +137,6 @@ to manage under .IP \(bu 2.3 .\} -\fI/Boss/components/b10\-auth\fR -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} - \fI/Boss/components/b10\-cmdctl\fR .RE .sp @@ -156,54 +151,6 @@ to manage under \fI/Boss/components/b10\-stats\fR .RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} - -\fI/Boss/components/b10\-stats\-httpd\fR -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} - -\fI/Boss/components/b10\-xfrin\fR -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} - -\fI/Boss/components/b10\-xfrout\fR -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} - -\fI/Boss/components/b10\-zonemgr\fR -.RE .PP (Note that the startup of \fBb10\-sockcreator\fR, diff --git a/src/bin/bind10/bind10.xml b/src/bin/bind10/bind10.xml index 2501fee2bb..40537830ec 100644 --- a/src/bin/bind10/bind10.xml +++ b/src/bin/bind10/bind10.xml @@ -20,7 +20,7 @@ - March 1, 2012 + April 12, 2012 @@ -52,6 +52,7 @@ + port config-filename directory @@ -104,6 +105,25 @@ + + + + + + + This will create a backup of the existing configuration + file, remove it and start + b10-cfgmgr8 + with the default configuration. + The name of the backup file can be found in the logs + (CFGMGR_BACKED_UP_CONFIG_FILE). + (It will append a number to the backup filename if a + previous backup file exists.) + + + + + port diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes index 3dd938feb3..c7515833e8 100644 --- a/src/bin/bind10/bind10_messages.mes +++ b/src/bin/bind10/bind10_messages.mes @@ -20,10 +20,6 @@ The boss process is starting up and will now check if the message bus daemon is already running. If so, it will not be able to start, as it needs a dedicated message bus. 
-% BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified -An error was encountered when the boss module specified -statistics data which is invalid for the boss specification file. - % BIND10_COMPONENT_FAILED component %1 (pid %2) failed: %3 The process terminated, but the bind10 boss didn't expect it to, which means it must have failed. @@ -86,6 +82,10 @@ the boss process will try to force them). A debug message. The configurator is about to perform one task of the plan it is currently executing on the named component. +% BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified +An error was encountered when the boss module specified +statistics data which is invalid for the boss specification file. + % BIND10_INVALID_USER invalid user: %1 The boss process was started with the -u option, to drop root privileges and continue running as the specified user, but the user is unknown. @@ -160,6 +160,11 @@ The boss module is sending a SIGKILL signal to the given process. % BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2) The boss module is sending a SIGTERM signal to the given process. +% BIND10_SETGID setting GID to %1 +The boss switches the process group ID to the given value. This happens +when BIND 10 starts with the -u option, and the group ID will be set to +that of the specified user. + % BIND10_SETUID setting UID to %1 The boss switches the user it runs as to the given UID. @@ -290,4 +295,3 @@ the configuration manager to start up. The total length of time Boss will wait for the configuration manager before reporting an error is set with the command line --wait switch, which has a default value of ten seconds. 
- diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in index 37b845d5e6..b9dbc3656d 100755 --- a/src/bin/bind10/bind10_src.py.in +++ b/src/bin/bind10/bind10_src.py.in @@ -64,6 +64,7 @@ import posix import copy from bind10_config import LIBEXECPATH +import bind10_config import isc.cc import isc.util.process import isc.net.parse @@ -168,8 +169,8 @@ class BoB: def __init__(self, msgq_socket_file=None, data_path=None, config_filename=None, clear_config=False, nocache=False, - verbose=False, nokill=False, setuid=None, username=None, - cmdctl_port=None, wait_time=10): + verbose=False, nokill=False, setuid=None, setgid=None, + username=None, cmdctl_port=None, wait_time=10): """ Initialize the Boss of BIND. This is a singleton (only one can run). @@ -207,6 +208,7 @@ class BoB: self.components_to_restart = [] self.runnable = False self.uid = setuid + self.gid = setgid self.username = username self.verbose = verbose self.nokill = nokill @@ -1122,6 +1124,28 @@ def unlink_pid_file(pid_file): if error.errno is not errno.ENOENT: raise +def remove_lock_files(): + """ + Remove various lock files which were created by code such as in the + logger. This function should be called after BIND 10 shutdown. + """ + + lockfiles = ["logger_lockfile"] + + lpath = bind10_config.DATA_PATH + if "B10_FROM_BUILD" in os.environ: + lpath = os.environ["B10_FROM_BUILD"] + if "B10_FROM_SOURCE_LOCALSTATEDIR" in os.environ: + lpath = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"] + if "B10_LOCKFILE_DIR_FROM_BUILD" in os.environ: + lpath = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] + + for f in lockfiles: + fname = lpath + '/' + f + if os.path.isfile(fname): + os.unlink(fname) + + return def main(): global options @@ -1133,12 +1157,14 @@ def main(): # Check user ID. setuid = None + setgid = None username = None if options.user: # Try getting information about the user, assuming UID passed. 
try: pw_ent = pwd.getpwuid(int(options.user)) setuid = pw_ent.pw_uid + setgid = pw_ent.pw_gid username = pw_ent.pw_name except ValueError: pass @@ -1152,6 +1178,7 @@ def main(): try: pw_ent = pwd.getpwnam(options.user) setuid = pw_ent.pw_uid + setgid = pw_ent.pw_gid username = pw_ent.pw_name except KeyError: pass @@ -1182,7 +1209,7 @@ def main(): boss_of_bind = BoB(options.msgq_socket_file, options.data_path, options.config_file, options.clear_config, options.nocache, options.verbose, options.nokill, - setuid, username, options.cmdctl_port, + setuid, setgid, username, options.cmdctl_port, options.wait_time) startup_result = boss_of_bind.startup() if startup_result: @@ -1201,6 +1228,7 @@ def main(): finally: # Clean up the filesystem unlink_pid_file(options.pid_file) + remove_lock_files() if boss_of_bind is not None: boss_of_bind.remove_socket_srv() sys.exit(boss_of_bind.exitcode) diff --git a/src/bin/bind10/tests/Makefile.am b/src/bin/bind10/tests/Makefile.am index d54ee56be1..a5e3fabc9b 100644 --- a/src/bin/bind10/tests/Makefile.am +++ b/src/bin/bind10/tests/Makefile.am @@ -23,6 +23,7 @@ endif chmod +x $(abs_builddir)/$$pytest ; \ $(LIBRARY_PATH_PLACEHOLDER) \ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \ + B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \ BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \ $(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \ done diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in index 84a9da968d..6ed7411050 100644 --- a/src/bin/bind10/tests/bind10_test.py.in +++ b/src/bin/bind10/tests/bind10_test.py.in @@ -1055,22 +1055,29 @@ class TestPIDFile(unittest.TestCase): # dump PID to the file, and confirm the content is correct dump_pid(self.pid_file) my_pid = os.getpid() - self.assertEqual(my_pid, int(open(self.pid_file, "r").read())) + with open(self.pid_file, "r") as f: + 
self.assertEqual(my_pid, int(f.read())) def test_dump_pid(self): self.check_pid_file() # make sure any existing content will be removed - open(self.pid_file, "w").write('dummy data\n') + with open(self.pid_file, "w") as f: + f.write('dummy data\n') self.check_pid_file() def test_unlink_pid_file_notexist(self): dummy_data = 'dummy_data\n' - open(self.pid_file, "w").write(dummy_data) + + with open(self.pid_file, "w") as f: + f.write(dummy_data) + unlink_pid_file("no_such_pid_file") + # the file specified for unlink_pid_file doesn't exist, # and the original content of the file should be intact. - self.assertEqual(dummy_data, open(self.pid_file, "r").read()) + with open(self.pid_file, "r") as f: + self.assertEqual(dummy_data, f.read()) def test_dump_pid_with_none(self): # Check the behavior of dump_pid() and unlink_pid_file() with None. @@ -1079,9 +1086,14 @@ class TestPIDFile(unittest.TestCase): self.assertFalse(os.path.exists(self.pid_file)) dummy_data = 'dummy_data\n' - open(self.pid_file, "w").write(dummy_data) + + with open(self.pid_file, "w") as f: + f.write(dummy_data) + unlink_pid_file(None) - self.assertEqual(dummy_data, open(self.pid_file, "r").read()) + + with open(self.pid_file, "r") as f: + self.assertEqual(dummy_data, f.read()) def test_dump_pid_failure(self): # the attempt to open file will fail, which should result in exception. 
@@ -1463,6 +1475,41 @@ class SocketSrvTest(unittest.TestCase): self.assertEqual({}, self.__boss._unix_sockets) self.assertTrue(sock.closed) +class TestFunctions(unittest.TestCase): + def setUp(self): + self.lockfile_testpath = \ + "@abs_top_builddir@/src/bin/bind10/tests/lockfile_test" + self.assertFalse(os.path.exists(self.lockfile_testpath)) + os.mkdir(self.lockfile_testpath) + self.assertTrue(os.path.isdir(self.lockfile_testpath)) + + def tearDown(self): + os.rmdir(self.lockfile_testpath) + self.assertFalse(os.path.isdir(self.lockfile_testpath)) + os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] = "@abs_top_builddir@" + + def test_remove_lock_files(self): + os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] = self.lockfile_testpath + + # create lockfiles for the testcase + lockfiles = ["logger_lockfile"] + for f in lockfiles: + fname = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] + '/' + f + self.assertFalse(os.path.exists(fname)) + open(fname, "w").close() + self.assertTrue(os.path.isfile(fname)) + + # first call should clear up all the lockfiles + bind10_src.remove_lock_files() + + # check if the lockfiles exist + for f in lockfiles: + fname = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] + '/' + f + self.assertFalse(os.path.isfile(fname)) + + # second call should not assert anyway + bind10_src.remove_lock_files() + if __name__ == '__main__': # store os.environ for test_unchanged_environment original_os_environ = copy.deepcopy(os.environ) diff --git a/src/bin/bindctl/bindctl.1 b/src/bin/bindctl/bindctl.1 index 97700d6ea2..6aee29c279 100644 --- a/src/bin/bindctl/bindctl.1 +++ b/src/bin/bindctl/bindctl.1 @@ -2,12 +2,12 @@ .\" Title: bindctl .\" Author: [see the "AUTHORS" section] .\" Generator: DocBook XSL Stylesheets v1.75.2 -.\" Date: December 23, 2010 +.\" Date: June 20, 2012 .\" Manual: BIND10 .\" Source: BIND10 .\" Language: English .\" -.TH "BINDCTL" "1" "December 23, 2010" "BIND10" "BIND10" +.TH "BINDCTL" "1" "June 20, 2012" "BIND10" "BIND10" .\" 
----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- @@ -35,7 +35,7 @@ via its interactive command interpreter\&. communicates over a HTTPS REST\-ful interface provided by \fBb10-cmdctl\fR(8)\&. The \fBb10-cfgmgr\fR(8) -daemon stores the configurations and defines the commands\&. +daemon stores the configurations\&. .SH "ARGUMENTS" .PP The arguments are as follows: @@ -91,9 +91,9 @@ Display the version number and exit\&. .SH "AUTHENTICATION" .PP The tool will authenticate using a username and password\&. On the first successful login, it will save the details to a comma\-separated\-value (CSV) file which will be used for later uses of -\fBbindctl\fR\&. The file name is -default_user\&.csv -located under the directory specified by the \-\-csv\-file\-dir option\&. +\fBbindctl\fR\&. The file name is "default_user\&.csv" located under the directory specified by the +\fB\-\-csv\-file\-dir\fR +option\&. .SH "USAGE" .PP The @@ -115,8 +115,7 @@ keyword to receive usage assistance for a module or a module\'s command\&. The \fBquit\fR command is used to exit -\fBbindctl\fR -(and doesn\'t stop the BIND 10 services)\&. +\fBbindctl\fR\&. (It doesn\'t stop the BIND 10 services\&.) .PP The following module is available by default: \fBconfig\fR diff --git a/src/bin/bindctl/bindctl.xml b/src/bin/bindctl/bindctl.xml index eff1de2c18..3993739169 100644 --- a/src/bin/bindctl/bindctl.xml +++ b/src/bin/bindctl/bindctl.xml @@ -2,7 +2,7 @@ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" []> @@ -208,8 +209,8 @@ The quit command is used to exit - bindctl - (and doesn't stop the BIND 10 services). + bindctl. + (It doesn't stop the BIND 10 services.) 
diff --git a/src/bin/bindctl/command_sets.py b/src/bin/bindctl/command_sets.py index 9e2c2efd47..c001ec8ca5 100644 --- a/src/bin/bindctl/command_sets.py +++ b/src/bin/bindctl/command_sets.py @@ -92,4 +92,3 @@ def prepare_execute_commands(tool): module.add_command(cmd) tool.add_module_info(module) - diff --git a/src/bin/bindctl/tests/bindctl_test.py b/src/bin/bindctl/tests/bindctl_test.py index 1ddb9163d4..bcfb6c594d 100644 --- a/src/bin/bindctl/tests/bindctl_test.py +++ b/src/bin/bindctl/tests/bindctl_test.py @@ -425,6 +425,12 @@ class FakeBindCmdInterpreter(bindcmd.BindCmdInterpreter): class TestBindCmdInterpreter(unittest.TestCase): + def setUp(self): + self.old_stdout = sys.stdout + + def tearDown(self): + sys.stdout = self.old_stdout + def _create_invalid_csv_file(self, csvfilename): import csv csvfile = open(csvfilename, 'w') @@ -447,19 +453,17 @@ class TestBindCmdInterpreter(unittest.TestCase): self.assertEqual(new_csv_dir, custom_cmd.csv_file_dir) def test_get_saved_user_info(self): - old_stdout = sys.stdout - sys.stdout = open(os.devnull, 'w') - cmd = bindcmd.BindCmdInterpreter() - users = cmd._get_saved_user_info('/notexist', 'csv_file.csv') - self.assertEqual([], users) - - csvfilename = 'csv_file.csv' - self._create_invalid_csv_file(csvfilename) - users = cmd._get_saved_user_info('./', csvfilename) - self.assertEqual([], users) - os.remove(csvfilename) - sys.stdout = old_stdout + with open(os.devnull, 'w') as f: + sys.stdout = f + cmd = bindcmd.BindCmdInterpreter() + users = cmd._get_saved_user_info('/notexist', 'csv_file.csv') + self.assertEqual([], users) + csvfilename = 'csv_file.csv' + self._create_invalid_csv_file(csvfilename) + users = cmd._get_saved_user_info('./', csvfilename) + self.assertEqual([], users) + os.remove(csvfilename) class TestCommandLineOptions(unittest.TestCase): def setUp(self): diff --git a/src/bin/cfgmgr/b10-cfgmgr.8 b/src/bin/cfgmgr/b10-cfgmgr.8 index 719f4c67cb..e8ec567cc0 100644 --- a/src/bin/cfgmgr/b10-cfgmgr.8 +++ 
b/src/bin/cfgmgr/b10-cfgmgr.8 @@ -2,12 +2,12 @@ .\" Title: b10-cfgmgr .\" Author: [FIXME: author] [see http://docbook.sf.net/el/author] .\" Generator: DocBook XSL Stylesheets v1.75.2 -.\" Date: March 10, 2010 +.\" Date: June 20, 2012 .\" Manual: BIND10 .\" Source: BIND10 .\" Language: English .\" -.TH "B10\-CFGMGR" "8" "March 10, 2010" "BIND10" "BIND10" +.TH "B10\-CFGMGR" "8" "June 20, 2012" "BIND10" "BIND10" .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- @@ -22,7 +22,7 @@ b10-cfgmgr \- Configuration manager .SH "SYNOPSIS" .HP \w'\fBb10\-cfgmgr\fR\ 'u -\fBb10\-cfgmgr\fR [\fB\-c\fR\fB\fIconfig\-filename\fR\fR] [\fB\-p\fR\fB\fIdata_path\fR\fR] +\fBb10\-cfgmgr\fR [\fB\-c\ \fR\fB\fIconfig\-filename\fR\fR] [\fB\-p\ \fR\fB\fIdata_path\fR\fR] [\fB\-\-clear\-config\fR] [\fB\-\-config\-filename\ \fR\fB\fIconfig\-filename\fR\fR] [\fB\-\-data\-path\ \fR\fB\fIdata_path\fR\fR] .SH "DESCRIPTION" .PP The @@ -42,22 +42,23 @@ C\-Channel connection\&. If this connection is not established, will exit\&. .PP The daemon may be cleanly stopped by sending the SIGTERM signal to the process\&. This shutdown does not notify the subscribers\&. -.PP -When it exits, it saves its current configuration to -/usr/local/var/bind10\-devel/b10\-config\&.db\&. - .SH "ARGUMENTS" .PP The arguments are as follows: .PP -\fB\-c\fR\fIconfig\-filename\fR, \fB\-\-config\-filename\fR \fIconfig\-filename\fR +\fB\-\-clear\-config\fR .RS 4 -The configuration database filename to use\&. Can be either absolute or relative to data path\&. -.sp -Defaults to b10\-config\&.db +This will create a backup of the existing configuration file, remove it, and +b10\-cfgmgr(8) +will use the default configurations\&. The name of the backup file can be found in the logs (\fICFGMGR_BACKED_UP_CONFIG_FILE\fR)\&. (It will append a number to the backup filename if a previous backup file exists\&.) 
.RE .PP -\fB\-p\fR\fIdata\-path\fR, \fB\-\-data\-path\fR \fIdata\-path\fR +\fB\-c\fR \fIconfig\-filename\fR, \fB\-\-config\-filename\fR \fIconfig\-filename\fR +.RS 4 +The configuration database filename to use\&. Can be either absolute or relative to data path\&. It defaults to "b10\-config\&.db"\&. +.RE +.PP +\fB\-p\fR \fIdata\-path\fR, \fB\-\-data\-path\fR \fIdata\-path\fR .RS 4 The path where BIND 10 looks for files\&. The configuration file is looked for here, if it is relative\&. If it is absolute, the path is ignored\&. .RE diff --git a/src/bin/cfgmgr/b10-cfgmgr.py.in b/src/bin/cfgmgr/b10-cfgmgr.py.in index 760b6d8e97..f1d0308adf 100755 --- a/src/bin/cfgmgr/b10-cfgmgr.py.in +++ b/src/bin/cfgmgr/b10-cfgmgr.py.in @@ -44,11 +44,11 @@ def parse_options(args=sys.argv[1:], Parser=OptionParser): parser = Parser() parser.add_option("-p", "--data-path", dest="data_path", help="Directory to search for configuration files " + - "(default=" + DATA_PATH + ")", default=DATA_PATH) + "(default=" + DATA_PATH + ")", default=None) parser.add_option("-c", "--config-filename", dest="config_file", help="Configuration database filename " + "(default=" + DEFAULT_CONFIG_FILE + ")", - default=DEFAULT_CONFIG_FILE) + default=None) parser.add_option("--clear-config", action="store_true", dest="clear_config", default=False, help="Back up the configuration file and start with " + @@ -85,12 +85,37 @@ def load_plugins(path, cm): # Restore the search path sys.path = sys.path[1:] + +def determine_path_and_file(data_path_option, config_file_option): + """Given the data path and config file as specified on the command line + (or not specified, as may be the case), determine the full path and + file to use when starting the config manager; + - if neither are given, use defaults + - if both are given, use both + - if only data path is given, use default file in that path + - if only file is given, use cwd() + file (if file happens to + be an absolute file name, path will be ignored) + Arguments 
are either a string, or None. + Returns a tuple containing (result_path, result_file). + """ + data_path = data_path_option + config_file = config_file_option + if config_file is None: + config_file = DEFAULT_CONFIG_FILE + if data_path is None: + data_path = DATA_PATH + else: + if data_path is None: + data_path = os.getcwd() + return (data_path, config_file) + def main(): options = parse_options() global cm try: - cm = ConfigManager(options.data_path, options.config_file, - None, options.clear_config) + (data_path, config_file) = determine_path_and_file(options.data_path, + options.config_file) + cm = ConfigManager(data_path, config_file, None, options.clear_config) signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGTERM, signal_handler) cm.read_config() diff --git a/src/bin/cfgmgr/b10-cfgmgr.xml b/src/bin/cfgmgr/b10-cfgmgr.xml index 785a05841f..ff5706c9cd 100644 --- a/src/bin/cfgmgr/b10-cfgmgr.xml +++ b/src/bin/cfgmgr/b10-cfgmgr.xml @@ -20,7 +20,7 @@ - March 10, 2010 + June 20, 2012 @@ -44,8 +44,11 @@ b10-cfgmgr - - + + + + + @@ -77,13 +80,6 @@ subscribers. - - When it exits, it saves its current configuration to - /usr/local/var/bind10-devel/b10-config.db. - - - - @@ -95,19 +91,37 @@ - config-filename, - config-filename + - The configuration database filename to use. Can be either - absolute or relative to data path. - Defaults to b10-config.db + + This will create a backup of the existing configuration + file, remove it, and + b10-cfgmgr8 + will use the default configurations. + The name of the backup file can be found in the logs + (CFGMGR_BACKED_UP_CONFIG_FILE). + (It will append a number to the backup filename if a + previous backup file exists.) + - data-path, + config-filename, + config-filename + + + The configuration database filename to use. Can be either + absolute or relative to data path. + It defaults to "b10-config.db". 
+ + + + + + data-path, data-path diff --git a/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in b/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in index ca91c9c79f..351e8bf964 100644 --- a/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in +++ b/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in @@ -141,8 +141,8 @@ class TestParseArgs(unittest.TestCase): # Pass it empty array, not our arguments b = __import__("b10-cfgmgr") parsed = b.parse_options([], TestOptParser) - self.assertEqual(b.DATA_PATH, parsed.data_path) - self.assertEqual(b.DEFAULT_CONFIG_FILE, parsed.config_file) + self.assertEqual(None, parsed.data_path) + self.assertEqual(None, parsed.config_file) def test_wrong_args(self): """ @@ -168,10 +168,10 @@ class TestParseArgs(unittest.TestCase): b = __import__("b10-cfgmgr") parsed = b.parse_options(['--data-path=/path'], TestOptParser) self.assertEqual('/path', parsed.data_path) - self.assertEqual(b.DEFAULT_CONFIG_FILE, parsed.config_file) + self.assertEqual(None, parsed.config_file) parsed = b.parse_options(['-p', '/path'], TestOptParser) self.assertEqual('/path', parsed.data_path) - self.assertEqual(b.DEFAULT_CONFIG_FILE, parsed.config_file) + self.assertEqual(None, parsed.config_file) self.assertRaises(OptsError, b.parse_options, ['-p'], TestOptParser) self.assertRaises(OptsError, b.parse_options, ['--data-path'], TestOptParser) @@ -183,22 +183,32 @@ class TestParseArgs(unittest.TestCase): b = __import__("b10-cfgmgr") parsed = b.parse_options(['--config-filename=filename'], TestOptParser) - self.assertEqual(b.DATA_PATH, parsed.data_path) + self.assertEqual(None, parsed.data_path) self.assertEqual("filename", parsed.config_file) parsed = b.parse_options(['-c', 'filename'], TestOptParser) - self.assertEqual(b.DATA_PATH, parsed.data_path) + self.assertEqual(None, parsed.data_path) self.assertEqual("filename", parsed.config_file) self.assertRaises(OptsError, b.parse_options, ['-c'], TestOptParser) self.assertRaises(OptsError, b.parse_options, ['--config-filename'], TestOptParser) + def 
test_determine_path_and_file(self): + b = __import__("b10-cfgmgr") + self.assertEqual((b.DATA_PATH, b.DEFAULT_CONFIG_FILE), + b.determine_path_and_file(None, None)) + self.assertEqual(("/foo", b.DEFAULT_CONFIG_FILE), + b.determine_path_and_file("/foo", None)) + self.assertEqual((os.getcwd(), "file.config"), + b.determine_path_and_file(None, "file.config")) + self.assertEqual(("/foo", "bar"), + b.determine_path_and_file("/foo", "bar")) + def test_clear_config(self): b = __import__("b10-cfgmgr") parsed = b.parse_options([], TestOptParser) self.assertFalse(parsed.clear_config) parsed = b.parse_options(['--clear-config'], TestOptParser) self.assertTrue(parsed.clear_config) - if __name__ == '__main__': unittest.main() diff --git a/src/bin/cmdctl/tests/Makefile.am b/src/bin/cmdctl/tests/Makefile.am index 89d89ea64d..b5b65f6b33 100644 --- a/src/bin/cmdctl/tests/Makefile.am +++ b/src/bin/cmdctl/tests/Makefile.am @@ -22,5 +22,6 @@ endif PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cmdctl \ CMDCTL_SPEC_PATH=$(abs_top_builddir)/src/bin/cmdctl \ CMDCTL_SRC_PATH=$(abs_top_srcdir)/src/bin/cmdctl \ + B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \ done diff --git a/src/bin/cmdctl/tests/cmdctl_test.py b/src/bin/cmdctl/tests/cmdctl_test.py index 5fdabb4852..856adf1c79 100644 --- a/src/bin/cmdctl/tests/cmdctl_test.py +++ b/src/bin/cmdctl/tests/cmdctl_test.py @@ -84,6 +84,7 @@ class TestSecureHTTPRequestHandler(unittest.TestCase): self.handler.rfile = open("check.tmp", 'w+b') def tearDown(self): + sys.stdout.close() sys.stdout = self.old_stdout self.handler.rfile.close() os.remove('check.tmp') @@ -306,6 +307,7 @@ class TestCommandControl(unittest.TestCase): self.cmdctl = MyCommandControl(None, True) def tearDown(self): + sys.stdout.close() sys.stdout = self.old_stdout def _check_config(self, cmdctl): @@ -427,6 +429,9 @@ class TestSecureHTTPServer(unittest.TestCase): MyCommandControl, verbose=True) def 
tearDown(self): + # both sys.stdout and sys.stderr are the same, so closing one is + # sufficient + sys.stdout.close() sys.stdout = self.old_stdout sys.stderr = self.old_stderr diff --git a/src/bin/dbutil/b10-dbutil.8 b/src/bin/dbutil/b10-dbutil.8 index 437a69d1ec..cb43fa382d 100644 --- a/src/bin/dbutil/b10-dbutil.8 +++ b/src/bin/dbutil/b10-dbutil.8 @@ -2,21 +2,12 @@ .\" Title: b10-dbutil .\" Author: [FIXME: author] [see http://docbook.sf.net/el/author] .\" Generator: DocBook XSL Stylesheets v1.75.2 -.\" Date: March 20, 2012 +.\" Date: June 20, 2012 .\" Manual: BIND10 .\" Source: BIND10 .\" Language: English .\" -.TH "B10\-DBUTIL" "8" "March 20, 2012" "BIND10" "BIND10" -.\" ----------------------------------------------------------------- -.\" * Define some portability stuff -.\" ----------------------------------------------------------------- -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.\" http://bugs.debian.org/507673 -.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.ie \n(.g .ds Aq \(aq -.el .ds Aq ' +.TH "B10\-DBUTIL" "8" "June 20, 2012" "BIND10" "BIND10" .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- @@ -38,13 +29,14 @@ b10-dbutil \- Zone Database Maintenance Utility .PP The \fBb10\-dbutil\fR -utility is a general administration utility for SQL databases\&. (Currently only SQLite is supported by BIND 10\&.) It can report the current verion of the schema, and upgrade an existing database to the latest version of the schema\&. +utility is a general administration utility for SQL databases for BIND 10\&. (Currently only SQLite is supported by BIND 10\&.) It can report the current verion of the schema, and upgrade an existing database to the latest version of the schema\&. 
.PP \fBb10\-dbutil\fR -operates in one of two modes, check mode or upgrade mode\&. +operates in one of two modesr: check mode or upgrade mode\&. .PP -In check mode (\fBb10\-dbutil \-\-check\fR), the utility reads the version of the database schema from the database and prints it\&. It will tell you whether the schema is at the latest version supported by BIND 10\&. Exit status is 0 if the schema is at the correct version, 1 if the schema is at an older version, 2 if the schema is at a version not yet supported by this version of b10\-dbutil\&. Any higher value indicates an error during command\-line parsing or execution\&. +In check mode (\fBb10\-dbutil \-\-check\fR), the utility reads the version of the database schema from the database and prints it\&. It will tell you whether the schema is at the latest version supported by BIND 10\&. Exit status is 0 if the schema is at the correct version, 1 if the schema is at an older version, or 2 if the schema is at a version not yet supported by this version of +\fBb10\-dbutil\fR\&. Any higher value indicates an error during command\-line parsing or execution\&. .PP When the upgrade function is selected (\fBb10\-dbutil \-\-upgrade\fR), the utility takes a copy of the database, then upgrades it to the latest version of the schema\&. The contents of the database remain intact\&. (The backup file is a file in the same directory as the database file\&. It has the same name, with "\&.backup" appended to it\&. If a file of that name already exists, the file will have the suffix "\&.backup\-1"\&. If that exists, the file will be suffixed "\&.backup\-2", and so on)\&. Exit status is 0 if the upgrade is either succesful or aborted by the user, and non\-zero if there is an error\&. .PP @@ -57,24 +49,29 @@ The arguments are as follows: .PP \fB\-\-check\fR .RS 4 -Selects the version check function, which reports the current version of the database\&. This is incompatible with the \-\-upgrade option\&. 
+Selects the version check function, which reports the current version of the database\&. This is mutually exclusive with the +\fB\-\-upgrade\fR +option\&. .RE .PP \fB\-\-noconfirm\fR .RS 4 -Only valid with \-\-upgrade, this disables the prompt\&. Normally the utility will print a warning that an upgrade is about to take place and request that you type "Yes" to continue\&. If this switch is given on the command line, no prompt will be issued: the utility will just perform the upgrade\&. +Only valid with +\fB\-\-upgrade\fR, this disables the prompt\&. Normally the utility will print a warning that an upgrade is about to take place and request that you type "Yes" to continue\&. If this switch is given on the command line, no prompt will be issued and the utility will just perform the upgrade\&. .RE .PP \fB\-\-upgrade\fR .RS 4 -Selects the upgrade function, which upgrades the database to the latest version of the schema\&. This is incompatible with the \-\-upgrade option\&. +Selects the upgrade function, which upgrades the database to the latest version of the schema\&. This is mutually exclusive with the +\fB\-\-check\fR +option\&. .sp -The upgrade function will upgrade a BIND 10 database \- no matter how old the schema \- preserving all data\&. A backup file is created before the upgrade (with the same name as the database, but with "\&.backup" suffixed to it)\&. If the upgrade fails, this file can be copied back to restore the original database\&. +The upgrade function will upgrade a BIND 10 database \(em no matter how old the schema \(em preserving all data\&. A backup file is created before the upgrade (with the same name as the database, but with "\&.backup" suffixed to it)\&. If the upgrade fails, this file can be copied back to restore the original database\&. .RE .PP \fB\-\-verbose\fR .RS 4 -Enable verbose mode\&. Each SQL command issued by the utility will be printed to stderr before it is executed\&. +Enable verbose mode\&. 
Each SQL command issued by the utility will be printed to STDERR before it is executed\&. .RE .PP \fB\-\-quiet\fR @@ -84,7 +81,7 @@ Enable quiet mode\&. No output is printed, except errors during command\-line ar .PP \fB\fIdbfile\fR\fR .RS 4 -Name of the database file to check of upgrade\&. +Name of the database file to check or upgrade\&. .RE .SH "COPYRIGHT" .br diff --git a/src/bin/dbutil/b10-dbutil.xml b/src/bin/dbutil/b10-dbutil.xml index c1c0dee1ca..752b8a8504 100644 --- a/src/bin/dbutil/b10-dbutil.xml +++ b/src/bin/dbutil/b10-dbutil.xml @@ -20,7 +20,7 @@ - March 20, 2012 + June 20, 2012 @@ -60,14 +60,15 @@ DESCRIPTION - The b10-dbutil utility is a general administration - utility for SQL databases. (Currently only SQLite is supported by - BIND 10.) It can report the current verion of the schema, and upgrade - an existing database to the latest version of the schema. + The b10-dbutil utility is a general + administration utility for SQL databases for BIND 10. (Currently + only SQLite is supported by BIND 10.) It can report the + current verion of the schema, and upgrade an existing database + to the latest version of the schema. - b10-dbutil operates in one of two modes, check mode + b10-dbutil operates in one of two modesr: check mode or upgrade mode. @@ -76,9 +77,10 @@ utility reads the version of the database schema from the database and prints it. It will tell you whether the schema is at the latest version supported by BIND 10. Exit status is 0 if the schema is at - the correct version, 1 if the schema is at an older version, 2 if + the correct version, 1 if the schema is at an older version, or 2 if the schema is at a version not yet supported by this version of - b10-dbutil. Any higher value indicates an error during command-line + b10-dbutil. + Any higher value indicates an error during command-line parsing or execution. @@ -115,8 +117,8 @@ Selects the version check function, which reports the - current version of the database. 
This is incompatible - with the --upgrade option. + current version of the database. This is mutually exclusive + with the option. @@ -126,11 +128,12 @@ - Only valid with --upgrade, this disables the prompt. + Only valid with , this disables + the prompt. Normally the utility will print a warning that an upgrade is about to take place and request that you type "Yes" to continue. If this switch is given on the command line, no prompt will - be issued: the utility will just perform the upgrade. + be issued and the utility will just perform the upgrade. @@ -141,15 +144,16 @@ Selects the upgrade function, which upgrades the database - to the latest version of the schema. This is incompatible - with the --upgrade option. + to the latest version of the schema. This is mutually exclusive + with the option. - The upgrade function will upgrade a BIND 10 database - no matter how - old the schema - preserving all data. A backup file is created - before the upgrade (with the same name as the database, but with - ".backup" suffixed to it). If the upgrade fails, this file can - be copied back to restore the original database. + The upgrade function will upgrade a BIND 10 database — + no matter how old the schema — preserving all data. + A backup file is created before the upgrade (with the + same name as the database, but with ".backup" suffixed + to it). If the upgrade fails, this file can be copied + back to restore the original database. @@ -160,7 +164,7 @@ Enable verbose mode. Each SQL command issued by the - utility will be printed to stderr before it is executed. + utility will be printed to STDERR before it is executed. @@ -181,7 +185,7 @@ - Name of the database file to check of upgrade. + Name of the database file to check or upgrade. 
diff --git a/src/bin/dbutil/dbutil.py.in b/src/bin/dbutil/dbutil.py.in index 81f351e7ad..4b76a5680e 100755 --- a/src/bin/dbutil/dbutil.py.in +++ b/src/bin/dbutil/dbutil.py.in @@ -196,7 +196,7 @@ UPGRADES = [ } # To extend this, leave the above statements in place and add another -# dictionary to the list. The "from" version should be (2, 0), the "to" +# dictionary to the list. The "from" version should be (2, 0), the "to" # version whatever the version the update is to, and the SQL statements are # the statements required to perform the upgrade. This way, the upgrade # program will be able to upgrade both a V1.0 and a V2.0 database. diff --git a/src/bin/dbutil/tests/Makefile.am b/src/bin/dbutil/tests/Makefile.am index c03b262aa2..b4231b3fea 100644 --- a/src/bin/dbutil/tests/Makefile.am +++ b/src/bin/dbutil/tests/Makefile.am @@ -3,4 +3,5 @@ SUBDIRS = . testdata # Tests of the update script. check-local: + B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \ $(SHELL) $(abs_builddir)/dbutil_test.sh diff --git a/src/bin/ddns/b10-ddns.8 b/src/bin/ddns/b10-ddns.8 index 131b6ccc37..95d82f4555 100644 --- a/src/bin/ddns/b10-ddns.8 +++ b/src/bin/ddns/b10-ddns.8 @@ -2,12 +2,12 @@ .\" Title: b10-ddns .\" Author: [FIXME: author] [see http://docbook.sf.net/el/author] .\" Generator: DocBook XSL Stylesheets v1.75.2 -.\" Date: February 28, 2012 +.\" Date: June 18, 2012 .\" Manual: BIND10 .\" Source: BIND10 .\" Language: English .\" -.TH "B10\-DDNS" "8" "February 28, 2012" "BIND10" "BIND10" +.TH "B10\-DDNS" "8" "June 18, 2012" "BIND10" "BIND10" .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- @@ -29,33 +29,29 @@ The \fBb10\-ddns\fR daemon provides the BIND 10 Dynamic Update (DDNS) service, as specified in RFC 2136\&. Normally it is started by the \fBbind10\fR(8) -boss process\&. 
When the -\fBb10\-auth\fR -DNS server receives a DDNS update, -\fBb10\-ddns\fR -updates the zone in the BIND 10 zone data store\&. -.if n \{\ -.sp -.\} -.RS 4 -.it 1 an-trap -.nr an-no-space-flag 1 -.nr an-break-flag 1 -.br -.ps +1 -\fBNote\fR -.ps -1 -.br +boss process\&. .PP -Currently installed is a dummy component\&. It does not provide any functionality\&. It is a skeleton implementation that will be expanded later\&. -.sp .5v -.RE +When the +\fBb10\-auth\fR +authoritative DNS server receives an UPDATE request, it internally forwards the request to +\fBb10\-ddns\fR, which handles the rest of the request processing\&. When the processing is completed +\fBb10\-ddns\fR +will send a response to the client with the RCODE set to the value as specified in RFC 2136\&. If the zone has been changed as a result, it will internally notify +\fBb10\-auth\fR +and +\fBb10\-xfrout\fR +so the new version of the zone will be served, and other secondary servers will be notified via the DNS notify protocol\&. .PP This daemon communicates with BIND 10 over a \fBb10-msgq\fR(8) C\-Channel connection\&. If this connection is not established, \fBb10\-ddns\fR -will exit\&. +will exit\&. The +\fBb10\-ddns\fR +daemon also depends on some other BIND 10 components (either directly or indirectly): +\fBb10-auth\fR(8), +\fBb10-xfrout\fR(8), and +\fBb10-zonemgr\fR(8)\&. .PP \fBb10\-ddns\fR @@ -65,9 +61,16 @@ receives its configurations from .PP The arguments are as follows: .PP +\fB\-h\fR, \fB\-\-help\fR +.RS 4 +Print the command line arguments and exit\&. +.RE +.PP \fB\-v\fR, \fB\-\-verbose\fR .RS 4 -This value is ignored at this moment, but is provided for compatibility with the bind10 Boss process +This value is ignored at this moment, but is provided for compatibility with the +\fBbind10\fR +Boss process\&. .RE .SH "CONFIGURATION AND COMMANDS" .PP @@ -75,7 +78,13 @@ The configurable settings are: .PP \fIzones\fR -The zones option is a named set of zones that can be updated with DDNS\&. 
Each entry has one element called update_acl, which is a list of access control rules that define update permissions\&. By default this is empty; DDNS must be explicitely enabled per zone\&. +The zones option is a list of configuration items for specific zones that can be updated with DDNS\&. Each entry is a map that can contain the following items: +\fIorigin\fR +is a textual domain name of the zone; +\fIclass\fR +(text) is the RR class of the zone; and +\fIupdate_acl\fR +is an ACL that controls permission for updates\&. See the BIND 10 Guide for configuration details\&. Note that not listing a zone in this list does not directly mean update requests for the zone are rejected, but the end result is the same because the default ACL for updates is to deny all requests\&. .PP The module commands are: .PP @@ -91,13 +100,15 @@ argument to select the process ID to stop\&. (Note that the BIND 10 boss process \fBb10-auth\fR(8), \fBb10-cfgmgr\fR(8), \fBb10-msgq\fR(8), +\fBb10-xfrout\fR(8), +\fBb10-zonemgr\fR(8), \fBbind10\fR(8), BIND 10 Guide\&. .SH "HISTORY" .PP The \fBb10\-ddns\fR -daemon was first implemented in December 2011 for the ISC BIND 10 project\&. +daemon was first implemented in December 2011 for the ISC BIND 10 project\&. The first functional version was released in June 2012\&. .SH "COPYRIGHT" .br Copyright \(co 2011-2012 Internet Systems Consortium, Inc. ("ISC") diff --git a/src/bin/ddns/b10-ddns.xml b/src/bin/ddns/b10-ddns.xml index 15fcb1a029..fb895b9952 100644 --- a/src/bin/ddns/b10-ddns.xml +++ b/src/bin/ddns/b10-ddns.xml @@ -20,7 +20,7 @@ - February 28, 2012 + June 18, 2012 @@ -58,23 +58,33 @@ Normally it is started by the bind108 boss process. - When the b10-auth DNS server receives - a DDNS update, b10-ddns updates the zone - in the BIND 10 zone data store. - - Currently installed is a dummy component. It does not provide - any functionality. It is a skeleton implementation that - will be expanded later. 
- - + + When the b10-auth authoritative DNS server + receives an UPDATE request, it internally forwards the request + to b10-ddns, which handles the rest of the + request processing. + When the processing is completed b10-ddns + will send a response to the client with the RCODE set to the + value as specified in RFC 2136. + If the zone has been changed as a result, it will internally + notify b10-auth and + b10-xfrout so the new version of the zone will + be served, and other secondary servers will be notified via the + DNS notify protocol. + This daemon communicates with BIND 10 over a b10-msgq8 C-Channel connection. If this connection is not established, b10-ddns will exit. + The b10-ddns daemon also depends on some other + BIND 10 components (either directly or indirectly): + b10-auth8, + b10-xfrout8, and + b10-zonemgr8. @@ -90,6 +100,17 @@ + + + , + + + + + Print the command line arguments and exit. + + + , @@ -98,7 +119,7 @@ This value is ignored at this moment, but is provided for - compatibility with the bind10 Boss process + compatibility with the bind10 Boss process. @@ -112,10 +133,18 @@ zones - The zones option is a named set of zones that can be updated with - DDNS. Each entry has one element called update_acl, which is - a list of access control rules that define update permissions. - By default this is empty; DDNS must be explicitely enabled per zone. + The zones option is a list of configuration items for specific + zones that can be updated with DDNS. Each entry is a map that + can contain the following items: + origin is a textual domain name of the zone; + class (text) is the RR class of the zone; and + update_acl is an ACL that controls + permission for updates. + See the BIND 10 Guide for configuration details. + Note that not listing a zone in this list does not directly + mean update requests for the zone are rejected, but the end + result is the same because the default ACL for updates is to + deny all requests. 
@@ -144,6 +173,12 @@ b10-msgq8 , + + b10-xfrout8 + , + + b10-zonemgr8 + , bind108 , @@ -156,6 +191,7 @@ The b10-ddns daemon was first implemented in December 2011 for the ISC BIND 10 project. + The first functional version was released in June 2012. + OPTIONS The argument is as follow: diff --git a/src/bin/stats/b10-stats.8 b/src/bin/stats/b10-stats.8 index e72ec0ecc1..2b9df85a51 100644 --- a/src/bin/stats/b10-stats.8 +++ b/src/bin/stats/b10-stats.8 @@ -2,12 +2,12 @@ .\" Title: b10-stats .\" Author: [FIXME: author] [see http://docbook.sf.net/el/author] .\" Generator: DocBook XSL Stylesheets v1.75.2 -.\" Date: March 1, 2012 +.\" Date: June 20, 2012 .\" Manual: BIND10 .\" Source: BIND10 .\" Language: English .\" -.TH "B10\-STATS" "8" "March 1, 2012" "BIND10" "BIND10" +.TH "B10\-STATS" "8" "June 20, 2012" "BIND10" "BIND10" .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- @@ -27,18 +27,21 @@ b10-stats \- BIND 10 statistics module .PP The \fBb10\-stats\fR -is a daemon forked by -\fBbind10\fR\&. Stats module collects statistics data from each module and reports statistics information via -\fBbindctl\fR\&. It communicates by using the Command Channel by +daemon collects statistics data from each BIND 10 module\&. Its statistics information may be reported via +\fBbindctl\fR +or +\fBb10\-stats\-httpd\fR\&. It is started by +\fBbind10\fR +and communicates by using the Command Channel by \fBb10\-msgq\fR with other modules like \fBbind10\fR, \fBb10\-auth\fR -and so on\&. It waits for coming data from other modules, then other modules send data to stats module periodically\&. Other modules send stats data to stats module independently from implementation of stats module, so the frequency of sending data may not be constant\&. Stats module collects data and aggregates it\&. +and so on\&. 
It waits for coming data from other modules, then other modules send data to stats module periodically\&. Other modules send stats data to stats module independently from implementation of stats module, so the frequency of sending data may not be constant\&. The stats module collects data and aggregates it\&. \fBb10\-stats\fR invokes an internal command for \fBbind10\fR -after its initial starting because it\'s sure to collect statistics data from +after its initial starting to make sure it collects statistics data from \fBbind10\fR\&. .SH "OPTIONS" .PP @@ -46,9 +49,7 @@ The arguments are as follows: .PP \fB\-v\fR, \fB\-\-verbose\fR .RS 4 -This -\fBb10\-stats\fR -switches to verbose mode\&. It sends verbose messages to STDOUT\&. +This enables maximum debug logging\&. .RE .SH "CONFIGURATION AND COMMANDS" .PP diff --git a/src/bin/stats/b10-stats.xml b/src/bin/stats/b10-stats.xml index b353f8f075..32b636f66c 100644 --- a/src/bin/stats/b10-stats.xml +++ b/src/bin/stats/b10-stats.xml @@ -20,7 +20,7 @@ - March 1, 2012 + June 20, 2012 @@ -52,22 +52,22 @@ DESCRIPTION - The b10-stats is a daemon forked by - bind10. Stats module collects statistics data - from each module and reports statistics information - via bindctl. It communicates by using the + The b10-stats daemon collects statistics data + from each BIND 10 module. Its statistics information may be + reported via bindctl or + b10-stats-httpd. It is started by + bind10 and communicates by using the Command Channel by b10-msgq with other - modules - like bind10, b10-auth and - so on. It waits for coming data from other modules, then other - modules send data to stats module periodically. Other modules - send stats data to stats module independently from - implementation of stats module, so the frequency of sending data - may not be constant. Stats module collects data and aggregates - it. 
b10-stats invokes an internal command - for bind10 after its initial starting because it's - sure to collect statistics data from bind10. - + modules like bind10, b10-auth + and so on. It waits for coming data from other modules, then + other modules send data to stats module periodically. Other + modules send stats data to stats module independently from + implementation of stats module, so the frequency of sending + data may not be constant. The stats module collects data and + aggregates it. b10-stats invokes an internal + command for bind10 after its initial + starting to make sure it collects statistics data from + bind10. @@ -79,8 +79,7 @@ , - This b10-stats switches to verbose - mode. It sends verbose messages to STDOUT. + This enables maximum debug logging. diff --git a/src/bin/stats/stats.py.in b/src/bin/stats/stats.py.in index fd59c3cd00..b2e341726d 100755 --- a/src/bin/stats/stats.py.in +++ b/src/bin/stats/stats.py.in @@ -495,7 +495,7 @@ if __name__ == "__main__": parser = OptionParser() parser.add_option( "-v", "--verbose", dest="verbose", action="store_true", - help="display more about what is going on") + help="enable maximum debug logging") (options, args) = parser.parse_args() if options.verbose: isc.log.init("b10-stats", "DEBUG", 99) diff --git a/src/bin/stats/stats_httpd.py.in b/src/bin/stats/stats_httpd.py.in index 7e4da96d0c..8b13766cd6 100644 --- a/src/bin/stats/stats_httpd.py.in +++ b/src/bin/stats/stats_httpd.py.in @@ -826,7 +826,7 @@ if __name__ == "__main__": parser = OptionParser() parser.add_option( "-v", "--verbose", dest="verbose", action="store_true", - help="display more about what is going on") + help="enable maximum debug logging") (options, args) = parser.parse_args() if options.verbose: isc.log.init("b10-stats-httpd", "DEBUG", 99) diff --git a/src/bin/stats/stats_httpd_messages.mes b/src/bin/stats/stats_httpd_messages.mes index dbd065078c..ad2e97f387 100644 --- a/src/bin/stats/stats_httpd_messages.mes +++ 
b/src/bin/stats/stats_httpd_messages.mes @@ -24,14 +24,14 @@ The stats-httpd module was unable to connect to the BIND 10 command and control bus. A likely problem is that the message bus daemon (b10-msgq) is not running. The stats-httpd module will now shut down. -% STATHTTPD_CLOSING_CC_SESSION stopping cc session -Debug message indicating that the stats-httpd module is disconnecting -from the command and control bus. - % STATHTTPD_CLOSING closing %1#%2 The stats-httpd daemon will stop listening for requests on the given address and port number. +% STATHTTPD_CLOSING_CC_SESSION stopping cc session +Debug message indicating that the stats-httpd module is disconnecting +from the command and control bus. + % STATHTTPD_HANDLE_CONFIG reading configuration: %1 The stats-httpd daemon has received new configuration data and will now process it. The (changed) data is printed. @@ -49,18 +49,18 @@ An unknown command has been sent to the stats-httpd module. The stats-httpd module will respond with an error, and the command will be ignored. -% STATHTTPD_SERVER_ERROR HTTP server error: %1 -An internal error occurred while handling an HTTP request. An HTTP 500 -response will be sent back, and the specific error is printed. This -is an error condition that likely points to a module that is not -responding correctly to statistic requests. - % STATHTTPD_SERVER_DATAERROR HTTP server data error: %1 An internal error occurred while handling an HTTP request. An HTTP 404 response will be sent back, and the specific error is printed. This is an error condition that likely points the specified data corresponding to the requested URI is incorrect. +% STATHTTPD_SERVER_ERROR HTTP server error: %1 +An internal error occurred while handling an HTTP request. An HTTP 500 +response will be sent back, and the specific error is printed. This +is an error condition that likely points to a module that is not +responding correctly to statistic requests. 
+ % STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1 There was a problem initializing the HTTP server in the stats-httpd module upon receiving its configuration data. The most likely cause @@ -71,12 +71,6 @@ and an error is sent back. % STATHTTPD_SHUTDOWN shutting down The stats-httpd daemon is shutting down. -% STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1 -There was a problem initializing the HTTP server in the stats-httpd -module upon startup. The most likely cause is that it was not able -to bind to the listening port. The specific error is printed, and the -module will shut down. - % STATHTTPD_STARTED listening on %1#%2 The stats-httpd daemon will now start listening for requests on the given address and port number. @@ -85,6 +79,12 @@ given address and port number. Debug message indicating that the stats-httpd module is connecting to the command and control bus. +% STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1 +There was a problem initializing the HTTP server in the stats-httpd +module upon startup. The most likely cause is that it was not able +to bind to the listening port. The specific error is printed, and the +module will shut down. + % STATHTTPD_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down There was a keyboard interrupt signal to stop the stats-httpd daemon. The daemon will now shut down. diff --git a/src/bin/stats/stats_messages.mes b/src/bin/stats/stats_messages.mes index cfffb3adb8..3e75348c88 100644 --- a/src/bin/stats/stats_messages.mes +++ b/src/bin/stats/stats_messages.mes @@ -28,6 +28,12 @@ control bus. A likely problem is that the message bus daemon This debug message is printed when the stats module has received a configuration update from the configuration manager. +% STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND received command to show all statistics schema +The stats module received a command to show all statistics schemas of all modules. 
+ +% STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND received command to show statistics schema for %1 +The stats module received a command to show the specified statistics schema of the specified module. + % STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics The stats module received a command to show all statistics that it has collected. @@ -51,6 +57,13 @@ will respond with an error and the command will be ignored. This debug message is printed when a request is sent to the boss module to send its data to the stats module. +% STATS_STARTING starting +The stats module will be now starting. + +% STATS_START_ERROR stats module error: %1 +An internal error occurred while starting the stats module. The stats +module will be now shutting down. + % STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down There was a keyboard interrupt signal to stop the stats module. The daemon will now shut down. @@ -61,16 +74,3 @@ is unknown in the implementation. The most likely cause is an installation problem, where the specification file stats.spec is from a different version of BIND 10 than the stats module itself. Please check your installation. - -% STATS_STARTING starting -The stats module will be now starting. - -% STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND received command to show all statistics schema -The stats module received a command to show all statistics schemas of all modules. - -% STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND received command to show statistics schema for %1 -The stats module received a command to show the specified statistics schema of the specified module. - -% STATS_START_ERROR stats module error: %1 -An internal error occurred while starting the stats module. The stats -module will be now shutting down. 
diff --git a/src/bin/stats/tests/Makefile.am b/src/bin/stats/tests/Makefile.am index 01254d411b..b98996a6ba 100644 --- a/src/bin/stats/tests/Makefile.am +++ b/src/bin/stats/tests/Makefile.am @@ -24,6 +24,7 @@ endif B10_FROM_SOURCE=$(abs_top_srcdir) \ BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \ CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \ + B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \ done diff --git a/src/bin/tests/process_rename_test.py.in b/src/bin/tests/process_rename_test.py.in index f96c023841..1156b29292 100644 --- a/src/bin/tests/process_rename_test.py.in +++ b/src/bin/tests/process_rename_test.py.in @@ -25,7 +25,8 @@ class TestRename(unittest.TestCase): def __scan(self, directory, script, fun): # Scan one script if it contains call to the renaming function filename = os.path.join(directory, script) - data = ''.join(open(filename).readlines()) + with open(filename) as f: + data = ''.join(f.readlines()) prettyname = 'src' + filename[filename.rfind('../') + 2:] self.assertTrue(fun.search(data), "Didn't find a call to isc.util.process.rename in " + prettyname) @@ -53,8 +54,8 @@ class TestRename(unittest.TestCase): # Find all Makefile and extract names of scripts for (d, _, fs) in os.walk('@top_builddir@'): if 'Makefile' in fs: - makefile = ''.join(open(os.path.join(d, - "Makefile")).readlines()) + with open(os.path.join(d, "Makefile")) as f: + makefile = ''.join(f.readlines()) for (var, _) in lines.findall(re.sub(excluded_lines, '', makefile)): for (script, _) in scripts.findall(var): diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py index b88d6a99b9..a1f4d28263 100644 --- a/src/bin/xfrin/tests/xfrin_test.py +++ b/src/bin/xfrin/tests/xfrin_test.py @@ -2127,7 +2127,8 @@ class TestXfrin(unittest.TestCase): self.assertFalse(self.xfr._module_cc.stopped); self.xfr.shutdown() self.assertTrue(self.xfr._module_cc.stopped); - sys.stderr= 
self.stderr_backup + sys.stderr.close() + sys.stderr = self.stderr_backup def _do_parse_zone_name_class(self): return self.xfr._parse_zone_name_and_class(self.args) @@ -2577,7 +2578,7 @@ class TestXfrin(unittest.TestCase): self.common_ixfr_setup('refresh', False) self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type) -class TextXfrinMemoryZones(unittest.TestCase): +class TestXfrinMemoryZones(unittest.TestCase): def setUp(self): self.xfr = MockXfrin() # Configuration snippet containing 2 memory datasources, @@ -2736,6 +2737,44 @@ class TestMain(unittest.TestCase): MockXfrin.check_command_hook = raise_exception main(MockXfrin, False) +class TestXfrinProcessMockCC: + def __init__(self): + self.get_called = False + self.get_called_correctly = False + self.config = [] + + def get_remote_config_value(self, module, identifier): + self.get_called = True + if module == 'Auth' and identifier == 'datasources': + self.get_called_correctly = True + return (self.config, False) + else: + return (None, True) + +class TestXfrinProcessMockCCSession: + def __init__(self): + self.send_called = False + self.send_called_correctly = False + self.recv_called = False + self.recv_called_correctly = False + + def group_sendmsg(self, msg, module): + self.send_called = True + if module == 'Auth' and msg['command'][0] == 'loadzone': + self.send_called_correctly = True + seq = "random-e068c2de26d760f20cf10afc4b87ef0f" + else: + seq = None + + return seq + + def group_recvmsg(self, message, seq): + self.recv_called = True + if message == False and seq == "random-e068c2de26d760f20cf10afc4b87ef0f": + self.recv_called_correctly = True + # return values are ignored + return (None, None) + class TestXfrinProcess(unittest.TestCase): """ Some tests for the xfrin_process function. This replaces the @@ -2751,6 +2790,8 @@ class TestXfrinProcess(unittest.TestCase): Also sets up several internal variables to watch what happens. 
""" + self._module_cc = TestXfrinProcessMockCC() + self._send_cc_session = TestXfrinProcessMockCCSession() # This will hold a "log" of what transfers were attempted. self.__transfers = [] # This will "log" if failures or successes happened. @@ -2795,6 +2836,9 @@ class TestXfrinProcess(unittest.TestCase): Part of pretending to be the server as well. This just logs the success/failure of the previous operation. """ + if ret == XFRIN_OK: + xfrin._do_auth_loadzone(self, zone_name, rrclass) + self.__published.append(ret) def close(self): @@ -2825,12 +2869,22 @@ class TestXfrinProcess(unittest.TestCase): # Create a connection for each attempt self.assertEqual(len(transfers), self.__created_connections) self.assertEqual([published], self.__published) + if published == XFRIN_OK: + self.assertTrue(self._module_cc.get_called) + self.assertTrue(self._module_cc.get_called_correctly) + else: + self.assertFalse(self._module_cc.get_called) + self.assertFalse(self._module_cc.get_called_correctly) def test_ixfr_ok(self): """ Everything OK the first time, over IXFR. """ self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR()) + self.assertFalse(self._send_cc_session.send_called) + self.assertFalse(self._send_cc_session.send_called_correctly) + self.assertFalse(self._send_cc_session.recv_called) + self.assertFalse(self._send_cc_session.recv_called_correctly) def test_axfr_ok(self): """ @@ -2861,6 +2915,138 @@ class TestXfrinProcess(unittest.TestCase): """ self.__do_test([XFRIN_FAIL, XFRIN_FAIL], [RRType.IXFR(), RRType.AXFR()], RRType.IXFR()) + + def test_inmem_ok(self): + """ + Inmem configuration where all the configuration is just right + for loadzone to be sent to b10-auth (origin is the name received + by xfrin, filetype is sqlite3, type is memory and class is the + one received by xfrin). 
+ """ + self._module_cc.config = [{'zones': [{'origin': 'example.org', 'filetype': 'sqlite3', + 'file': 'data/inmem-xfrin.sqlite3'}], + 'type': 'memory', 'class': 'IN'}] + self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR()) + self.assertTrue(self._send_cc_session.send_called) + self.assertTrue(self._send_cc_session.send_called_correctly) + self.assertTrue(self._send_cc_session.recv_called) + self.assertTrue(self._send_cc_session.recv_called_correctly) + + def test_inmem_datasource_type_not_memory(self): + """ + Inmem configuration where the datasource type is not memory. In + this case, loadzone should not be sent to b10-auth. + """ + self._module_cc.config = [{'zones': [{'origin': 'example.org', 'filetype': 'sqlite3', + 'file': 'data/inmem-xfrin.sqlite3'}], + 'type': 'punched-card', 'class': 'IN'}] + self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR()) + self.assertFalse(self._send_cc_session.send_called) + self.assertFalse(self._send_cc_session.send_called_correctly) + self.assertFalse(self._send_cc_session.recv_called) + self.assertFalse(self._send_cc_session.recv_called_correctly) + + def test_inmem_datasource_type_is_missing(self): + """ + Inmem configuration where the datasource type is missing. In + this case, loadzone should not be sent to b10-auth. + """ + self._module_cc.config = [{'zones': [{'origin': 'example.org', 'filetype': 'sqlite3', + 'file': 'data/inmem-xfrin.sqlite3'}], + 'class': 'IN'}] + self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR()) + self.assertFalse(self._send_cc_session.send_called) + self.assertFalse(self._send_cc_session.send_called_correctly) + self.assertFalse(self._send_cc_session.recv_called) + self.assertFalse(self._send_cc_session.recv_called_correctly) + + def test_inmem_backend_type_not_sqlite3(self): + """ + Inmem configuration where the datasource backing file is not of + type sqlite3. In this case, loadzone should not be sent to + b10-auth. 
+ """ + self._module_cc.config = [{'zones': [{'origin': 'example.org', 'filetype': 'postgresql', + 'file': 'data/inmem-xfrin.db'}], + 'type': 'memory', 'class': 'IN'}] + self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR()) + self.assertFalse(self._send_cc_session.send_called) + self.assertFalse(self._send_cc_session.send_called_correctly) + self.assertFalse(self._send_cc_session.recv_called) + self.assertFalse(self._send_cc_session.recv_called_correctly) + + def test_inmem_backend_type_is_missing(self): + """ + Inmem configuration where the datasource backing file type is + not set. In this case, loadzone should not be sent to b10-auth. + """ + self._module_cc.config = [{'zones': [{'origin': 'example.org', + 'file': 'data/inmem-xfrin'}], + 'type': 'memory', 'class': 'IN'}] + self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR()) + self.assertFalse(self._send_cc_session.send_called) + self.assertFalse(self._send_cc_session.send_called_correctly) + self.assertFalse(self._send_cc_session.recv_called) + self.assertFalse(self._send_cc_session.recv_called_correctly) + + def test_inmem_class_is_different(self): + """ + Inmem configuration where the datasource class does not match + the received class. In this case, loadzone should not be sent to + b10-auth. + """ + self._module_cc.config = [{'zones': [{'origin': 'example.org', 'filetype': 'sqlite3', + 'file': 'data/inmem-xfrin.sqlite3'}], + 'type': 'memory', 'class': 'XX'}] + self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR()) + self.assertFalse(self._send_cc_session.send_called) + self.assertFalse(self._send_cc_session.send_called_correctly) + self.assertFalse(self._send_cc_session.recv_called) + self.assertFalse(self._send_cc_session.recv_called_correctly) + + def test_inmem_class_is_missing(self): + """ + Inmem configuration where the datasource class is missing. In + this case, we assume the IN class and loadzone may be sent to + b10-auth if everything else matches. 
+ """ + self._module_cc.config = [{'zones': [{'origin': 'example.org', 'filetype': 'sqlite3', + 'file': 'data/inmem-xfrin.sqlite3'}], + 'type': 'memory'}] + self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR()) + self.assertTrue(self._send_cc_session.send_called) + self.assertTrue(self._send_cc_session.send_called_correctly) + self.assertTrue(self._send_cc_session.recv_called) + self.assertTrue(self._send_cc_session.recv_called_correctly) + + def test_inmem_name_doesnt_match(self): + """ + Inmem configuration where the origin does not match the received + name. In this case, loadzone should not be sent to b10-auth. + """ + self._module_cc.config = [{'zones': [{'origin': 'isc.org', 'filetype': 'sqlite3', + 'file': 'data/inmem-xfrin.sqlite3'}], + 'type': 'memory', 'class': 'IN'}] + self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR()) + self.assertFalse(self._send_cc_session.send_called) + self.assertFalse(self._send_cc_session.send_called_correctly) + self.assertFalse(self._send_cc_session.recv_called) + self.assertFalse(self._send_cc_session.recv_called_correctly) + + def test_inmem_name_is_missing(self): + """ + Inmem configuration where the origin is missing. In this case, + loadzone should not be sent to b10-auth. + """ + self._module_cc.config = [{'zones': [{'filetype': 'sqlite3', + 'file': 'data/inmem-xfrin.sqlite3'}], + 'type': 'memory', 'class': 'IN'}] + self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR()) + self.assertFalse(self._send_cc_session.send_called) + self.assertFalse(self._send_cc_session.send_called_correctly) + self.assertFalse(self._send_cc_session.recv_called) + self.assertFalse(self._send_cc_session.recv_called_correctly) + class TestFormatting(unittest.TestCase): # If the formatting functions are moved to a more general library # (ticket #1379), these tests should be moved with them. 
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in index 27b91a99de..652f870a5a 100755 --- a/src/bin/xfrin/xfrin.py.in +++ b/src/bin/xfrin/xfrin.py.in @@ -33,6 +33,7 @@ import isc.util.process from isc.datasrc import DataSourceClient, ZoneFinder import isc.net.parse from isc.xfrin.diff import Diff +from isc.server_common.auth_command import auth_loadzone_command from isc.log_messages.xfrin_messages import * isc.log.init("b10-xfrin") @@ -66,10 +67,10 @@ else: SPECFILE_LOCATION = SPECFILE_PATH + "/xfrin.spec" AUTH_SPECFILE_LOCATION = AUTH_SPECFILE_PATH + "/auth.spec" +AUTH_MODULE_NAME = 'Auth' XFROUT_MODULE_NAME = 'Xfrout' ZONE_MANAGER_MODULE_NAME = 'Zonemgr' REFRESH_FROM_ZONEMGR = 'refresh_from_zonemgr' -ZONE_XFRIN_FAILED = 'zone_xfrin_failed' # Constants for debug levels. DBG_XFRIN_TRACE = logger.DBGLVL_TRACE_BASIC @@ -1246,6 +1247,15 @@ class ZoneInfo: return (self.master_addr.family, socket.SOCK_STREAM, (str(self.master_addr), self.master_port)) +def _do_auth_loadzone(server, zone_name, zone_class): + msg = auth_loadzone_command(server._module_cc, zone_name, zone_class) + if msg is not None: + param = msg['command'][1] + logger.debug(DBG_XFRIN_TRACE, XFRIN_AUTH_LOADZONE, param["origin"], + param["class"], param["datasrc"]) + seq = server._send_cc_session.group_sendmsg(msg, AUTH_MODULE_NAME) + answer, env = server._send_cc_session.group_recvmsg(False, seq) + class Xfrin: def __init__(self): self._max_transfers_in = 10 @@ -1529,7 +1539,7 @@ class Xfrin: def _set_db_file(self): db_file, is_default =\ - self._module_cc.get_remote_config_value("Auth", "database_file") + self._module_cc.get_remote_config_value(AUTH_MODULE_NAME, "database_file") if is_default and "B10_FROM_BUILD" in os.environ: # override the local database setting if it is default and we # are running from the source tree @@ -1539,7 +1549,7 @@ class Xfrin: "bind10_zones.sqlite3" self._db_file = db_file - def publish_xfrin_news(self, zone_name, zone_class, xfr_result): + def 
publish_xfrin_news(self, zone_name, zone_class, xfr_result): '''Send command to xfrout/zone manager module. If xfrin has finished successfully for one zone, tell the good news(command: zone_new_data_ready) to zone manager and xfrout. @@ -1548,6 +1558,7 @@ class Xfrin: param = {'zone_name': zone_name.to_text(), 'zone_class': zone_class.to_text()} if xfr_result == XFRIN_OK: + _do_auth_loadzone(self, zone_name, zone_class) msg = create_command(notify_out.ZONE_NEW_DATA_READY_CMD, param) # catch the exception, in case msgq has been killed. try: @@ -1566,8 +1577,9 @@ class Xfrin: pass # for now we just ignore the failure except socket.error as err: logger.error(XFRIN_MSGQ_SEND_ERROR, XFROUT_MODULE_NAME, ZONE_MANAGER_MODULE_NAME) + else: - msg = create_command(ZONE_XFRIN_FAILED, param) + msg = create_command(notify_out.ZONE_XFRIN_FAILED, param) # catch the exception, in case msgq has been killed. try: seq = self._send_cc_session.group_sendmsg(msg, ZONE_MANAGER_MODULE_NAME) diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes index 25a1fc1c06..ffea249c2e 100644 --- a/src/bin/xfrin/xfrin_messages.mes +++ b/src/bin/xfrin/xfrin_messages.mes @@ -15,6 +15,11 @@ # No namespace declaration - these constants go in the global namespace # of the xfrin messages python module. +% XFRIN_AUTH_LOADZONE sending Auth loadzone for origin=%1, class=%2, datasrc=%3 +There was a successful zone transfer, and the zone is served by b10-auth +in the in-memory data source using sqlite3 as a backend. We send the +"loadzone" command for the zone to b10-auth. + % XFRIN_AXFR_INCONSISTENT_SOA AXFR SOAs are inconsistent for %1: %2 expected, %3 received The serial fields of the first and last SOAs of AXFR (including AXFR-style IXFR) are not the same. According to RFC 5936 these two SOAs must be the @@ -113,6 +118,10 @@ There was a problem sending a message to the xfrout module or the zone manager. This most likely means that the msgq daemon has quit or was killed. 
+% XFRIN_MSGQ_SEND_ERROR_AUTH error while contacting %1 +There was a problem sending a message to b10-auth. This most likely +means that the msgq daemon has quit or was killed. + % XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1 There was a problem sending a message to the zone manager. This most likely means that the msgq daemon has quit or was killed. diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in index b60535c49b..e4fc873e46 100644 --- a/src/bin/xfrout/tests/xfrout_test.py.in +++ b/src/bin/xfrout/tests/xfrout_test.py.in @@ -60,6 +60,9 @@ class MySocket(): self.sendqueue.extend(data); return len(data) + def fileno(self): + return 42 # simply return a constant dummy value + def readsent(self): if len(self.sendqueue) >= 2: size = 2 + struct.unpack("!H", self.sendqueue[:2])[0] @@ -1155,6 +1158,15 @@ class TestUnixSockServer(unittest.TestCase): def setUp(self): self.write_sock, self.read_sock = socket.socketpair() self.unix = MyUnixSockServer() + # Some test below modify these module-wide attributes. We'll need + # to restore them at the end of each test, so we remember them here. + self.__select_bak = xfrout.select.select + self.__recv_fd_back = xfrout.recv_fd + + def tearDown(self): + # Restore possibly faked module-wide attributes. 
+ xfrout.select.select = self.__select_bak + xfrout.recv_fd = self.__recv_fd_back def test_tsig_keyring(self): """ @@ -1201,6 +1213,7 @@ class TestUnixSockServer(unittest.TestCase): self.assertEqual((socket.AF_INET, socket.SOCK_STREAM, ('127.0.0.1', 12345)), self.unix._guess_remote(sock.fileno())) + sock.close() if socket.has_ipv6: # Don't check IPv6 address on hosts not supporting them sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) @@ -1208,6 +1221,7 @@ class TestUnixSockServer(unittest.TestCase): self.assertEqual((socket.AF_INET6, socket.SOCK_STREAM, ('::1', 12345, 0, 0)), self.unix._guess_remote(sock.fileno())) + sock.close() # Try when pretending there's no IPv6 support # (No need to pretend when there's really no IPv6) xfrout.socket.has_ipv6 = False @@ -1216,6 +1230,7 @@ class TestUnixSockServer(unittest.TestCase): self.assertEqual((socket.AF_INET, socket.SOCK_STREAM, ('127.0.0.1', 12345)), self.unix._guess_remote(sock.fileno())) + sock.close() # Return it back xfrout.socket.has_ipv6 = True @@ -1375,19 +1390,13 @@ class TestUnixSockServer(unittest.TestCase): self._remove_file(sock_file) self.assertFalse(self.unix._sock_file_in_use(sock_file)) self._start_unix_sock_server(sock_file) - - old_stdout = sys.stdout - sys.stdout = open(os.devnull, 'w') self.assertTrue(self.unix._sock_file_in_use(sock_file)) - sys.stdout = old_stdout def test_remove_unused_sock_file_in_use(self): sock_file = 'temp.sock.file' self._remove_file(sock_file) self.assertFalse(self.unix._sock_file_in_use(sock_file)) self._start_unix_sock_server(sock_file) - old_stdout = sys.stdout - sys.stdout = open(os.devnull, 'w') try: self.unix._remove_unused_sock_file(sock_file) except SystemExit: @@ -1396,8 +1405,6 @@ class TestUnixSockServer(unittest.TestCase): # This should never happen self.assertTrue(False) - sys.stdout = old_stdout - def test_remove_unused_sock_file_dir(self): import tempfile dir_name = tempfile.mkdtemp() @@ -1411,9 +1418,46 @@ class 
TestUnixSockServer(unittest.TestCase): # This should never happen self.assertTrue(False) + sys.stdout.close() sys.stdout = old_stdout os.rmdir(dir_name) + def __fake_select(self, r, w, e): + '''select emulator used in select_loop_fail test.''' + # This simplified faked function assumes to be called at most once, + # and in that case just return a pre-configured "readable" sockets. + if self.__select_count > 0: + raise RuntimeError('select called unexpected number of times') + self.__select_count += 1 + return (self.__select_return_redable, [], []) + + def test_select_loop_fail(self): + '''Check failure events in the main loop.''' + # setup faked select() environments + self.unix._read_sock = MySocket(socket.AF_INET6, socket.SOCK_STREAM) + xfrout.select.select = self.__fake_select + self.__select_return_redable = [MySocket(socket.AF_INET6, + socket.SOCK_STREAM)] + + # Check that loop terminates if recv_fd() fails. + for ret_code in [-1, FD_SYSTEM_ERROR]: + # fake recv_fd so it returns the faked failure code. + xfrout.recv_fd = lambda fileno: ret_code + + # reset the counter, go to the loop. + self.__select_count = 0 + self.unix._select_loop(self.__select_return_redable[0]) + # select should have been called exactly once. + self.assertEqual(1, self.__select_count) + + # Next, we test the case where recv_fd succeeds but receiving the + # request fails. 
+ self.__select_count = 0 + xfrout.recv_fd = lambda fileno: 1 + self.unix._receive_query_message = lambda fd: None + self.unix._select_loop(self.__select_return_redable[0]) + self.assertEqual(1, self.__select_count) + class TestInitialization(unittest.TestCase): def setEnv(self, name, value): if value is None: diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in index 4dd12cee17..46ae687323 100755 --- a/src/bin/xfrout/xfrout.py.in +++ b/src/bin/xfrout/xfrout.py.in @@ -678,30 +678,40 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, except socket.error: logger.error(XFROUT_FETCH_REQUEST_ERROR) return + self._select_loop(request) + + def _select_loop(self, request_sock): + '''Main loop for a single session between xfrout and auth. + + This is a dedicated subroutine of handle_request(), but is defined + as a separate "protected" method for the convenience of tests. + ''' # Check self._shutdown_event to ensure the real shutdown comes. # Linux could trigger a spurious readable event on the _read_sock # due to a bug, so we need perform a double check. 
while not self._shutdown_event.is_set(): # Check if xfrout is shutdown try: - (rlist, wlist, xlist) = select.select([self._read_sock, request], [], []) + (rlist, wlist, xlist) = select.select([self._read_sock, + request_sock], [], []) except select.error as e: if e.args[0] == errno.EINTR: (rlist, wlist, xlist) = ([], [], []) continue else: - logger.error(XFROUT_SOCKET_SELECT_ERROR, str(e)) + logger.error(XFROUT_SOCKET_SELECT_ERROR, e) break - # self.server._shutdown_event will be set by now, if it is not a false - # alarm + # self.server._shutdown_event will be set by now, if it is not a + # false alarm if self._read_sock in rlist: continue try: - self.process_request(request) + if not self.process_request(request_sock): + break except Exception as pre: - logger.error(XFROUT_PROCESS_REQUEST_ERROR, str(pre)) + logger.error(XFROUT_PROCESS_REQUEST_ERROR, pre) break def _handle_request_noblock(self): @@ -713,26 +723,33 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, def process_request(self, request): """Receive socket fd and query message from auth, then - start a new thread to process the request.""" + start a new thread to process the request. + + Return: True if everything is okay; otherwise False, in which case + the calling thread will terminate. + + """ sock_fd = recv_fd(request.fileno()) if sock_fd < 0: - # This may happen when one xfrout process try to connect to - # xfrout unix socket server, to check whether there is another - # xfrout running. - if sock_fd == FD_SYSTEM_ERROR: - logger.error(XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR) - return + logger.warn(XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR) + return False - # receive request msg + # receive request msg. If it fails we simply terminate the thread; + # it might be possible to recover from this state, but it's more likely + # that auth and xfrout are in inconsistent states. So it will make + # more sense to restart in a new session. 
request_data = self._receive_query_message(request) - if not request_data: - return + if request_data is None: + # The specific exception type doesn't matter so we use session + # error. + raise XfroutSessionError('Failed to get complete xfr request') t = threading.Thread(target=self.finish_request, - args = (sock_fd, request_data)) + args=(sock_fd, request_data)) if self.daemon_threads: t.daemon = True t.start() + return True def _guess_remote(self, sock_fd): """Guess remote address and port of the socket. @@ -747,12 +764,15 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, # to care about the SOCK_STREAM parameter at all (which it really is, # except for testing) if socket.has_ipv6: - sock = socket.fromfd(sock_fd, socket.AF_INET6, socket.SOCK_STREAM) + sock_domain = socket.AF_INET6 else: # To make it work even on hosts without IPv6 support # (Any idea how to simulate this in test?) - sock = socket.fromfd(sock_fd, socket.AF_INET, socket.SOCK_STREAM) + sock_domain = socket.AF_INET + + sock = socket.fromfd(sock_fd, sock_domain, socket.SOCK_STREAM) peer = sock.getpeername() + sock.close() # Identify the correct socket family. Due to the above "trick", # we cannot simply use sock.family. 
@@ -761,6 +781,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, socket.inet_pton(socket.AF_INET6, peer[0]) except socket.error: family = socket.AF_INET + return (family, socket.SOCK_STREAM, peer) def finish_request(self, sock_fd, request_data): @@ -805,8 +826,10 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, sock = socket.socket(socket.AF_UNIX) sock.connect(sock_file) except socket.error as err: + sock.close() return False else: + sock.close() return True def shutdown(self): @@ -985,7 +1008,7 @@ class XfroutServer: self.shutdown() answer = create_answer(0) - elif cmd == notify_out.ZONE_NEW_DATA_READY_CMD: + elif cmd == "notify": zone_name = args.get('zone_name') zone_class = args.get('zone_class') if not zone_class: diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes index 9996a5ad70..9f674a2cf8 100644 --- a/src/bin/xfrout/xfrout_messages.mes +++ b/src/bin/xfrout/xfrout_messages.mes @@ -23,22 +23,15 @@ a valid TSIG key. There was a problem reading from the command and control channel. The most likely cause is that the msgq daemon is not running. -% XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1 -There was a problem in the lower level module handling configuration and -control commands. This could happen for various reasons, but the most likely -cause is that the configuration database contains a syntax error and xfrout -failed to start at initialization. A detailed error message from the module -will also be displayed. - -% XFROUT_CONFIG_ERROR error found in configuration data: %1 -The xfrout process encountered an error when installing the configuration at -startup time. Details of the error are included in the log message. - % XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response There was a problem reading a response from another module over the command and control channel. The most likely cause is that the configuration manager b10-cfgmgr is not running. 
+% XFROUT_CONFIG_ERROR error found in configuration data: %1 +The xfrout process encountered an error when installing the configuration at +startup time. Details of the error are included in the log message. + % XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon There was a socket error while contacting the b10-auth daemon to fetch a transfer request. The auth daemon may have shutdown. @@ -56,6 +49,52 @@ are missing on the system, or the PYTHONPATH variable is not correct. The specific place where this library needs to be depends on your system and your specific installation. +% XFROUT_IXFR_MULTIPLE_SOA IXFR client %1: authority section has multiple SOAs +An IXFR request was received with more than one SOA RRs in the authority +section. The xfrout daemon rejects the request with an RCODE of +FORMERR. + +% XFROUT_IXFR_NO_JOURNAL_SUPPORT IXFR client %1, %2: journaling not supported in the data source, falling back to AXFR +An IXFR request was received but the underlying data source did +not support journaling. The xfrout daemon fell back to AXFR-style +IXFR. + +% XFROUT_IXFR_NO_SOA IXFR client %1: missing SOA +An IXFR request was received with no SOA RR in the authority section. +The xfrout daemon rejects the request with an RCODE of FORMERR. + +% XFROUT_IXFR_NO_VERSION IXFR client %1, %2: version (%3 to %4) not in journal, falling back to AXFR +An IXFR request was received, but the requested range of differences +were not found in the data source. The xfrout daemon fell back to +AXFR-style IXFR. + +% XFROUT_IXFR_NO_ZONE IXFR client %1, %2: zone not found with journal +The requested zone in IXFR was not found in the data source +even though the xfrout daemon successfully found the SOA RR of the zone +in the data source. This can happen if the administrator removed the +zone from the data source within the small duration between these +operations, but it's more likely to be a bug or broken data source. 
+Unless you know why this message was logged, and especially if it +happens often, it's advisable to check whether the data source is +valid for this zone. The xfrout daemon considers it a possible, +though unlikely, event, and returns a response with an RCODE of +NOTAUTH. + +% XFROUT_IXFR_UPTODATE IXFR client %1, %2: client version is new enough (theirs=%3, ours=%4) +An IXFR request was received, but the client's SOA version is the same as +or newer than that of the server. The xfrout server responds to the +request with the answer section being just one SOA of that version. +Note: as of this writing the 'newer version' cannot be identified due to +the lack of support for the serial number arithmetic. This will soon +be implemented. + +% XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1 +There was a problem in the lower level module handling configuration and +control commands. This could happen for various reasons, but the most likely +cause is that the configuration database contains a syntax error and xfrout +failed to start at initialization. A detailed error message from the module +will also be displayed. + % XFROUT_NEW_CONFIG Update xfrout configuration New configuration settings have been sent from the configuration manager. The xfrout daemon will now apply them. @@ -76,11 +115,15 @@ In general, this should only occur for unexpected problems like memory allocation failures, as the query should already have been parsed by the b10-auth daemon, before it was passed here. -% XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %2 -There was an error processing a transfer request. The error is included -in the log message, but at this point no specific information other -than that could be given. This points to incomplete exception handling -in the code. +% XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %1 +There was an error in receiving a transfer request from b10-auth. 
+This is generally an unexpected event, but is possible when, for +example, b10-auth terminates in the middle of forwarding the request. +When this happens it's unlikely to be recoverable with the same +communication session with b10-auth, so b10-xfrout drops it and +waits for a new session. In any case, this error indicates that +there's something very wrong in the system, so it's advisable to check +the overall status of the BIND 10 system. % XFROUT_QUERY_DROPPED %1 client %2: request to transfer %3 dropped The xfrout process silently dropped a request to transfer zone to @@ -88,12 +131,6 @@ given host. This is required by the ACLs. The %2 represents the IP address and port of the peer requesting the transfer, and the %3 represents the zone name and class. -% XFROUT_QUERY_REJECTED %1 client %2: request to transfer %3 rejected -The xfrout process rejected (by REFUSED rcode) a request to transfer zone to -given host. This is because of ACLs. The %2 represents the IP -address and port of the peer requesting the transfer, and the %3 -represents the zone name and class. - % XFROUT_QUERY_QUOTA_EXCCEEDED %1 client %2: request denied due to quota (%3) The xfr request was rejected because the server was already handling the maximum number of allowable transfers as specified in the transfers_out @@ -104,20 +141,28 @@ this parameter; if the server is being too busy due to requests from unexpected clients you may want to restrict the legitimate clients with ACL. -% XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection -There was an error receiving the file descriptor for the transfer -request. Normally, the request is received by b10-auth, and passed on -to the xfrout daemon, so it can answer directly. However, there was a -problem receiving this file descriptor. The request will be ignored. 
+% XFROUT_QUERY_REJECTED %1 client %2: request to transfer %3 rejected +The xfrout process rejected (by REFUSED rcode) a request to transfer zone to +given host. This is because of ACLs. The %2 represents the IP +address and port of the peer requesting the transfer, and the %3 +represents the zone name and class. % XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received The xfrout daemon received a shutdown command from the command channel and will now shut down. -% XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2 -When shutting down, the xfrout daemon tried to clear the unix socket -file used for communication with the auth daemon. It failed to remove -the file. The reason for the failure is given in the error message. +% XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection +There was an error receiving the file descriptor for the transfer +request from b10-auth. There can be several reasons for this, but +the most likely cause is that b10-auth terminates for some reason +(maybe it's a bug of b10-auth, maybe it's an intentional restart by +the administrator), so depending on how this happens it may or may not +be a serious error. But in any case this is not expected to happen +frequently, and it's advisable to figure out how this happened if +this message is logged. Even if this error happens xfrout will reset +its internal state and will keep receiving further requests. So +if it's just a temporary restart of b10-auth the administrator does +not have to do anything. % XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2 The unix socket file xfrout needs for contact with the auth daemon @@ -126,6 +171,11 @@ removing it. It is likely that we do not have permission to remove this file. The specific error is show in the log message. The xfrout daemon will shut down. 
+% XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2 +When shutting down, the xfrout daemon tried to clear the unix socket +file used for communication with the auth daemon. It failed to remove +the file. The reason for the failure is given in the error message. + % XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1 There was an error while calling select() on the socket that informs the xfrout daemon that a new xfrout request has arrived. This should @@ -151,6 +201,13 @@ on, but the file is in use. The most likely cause is that another xfrout daemon process is still running. This xfrout daemon (the one printing this message) will not start. +% XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4 +Pre-response check for an incoming XFR request failed unexpectedly. +The most likely cause of this is that some low level error in the data +source, but it may also be other general (more unlikely) errors such +as memory shortage. Some detail of the error is also included in the +message. The xfrout server tries to return a SERVFAIL response in this case. + % XFROUT_XFR_TRANSFER_DONE %1 client %2: transfer of %3 complete The transfer of the given zone has been completed successfully, or was aborted due to a shutdown event. @@ -161,13 +218,6 @@ an AXFR query. The error message of the exception is included in the log message, but this error most likely points to incomplete exception handling in the code. -% XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4 -Pre-response check for an incomding XFR request failed unexpectedly. -The most likely cause of this is that some low level error in the data -source, but it may also be other general (more unlikely) errors such -as memory shortage. Some detail of the error is also included in the -message. The xfrout server tries to return a SERVFAIL response in this case. 
- % XFROUT_XFR_TRANSFER_FAILED %1 client %2: transfer of %3 failed, rcode: %4 A transfer out for the given zone failed. An error response is sent to the client. The given rcode is the rcode that is set in the error @@ -181,42 +231,3 @@ Xfrout/max_transfers_out, has been reached). % XFROUT_XFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started A transfer out of the given zone has started. - -% XFROUT_IXFR_MULTIPLE_SOA IXFR client %1: authority section has multiple SOAs -An IXFR request was received with more than one SOA RRs in the authority -section. The xfrout daemon rejects the request with an RCODE of -FORMERR. - -% XFROUT_IXFR_NO_SOA IXFR client %1: missing SOA -An IXFR request was received with no SOA RR in the authority section. -The xfrout daemon rejects the request with an RCODE of FORMERR. - -% XFROUT_IXFR_NO_JOURNAL_SUPPORT IXFR client %1, %2: journaling not supported in the data source, falling back to AXFR -An IXFR request was received but the underlying data source did -not support journaling. The xfrout daemon fell back to AXFR-style -IXFR. - -% XFROUT_IXFR_UPTODATE IXFR client %1, %2: client version is new enough (theirs=%3, ours=%4) -An IXFR request was received, but the client's SOA version is the same as -or newer than that of the server. The xfrout server responds to the -request with the answer section being just one SOA of that version. -Note: as of this wrting the 'newer version' cannot be identified due to -the lack of support for the serial number arithmetic. This will soon -be implemented. - -% XFROUT_IXFR_NO_VERSION IXFR client %1, %2: version (%3 to %4) not in journal, falling back to AXFR -An IXFR request was received, but the requested range of differences -were not found in the data source. The xfrout daemon fell back to -AXFR-style IXFR. 
- -% XFROUT_IXFR_NO_ZONE IXFR client %1, %2: zone not found with journal -The requested zone in IXFR was not found in the data source -even though the xfrout daemon sucessfully found the SOA RR of the zone -in the data source. This can happen if the administrator removed the -zone from the data source within the small duration between these -operations, but it's more likely to be a bug or broken data source. -Unless you know why this message was logged, and especially if it -happens often, it's advisable to check whether the data source is -valid for this zone. The xfrout daemon considers it a possible, -though unlikely, event, and returns a response with an RCODE of -NOTAUTH. diff --git a/src/bin/zonemgr/tests/zonemgr_test.py b/src/bin/zonemgr/tests/zonemgr_test.py index 548d921a54..42ed679661 100644 --- a/src/bin/zonemgr/tests/zonemgr_test.py +++ b/src/bin/zonemgr/tests/zonemgr_test.py @@ -21,6 +21,7 @@ import os import tempfile from zonemgr import * from isc.testutils.ccsession_mock import MockModuleCCSession +from isc.notify import notify_out ZONE_NAME_CLASS1_IN = ("example.net.", "IN") ZONE_NAME_CLASS1_CH = ("example.net.", "CH") @@ -111,6 +112,7 @@ class TestZonemgrRefresh(unittest.TestCase): def tearDown(self): if os.path.exists(TEST_SQLITE3_DBFILE): os.unlink(TEST_SQLITE3_DBFILE) + sys.stderr.close() sys.stderr = self.stderr_backup def test_random_jitter(self): @@ -683,7 +685,7 @@ class TestZonemgr(unittest.TestCase): self.assertEqual(answer1, self.zonemgr._parse_cmd_params(params1, ZONE_NOTIFY_COMMAND)) params2 = {"zone_name" : "example.com.", "zone_class" : "IN"} answer2 = ZONE_NAME_CLASS3_IN - self.assertEqual(answer2, self.zonemgr._parse_cmd_params(params2, ZONE_XFRIN_SUCCESS_COMMAND)) + self.assertEqual(answer2, self.zonemgr._parse_cmd_params(params2, notify_out.ZONE_NEW_DATA_READY_CMD)) self.assertRaises(ZonemgrException, self.zonemgr._parse_cmd_params, params2, ZONE_NOTIFY_COMMAND) params1 = {"zone_class" : "CH"} self.assertRaises(ZonemgrException, 
self.zonemgr._parse_cmd_params, params2, ZONE_NOTIFY_COMMAND) diff --git a/src/bin/zonemgr/zonemgr.py.in b/src/bin/zonemgr/zonemgr.py.in index 87589a84bc..8cb616d917 100755 --- a/src/bin/zonemgr/zonemgr.py.in +++ b/src/bin/zonemgr/zonemgr.py.in @@ -39,6 +39,7 @@ from optparse import OptionParser, OptionValueError from isc.config.ccsession import * import isc.util.process from isc.log_messages.zonemgr_messages import * +from isc.notify import notify_out # Initialize logging for called modules. isc.log.init("b10-zonemgr") @@ -78,8 +79,6 @@ XFRIN_MODULE_NAME = 'Xfrin' AUTH_MODULE_NAME = 'Auth' # define command name -ZONE_XFRIN_FAILED_COMMAND = 'zone_xfrin_failed' -ZONE_XFRIN_SUCCESS_COMMAND = 'zone_new_data_ready' ZONE_REFRESH_COMMAND = 'refresh_from_zonemgr' ZONE_NOTIFY_COMMAND = 'notify' @@ -428,6 +427,8 @@ class ZonemgrRefresh: self._thread.join() # Wipe out what we do not need self._thread = None + self._read_sock.close() + self._write_sock.close() self._read_sock = None self._write_sock = None @@ -621,7 +622,7 @@ class Zonemgr: def command_handler(self, command, args): """Handle command receivd from command channel. ZONE_NOTIFY_COMMAND is issued by Auth process; - ZONE_XFRIN_SUCCESS_COMMAND and ZONE_XFRIN_FAILED_COMMAND are issued by + ZONE_NEW_DATA_READY_CMD and ZONE_XFRIN_FAILED are issued by Xfrin process; shutdown is issued by a user or Boss process. 
""" answer = create_answer(0) @@ -635,7 +636,7 @@ class Zonemgr: # Send notification to zonemgr timer thread self._master_socket.send(b" ")# make self._slave_socket readble - elif command == ZONE_XFRIN_SUCCESS_COMMAND: + elif command == notify_out.ZONE_NEW_DATA_READY_CMD: """ Handle xfrin success command""" zone_name_class = self._parse_cmd_params(args, command) logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_XFRIN_SUCCESS, zone_name_class[0], zone_name_class[1]) @@ -643,7 +644,7 @@ class Zonemgr: self._zone_refresh.zone_refresh_success(zone_name_class) self._master_socket.send(b" ")# make self._slave_socket readble - elif command == ZONE_XFRIN_FAILED_COMMAND: + elif command == notify_out.ZONE_XFRIN_FAILED: """ Handle xfrin fail command""" zone_name_class = self._parse_cmd_params(args, command) logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_XFRIN_FAILED, zone_name_class[0], zone_name_class[1]) diff --git a/src/bin/zonemgr/zonemgr_messages.mes b/src/bin/zonemgr/zonemgr_messages.mes index c866b792fd..88f8dcf237 100644 --- a/src/bin/zonemgr/zonemgr_messages.mes +++ b/src/bin/zonemgr/zonemgr_messages.mes @@ -67,10 +67,6 @@ zone manager to record the master server for the zone and start a timer; when the timer expires, the master will be polled to see if it contains new data. -% ZONEMGR_STARTED zonemgr started -This informational message is output by zonemgr when all initialization -has been completed and it is entering its main loop. - % ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command This is a debug message indicating that the zone manager has received a SHUTDOWN command over the command channel from the Boss process. @@ -120,6 +116,10 @@ problem is that the daemon is not running. % ZONEMGR_SHUTDOWN zone manager has shut down A debug message, output when the zone manager has shut down completely. +% ZONEMGR_STARTED zonemgr started +This informational message is output by zonemgr when all initialization +has been completed and it is entering its main loop. 
+ % ZONEMGR_STARTING zone manager starting A debug message output when the zone manager starts up. diff --git a/src/lib/acl/tests/Makefile.am b/src/lib/acl/tests/Makefile.am index 636951199b..6df91c7703 100644 --- a/src/lib/acl/tests/Makefile.am +++ b/src/lib/acl/tests/Makefile.am @@ -8,6 +8,9 @@ endif CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = if HAVE_GTEST TESTS += run_unittests diff --git a/src/lib/asiodns/tests/Makefile.am b/src/lib/asiodns/tests/Makefile.am index 95094f01b7..a96a3e67da 100644 --- a/src/lib/asiodns/tests/Makefile.am +++ b/src/lib/asiodns/tests/Makefile.am @@ -12,6 +12,9 @@ endif CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = if HAVE_GTEST TESTS += run_unittests diff --git a/src/lib/asiolink/io_endpoint.cc b/src/lib/asiolink/io_endpoint.cc index 63830a5b5e..2354521906 100644 --- a/src/lib/asiolink/io_endpoint.cc +++ b/src/lib/asiolink/io_endpoint.cc @@ -14,10 +14,6 @@ #include -#include // for some IPC/network system calls -#include -#include - #include #include @@ -26,6 +22,13 @@ #include #include +#include + +#include +#include // for some IPC/network system calls +#include +#include + using namespace std; namespace isc { @@ -58,5 +61,18 @@ IOEndpoint::operator!=(const IOEndpoint& other) const { return (!operator==(other)); } +ostream& +operator<<(ostream& os, const IOEndpoint& endpoint) { + if (endpoint.getFamily() == AF_INET6) { + os << "[" << endpoint.getAddress().toText() << "]"; + } else { + // In practice this should be AF_INET, but it's not guaranteed by + // the interface. We'll use the result of textual address + // representation opaquely. 
+ os << endpoint.getAddress().toText(); + } + os << ":" << boost::lexical_cast(endpoint.getPort()); + return (os); +} } // namespace asiolink } // namespace isc diff --git a/src/lib/asiolink/io_endpoint.h b/src/lib/asiolink/io_endpoint.h index 11ea97be03..973fc8b4dd 100644 --- a/src/lib/asiolink/io_endpoint.h +++ b/src/lib/asiolink/io_endpoint.h @@ -18,9 +18,6 @@ // IMPORTANT NOTE: only very few ASIO headers files can be included in // this file. In particular, asio.hpp should never be included here. // See the description of the namespace below. -#include // for some network system calls - -#include // for sockaddr #include #include @@ -28,6 +25,12 @@ #include #include +# include + +#include // for some network system calls + +#include // for sockaddr + namespace isc { namespace asiolink { @@ -158,6 +161,27 @@ public: const unsigned short port); }; +/// \brief Insert the \c IOEndpoint as a string into stream. +/// +/// This method converts \c endpoint into a string and inserts it into the +/// output stream \c os. +/// +/// This method converts the address and port of the endpoint in the textual +/// format that other BIND 10 modules would use in logging, i.e., +/// - For IPv6 address: [<address>]:port (e.g., [2001:db8::5300]:53) +/// - For IPv4 address: <address>:port (e.g., 192.0.2.53:5300) +/// +/// If it's neither IPv6 nor IPv4, it converts the endpoint into text in the +/// same format as that for IPv4, although in practice such a case is not +/// really expected. +/// +/// \param os A \c std::ostream object on which the insertion operation is +/// performed. +/// \param endpoint A reference to an \c IOEndpoint object output by the +/// operation. +/// \return A reference to the same \c std::ostream object referenced by +/// parameter \c os after the insertion operation. 
+std::ostream& operator<<(std::ostream& os, const IOEndpoint& endpoint); } // namespace asiolink } // namespace isc #endif // __IO_ENDPOINT_H diff --git a/src/lib/asiolink/tests/Makefile.am b/src/lib/asiolink/tests/Makefile.am index 984cf07bfd..39b098d331 100644 --- a/src/lib/asiolink/tests/Makefile.am +++ b/src/lib/asiolink/tests/Makefile.am @@ -18,6 +18,9 @@ AM_CXXFLAGS += $(WARNING_NO_MISSING_FIELD_INITIALIZERS_CFLAG) CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = if HAVE_GTEST TESTS += run_unittests @@ -33,11 +36,11 @@ run_unittests_SOURCES += udp_socket_unittest.cc run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES) -run_unittests_LDADD = $(GTEST_LDADD) -run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la +run_unittests_LDADD = $(top_builddir)/src/lib/asiolink/libasiolink.la run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la +run_unittests_LDADD += $(GTEST_LDADD) run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS) diff --git a/src/lib/asiolink/tests/io_endpoint_unittest.cc b/src/lib/asiolink/tests/io_endpoint_unittest.cc index 948e7089b3..462a2fbac1 100644 --- a/src/lib/asiolink/tests/io_endpoint_unittest.cc +++ b/src/lib/asiolink/tests/io_endpoint_unittest.cc @@ -13,18 +13,22 @@ // PERFORMANCE OF THIS SOFTWARE. #include + +#include +#include + #include +#include + +#include +#include + #include #include #include #include -#include - -#include -#include - using namespace isc::asiolink; namespace { @@ -240,4 +244,51 @@ TEST(IOEndpointTest, getSockAddr) { sockAddrMatch(ep->getSockAddr(), "2001:db8::5300", "35"); } +// A faked IOEndpoint for an uncommon address family. It wouldn't be possible +// to create via the normal factory, so we define a special derived class +// for it. 
+class TestIOEndpoint : public IOEndpoint { + virtual IOAddress getAddress() const { + return IOAddress("2001:db8::bad:add"); + } + virtual uint16_t getPort() const { return (42); } + virtual short getProtocol() const { return (IPPROTO_UDP); } + virtual short getFamily() const { return (AF_UNSPEC); } + virtual const struct sockaddr& getSockAddr() const { + static struct sockaddr sa_placeholder; + return (sa_placeholder); + } +}; + +void +checkEndpointText(const std::string& expected, const IOEndpoint& ep) { + std::ostringstream oss; + oss << ep; + EXPECT_EQ(expected, oss.str()); +} + +// test operator<<. We simply confirm it appends the result of toText(). +TEST(IOEndpointTest, LeftShiftOperator) { + // UDP/IPv4 + ConstIOEndpointPtr ep(IOEndpoint::create(IPPROTO_UDP, + IOAddress("192.0.2.1"), 53210)); + checkEndpointText("192.0.2.1:53210", *ep); + + // UDP/IPv6 + ep.reset(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::53"), 53)); + checkEndpointText("[2001:db8::53]:53", *ep); + + // Same for TCP: shouldn't be different + ep.reset(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.1"), 53210)); + checkEndpointText("192.0.2.1:53210", *ep); + ep.reset(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::53"), 53)); + checkEndpointText("[2001:db8::53]:53", *ep); + + // Uncommon address family. The actual behavior doesn't matter much + // in practice, but we check such input doesn't make it crash. + // We explicitly instantiate the test EP because otherwise some compilers + // would be confused and complain. 
+ TestIOEndpoint test_ep; + checkEndpointText("2001:db8::bad:add:42", test_ep); +} } diff --git a/src/lib/bench/tests/Makefile.am b/src/lib/bench/tests/Makefile.am index 3f8a67863b..80695596fd 100644 --- a/src/lib/bench/tests/Makefile.am +++ b/src/lib/bench/tests/Makefile.am @@ -5,6 +5,9 @@ AM_CXXFLAGS = $(B10_CXXFLAGS) CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = if HAVE_GTEST TESTS += run_unittests diff --git a/src/lib/cache/cache_messages.mes b/src/lib/cache/cache_messages.mes index 19102aec4a..51f2264cde 100644 --- a/src/lib/cache/cache_messages.mes +++ b/src/lib/cache/cache_messages.mes @@ -66,14 +66,14 @@ is created. Debug message. The resolver cache is looking up the deepest known nameserver, so the resolution doesn't have to start from the root. +% CACHE_RESOLVER_INIT initializing resolver cache for class %1 +Debug message. The resolver cache is being created for this given class. + % CACHE_RESOLVER_INIT_INFO initializing resolver cache for class %1 Debug message, the resolver cache is being created for this given class. The difference from CACHE_RESOLVER_INIT is only in different format of passed information, otherwise it does the same. -% CACHE_RESOLVER_INIT initializing resolver cache for class %1 -Debug message. The resolver cache is being created for this given class. - % CACHE_RESOLVER_LOCAL_MSG message for %1/%2 found in local zone data Debug message. The resolver cache found a complete message for the user query in the zone data. 
diff --git a/src/lib/cache/tests/Makefile.am b/src/lib/cache/tests/Makefile.am index b638f55cbc..fe33d3c3bf 100644 --- a/src/lib/cache/tests/Makefile.am +++ b/src/lib/cache/tests/Makefile.am @@ -28,6 +28,9 @@ endif CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = if HAVE_GTEST TESTS += run_unittests diff --git a/src/lib/cache/tests/message_entry_unittest.cc b/src/lib/cache/tests/message_entry_unittest.cc index d9709ed298..86cc89fefa 100644 --- a/src/lib/cache/tests/message_entry_unittest.cc +++ b/src/lib/cache/tests/message_entry_unittest.cc @@ -1,3 +1,5 @@ +// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC") +// // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. diff --git a/src/lib/cc/tests/Makefile.am b/src/lib/cc/tests/Makefile.am index 08b7f33a5a..b891628e2b 100644 --- a/src/lib/cc/tests/Makefile.am +++ b/src/lib/cc/tests/Makefile.am @@ -16,6 +16,9 @@ endif CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = if HAVE_GTEST TESTS += run_unittests @@ -26,7 +29,7 @@ run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS) # We need to put our libs first, in case gtest (or any dependency, really) # is installed in the same location as a different version of bind10 -# Otherwise the linker may not use the source tree libs +# Otherwise the linker may not use the source tree libs run_unittests_LDADD = $(top_builddir)/src/lib/cc/libcc.la run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la diff --git a/src/lib/config/Makefile.am b/src/lib/config/Makefile.am index 500ff1223c..518d4979c1 100644 --- a/src/lib/config/Makefile.am +++ b/src/lib/config/Makefile.am @@ -17,6 +17,11 @@ libcfgclient_la_SOURCES 
+= module_spec.h module_spec.cc libcfgclient_la_SOURCES += ccsession.cc ccsession.h libcfgclient_la_SOURCES += config_log.h config_log.cc +libcfgclient_la_LIBADD = $(top_builddir)/src/lib/cc/libcc.la +libcfgclient_la_LIBADD += $(top_builddir)/src/lib/exceptions/libexceptions.la + +libcfgclient_la_LDFLAGS = -no-undefined -version-info 1:0:1 + nodist_libcfgclient_la_SOURCES = config_messages.h config_messages.cc # The message file should be in the distribution. diff --git a/src/lib/config/ccsession.cc b/src/lib/config/ccsession.cc index 63fa4cdcf9..d4c6653b6c 100644 --- a/src/lib/config/ccsession.cc +++ b/src/lib/config/ccsession.cc @@ -601,6 +601,11 @@ ModuleCCSession::checkCommand() { ConstElementPtr cmd, routing, data; if (session_.group_recvmsg(routing, data, true)) { + // In case the message is wanted asynchronously, it gets used. + if (checkAsyncRecv(routing, data)) { + return (0); + } + /* ignore result messages (in case we're out of sync, to prevent * pingpongs */ if (data->getType() != Element::map || data->contains("result")) { @@ -764,5 +769,95 @@ ModuleCCSession::sendStopping() { session_.group_sendmsg(cmd, "ConfigManager"); } +class ModuleCCSession::AsyncRecvRequest { +public: // Everything is public here, as the definition is hidden anyway + AsyncRecvRequest(const AsyncRecvCallback& cb, const string& rcp, int sq, + bool reply) : + callback(cb), + recipient(rcp), + seq(sq), + is_reply(reply) + {} + const AsyncRecvCallback callback; + const string recipient; + const int seq; + const bool is_reply; +}; + +ModuleCCSession::AsyncRecvRequestID +ModuleCCSession::groupRecvMsgAsync(const AsyncRecvCallback& callback, + bool is_reply, int seq, + const string& recipient) { + // This just stores the request, the handling is done in checkCommand() + + // push_back would be simpler, but it does not return the iterator we need + return (async_recv_requests_.insert(async_recv_requests_.end(), + AsyncRecvRequest(callback, recipient, + seq, is_reply))); +} + +bool 
+ModuleCCSession::checkAsyncRecv(const ConstElementPtr& envelope, + const ConstElementPtr& msg) +{ + for (AsyncRecvRequestID request(async_recv_requests_.begin()); + request != async_recv_requests_.end(); ++request) { + // Just go through all the requests and look for a matching one + if (requestMatch(*request, envelope)) { + // We want the request to be still alive at the time we + // call the callback. But we need to remove it on an exception + // too, so we use the class. If just C++ had the finally keyword. + class RequestDeleter { + public: + RequestDeleter(AsyncRecvRequests& requests, + AsyncRecvRequestID& request) : + requests_(requests), + request_(request) + { } + ~RequestDeleter() { + requests_.erase(request_); + } + private: + AsyncRecvRequests& requests_; + AsyncRecvRequestID& request_; + }; + RequestDeleter deleter(async_recv_requests_, request); + // Call the callback + request->callback(envelope, msg, request); + return (true); + } + } + return (false); +} + +bool +ModuleCCSession::requestMatch(const AsyncRecvRequest& request, + const ConstElementPtr& envelope) const +{ + if (request.is_reply != envelope->contains("reply")) { + // Wrong type of message + return (false); + } + if (request.is_reply && + (request.seq == -1 || + request.seq == envelope->get("reply")->intValue())) { + // This is the correct reply + return (true); + } + if (!request.is_reply && + (request.recipient.empty() || + request.recipient == envelope->get("group")->stringValue())) { + // This is the correct command + return (true); + } + // If nothing from the above, we don't want it + return (false); +} + +void +ModuleCCSession::cancelAsyncRecv(const AsyncRecvRequestID& id) { + async_recv_requests_.erase(id); +} + } } diff --git a/src/lib/config/ccsession.h b/src/lib/config/ccsession.h index 059968c71a..e96a33d44b 100644 --- a/src/lib/config/ccsession.h +++ b/src/lib/config/ccsession.h @@ -15,13 +15,16 @@ #ifndef __CCSESSION_H #define __CCSESSION_H 1 -#include - #include #include + 
#include #include +#include +#include +#include + namespace isc { namespace config { @@ -358,15 +361,140 @@ public: return (session_.group_recvmsg(envelope, msg, nonblock, seq)); }; + /// \brief Forward declaration of internal data structure. + /// + /// This holds information about one asynchronous request to receive + /// a message. It is declared as public to allow declaring other derived + /// types, but without showing the internal representation. + class AsyncRecvRequest; + + /// \brief List of all requests for asynchronous reads. + typedef std::list AsyncRecvRequests; + + /// \brief Identifier of single request for asynchronous read. + typedef AsyncRecvRequests::iterator AsyncRecvRequestID; + + /// \brief Callback which is called when an asynchronous receive finishes. + /// + /// This is the callback used by groupRecvMsgAsync() function. It is called + /// when a matching message arrives. It receives following parameters when + /// called: + /// - The envelope of the message + /// - The message itself + /// - The ID of the request, as returned by corresponding groupRecvMsgAsync + /// call. + /// + /// It is possible to throw exceptions from the callback, but they will not + /// be caught and they will get propagated out through the checkCommand() + /// call. This, if not handled on higher level, will likely terminate the + /// application. However, the ModuleCCSession internals will be in + /// well-defined state after the call (both the callback and the message + /// will be removed from the queues as already called). + typedef boost::function3 + AsyncRecvCallback; + + /// \brief Receive a message from the CC session asynchronously. + /// + /// This registers a callback which is called when a matching message + /// is received. This message returns immediately. 
+ /// + /// Once a matching message arrives, the callback is called with the + /// envelope of the message, the message itself and the result of this + /// function call (which might be useful for identifying which of many + /// events the recipient is waiting for this is). This makes the callback + /// used and is not called again even if a message that would match + /// arrives later (this is a single-shot callback). + /// + /// The callback is never called from within this function. Even if there + /// are queued messages, the callback would be called once checkCommand() + /// is invoked (possibly from start() or the constructor). + /// + /// The matching is as follows. If is_reply is true, only replies are + /// considered. In that case, if seq is -1, any reply is accepted. If + /// it is something else than -1, only the reply with matching seq is + /// taken. This may be used to receive replies to commands + /// asynchronously. + /// + /// In case the is_reply is false, the function looks for command messages. + /// The seq parameter is ignored, but the recipient one is considered. If + /// it is an empty string, any command is taken. If it is non-empty, only + /// commands addressed to the recipient channel (eg. group - instance is + /// ignored for now) are taken. This can be used to receive foreign commands + /// or notifications. In such case, it might be desirable to call the + /// groupRecvMsgAsync again from within the callback, to receive any future + /// commands or events of the same type. + /// + /// The interaction with other receiving functions is slightly complicated. + /// The groupRecvMsg call takes precedence. If the message matches its + /// parameters, it steals the message and no callback matching it as well + /// is called. Then, all the queued asynchronous receives are considered, + /// with the oldest active ones taking precedence (they work as FIFO). + /// If none of them matches, generic command and config handling takes + /// place. 
If it is not handled by that, the message is dropped. However, + /// it is better if there's just one place that wants to receive each given + /// message. + /// + /// \exception std::bad_alloc if there isn't enough memory to store the + /// callback. + /// \param callback is the function to be called when a matching message + /// arrives. + /// \param is_reply specifies if the desired message should be a reply or + /// a command. + /// \param seq specifies the reply sequence number in case a reply is + /// desired. The default -1 means any reply is OK. + /// \param recipient is the CC channel to which the command should be + /// addressed to match (in case is_reply is false). Empty means any + /// command is good one. + /// \return An identifier of the request. This will be passed to the + /// callback or can be used to cancel the request by cancelAsyncRecv. + /// \todo Decide what to do with instance and what was it meant for anyway. + AsyncRecvRequestID groupRecvMsgAsync(const AsyncRecvCallback& callback, + bool is_reply, int seq = -1, + const std::string& recipient = + std::string()); + + /// \brief Removes yet unused request for asynchronous receive. + /// + /// This function cancels a request previously queued by + /// groupRecvMsgAsync(). You may use it only before the callback was + /// already triggered. If you call it with an ID of callback that + /// already happened or was already canceled, the behaviour is undefined + /// (but something like a crash is very likely, as the function removes + /// an item from a list and this would be removing it from a list that + /// does not contain the item). + /// + /// It is important to cancel requests that are no longer going to happen + /// for some reason, as the request would occupy memory forever. + /// + /// \param id The id of request as returned by groupRecvMsgAsync. 
+ void cancelAsyncRecv(const AsyncRecvRequestID& id); + private: ModuleSpec readModuleSpecification(const std::string& filename); void startCheck(); void sendStopping(); + /// \brief Check if the message is wanted by asynchronous read + /// + /// It checks if any of the previously queued requests match + /// the message. If so, the callback is dispatched and removed. + /// + /// \param envelope The envelope of the message. + /// \param msg The actual message data. + /// \return True if the message was used for a callback, false + /// otherwise. + bool checkAsyncRecv(const data::ConstElementPtr& envelope, + const data::ConstElementPtr& msg); + /// \brief Checks if a message with this envelope matches the request + bool requestMatch(const AsyncRecvRequest& request, + const data::ConstElementPtr& envelope) const; bool started_; std::string module_name_; isc::cc::AbstractSession& session_; ModuleSpec module_specification_; + AsyncRecvRequests async_recv_requests_; isc::data::ConstElementPtr handleConfigUpdate( isc::data::ConstElementPtr new_config); diff --git a/src/lib/config/module_spec.cc b/src/lib/config/module_spec.cc index a9310708f0..98a991da69 100644 --- a/src/lib/config/module_spec.cc +++ b/src/lib/config/module_spec.cc @@ -136,7 +136,7 @@ check_statistics_item_list(ConstElementPtr spec) { && item->contains("item_default")) { if(!check_format(item->get("item_default"), item->get("item_format"))) { - isc_throw(ModuleSpecError, + isc_throw(ModuleSpecError, "item_default not valid type of item_format"); } } diff --git a/src/lib/config/tests/Makefile.am b/src/lib/config/tests/Makefile.am index 2f1fc6fc1b..1d9bad833b 100644 --- a/src/lib/config/tests/Makefile.am +++ b/src/lib/config/tests/Makefile.am @@ -14,6 +14,9 @@ CLEANFILES = *.gcno *.gcda noinst_LTLIBRARIES = libfake_session.la libfake_session_la_SOURCES = fake_session.h fake_session.cc +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = if HAVE_GTEST TESTS += run_unittests diff 
/// \brief Test fixture for asynchronous receiving of messages.
///
/// This is an extension to the CCSessionTest. It would be possible to add
/// the functionality to the CCSessionTest, but it is going to be used
/// only by few tests and is non-trivial, so it is placed to a separate
/// sub-class.
class AsyncReceiveCCSessionTest : public CCSessionTest {
protected:
    AsyncReceiveCCSessionTest() :
        mccs_(ccspecfile("spec29.spec"), session, NULL, NULL, false, false),
        msg_(el("{\"result\": [0]}")),
        next_flag_(0)
    {
        // This is just to make sure the messages get through the fake
        // session.
        session.subscribe("test group");
        session.subscribe("other group");
        session.subscribe("");
        // Get rid of all unrelated stray messages
        while (session.getMsgQueue()->size() > 0) {
            session.getMsgQueue()->remove(0);
        }
    }
    /// \brief Convenience function to queue a request to get a command
    ///     message.
    ///
    /// Each registration is tagged with the next sequential flag, so the
    /// tests can tell which registration fired and in what order.
    ModuleCCSession::AsyncRecvRequestID
    registerCommand(const string& recipient)
    {
        return (mccs_.groupRecvMsgAsync(
            bind(&AsyncReceiveCCSessionTest::callback, this, next_flag_ ++, _1,
                 _2, _3), false, -1, recipient));
    }
    /// \brief Convenience function to queue a request to get a reply
    ///     message.
    ModuleCCSession::AsyncRecvRequestID
    registerReply(int seq)
    {
        return (mccs_.groupRecvMsgAsync(
            bind(&AsyncReceiveCCSessionTest::callback, this, next_flag_ ++, _1,
                 _2, _3), true, seq));
    }
    /// \brief Check the next called callback was with this flag
    void called(int flag) {
        // The FIFO of fired flags must be non-empty and its head must be
        // the expected flag.
        ASSERT_FALSE(called_.empty());
        EXPECT_EQ(flag, *called_.begin());
        called_.pop_front();
    }
    /// \brief Checks that no more callbacks were called.
    void nothingCalled() {
        EXPECT_TRUE(called_.empty());
    }
    /// \brief The tested session.
    ModuleCCSession mccs_;
    /// \brief The value of message on the last called callback.
    ConstElementPtr last_msg_;
    /// \brief A message that can be used
    ConstElementPtr msg_;
    // Shared part of the simpleCommand and similar tests.
    void commandTest(const string& group) {
        // Push the message inside
        session.addMessage(msg_, "test group", "");
        EXPECT_TRUE(mccs_.hasQueuedMsgs());
        // Register the callback
        registerCommand(group);
        // But the callback should not be called yet
        // (even if the message is there).
        nothingCalled();
        // But when we call the checkCommand(), it should be called.
        mccs_.checkCommand();
        called(0);
        EXPECT_EQ(msg_, last_msg_);
        // But only once
        nothingCalled();
        // And the message should be eaten
        EXPECT_FALSE(mccs_.hasQueuedMsgs());
        // The callback should have been eaten as well, inserting another
        // message will not invoke it again
        session.addMessage(msg_, "test group", "");
        mccs_.checkCommand();
        nothingCalled();
    }
    /// \brief Shared part of the simpleResponse and wildcardResponse tests.
    void responseTest(int seq) {
        // Push the message inside
        session.addMessage(msg_, "", "", 1);
        EXPECT_TRUE(mccs_.hasQueuedMsgs());
        // Register the callback
        registerReply(seq);
        // But the callback should not be called yet
        // (even if the message is there).
        nothingCalled();
        // But when we call the checkCommand(), it should be called.
        mccs_.checkCommand();
        called(0);
        EXPECT_EQ(msg_, last_msg_);
        // But only once
        nothingCalled();
        // And the message should be eaten
        EXPECT_FALSE(mccs_.hasQueuedMsgs());
        // The callback should have been eaten as well, inserting another
        // message will not invoke it again
        session.addMessage(msg_, "test group", "");
        mccs_.checkCommand();
        nothingCalled();
    }
    /// \brief Shared part of the noMatch* tests
    ///
    /// Queues a non-matching message and checks the registered callback is
    /// never invoked, while checkCommand() still consumes the message.
    void noMatchTest(int seq, int wanted_seq, bool is_reply) {
        // Push the message inside
        session.addMessage(msg_, "other group", "", seq);
        EXPECT_TRUE(mccs_.hasQueuedMsgs());
        // Register the callback
        if (is_reply) {
            registerReply(wanted_seq);
        } else {
            registerCommand("test group");
        }
        // But the callback should not be called yet
        // (even if the message is there).
        nothingCalled();
        // And even not now, because it does not match.
        mccs_.checkCommand();
        nothingCalled();
        // And the message should be eaten by the checkCommand
        EXPECT_FALSE(mccs_.hasQueuedMsgs());
    }
private:
    /// \brief The next flag to be handed out
    int next_flag_;
    /// \brief Flags of callbacks already called (as FIFO)
    list<int> called_;
    /// \brief This is the callback registered to the tested groupRecvMsgAsync
    ///     function.
    void callback(int store_flag, const ConstElementPtr&,
                  const ConstElementPtr& msg,
                  const ModuleCCSession::AsyncRecvRequestID&)
    {
        // Record which registration fired and keep the message so the
        // test body can inspect it.
        called_.push_back(store_flag);
        last_msg_ = msg;
    }
};

// Test we can receive a command, without anything fancy yet
TEST_F(AsyncReceiveCCSessionTest, simpleCommand) {
    commandTest("test group");
}

// Test we can receive a "wildcard" command - without specifying the
// group to subscribe to. Very similar to simpleCommand test.
TEST_F(AsyncReceiveCCSessionTest, wildcardCommand) {
    commandTest("");
}

// Very similar to simpleCommand, but with a response message
TEST_F(AsyncReceiveCCSessionTest, simpleResponse) {
    responseTest(1);
}

// Matching a response message with wildcard
TEST_F(AsyncReceiveCCSessionTest, wildcardResponse) {
    responseTest(-1);
}

// Check that a wrong command message is not matched
TEST_F(AsyncReceiveCCSessionTest, noMatchCommand) {
    noMatchTest(-1, -1, false);
}

// Check that a wrong response message is not matched
TEST_F(AsyncReceiveCCSessionTest, noMatchResponse) {
    noMatchTest(2, 3, true);
}

// Check that a command will not match on a reply check and vice versa
TEST_F(AsyncReceiveCCSessionTest, noMatchResponseAgainstCommand) {
    // Send a command and check it is not matched as a response
    noMatchTest(-1, -1, true);
}

TEST_F(AsyncReceiveCCSessionTest, noMatchCommandAgainstResponse) {
    noMatchTest(2, -1, false);
}

// We check for command several times before the message actually arrives.
TEST_F(AsyncReceiveCCSessionTest, delayedCallback) {
    // First, register the callback
    registerReply(1);
    // And see it is not called, because the message is not there yet
    EXPECT_FALSE(mccs_.hasQueuedMsgs());
    for (size_t i(0); i < 100; ++ i) {
        mccs_.checkCommand();
        EXPECT_FALSE(mccs_.hasQueuedMsgs());
        nothingCalled();
    }
    // Now the message finally arrives
    session.addMessage(msg_, "", "", 1);
    EXPECT_TRUE(mccs_.hasQueuedMsgs());
    // And now, the callback is happily triggered.
    mccs_.checkCommand();
    called(0);
    EXPECT_EQ(msg_, last_msg_);
    // But only once
    nothingCalled();
}

// See that if we put multiple messages inside, and request some callbacks,
// the callbacks are called in the order of messages, not in the order they
// were registered.
TEST_F(AsyncReceiveCCSessionTest, outOfOrder) {
    // First, put some messages there
    session.addMessage(msg_, "", "", 1);
    session.addMessage(msg_, "test group", "");
    session.addMessage(msg_, "other group", "");
    session.addMessage(msg_, "", "", 2);
    session.addMessage(msg_, "", "", 3);
    session.addMessage(msg_, "", "", 4);
    // Now register some callbacks
    registerReply(13); // Will not be called
    registerCommand("other group"); // Matches 3rd message
    registerReply(2); // Matches 4th message
    registerCommand(""); // Matches the 2nd message
    registerCommand("test group"); // Will not be called
    registerReply(-1); // Matches the 1st message
    registerReply(-1); // Matches the 5th message
    // Process all messages there
    while (mccs_.hasQueuedMsgs()) {
        mccs_.checkCommand();
    }
    // These are the numbers of callbacks in the order of messages
    called(5);
    called(3);
    called(1);
    called(2);
    called(6);
    // The last message doesn't trigger anything, so nothing more is called
    nothingCalled();
}

// We first add, then remove the callback again and check that nothing is
// matched.
TEST_F(AsyncReceiveCCSessionTest, cancel) {
    // Add the callback
    ModuleCCSession::AsyncRecvRequestID request(registerReply(1));
    // Add corresponding message
    session.addMessage(msg_, "", "", 1);
    EXPECT_TRUE(mccs_.hasQueuedMsgs());
    // And now, remove the callback again
    mccs_.cancelAsyncRecv(request);
    // And see that Nothing Happens(TM)
    mccs_.checkCommand();
    EXPECT_FALSE(mccs_.hasQueuedMsgs());
    nothingCalled();
}

// We add multiple requests and cancel only one of them to see the rest
// is unaffected.
TEST_F(AsyncReceiveCCSessionTest, cancelSome) {
    // Register few callbacks
    registerReply(1);
    ModuleCCSession::AsyncRecvRequestID request(registerCommand(""));
    registerCommand("test group");
    // Put some messages there
    session.addMessage(msg_, "test group", "");
    session.addMessage(msg_, "", "", 1);
    // Cancel the second callback. Therefore the first message will be matched
    // by the third callback, not by the second.
    mccs_.cancelAsyncRecv(request);
    // Now, process the messages
    mccs_.checkCommand();
    mccs_.checkCommand();
    // And see how they matched
    called(2);
    called(0);
    nothingCalled();
}
(c_m->get(3)->intValue() != -1) { + new_env->set("reply", c_m->get(3)); + } env = new_env; msg = c_m->get(2); to_remove = c_m; @@ -207,7 +210,7 @@ FakeSession::reply(ConstElementPtr envelope, ConstElementPtr newmsg) { bool FakeSession::hasQueuedMsgs() const { - return (false); + return (msg_queue_ && msg_queue_->size() > 0); } ConstElementPtr @@ -228,12 +231,13 @@ FakeSession::getFirstMessage(std::string& group, std::string& to) const { void FakeSession::addMessage(ConstElementPtr msg, const std::string& group, - const std::string& to) + const std::string& to, int seq) { ElementPtr m_el = Element::createList(); m_el->add(Element::create(group)); m_el->add(Element::create(to)); m_el->add(msg); + m_el->add(Element::create(seq)); if (!msg_queue_) { msg_queue_ = Element::createList(); } diff --git a/src/lib/config/tests/fake_session.h b/src/lib/config/tests/fake_session.h index 79ff174110..c91b5199e3 100644 --- a/src/lib/config/tests/fake_session.h +++ b/src/lib/config/tests/fake_session.h @@ -74,7 +74,7 @@ public: isc::data::ConstElementPtr getFirstMessage(std::string& group, std::string& to) const; void addMessage(isc::data::ConstElementPtr, const std::string& group, - const std::string& to); + const std::string& to, int seq = -1); bool haveSubscription(const std::string& group, const std::string& instance); bool haveSubscription(const isc::data::ConstElementPtr group, diff --git a/src/lib/cryptolink/crypto_hmac.cc b/src/lib/cryptolink/crypto_hmac.cc index 277b0365cd..c1bbfa865b 100644 --- a/src/lib/cryptolink/crypto_hmac.cc +++ b/src/lib/cryptolink/crypto_hmac.cc @@ -23,6 +23,8 @@ #include #include +#include + namespace { const char* getBotanHashAlgorithmName(isc::cryptolink::HashAlgorithm algorithm) { @@ -155,7 +157,7 @@ public: if (output_size > len) { output_size = len; } - memcpy(result, b_result.begin(), output_size); + std::memcpy(result, b_result.begin(), output_size); } catch (const Botan::Exception& exc) { isc_throw(isc::cryptolink::LibraryError, 
exc.what()); } diff --git a/src/lib/cryptolink/tests/Makefile.am b/src/lib/cryptolink/tests/Makefile.am index fbe1bad350..be2f4d6f05 100644 --- a/src/lib/cryptolink/tests/Makefile.am +++ b/src/lib/cryptolink/tests/Makefile.am @@ -10,6 +10,9 @@ endif CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = if HAVE_GTEST TESTS += run_unittests diff --git a/src/lib/datasrc/.gitignore b/src/lib/datasrc/.gitignore index 05c761ec89..206ddca6d9 100644 --- a/src/lib/datasrc/.gitignore +++ b/src/lib/datasrc/.gitignore @@ -2,3 +2,4 @@ /datasrc_messages.h /datasrc_config.h /datasrc_config.h.pre +/static.zone diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am index 2cdb8ea378..9a4d733f91 100644 --- a/src/lib/datasrc/Makefile.am +++ b/src/lib/datasrc/Makefile.am @@ -12,8 +12,13 @@ pkglibdir = $(libexecdir)/@PACKAGE@/backends datasrc_config.h: datasrc_config.h.pre $(SED) -e "s|@@PKGLIBDIR@@|$(pkglibdir)|" datasrc_config.h.pre >$@ +static.zone: static.zone.pre + $(SED) -e "s|@@VERSION_STRING@@|$(PACKAGE_STRING)|" $(srcdir)/static.zone.pre >$@ + $(SED) -e 's/\(.*\)/AUTHORS.BIND. 
0 CH TXT "\1"/' $(top_srcdir)/AUTHORS >>$@ + CLEANFILES = *.gcno *.gcda datasrc_messages.h datasrc_messages.cc CLEANFILES += datasrc_config.h +CLEANFILES += static.zone lib_LTLIBRARIES = libdatasrc.la libdatasrc_la_SOURCES = data_source.h data_source.cc @@ -30,10 +35,11 @@ libdatasrc_la_SOURCES += logger.h logger.cc libdatasrc_la_SOURCES += client.h iterator.h libdatasrc_la_SOURCES += database.h database.cc libdatasrc_la_SOURCES += factory.h factory.cc +libdatasrc_la_SOURCES += client_list.h client_list.cc nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc libdatasrc_la_LDFLAGS = -no-undefined -version-info 1:0:1 -pkglib_LTLIBRARIES = sqlite3_ds.la memory_ds.la +pkglib_LTLIBRARIES = sqlite3_ds.la memory_ds.la static_ds.la sqlite3_ds_la_SOURCES = sqlite3_accessor.h sqlite3_accessor.cc sqlite3_ds_la_SOURCES += sqlite3_accessor_link.cc @@ -49,6 +55,12 @@ memory_ds_la_LDFLAGS = -module -avoid-version memory_ds_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la memory_ds_la_LIBADD += libdatasrc.la +static_ds_la_SOURCES = memory_datasrc.h memory_datasrc.cc +static_ds_la_SOURCES += static_datasrc_link.cc +static_ds_la_LDFLAGS = -module -avoid-version +static_ds_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la +static_ds_la_LIBADD += libdatasrc.la + libdatasrc_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la libdatasrc_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la libdatasrc_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la @@ -59,4 +71,7 @@ BUILT_SOURCES = datasrc_config.h datasrc_messages.h datasrc_messages.cc datasrc_messages.h datasrc_messages.cc: Makefile datasrc_messages.mes $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/datasrc_messages.mes -EXTRA_DIST = datasrc_messages.mes +EXTRA_DIST = datasrc_messages.mes static.zone.pre + +zonedir = $(pkgdatadir) +zone_DATA = static.zone diff --git a/src/lib/datasrc/client.h b/src/lib/datasrc/client.h index 24c88508ce..dab081f38d 
100644 --- a/src/lib/datasrc/client.h +++ b/src/lib/datasrc/client.h @@ -362,6 +362,22 @@ public: virtual std::pair getJournalReader(const isc::dns::Name& zone, uint32_t begin_serial, uint32_t end_serial) const = 0; + + /// Return the number of zones currently known to this datasource + /// + /// This is an optional convenience method, currently only implemented + /// by the InMemory datasource. By default, it throws NotImplemented + /// + /// \exception NotImplemented Thrown if this method is not supported + /// by the datasource + /// + /// \note This is a tentative API, and this method may likely to be + /// removed in the near future. + /// \return The number of zones known to this datasource + virtual unsigned int getZoneCount() const { + isc_throw(isc::NotImplemented, + "Data source doesn't support getZoneCount"); + } }; } } diff --git a/src/lib/datasrc/client_list.cc b/src/lib/datasrc/client_list.cc new file mode 100644 index 0000000000..549b2168fe --- /dev/null +++ b/src/lib/datasrc/client_list.cc @@ -0,0 +1,162 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. 
+ +#include "client_list.h" +#include "client.h" +#include "factory.h" + +#include +#include + +using namespace isc::data; +using namespace std; + +namespace isc { +namespace datasrc { + +void +ConfigurableClientList::configure(const Element& config, bool) { + // TODO: Implement the cache + // TODO: Implement recycling from the old configuration. + size_t i(0); // Outside of the try to be able to access it in the catch + try { + vector new_data_sources; + for (; i < config.size(); ++i) { + // Extract the parameters + const ConstElementPtr dconf(config.get(i)); + const ConstElementPtr typeElem(dconf->get("type")); + if (typeElem == ConstElementPtr()) { + isc_throw(ConfigurationError, "Missing the type option in " + "data source no " << i); + } + const string type(typeElem->stringValue()); + ConstElementPtr paramConf(dconf->get("params")); + if (paramConf == ConstElementPtr()) { + paramConf.reset(new NullElement()); + } + // TODO: Special-case the master files type. + // Ask the factory to create the data source for us + const DataSourcePair ds(this->getDataSourceClient(type, + paramConf)); + // And put it into the vector + new_data_sources.push_back(DataSourceInfo(ds.first, ds.second)); + } + // If everything is OK up until now, we have the new configuration + // ready. So just put it there and let the old one die when we exit + // the scope. + data_sources_.swap(new_data_sources); + } catch (const TypeError& te) { + isc_throw(ConfigurationError, "Malformed configuration at data source " + "no. " << i << ": " << te.what()); + } +} + +ClientList::FindResult +ConfigurableClientList::find(const dns::Name& name, bool want_exact_match, + bool) const +{ + // Nothing found yet. + // + // We have this class as a temporary storage, as the FindResult can't be + // assigned. 
+ struct MutableResult { + MutableResult() : + datasrc_client(NULL), + matched_labels(0), + matched(false) + {} + DataSourceClient* datasrc_client; + ZoneFinderPtr finder; + uint8_t matched_labels; + bool matched; + operator FindResult() const { + // Conversion to the right result. If we return this, there was + // a partial match at best. + return (FindResult(datasrc_client, finder, false)); + } + } candidate; + + BOOST_FOREACH(const DataSourceInfo& info, data_sources_) { + // TODO: Once we have support for the caches, consider them too here + // somehow. This would probably get replaced by a function, that + // checks if there's a cache available, if it is, checks the loaded + // zones and zones expected to be in the real data source. If it is + // the cached one, provide the cached one. If it is in the external + // data source, use the datasource and don't provide the finder yet. + const DataSourceClient::FindResult result( + info.data_src_client_->findZone(name)); + switch (result.code) { + case result::SUCCESS: + // If we found an exact match, we have no hope to getting + // a better one. Stop right here. + + // TODO: In case we have only the datasource and not the finder + // and the need_updater parameter is true, get the zone there. + return (FindResult(info.data_src_client_, result.zone_finder, + true)); + case result::PARTIALMATCH: + if (!want_exact_match) { + // In case we have a partial match, check if it is better + // than what we have. If so, replace it. + // + // We don't need the labels at the first partial match, + // we have nothing to compare with. So we don't get it + // (as a performance) and hope we will not need it at all. + const uint8_t labels(candidate.matched ? + result.zone_finder->getOrigin().getLabelCount() : 0); + if (candidate.matched && candidate.matched_labels == 0) { + // But if the hope turns out to be false, we need to + // compute it for the first match anyway. 
+ candidate.matched_labels = candidate.finder-> + getOrigin().getLabelCount(); + } + if (labels > candidate.matched_labels || + !candidate.matched) { + // This one is strictly better. Replace it. + candidate.datasrc_client = info.data_src_client_; + candidate.finder = result.zone_finder; + candidate.matched_labels = labels; + candidate.matched = true; + } + } + break; + default: + // Nothing found, nothing to do. + break; + } + } + + // TODO: In case we have only the datasource and not the finder + // and the need_updater parameter is true, get the zone there. + + // Return the partial match we have. In case we didn't want a partial + // match, this surely contains the original empty result. + return (candidate); +} + +// NOTE: This function is not tested, it would be complicated. However, the +// purpose of the function is to provide a very thin wrapper to be able to +// replace the call to DataSourceClientContainer constructor in tests. +ConfigurableClientList::DataSourcePair +ConfigurableClientList::getDataSourceClient(const string& type, + const ConstElementPtr& + configuration) +{ + DataSourceClientContainerPtr + container(new DataSourceClientContainer(type, configuration)); + return (DataSourcePair(&container->getInstance(), container)); +} + +} +} diff --git a/src/lib/datasrc/client_list.h b/src/lib/datasrc/client_list.h new file mode 100644 index 0000000000..599dca8217 --- /dev/null +++ b/src/lib/datasrc/client_list.h @@ -0,0 +1,289 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. 
IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#ifndef DATASRC_CONTAINER_H +#define DATASRC_CONTAINER_H + +#include +#include +#include + +#include +#include +#include + +namespace isc { +namespace datasrc { + +class ZoneFinder; +typedef boost::shared_ptr ZoneFinderPtr; +class DataSourceClient; +typedef boost::shared_ptr DataSourceClientPtr; +class DataSourceClientContainer; +typedef boost::shared_ptr + DataSourceClientContainerPtr; + +/// \brief The list of data source clients. +/// +/// The purpose of this class is to hold several data source clients and search +/// through them to find one containing a zone best matching a request. +/// +/// All the data source clients should be for the same class. If you need +/// to handle multiple classes, you need to create multiple separate lists. +/// +/// This is an abstract base class. It is not expected we would use multiple +/// implementation inside the servers (but it is not forbidden either), we +/// have it to allow easy testing. It is possible to create a mock-up class +/// instead of creating a full-blown configuration. The real implementation +/// is the ConfigurableClientList. +class ClientList : public boost::noncopyable { +protected: + /// \brief Constructor. + /// + /// It is protected to prevent accidental creation of the abstract base + /// class. + ClientList() {} +public: + /// \brief Virtual destructor + virtual ~ClientList() {} + /// \brief Structure holding the (compound) result of find. + /// + /// As this is read-only structure, we don't bother to create accessors. + /// Instead, all the member variables are defined as const and can be + /// accessed directly. + struct FindResult { + /// \brief Constructor. 
+ /// + /// It simply fills in the member variables according to the + /// parameters. See the member descriptions for their meaning. + FindResult(DataSourceClient* dsrc_client, const ZoneFinderPtr& finder, + bool exact_match) : + dsrc_client_(dsrc_client), + finder_(finder), + exact_match_(exact_match) + {} + + /// \brief Negative answer constructor. + /// + /// This conscructs a result for negative answer. Both pointers are + /// NULL, and exact_match_ is false. + FindResult() : + dsrc_client_(NULL), + exact_match_(false) + {} + + /// \brief Comparison operator. + /// + /// It is needed for tests and it might be of some use elsewhere + /// too. + bool operator ==(const FindResult& other) const { + return (dsrc_client_ == other.dsrc_client_ && + finder_ == other.finder_ && + exact_match_ == other.exact_match_); + } + + /// \brief The found data source client. + /// + /// The client of the data source containing the best matching zone. + /// If no such data source exists, this is NULL pointer. + /// + /// Note that the pointer is valid only as long the ClientList which + /// returned the pointer is alive and was not reconfigured. The + /// ownership is preserved within the ClientList. + DataSourceClient* const dsrc_client_; + + /// \brief The finder for the requested zone. + /// + /// This is the finder corresponding to the best matching zone. + /// This may be NULL even in case the datasrc_ is something + /// else, depending on the find options. + /// + /// \see find + const ZoneFinderPtr finder_; + + /// \brief If the result is an exact match. + const bool exact_match_; + }; + + /// \brief Search for a zone through the data sources. + /// + /// This searches the contained data source clients for a one that best + /// matches the zone name. + /// + /// There are two expected usage scenarios. One is answering queries. In + /// this case, the zone finder is needed and the best matching superzone + /// of the searched name is needed. 
Therefore, the call would look like: + /// + /// \code FindResult result(list->find(queried_name)); + /// FindResult result(list->find(queried_name)); + /// if (result.datasrc_) { + /// createTheAnswer(result.finder_); + /// } else { + /// createNotAuthAnswer(); + /// } \endcode + /// + /// The other scenario is manipulating zone data (XfrOut, XfrIn, DDNS, + /// ...). In this case, the finder itself is not so important. However, + /// we need an exact match (if we want to manipulate zone data, we must + /// know exactly, which zone we are about to manipulate). Then the call + /// + /// \code FindResult result(list->find(zone_name, true, false)); + /// FindResult result(list->find(zone_name, true, false)); + /// if (result.datasrc_) { + /// ZoneUpdaterPtr updater(result.datasrc_->getUpdater(zone_name); + /// ... + /// } \endcode + /// + /// \param zone The name of the zone to look for. + /// \param want_exact_match If it is true, it returns only exact matches. + /// If the best possible match is partial, a negative result is + /// returned instead. It is possible the caller could check it and + /// act accordingly if the result would be partial match, but with this + /// set to true, the find might be actually faster under some + /// circumstances. + /// \param want_finder If this is false, the finder_ member of FindResult + /// might be NULL even if the corresponding data source is found. This + /// is because of performance, in some cases the finder is a side + /// result of the searching algorithm (therefore asking for it again + /// would be a waste), but under other circumstances it is not, so + /// providing it when it is not needed would also be wasteful. + /// + /// Other things are never the side effect of searching, therefore the + /// caller can get them explicitly (the updater, journal reader and + /// iterator). + /// \return A FindResult describing the data source and zone with the + /// longest match against the zone parameter. 
+ virtual FindResult find(const dns::Name& zone, + bool want_exact_match = false, + bool want_finder = true) const = 0; +}; + +/// \brief Shared pointer to the list. +typedef boost::shared_ptr ClientListPtr; +/// \brief Shared const pointer to the list. +typedef boost::shared_ptr ConstClientListPtr; + +/// \Concrete implementation of the ClientList, which is constructed based on +/// configuration. +/// +/// This is the implementation which is expected to be used in the servers. +/// However, it is expected most of the code will use it as the ClientList, +/// only the creation is expected to be direct. +/// +/// While it is possible to inherit this class, it is not expected to be +/// inherited except for tests. +class ConfigurableClientList : public ClientList { +public: + /// \brief Exception thrown when there's an error in configuration. + class ConfigurationError : public Exception { + public: + ConfigurationError(const char* file, size_t line, const char* what) : + Exception(file, line, what) + {} + }; + + /// \brief Sets the configuration. + /// + /// This fills the ClientList with data source clients corresponding to the + /// configuration. The data source clients are newly created or recycled + /// from previous configuration. + /// + /// If any error is detected, an exception is thrown and the current + /// configuration is preserved. + /// + /// \param configuration The JSON element describing the configuration to + /// use. + /// \param allow_cache If it is true, the 'cache' option of the + /// configuration is used and some zones are cached into an In-Memory + /// data source according to it. If it is false, it is ignored and + /// no In-Memory data sources are created. + /// \throw DataSourceError if there's a problem creating a data source + /// client. + /// \throw ConfigurationError if the configuration is invalid in some + /// sense. 
+ void configure(const data::Element& configuration, bool allow_cache); + + /// \brief Implementation of the ClientList::find. + virtual FindResult find(const dns::Name& zone, + bool want_exact_match = false, + bool want_finder = true) const; + + /// \brief This holds one data source client and corresponding information. + /// + /// \todo The content yet to be defined. + struct DataSourceInfo { + /// \brief Default constructor. + /// + /// Don't use directly. It is here so the structure can live in + /// a vector. + DataSourceInfo() : + data_src_client_(NULL) + {} + DataSourceInfo(DataSourceClient* data_src_client, + const DataSourceClientContainerPtr& container) : + data_src_client_(data_src_client), + container_(container) + {} + DataSourceClient* data_src_client_; + DataSourceClientContainerPtr container_; + }; + + /// \brief The collection of data sources. + typedef std::vector DataSources; +protected: + /// \brief The data sources held here. + /// + /// All our data sources are stored here. It is protected to let the + /// tests in. You should consider it private if you ever want to + /// derive this class (which is not really recommended anyway). + DataSources data_sources_; + + /// \brief Convenience type alias. + /// + /// \see getDataSource + typedef std::pair + DataSourcePair; + + /// \brief Create a data source client of given type and configuration. + /// + /// This is a thin wrapper around the DataSourceClientContainer + /// constructor. The function is here to make it possible for tests + /// to replace the DataSourceClientContainer with something else. + /// Also, derived classes could want to create the data source clients + /// in a different way, though inheriting this class is not recommended. + /// + /// The parameters are the same as of the constructor. + /// \return Pair containing both the data source client and the container. 
+ /// The container might be NULL in the derived class, it is + /// only stored so the data source client is properly destroyed when + /// not needed. However, in such case, it is the caller's + /// responsibility to ensure the data source client is deleted when + /// needed. + virtual DataSourcePair getDataSourceClient(const std::string& type, + const data::ConstElementPtr& + configuration); +public: + /// \brief Access to the data source clients. + /// + /// It can be used to examine the loaded list of data sources clients + /// directly. It is not known if it is of any use other than testing, but + /// it might be, so it is just made public (there's no real reason to + /// hide it). + const DataSources& getDataSources() const { return (data_sources_); } +}; + +} // namespace datasrc +} // namespace isc + +#endif // DATASRC_CONTAINER_H diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc index 7b271f1327..61dab39db9 100644 --- a/src/lib/datasrc/database.cc +++ b/src/lib/datasrc/database.cc @@ -450,7 +450,8 @@ DatabaseClient::Finder::findDelegationPoint(const isc::dns::Name& name, const size_t remove_labels = name.getLabelCount() - origin_label_count; // Go through all superdomains from the origin down searching for nodes - // that indicate a delegation (.e. NS or DNAME). + // that indicate a delegation (.e. NS or DNAME). Note that we only check + // pure superdomains; delegation on an exact match will be detected later. for (int i = remove_labels; i > 0; --i) { const Name superdomain(name.split(i)); @@ -810,12 +811,14 @@ DatabaseClient::Finder::findOnNameResult(const Name& name, const FoundIterator cni(found.second.find(RRType::CNAME())); const FoundIterator wti(found.second.find(type)); - if (!is_origin && (options & FIND_GLUE_OK) == 0 && + if (!is_origin && (options & FIND_GLUE_OK) == 0 && type != RRType::DS() && nsi != found.second.end()) { // A NS RRset was found at the domain we were searching for. 
As it is // not at the origin of the zone, it is a delegation and indicates that // this zone is not authoritative for the data. Just return the - // delegation information. + // delegation information, except: + // - when we are looking for glue records (FIND_GLUE_OK), or + // - when the query type is DS (which cancels the delegation) return (logAndCreateResult(name, wildname, type, DELEGATION, nsi->second, wild ? DATASRC_DATABASE_WILDCARD_NS : @@ -839,8 +842,6 @@ DatabaseClient::Finder::findOnNameResult(const Name& name, flags)); } else if (wti != found.second.end()) { bool any(type == RRType::ANY()); - isc::log::MessageID lid(wild ? DATASRC_DATABASE_WILDCARD_MATCH : - DATASRC_DATABASE_FOUND_RRSET); if (any) { // An ANY query, copy everything to the target instead of returning // directly. @@ -851,15 +852,32 @@ DatabaseClient::Finder::findOnNameResult(const Name& name, target->push_back(it->second); } } - lid = wild ? DATASRC_DATABASE_WILDCARD_ANY : - DATASRC_DATABASE_FOUND_ANY; + if (wild) { + LOG_DEBUG(logger, DBG_TRACE_DETAILED, + DATASRC_DATABASE_WILDCARD_ANY). + arg(accessor_->getDBName()).arg(name); + } else { + LOG_DEBUG(logger, DBG_TRACE_DETAILED, + DATASRC_DATABASE_FOUND_ANY). + arg(accessor_->getDBName()).arg(name); + } + } else { + if (wild) { + LOG_DEBUG(logger, DBG_TRACE_DETAILED, + DATASRC_DATABASE_WILDCARD_MATCH). + arg(accessor_->getDBName()).arg(*wildname). + arg(wti->second); + } else { + LOG_DEBUG(logger, DBG_TRACE_DETAILED, + DATASRC_DATABASE_FOUND_RRSET). + arg(accessor_->getDBName()).arg(wti->second); + } } // Found an RR matching the query, so return it. (Note that this // includes the case where we were explicitly querying for a CNAME and // found it. It also includes the case where we were querying for an // NS RRset and found it at the apex of the zone.) 
- return (logAndCreateResult(name, wildname, type, SUCCESS, - wti->second, lid, flags)); + return (ResultContext(SUCCESS, wti->second, flags)); } // If we get here, we have found something at the requested name but not @@ -1108,9 +1126,9 @@ DatabaseClient::Finder::findPreviousName(const Name& name) const { name.reverse().toText())); try { return (Name(str)); - } - catch (const isc::dns::NameParserException&) { - isc_throw(DataSourceError, "Bad name " + str + " from findPreviousName"); + } catch (const isc::dns::NameParserException&) { + isc_throw(DataSourceError, "Bad name " + str + + " from findPreviousName"); } } diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h index 8083322edf..8ad1c5b86f 100644 --- a/src/lib/datasrc/database.h +++ b/src/lib/datasrc/database.h @@ -743,8 +743,9 @@ public: /// \brief It returns the previous name in DNSSEC order. /// - /// This is used in DatabaseClient::findPreviousName and does more - /// or less the real work, except for working on strings. + /// Gets the previous name in the DNSSEC order. This can be used + /// to find the correct NSEC records for proving nonexistence + /// of domains. /// /// \param rname The name to ask for previous of, in reversed form. /// We use the reversed form (see isc::dns::Name::reverse), @@ -904,10 +905,6 @@ public: std::vector& target, const FindOptions options = FIND_DEFAULT); - /// \brief Implementation of ZoneFinder::findPreviousName method. - virtual isc::dns::Name findPreviousName(const isc::dns::Name& query) - const; - /// Look for NSEC3 for proving (non)existence of given name. /// /// See documentation in \c Zone. @@ -1108,6 +1105,10 @@ public: bool probed_; }; + /// \brief A simple wrapper for identifying the previous name + /// of the given name in the underlying database. + isc::dns::Name findPreviousName(const isc::dns::Name& name) const; + /// \brief Search result of \c findDelegationPoint(). 
/// /// This is a tuple combining the result of the search - a status code diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes index a9870d6fa0..7e3d5c2bdd 100644 --- a/src/lib/datasrc/datasrc_messages.mes +++ b/src/lib/datasrc/datasrc_messages.mes @@ -79,9 +79,12 @@ in the answer as a result. Debug information. A search in an database data source for NSEC3 that matches or covers the given name is being started. -% DATASRC_DATABASE_FINDNSEC3_COVER found a covering NSEC3 for %1: %2 +% DATASRC_DATABASE_FINDNSEC3_COVER found a covering NSEC3 for %1 at label count %2: %3 Debug information. An NSEC3 that covers the given name is found and -being returned. The found NSEC3 RRset is also displayed. +being returned. The found NSEC3 RRset is also displayed. When the shown label +count is smaller than that of the given name, the matching NSEC3 is for a +superdomain of the given name (see DATASRC_DATABSE_FINDNSEC3_TRYHASH). The +found NSEC3 RRset is also displayed. % DATASRC_DATABASE_FINDNSEC3_MATCH found a matching NSEC3 for %1 at label count %2: %3 Debug information. An NSEC3 that matches (a possibly superdomain of) @@ -157,7 +160,7 @@ A search in the database for RRs for the specified name, type and class has located RRs that match the name and class but not the type. DNSSEC information has been requested and returned. -% DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %5 +% DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2 The data returned by the database backend contained data for the given domain name, and it either matches the type or has a relevant type. The RRset that is returned is printed. @@ -276,7 +279,7 @@ nonterminal (e.g. there's nothing at *.example.com but something like subdomain.*.example.org, do exist: so *.example.org exists in the namespace but has no RRs assopciated with it). This will produce NXRRSET. 
-% DATASRC_DATABASE_WILDCARD_MATCH search in datasource %1 resulted in wildcard match at %5 with RRset %6 +% DATASRC_DATABASE_WILDCARD_MATCH search in datasource %1 resulted in wildcard match at %2 with RRset %3 The database doesn't contain directly matching name. When searching for a wildcard match, a wildcard record matching the name and type of the query was found. The data at this point is returned. diff --git a/src/lib/datasrc/factory.cc b/src/lib/datasrc/factory.cc index 35a79fe61c..82b4df97eb 100644 --- a/src/lib/datasrc/factory.cc +++ b/src/lib/datasrc/factory.cc @@ -23,6 +23,8 @@ #include +#include + #include #include diff --git a/src/lib/datasrc/factory.h b/src/lib/datasrc/factory.h index f3ca397598..2731f58775 100644 --- a/src/lib/datasrc/factory.h +++ b/src/lib/datasrc/factory.h @@ -15,14 +15,14 @@ #ifndef __DATA_SOURCE_FACTORY_H #define __DATA_SOURCE_FACTORY_H 1 -#include - #include #include -#include #include +#include +#include + namespace isc { namespace datasrc { @@ -134,6 +134,13 @@ private: /// /// extern "C" void destroyInstance(isc::data::DataSourceClient* instance); /// \endcode +/// +/// \note This class is relatively recent, and its design is not yet fully +/// formed. We may want to split this into an abstract base container +/// class, and a derived 'dyload' class, and perhaps then add non-dynamic +/// derived classes as well. Currently, the class is actually derived in some +/// of the tests, which is rather unclean (as this class as written is really +/// intended to be used directly). class DataSourceClientContainer : boost::noncopyable { public: /// \brief Constructor @@ -157,13 +164,13 @@ public: isc::data::ConstElementPtr config); /// \brief Destructor - ~DataSourceClientContainer(); + virtual ~DataSourceClientContainer(); /// \brief Accessor to the instance /// /// \return Reference to the DataSourceClient instance contained in this /// container. 
- DataSourceClient& getInstance() { return (*instance_); } + virtual DataSourceClient& getInstance() { return (*instance_); } private: DataSourceClient* instance_; @@ -171,6 +178,12 @@ private: LibraryContainer ds_lib_; }; +/// +/// Shared pointer type for datasource client containers +/// +typedef boost::shared_ptr + DataSourceClientContainerPtr; + } // end namespace datasrc } // end namespace isc #endif // DATA_SOURCE_FACTORY_H diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc index ea35cfab65..c5122bf832 100644 --- a/src/lib/datasrc/memory_datasrc.cc +++ b/src/lib/datasrc/memory_datasrc.cc @@ -212,11 +212,66 @@ public: // Identify the RBTree node that best matches the given name. // See implementation notes below. + // + // The caller should pass an empty node_path, and it will contain the + // search context (all ancestor nodes that the underlying RBTree search + // traverses, and how the search stops) for possible later use at the + // caller side. template ResultType findNode(const Name& name, + RBTreeNodeChain& node_path, ZoneFinder::FindOptions options) const; + + // A helper method for NSEC-signed zones. It searches the zone for + // the "closest" NSEC corresponding to the search context stored in + // node_path (it should contain sufficient information to identify the + // previous name of the query name in the zone). In some cases the + // immediate closest name may not have NSEC (when it's under a zone cut + // for glue records, or even when the zone is partly broken), so this + // method continues the search until it finds a name that has NSEC, + // and returns the one found first. Due to the prerequisite (see below), + // it should always succeed. + // + // node_path must store valid search context (in practice, it's expected + // to be set by findNode()); otherwise the underlying RBTree implementation + // throws. 
+ // + // If the zone is not considered NSEC-signed or DNSSEC records were not + // required in the original search context (specified in options), this + // method doesn't bother to find NSEC, and simply returns NULL. So, by + // definition of "NSEC-signed", when it really tries to find an NSEC it + // should succeed; there should be one at least at the zone origin. + ConstRBNodeRRsetPtr + getClosestNSEC(RBTreeNodeChain& node_path, + ZoneFinder::FindOptions options) const; }; +ConstRBNodeRRsetPtr +ZoneData::getClosestNSEC(RBTreeNodeChain& node_path, + ZoneFinder::FindOptions options) const +{ + if (!nsec_signed_ || (options & ZoneFinder::FIND_DNSSEC) == 0) { + return (ConstRBNodeRRsetPtr()); + } + + const DomainNode* prev_node; + while ((prev_node = domains_.previousNode(node_path)) != NULL) { + if (!prev_node->isEmpty()) { + const Domain::const_iterator found = + prev_node->getData()->find(RRType::NSEC()); + if (found != prev_node->getData()->end()) { + return (found->second); + } + } + } + // This must be impossible and should be an internal bug. + // See the description at the method declaration. + assert(false); + // Even though there is an assert here, strict compilers + // will still need some return value. + return (ConstRBNodeRRsetPtr()); +} + /// Maintain intermediate data specific to the search context used in /// \c find(). /// @@ -359,9 +414,10 @@ bool cutCallback(const DomainNode& node, FindState* state) { // the zone. 
template ResultType -ZoneData::findNode(const Name& name, ZoneFinder::FindOptions options) const { +ZoneData::findNode(const Name& name, RBTreeNodeChain& node_path, + ZoneFinder::FindOptions options) const +{ DomainNode* node = NULL; - RBTreeNodeChain node_path; FindState state((options & ZoneFinder::FIND_GLUE_OK) != 0); const DomainTree::Result result = @@ -387,9 +443,10 @@ ZoneData::findNode(const Name& name, ZoneFinder::FindOptions options) const { NameComparisonResult::SUPERDOMAIN) { // empty node, so NXRRSET LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_SUPER_STOP).arg(name); return (ResultType(ZoneFinder::NXRRSET, node, - ConstRBNodeRRsetPtr())); + getClosestNSEC(node_path, options))); } - if (node->getFlag(domain_flag::WILD)) { // maybe a wildcard + if (node->getFlag(domain_flag::WILD) && // maybe a wildcard, check only + (options & ZoneFinder::NO_WILDCARD) == 0) { // if not disabled. if (node_path.getLastComparisonResult().getRelation() == NameComparisonResult::COMMONANCESTOR && node_path.getLastComparisonResult().getCommonLabels() > 1) { @@ -403,12 +460,17 @@ ZoneData::findNode(const Name& name, ZoneFinder::FindOptions options) const { LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_WILDCARD_CANCEL).arg(name); return (ResultType(ZoneFinder::NXDOMAIN, NULL, - ConstRBNodeRRsetPtr())); + getClosestNSEC(node_path, options))); } // Now the wildcard should be the best match. const Name wildcard(Name("*").concatenate( node_path.getAbsoluteName())); - DomainTree::Result result = domains_.find(wildcard, &node); + + // Clear the node_path so that we don't keep incorrect (NSEC) + // context + node_path.clear(); + DomainTree::Result result(domains_.find(wildcard, &node, + node_path)); // Otherwise, why would the domain_flag::WILD be there if // there was no wildcard under it? assert(result == DomainTree::EXACTMATCH); @@ -418,7 +480,8 @@ ZoneData::findNode(const Name& name, ZoneFinder::FindOptions options) const { } // Nothing really matched. 
LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_NOT_FOUND).arg(name); - return (ResultType(ZoneFinder::NXDOMAIN, node, state.rrset_)); + return (ResultType(ZoneFinder::NXDOMAIN, node, + getClosestNSEC(node_path, options))); } else { // If the name is neither an exact or partial match, it is // out of bailiwick, which is considered an error. @@ -1199,6 +1262,24 @@ struct InMemoryZoneFinder::InMemoryZoneFinderImpl { } } + // A helper function for the NXRRSET case in find(). If the zone is + // NSEC-signed and DNSSEC records are requested, try to find NSEC + // on the given node, and return it if found; return NULL for all other + // cases. + ConstRBNodeRRsetPtr getNSECForNXRRSET(FindOptions options, + const DomainNode& node) const + { + if (zone_data_->nsec_signed_ && + (options & ZoneFinder::FIND_DNSSEC) != 0) { + const Domain::const_iterator found = + node.getData()->find(RRType::NSEC()); + if (found != node.getData()->end()) { + return (found->second); + } + } + return (ConstRBNodeRRsetPtr()); + } + // Set up FindContext object as a return value of find(), taking into // account wildcard matches and DNSSEC information. We set the NSEC/NSEC3 // flag when applicable regardless of the find option; the caller would @@ -1236,8 +1317,10 @@ struct InMemoryZoneFinder::InMemoryZoneFinderImpl { // Get the node. All other cases than an exact match are handled // in findNode(). We simply construct a result structure and return. + RBTreeNodeChain node_path; // findNode will fill in this const ZoneData::FindNodeResult node_result = - zone_data_->findNode(name, options); + zone_data_->findNode(name, node_path, + options); if (node_result.code != SUCCESS) { return (createFindResult(node_result.code, node_result.rrset)); } @@ -1253,7 +1336,10 @@ struct InMemoryZoneFinder::InMemoryZoneFinderImpl { if (node->isEmpty()) { LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_DOMAIN_EMPTY). 
arg(name); - return (createFindResult(NXRRSET, ConstRBNodeRRsetPtr(), rename)); + return (createFindResult(NXRRSET, + zone_data_->getClosestNSEC(node_path, + options), + rename)); } Domain::const_iterator found; @@ -1309,10 +1395,9 @@ struct InMemoryZoneFinder::InMemoryZoneFinderImpl { rename)); } } - // No exact match or CNAME. Return NXRRSET. - LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_NXRRSET).arg(type). - arg(name); - return (createFindResult(NXRRSET, ConstRBNodeRRsetPtr(), rename)); + // No exact match or CNAME. Get NSEC if necessary and return NXRRSET. + return (createFindResult(NXRRSET, getNSECForNXRRSET(options, *node), + rename)); } }; @@ -1474,6 +1559,7 @@ addAdditional(RBNodeRRset* rrset, ZoneData* zone_data, { RdataIteratorPtr rdata_iterator = rrset->getRdataIterator(); bool match_wild = false; // will be true if wildcard match is found + RBTreeNodeChain node_path; // placeholder for findNode() for (; !rdata_iterator->isLast(); rdata_iterator->next()) { // For each domain name that requires additional section processing // in each RDATA, search the tree for the name and remember it if @@ -1486,13 +1572,14 @@ addAdditional(RBNodeRRset* rrset, ZoneData* zone_data, // if the name is not in or below this zone, skip it const NameComparisonResult::NameRelation reln = name.compare(zone_data->origin_data_->getName()).getRelation(); - if (reln != NameComparisonResult::SUBDOMAIN && - reln != NameComparisonResult::EQUAL) { + if (reln != NameComparisonResult::SUBDOMAIN && + reln != NameComparisonResult::EQUAL) { continue; } + node_path.clear(); const ZoneData::FindMutableNodeResult result = zone_data->findNode( - name, ZoneFinder::FIND_GLUE_OK); + name, node_path, ZoneFinder::FIND_GLUE_OK); if (result.code != ZoneFinder::SUCCESS) { // We are not interested in anything but a successful match. 
continue; @@ -1660,7 +1747,7 @@ generateRRsetFromIterator(ZoneIterator* iterator, LoadCallback callback) { } void -InMemoryZoneFinder::load(const string& filename) { +InMemoryZoneFinder::load(const std::string& filename) { LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_LOAD).arg(getOrigin()). arg(filename); @@ -1687,12 +1774,6 @@ InMemoryZoneFinder::getFileName() const { return (impl_->file_name_); } -isc::dns::Name -InMemoryZoneFinder::findPreviousName(const isc::dns::Name&) const { - isc_throw(NotImplemented, "InMemory data source doesn't support DNSSEC " - "yet, can't find previous name"); -} - /// Implementation details for \c InMemoryClient hidden from the public /// interface. /// @@ -1762,8 +1843,7 @@ public: { // Find the first node (origin) and preserve the node chain for future // searches - DomainTree::Result result(tree_.find(origin, &node_, chain_, - NULL, NULL)); + DomainTree::Result result(tree_.find(origin, &node_, chain_)); // It can't happen that the origin is not in there if (result != DomainTree::EXACTMATCH) { isc_throw(Unexpected, diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h index c687d1bcd6..be545d42df 100644 --- a/src/lib/datasrc/memory_datasrc.h +++ b/src/lib/datasrc/memory_datasrc.h @@ -65,11 +65,7 @@ public: /// \brief Returns the class of the zone. virtual isc::dns::RRClass getClass() const; - /// \brief Looks up an RRset in the zone. - /// - /// See documentation in \c Zone. - /// - /// It returns NULL pointer in case of NXDOMAIN and NXRRSET. + /// \brief Find an RRset in the datasource virtual ZoneFinderContextPtr find(const isc::dns::Name& name, const isc::dns::RRType& type, const FindOptions options = @@ -91,12 +87,6 @@ public: virtual FindNSEC3Result findNSEC3(const isc::dns::Name& name, bool recursive); - /// \brief Imelementation of the ZoneFinder::findPreviousName method - /// - /// This one throws NotImplemented exception, as InMemory doesn't - /// support DNSSEC currently. 
- virtual isc::dns::Name findPreviousName(const isc::dns::Name& query) const; - /// \brief Inserts an rrset into the zone. /// /// It puts another RRset into the zone. @@ -286,7 +276,7 @@ public: /// This method never throws an exception. /// /// \return The number of zones stored in the client. - unsigned int getZoneCount() const; + virtual unsigned int getZoneCount() const; /// Add a zone (in the form of \c ZoneFinder) to the \c InMemoryClient. /// diff --git a/src/lib/datasrc/memory_datasrc_link.cc b/src/lib/datasrc/memory_datasrc_link.cc index a0b4bf66c3..cbbc6db4c1 100644 --- a/src/lib/datasrc/memory_datasrc_link.cc +++ b/src/lib/datasrc/memory_datasrc_link.cc @@ -17,9 +17,13 @@ #include #include +#include #include +#include + #include +#include #include @@ -29,6 +33,14 @@ using namespace isc::data; namespace isc { namespace datasrc { +/// This exception is raised if there is an error in the configuration +/// that has been passed; missing information, duplicate values, etc. +class InMemoryConfigError : public isc::Exception { +public: + InMemoryConfigError(const char* file, size_t line, const char* what) : + isc::Exception(file, line, what) {} +}; + namespace { // convencience function to add an error message to a list of those // (TODO: move functions like these to some util lib?) 
@@ -49,14 +61,14 @@ checkConfigElementString(ConstElementPtr config, const std::string& name, "Config for memory backend does not contain a '" +name+ "' value"); - return false; + return (false); } else if (!config->get(name) || config->get(name)->getType() != Element::string) { addError(errors, "value of " + name + " in memory backend config is not a string"); - return false; + return (false); } else { - return true; + return (true); } } @@ -112,24 +124,26 @@ checkConfig(ConstElementPtr config, ElementPtr errors) { result = false; } } - if (!checkConfigElementString(config, "class", errors)) { - result = false; - } else { - try { - RRClass rrc(config->get("class")->stringValue()); - } catch (const isc::Exception& rrce) { - addError(errors, - "Error parsing class config for memory backend: " + - std::string(rrce.what())); + if (config->contains("class")) { + if (!checkConfigElementString(config, "class", errors)) { result = false; + } else { + try { + RRClass rrc(config->get("class")->stringValue()); + } catch (const isc::Exception& rrce) { + addError(errors, + "Error parsing class config for memory backend: " + + std::string(rrce.what())); + result = false; + } } } if (!config->contains("zones")) { - addError(errors, "No 'zones' element in memory backend config"); - result = false; + // Assume empty list of zones } else if (!config->get("zones") || config->get("zones")->getType() != Element::list) { - addError(errors, "'zones' element in memory backend config is not a list"); + addError(errors, + "'zones' element in memory backend config is not a list"); result = false; } else { BOOST_FOREACH(ConstElementPtr zone_config, @@ -144,6 +158,91 @@ checkConfig(ConstElementPtr config, ElementPtr errors) { return (result); } +// Apply the given config to the just-initialized client +// client must be freshly allocated, and config_value should have been +// checked by the caller +void +applyConfig(isc::datasrc::InMemoryClient& client, + isc::data::ConstElementPtr 
config_value) +{ + // XXX: We have lost the context to get to the default values here, + // as a temporary workaround we hardcode the IN class here. + isc::dns::RRClass rrclass = RRClass::IN(); + if (config_value->contains("class")) { + rrclass = RRClass(config_value->get("class")->stringValue()); + } + ConstElementPtr zones_config = config_value->get("zones"); + if (!zones_config) { + // XXX: Like the RR class, we cannot retrieve the default value here, + // so we assume an empty zone list in this case. + return; + } + + BOOST_FOREACH(ConstElementPtr zone_config, zones_config->listValue()) { + ConstElementPtr origin = zone_config->get("origin"); + const std::string origin_txt = origin ? origin->stringValue() : ""; + if (origin_txt.empty()) { + isc_throw(InMemoryConfigError, "Missing zone origin"); + } + ConstElementPtr file = zone_config->get("file"); + const std::string file_txt = file ? file->stringValue() : ""; + if (file_txt.empty()) { + isc_throw(InMemoryConfigError, "Missing zone file for zone: " + << origin_txt); + } + + // We support the traditional text type and SQLite3 backend. For the + // latter we create a client for the underlying SQLite3 data source, + // and build the in-memory zone using an iterator of the underlying + // zone. + ConstElementPtr filetype = zone_config->get("filetype"); + const std::string filetype_txt = filetype ? filetype->stringValue() : + "text"; + boost::scoped_ptr container; + if (filetype_txt == "sqlite3") { + container.reset(new DataSourceClientContainer( + "sqlite3", + Element::fromJSON("{\"database_file\": \"" + + file_txt + "\"}"))); + } else if (filetype_txt != "text") { + isc_throw(InMemoryConfigError, "Invalid filetype for zone " + << origin_txt << ": " << filetype_txt); + } + + // Note: we don't want to have such small try-catch blocks for each + // specific error. We may eventually want to introduce some unified + // error handling framework as we have more configuration parameters. 
+ // See bug #1627 for the relevant discussion. + InMemoryZoneFinder* imzf = NULL; + try { + imzf = new InMemoryZoneFinder(rrclass, Name(origin_txt)); + } catch (const isc::dns::NameParserException& ex) { + isc_throw(InMemoryConfigError, "unable to parse zone's origin: " << + ex.what()); + } + + boost::shared_ptr zone_finder(imzf); + const result::Result result = client.addZone(zone_finder); + if (result == result::EXIST) { + isc_throw(InMemoryConfigError, "zone "<< origin->str() + << " already exists"); + } + + /* + * TODO: Once we have better reloading of configuration (something + * else than throwing everything away and loading it again), we will + * need the load method to be split into some kind of build and + * commit/abort parts. + */ + if (filetype_txt == "text") { + zone_finder->load(file_txt); + } else { + zone_finder->load(*container->getInstance().getIterator( + Name(origin_txt))); + } + } +} + } // end unnamed namespace DataSourceClient * @@ -154,7 +253,12 @@ createInstance(isc::data::ConstElementPtr config, std::string& error) { return (NULL); } try { - return (new isc::datasrc::InMemoryClient()); + std::auto_ptr client(new isc::datasrc::InMemoryClient()); + applyConfig(*client, config); + return (client.release()); + } catch (const isc::Exception& isce) { + error = isce.what(); + return (NULL); } catch (const std::exception& exc) { error = std::string("Error creating memory datasource: ") + exc.what(); return (NULL); diff --git a/src/lib/datasrc/rbnode_rrset.h b/src/lib/datasrc/rbnode_rrset.h index 3e5d20a803..3161cdb00e 100644 --- a/src/lib/datasrc/rbnode_rrset.h +++ b/src/lib/datasrc/rbnode_rrset.h @@ -81,7 +81,7 @@ struct AdditionalNodeInfo; /// can refer to its definition, and only for that purpose. Otherwise this is /// essentially a private class of the in-memory data source implementation, /// and an application shouldn't directly refer to this class. -/// +/// // Note: non-Doxygen-documented methods are documented in the base class. 
class RBNodeRRset : public isc::dns::AbstractRRset { diff --git a/src/lib/datasrc/rbtree.h b/src/lib/datasrc/rbtree.h index dbf05914c0..39646ac45c 100644 --- a/src/lib/datasrc/rbtree.h +++ b/src/lib/datasrc/rbtree.h @@ -237,7 +237,7 @@ private: /// Return if callback is enabled at the node. //@} -private: + /// \brief Define rbnode color enum RBNodeColor {BLACK, RED}; /// This is a factory class method of a special singleton null node. @@ -263,6 +263,37 @@ private: /// This method never throws an exception. const RBNode* successor() const; + /// \brief return the next node which is smaller than current node + /// in the same subtree + /// + /// The predecessor for this node is the next smaller node in terms of + /// the DNSSEC order relation within the same single subtree. + /// Note that it may NOT be the next smaller node in the entire RBTree; + /// RBTree is a tree in tree, and the real next node may reside in + /// an upper or lower subtree of the subtree where this node belongs. + /// For example, if the predecessor node has a sub domain, the real next + /// node is the largest node in the sub domain tree. + /// + /// If this node is the smallest node within the subtree, this method + /// returns \c NULL_NODE(). + /// + /// This method never throws an exception. + const RBNode* predecessor() const; + + /// \brief private shared implementation of successor and predecessor + /// + /// As the two mentioned functions are merely mirror images of each other, + /// it makes little sense to keep both versions. So this is the body of the + /// functions and we call it with the correct pointers. + /// + /// Not to be called directly, not even by friends. + /// + /// The overhead of the member pointers should be optimised out, as this + /// will probably get completely inlined into predecessor and successor + /// methods. + const RBNode* abstractSuccessor(RBNode* RBNode::*left, + RBNode* RBNode::*right) const; + /// \name Data to maintain the rbtree structure. 
//@{ RBNode* parent_; @@ -283,7 +314,7 @@ private: /// \par Adding down pointer to \c RBNode has two purposes: /// \li Accelerate the search process, with sub domain tree, it splits the /// big flat tree into several hierarchy trees. - /// \li It saves memory useage as it allows storing only relative names, + /// \li It saves memory usage as it allows storing only relative names, /// avoiding storage of the same domain labels multiple times. RBNode* down_; @@ -333,30 +364,48 @@ RBNode::~RBNode() { template const RBNode* -RBNode::successor() const { +RBNode::abstractSuccessor(RBNode* RBNode::*left, RBNode* + RBNode::*right) const +{ + // This function is written as a successor. It becomes predecessor if + // the left and right pointers are swapped. So in case of predecessor, + // the left pointer points to right and vice versa. Don't get confused + // by the idea, just imagine the pointers look into a mirror. + const RBNode* current = this; // If it has right node, the successor is the left-most node of the right // subtree. - if (right_ != NULL_NODE()) { - current = right_; - while (current->left_ != NULL_NODE()) { - current = current->left_; + if (current->*right != RBNode::NULL_NODE()) { + current = current->*right; + while (current->*left != RBNode::NULL_NODE()) { + current = current->*left; } return (current); } - // Otherwise go up until we find the first left branch on our path to // root. If found, the parent of the branch is the successor. 
// Otherwise, we return the null node const RBNode* parent = current->parent_; - while (parent != NULL_NODE() && current == parent->right_) { + while (parent != RBNode::NULL_NODE() && current == parent->*right) { current = parent; parent = parent->parent_; } return (parent); } +template +const RBNode* +RBNode::successor() const { + return (abstractSuccessor(&RBNode::left_, &RBNode::right_)); +} + +template +const RBNode* +RBNode::predecessor() const { + // Swap the left and right pointers for the abstractSuccessor + return (abstractSuccessor(&RBNode::right_, &RBNode::left_)); +} /// \brief RBTreeNodeChain stores detailed information of \c RBTree::find() /// result. @@ -364,8 +413,7 @@ RBNode::successor() const { /// - The \c RBNode that was last compared with the search name, and /// the comparison result at that point in the form of /// \c isc::dns::NameComparisonResult. -/// - A sequence of nodes that forms a path to the found node (which is -/// not yet implemented). +/// - A sequence of nodes that forms a path to the found node. /// /// The comparison result can be used to handle some rare cases such as /// empty node processing. @@ -396,7 +444,7 @@ RBNode::successor() const { template class RBTreeNodeChain { /// RBTreeNodeChain is initialized by RBTree, only RBTree has - /// knowledge to manipuate it. + /// knowledge to manipulate it. friend class RBTree; public: /// \name Constructors and Assignment Operator. @@ -498,10 +546,10 @@ public: private: // the following private functions check invariants about the internal // state using assert() instead of exception. The state of a chain - // can only be modified operations within this file, so if any of the + // can only be modified by operations within this file, so if any of the // assumptions fails it means an internal bug. - /// \brief return whther node chain has node in it. + /// \brief return whether node chain has node in it. 
/// /// \exception None bool isEmpty() const { return (node_count_ == 0); } @@ -655,7 +703,7 @@ public: /// By default, nodes that don't have data (see RBNode::isEmpty) are /// ignored and the result can be NOTFOUND even if there's a node whose /// name matches. If the \c RBTree is constructed with its - /// \c returnEmptyNode parameter being \c true, an empty node will also + /// \c returnEmptyNode parameter being \c true, empty nodes will also /// be match candidates. /// /// \note Even when \c returnEmptyNode is \c true, not all empty nodes @@ -673,7 +721,7 @@ public: /// if it throws, the exception will be propagated to the caller. /// /// The \c name parameter says what should be found. The node parameter - /// is output only and in case of EXACTMATCH and PARTIALMATCH, it is set + /// is output-only, and in case of EXACTMATCH or PARTIALMATCH, it is set /// to a pointer to the found node. /// /// They return: @@ -710,6 +758,30 @@ public: return (ret); } + /// \brief Simple find, with node_path tracking + /// + /// Acts as described in the \ref find section. + Result find(const isc::dns::Name& name, RBNode** node, + RBTreeNodeChain& node_path) const + { + return (find(name, node, node_path, NULL, NULL)); + } + + /// \brief Simple find returning immutable node, with node_path tracking + /// + /// Acts as described in the \ref find section, but returns immutable node + /// pointer. + Result find(const isc::dns::Name& name, const RBNode** node, + RBTreeNodeChain& node_path) const + { + RBNode *target_node = NULL; + Result ret = (find(name, &target_node, node_path, NULL, NULL)); + if (ret != NOTFOUND) { + *node = target_node; + } + return (ret); + } + /// \brief Find with callback and node chain. 
/// \anchor callback /// @@ -720,13 +792,16 @@ public: /// /// This version of \c find() calls the callback whenever traversing (on /// the way from root down the tree) a marked node on the way down through - /// the domain namespace (see \c RBNode::enableCallback and related - /// functions). + /// the domain namespace (see \c RBNode::FLAG_CALLBACK). /// /// If you return true from the callback, the search is stopped and a /// PARTIALMATCH is returned with the given node. Note that this node /// doesn't really need to be the one with longest possible match. /// + /// The callback is not called for the node which matches exactly + /// (EXACTMATCH is returned). This is typically the last node in the + /// traversal during a successful search. + /// /// This callback mechanism was designed with zone cut (delegation) /// processing in mind. The marked nodes would be the ones at delegation /// points. It is not expected that any other applications would need @@ -741,38 +816,36 @@ public: /// which is an object of class \c RBTreeNodeChain. /// The passed parameter must be empty. /// - /// \note The rest of the description isn't yet implemented. It will be - /// handled in Trac ticket #517. - /// - /// On success, the node sequence stoed in \c node_path will contain all + /// On success, the node sequence stored in \c node_path will contain all /// the ancestor nodes from the found node towards the root. /// For example, if we look for o.w.y.d.e.f in the example \ref diagram, /// \c node_path will contain w.y and d.e.f; the \c top() node of the - /// chain will be o, w.f and d.e.f will be stored below it. + /// chain will be o, w.y and d.e.f will be stored below it. /// /// This feature can be used to get the absolute name for a node; /// to do so, we need to travel upside from the node toward the root, /// concatenating all ancestor names. 
With the current implementation /// it's not possible without a node chain, because there is a no pointer /// from the root of a subtree to the parent subtree (this may change - /// in a future version). A node chain can also be used to find the next - /// node of a given node in the entire RBTree; the \c nextNode() method - /// takes a node chain as a parameter. + /// in a future version). A node chain can also be used to find the + /// next and previous nodes of a given node in the entire RBTree; + /// the \c nextNode() and \c previousNode() methods take a node + /// chain as a parameter. /// - /// \exception isc::BadValue node_path is not empty (not yet implemented). + /// \exception isc::BadValue node_path is not empty. /// /// \param name Target to be found /// \param node On success (either \c EXACTMATCH or \c PARTIALMATCH) /// it will store a pointer to the matching node /// \param node_path Other search details will be stored (see the /// description) - /// \param callback If non \c NULL, a call back function to be called - /// at marked nodes (see above). + /// \param callback If non-\c NULL, a callback function to be called + /// at marked nodes (see the description). /// \param callback_arg A caller supplied argument to be passed to /// \c callback. /// - /// \return As described above, but in case of callback returning true, - /// it returns immediately with the current node. + /// \return As in the description, but in case of callback returning + /// \c true, it returns immediately with the current node. template Result find(const isc::dns::Name& name, RBNode** node,
But unlike \c nextNode(), this can start even if the + /// node requested by \c find() was not found. In that case, it will + /// identify the node that is previous to the queried name. + /// + /// \note \c previousNode() will iterate over all the nodes in RBTree + /// including empty nodes. If empty node isn't desired, it's easy to add + /// logic to check return node and keep invoking \c previousNode() until the + /// non-empty node is retrieved. + /// + /// \exception isc::BadValue node_path is empty. + /// + /// \param node_path A node chain that stores all the nodes along the path + /// from root to node and the result of \c find(). This will get modified. + /// You should not use the node_path again except for repetitive calls + /// of this method. + /// + /// \return An \c RBNode that is next smaller than \c node; if \c node is + /// the smallest, \c NULL will be returned. + const RBNode* previousNode(RBTreeNodeChain& node_path) const; + /// \brief Get the total number of nodes in the tree /// /// It includes nodes internally created as a result of adding a domain @@ -848,8 +945,8 @@ public: //@{ /// \brief Insert the domain name into the tree. /// - /// It either finds an already existing node of the given name or inserts - /// a new one, if none exists yet. In any case, the inserted_node parameter + /// It either finds an already existing node of the given name, or inserts + /// a new one if none exists yet. In any case, the \c inserted_node parameter /// is set to point to that node. You can fill data into it or modify it. /// So, if you don't know if a node exists or not and you need to modify /// it, just call insert and act by the result. 
@@ -1059,15 +1156,7 @@ RBTree::nextNode(RBTreeNodeChain& node_path) const { return (left_most); } - // node_path go to up level - node_path.pop(); - // otherwise found the successor node in current level - const RBNode* successor = node->successor(); - if (successor != NULLNODE) { - node_path.push(successor); - return (successor); - } - + // try to find a successor. // if no successor found move to up level, the next successor // is the successor of up node in the up level tree, if // up node doesn't have successor we gonna keep moving to up @@ -1084,6 +1173,143 @@ RBTree::nextNode(RBTreeNodeChain& node_path) const { return (NULL); } +template +const RBNode* +RBTree::previousNode(RBTreeNodeChain& node_path) const { + if (getNodeCount() == 0) { + // Special case for empty trees. It would look every time like + // we didn't search, because the last compared is empty. This is + // a slight hack and not perfect, but this is better than throwing + // on empty tree. And we probably won't meet an empty tree in practice + // anyway. + return (NULL); + } + if (node_path.last_compared_ == NULL) { + isc_throw(isc::BadValue, + "RBTree::previousNode() called before find()"); + } + + // If the relation isn't EQUAL, it means the find was called previously + // and didn't find the exact node. Therefore we need to locate the place + // to start iterating the chain of domains. + // + // The logic here is not too complex, we just need to take care to handle + // all the cases and decide where to go from there. + switch (node_path.last_comparison_.getRelation()) { + case dns::NameComparisonResult::COMMONANCESTOR: + // We compared with a leaf in the tree and wanted to go to one of + // the children. But the child was not there. It now depends on the + // direction in which we wanted to go. + if (node_path.last_comparison_.getOrder() < 0) { + // We wanted to go left. So the one we compared with is + // the one higher than we wanted. 
If we just put it into + // the node_path, then the following algorithm below will find + // the smaller one. + // + // This is exactly the same as with superdomain below. + // Therefore, we just fall through to the next case. + } else { + // We wanted to go right. That means we want to output the + // one which is the largest in the tree defined by the + // compared one (it is either the compared one, or some + // subdomain of it). There probably is not an easy trick + // for this, so we just find the correct place. + const RBNode* current(node_path.last_compared_); + while (current != NULLNODE) { + node_path.push(current); + // Go a level down and as much right there as possible + current = current->down_; + while (current->right_ != NULLNODE) { + // A small trick. The current may be NULLNODE, but + // such node has the right_ pointer and it is equal + // to NULLNODE. + current = current->right_; + } + } + // Now, the one on top of the path is the one we want. We + // return it now and leave it there, so we can search for + // previous of it the next time we'are called. + node_path.last_comparison_ = + dns::NameComparisonResult(0, 0, + dns::NameComparisonResult::EQUAL); + return (node_path.top()); + } + // No break; here - we want to fall through. See above. + case dns::NameComparisonResult::SUPERDOMAIN: + // This is the case there's a "compressed" node and we looked for + // only part of it. The node itself is larger than we wanted, but + // if we put it to the node_path and then go one step left from it, + // we get the correct result. + node_path.push(node_path.last_compared_); + // Correct the comparison result, so we won't trigger this case + // next time previousNode is called. We already located the correct + // place to start. The value is partly nonsense, but that doesn't + // matter any more. 
+ node_path.last_comparison_ = + dns::NameComparisonResult(0, 0, + dns::NameComparisonResult::EQUAL); + break; + case dns::NameComparisonResult::SUBDOMAIN: + // A subdomain means we returned the one above the searched one + // already and it is on top of the stack. This is was smaller + // than the one already, but we want to return yet smaller one. + // So we act as if it was EQUAL. + break; + case dns::NameComparisonResult::EQUAL: + // The find gave us an exact match or the previousNode was called + // already, which located the exact node. The rest of the function + // goes one domain left and returns it for us. + break; + } + + // So, the node_path now contains the path to a node we want previous for. + // We just need to go one step left. + + if (node_path.isEmpty()) { + // We got past the first one. So, we're returning NULL from + // now on. + return (NULL); + } + + const RBNode* node(node_path.top()); + + // Try going left in this tree + node = node->predecessor(); + if (node == NULLNODE) { + // We are the smallest ones in this tree. We go one level + // up. That one is the smaller one than us. + + node_path.pop(); + if (node_path.isEmpty()) { + // We're past the first one + return (NULL); + } else { + return (node_path.top()); + } + } + + // Exchange the node at the top of the path, as we move horizontaly + // through the domain tree + node_path.pop(); + node_path.push(node); + + // Try going as deep as possible, keeping on the right side of the trees + while (node->down_ != NULLNODE) { + // Move to the tree below + node = node->down_; + // And get as much to the right of the tree as possible + while (node->right_ != NULLNODE) { + node = node->right_; + } + // Now, we found the right-most node in the sub-tree, we need to + // include it in the path + node_path.push(node); + } + + // Now, if the current node has no down_ pointer any more, it's the + // correct one. 
+ return (node); +} template typename RBTree::Result diff --git a/src/lib/datasrc/static.zone.pre b/src/lib/datasrc/static.zone.pre new file mode 100644 index 0000000000..16a7379ea8 --- /dev/null +++ b/src/lib/datasrc/static.zone.pre @@ -0,0 +1,12 @@ +;; This is the content of the BIND./CH zone. It contains the version and +;; authors (called VERSION.BIND. and AUTHORS.BIND.). You can add more or +;; modify the zone. Then you can reload the zone by issuing the command +;; +;; loadzone CH BIND +;; +;; in the bindctl. + +BIND. 0 CH SOA bind. authors.bind. 0 28800 7200 604800 86400 + +VERSION.BIND. 0 CH TXT "@@VERSION_STRING@@" +;; HOSTNAME.BIND 0 CH TXT "localhost" diff --git a/src/lib/datasrc/static_datasrc_link.cc b/src/lib/datasrc/static_datasrc_link.cc new file mode 100644 index 0000000000..789580d057 --- /dev/null +++ b/src/lib/datasrc/static_datasrc_link.cc @@ -0,0 +1,62 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. 
+ +#include "client.h" +#include "memory_datasrc.h" + +#include +#include + +#include +#include + +using namespace isc::data; +using namespace isc::dns; +using namespace boost; +using namespace std; + +namespace isc { +namespace datasrc { + +DataSourceClient* +createInstance(ConstElementPtr config, string& error) { + try { + // Create the data source + auto_ptr client(new InMemoryClient()); + // Hardcode the origin and class + shared_ptr + finder(new InMemoryZoneFinder(RRClass::CH(), Name("BIND"))); + // Fill it with data + const string path(config->stringValue()); + finder->load(path); + // And put the zone inside + client->addZone(finder); + return (client.release()); + } + catch (const std::exception& e) { + error = e.what(); + } + catch (...) { + error = "Unknown exception"; + } + return (NULL); +} + +void +destroyInstance(DataSourceClient* instance) { + delete instance; +} + +} +} diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am index 90fb3e4bf1..e72fd71f8b 100644 --- a/src/lib/datasrc/tests/Makefile.am +++ b/src/lib/datasrc/tests/Makefile.am @@ -17,6 +17,9 @@ endif CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = noinst_PROGRAMS = if HAVE_GTEST @@ -31,9 +34,7 @@ common_sources = run_unittests.cc common_sources += $(top_srcdir)/src/lib/dns/tests/unittest_util.h common_sources += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc -common_ldadd = $(GTEST_LDADD) -common_ldadd += $(SQLITE_LIBS) -common_ldadd += $(top_builddir)/src/lib/datasrc/libdatasrc.la +common_ldadd = $(top_builddir)/src/lib/datasrc/libdatasrc.la common_ldadd += $(top_builddir)/src/lib/dns/libdns++.la common_ldadd += $(top_builddir)/src/lib/util/libutil.la common_ldadd += $(top_builddir)/src/lib/log/liblog.la @@ -41,6 +42,7 @@ common_ldadd += $(top_builddir)/src/lib/exceptions/libexceptions.la common_ldadd += $(top_builddir)/src/lib/cc/libcc.la common_ldadd += 
$(top_builddir)/src/lib/testutils/libtestutils.la common_ldadd += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la +common_ldadd += $(GTEST_LDADD) $(SQLITE_LIBS) # The general tests run_unittests_SOURCES = $(common_sources) @@ -60,6 +62,7 @@ run_unittests_SOURCES += memory_datasrc_unittest.cc run_unittests_SOURCES += rbnode_rrset_unittest.cc run_unittests_SOURCES += zone_finder_context_unittest.cc run_unittests_SOURCES += faked_nsec3.h faked_nsec3.cc +run_unittests_SOURCES += client_list_unittest.cc # We need the actual module implementation in the tests (they are not part # of libdatasrc) @@ -114,3 +117,4 @@ EXTRA_DIST += testdata/test.sqlite3 EXTRA_DIST += testdata/new_minor_schema.sqlite3 EXTRA_DIST += testdata/newschema.sqlite3 EXTRA_DIST += testdata/oldschema.sqlite3 +EXTRA_DIST += testdata/static.zone diff --git a/src/lib/datasrc/tests/client_list_unittest.cc b/src/lib/datasrc/tests/client_list_unittest.cc new file mode 100644 index 0000000000..ae22470e78 --- /dev/null +++ b/src/lib/datasrc/tests/client_list_unittest.cc @@ -0,0 +1,472 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. 
+ +#include +#include +#include + +#include + +#include + +#include + +using namespace isc::datasrc; +using namespace isc::data; +using namespace isc::dns; +using namespace boost; +using namespace std; + +namespace { + +// A test data source. It pretends it has some zones. +class MockDataSourceClient : public DataSourceClient { +public: + class Finder : public ZoneFinder { + public: + Finder(const Name& origin) : + origin_(origin) + {} + Name getOrigin() const { return (origin_); } + // The rest is not to be called, so just have them + RRClass getClass() const { + isc_throw(isc::NotImplemented, "Not implemented"); + } + shared_ptr find(const Name&, const RRType&, + const FindOptions) + { + isc_throw(isc::NotImplemented, "Not implemented"); + } + shared_ptr findAll(const Name&, + vector&, + const FindOptions) + { + isc_throw(isc::NotImplemented, "Not implemented"); + } + FindNSEC3Result findNSEC3(const Name&, bool) { + isc_throw(isc::NotImplemented, "Not implemented"); + } + private: + Name origin_; + }; + // Constructor from a list of zones. + MockDataSourceClient(const char* zone_names[]) { + for (const char** zone(zone_names); *zone; ++zone) { + zones.insert(Name(*zone)); + } + } + // Constructor from configuration. The list of zones will be empty, but + // it will keep the configuration inside for further inspection. 
+ MockDataSourceClient(const string& type, + const ConstElementPtr& configuration) : + type_(type), + configuration_(configuration) + {} + virtual FindResult findZone(const Name& name) const { + if (zones.empty()) { + return (FindResult(result::NOTFOUND, ZoneFinderPtr())); + } + set::const_iterator it(zones.upper_bound(name)); + if (it == zones.begin()) { + return (FindResult(result::NOTFOUND, ZoneFinderPtr())); + } + --it; + NameComparisonResult compar(it->compare(name)); + const ZoneFinderPtr finder(new Finder(*it)); + switch (compar.getRelation()) { + case NameComparisonResult::EQUAL: + return (FindResult(result::SUCCESS, finder)); + case NameComparisonResult::SUPERDOMAIN: + return (FindResult(result::PARTIALMATCH, finder)); + default: + return (FindResult(result::NOTFOUND, ZoneFinderPtr())); + } + } + // These methods are not used. They just need to be there to have + // complete vtable. + virtual ZoneUpdaterPtr getUpdater(const Name&, bool, bool) const { + isc_throw(isc::NotImplemented, "Not implemented"); + } + virtual pair + getJournalReader(const Name&, uint32_t, uint32_t) const + { + isc_throw(isc::NotImplemented, "Not implemented"); + } + const string type_; + const ConstElementPtr configuration_; +private: + set zones; +}; + + +// The test version is the same as the normal version. We, however, add +// some methods to dig directly in the internals, for the tests. +class TestedList : public ConfigurableClientList { +public: + DataSources& getDataSources() { return (data_sources_); } + // Overwrite the list's method to get a data source with given type + // and configuration. We mock the data source and don't create the + // container. This is just to avoid some complexity in the tests. 
+ virtual DataSourcePair getDataSourceClient(const string& type, + const ConstElementPtr& + configuration) + { + if (type == "error") { + isc_throw(DataSourceError, "The error data source type"); + } + shared_ptr + ds(new MockDataSourceClient(type, configuration)); + // Make sure it is deleted when the test list is deleted. + to_delete_.push_back(ds); + return (DataSourcePair(ds.get(), DataSourceClientContainerPtr())); + } +private: + // Hold list of data sources created internally, so they are preserved + // until the end of the test and then deleted. + vector > to_delete_; +}; + +const char* ds_zones[][3] = { + { + "example.org.", + "example.com.", + NULL + }, + { + "sub.example.org.", + NULL, NULL + }, + { + NULL, NULL, NULL + }, + { + "sub.example.org.", + NULL, NULL + } +}; + +const size_t ds_count = (sizeof(ds_zones) / sizeof(*ds_zones)); + +class ListTest : public ::testing::Test { +public: + ListTest() : + // The empty list corresponds to a list with no elements inside + list_(new TestedList()), + config_elem_(Element::fromJSON("[" + "{" + " \"type\": \"test_type\"," + " \"cache\": \"off\"," + " \"params\": {}" + "}]")) + { + for (size_t i(0); i < ds_count; ++ i) { + shared_ptr + ds(new MockDataSourceClient(ds_zones[i])); + ds_.push_back(ds); + ds_info_.push_back(ConfigurableClientList::DataSourceInfo(ds.get(), + DataSourceClientContainerPtr())); + } + } + // Check the positive result is as we expect it. + void positiveResult(const ClientList::FindResult& result, + const shared_ptr& dsrc, + const Name& name, bool exact, + const char* test) + { + SCOPED_TRACE(test); + EXPECT_EQ(dsrc.get(), result.dsrc_client_); + ASSERT_NE(ZoneFinderPtr(), result.finder_); + EXPECT_EQ(name, result.finder_->getOrigin()); + EXPECT_EQ(exact, result.exact_match_); + } + // Configure the list with multiple data sources, according to + // some configuration. It uses the index as parameter, to be able to + // loop through the configurations. 
+ void multiConfiguration(size_t index) { + list_->getDataSources().clear(); + switch (index) { + case 2: + list_->getDataSources().push_back(ds_info_[2]); + // The ds_[2] is empty. We just check that it doesn't confuse + // us. Fall through to the case 0. + case 0: + list_->getDataSources().push_back(ds_info_[0]); + list_->getDataSources().push_back(ds_info_[1]); + break; + case 1: + // The other order + list_->getDataSources().push_back(ds_info_[1]); + list_->getDataSources().push_back(ds_info_[0]); + break; + case 3: + list_->getDataSources().push_back(ds_info_[1]); + list_->getDataSources().push_back(ds_info_[0]); + // It is the same as ds_[1], but we take from the first one. + // The first one to match is the correct one. + list_->getDataSources().push_back(ds_info_[3]); + break; + default: + FAIL() << "Unknown configuration index " << index; + } + } + void checkDS(size_t index, const string& type, const string& params) const + { + ASSERT_GT(list_->getDataSources().size(), index); + MockDataSourceClient* ds(dynamic_cast( + list_->getDataSources()[index].data_src_client_)); + + // Comparing with NULL does not work + ASSERT_NE(ds, static_cast(NULL)); + EXPECT_EQ(type, ds->type_); + EXPECT_TRUE(Element::fromJSON(params)->equals(*ds->configuration_)); + } + shared_ptr list_; + const ClientList::FindResult negativeResult_; + vector > ds_; + vector ds_info_; + const ConstElementPtr config_elem_; +}; + +// Test the test itself +TEST_F(ListTest, selfTest) { + EXPECT_EQ(result::SUCCESS, ds_[0]->findZone(Name("example.org")).code); + EXPECT_EQ(result::PARTIALMATCH, + ds_[0]->findZone(Name("sub.example.org")).code); + EXPECT_EQ(result::NOTFOUND, ds_[0]->findZone(Name("org")).code); + EXPECT_EQ(result::NOTFOUND, ds_[1]->findZone(Name("example.org")).code); + EXPECT_EQ(result::NOTFOUND, ds_[0]->findZone(Name("aaa")).code); + EXPECT_EQ(result::NOTFOUND, ds_[0]->findZone(Name("zzz")).code); +} + +// Test the list we create with empty configuration is, in fact, empty 
+TEST_F(ListTest, emptyList) { + EXPECT_TRUE(list_->getDataSources().empty()); +} + +// Check the values returned by a find on an empty list. It should be +// a negative answer (nothing found) no matter if we want an exact or inexact +// match. +TEST_F(ListTest, emptySearch) { + // No matter what we try, we don't get an answer. + + // Note: we don't have operator<< for the result class, so we cannot use + // EXPECT_EQ. Same for other similar cases. + EXPECT_TRUE(negativeResult_ == list_->find(Name("example.org"), false, + false)); + EXPECT_TRUE(negativeResult_ == list_->find(Name("example.org"), false, + true)); + EXPECT_TRUE(negativeResult_ == list_->find(Name("example.org"), true, + false)); + EXPECT_TRUE(negativeResult_ == list_->find(Name("example.org"), true, + true)); +} + +// Put a single data source inside the list and check it can find an +// exact match if there's one. +TEST_F(ListTest, singleDSExactMatch) { + list_->getDataSources().push_back(ds_info_[0]); + // This zone is not there + EXPECT_TRUE(negativeResult_ == list_->find(Name("org."), true)); + // But this one is, so check it. + positiveResult(list_->find(Name("example.org"), true), ds_[0], + Name("example.org"), true, "Exact match"); + // When asking for a sub zone of a zone there, we get nothing + // (we want exact match, this would be partial one) + EXPECT_TRUE(negativeResult_ == list_->find(Name("sub.example.org."), + true)); +} + +// When asking for a partial match, we get all that the exact one, but more. +TEST_F(ListTest, singleDSBestMatch) { + list_->getDataSources().push_back(ds_info_[0]); + // This zone is not there + EXPECT_TRUE(negativeResult_ == list_->find(Name("org."))); + // But this one is, so check it. + positiveResult(list_->find(Name("example.org")), ds_[0], + Name("example.org"), true, "Exact match"); + // When asking for a sub zone of a zone there, we get the parent + // one. 
+ positiveResult(list_->find(Name("sub.example.org.")), ds_[0], + Name("example.org"), false, "Subdomain match"); +} + +const char* const test_names[] = { + "Sub second", + "Sub first", + "With empty", + "With a duplicity" +}; + +TEST_F(ListTest, multiExactMatch) { + // Run through all the multi-configurations + for (size_t i(0); i < sizeof(test_names) / sizeof(*test_names); ++i) { + SCOPED_TRACE(test_names[i]); + multiConfiguration(i); + // Something that is nowhere there + EXPECT_TRUE(negativeResult_ == list_->find(Name("org."), true)); + // This one is there exactly. + positiveResult(list_->find(Name("example.org"), true), ds_[0], + Name("example.org"), true, "Exact match"); + // This one too, but in a different data source. + positiveResult(list_->find(Name("sub.example.org."), true), ds_[1], + Name("sub.example.org"), true, "Subdomain match"); + // But this one is in neither data source. + EXPECT_TRUE(negativeResult_ == + list_->find(Name("sub.example.com."), true)); + } +} + +TEST_F(ListTest, multiBestMatch) { + // Run through all the multi-configurations + for (size_t i(0); i < 4; ++ i) { + SCOPED_TRACE(test_names[i]); + multiConfiguration(i); + // Something that is nowhere there + EXPECT_TRUE(negativeResult_ == list_->find(Name("org."))); + // This one is there exactly. + positiveResult(list_->find(Name("example.org")), ds_[0], + Name("example.org"), true, "Exact match"); + // This one too, but in a different data source. + positiveResult(list_->find(Name("sub.example.org.")), ds_[1], + Name("sub.example.org"), true, "Subdomain match"); + // But this one is in neither data source. But it is a subdomain + // of one of the zones in the first data source. 
+ positiveResult(list_->find(Name("sub.example.com.")), ds_[0], + Name("example.com."), false, "Subdomain in com"); + } +} + +// Check the configuration is empty when the list is empty +TEST_F(ListTest, configureEmpty) { + ConstElementPtr elem(new ListElement); + list_->configure(*elem, true); + EXPECT_TRUE(list_->getDataSources().empty()); +} + +// Check we can get multiple data sources and they are in the right order. +TEST_F(ListTest, configureMulti) { + ConstElementPtr elem(Element::fromJSON("[" + "{" + " \"type\": \"type1\"," + " \"cache\": \"off\"," + " \"params\": {}" + "}," + "{" + " \"type\": \"type2\"," + " \"cache\": \"off\"," + " \"params\": {}" + "}]" + )); + list_->configure(*elem, true); + EXPECT_EQ(2, list_->getDataSources().size()); + checkDS(0, "type1", "{}"); + checkDS(1, "type2", "{}"); +} + +// Check we can pass whatever we want to the params +TEST_F(ListTest, configureParams) { + const char* params[] = { + "true", + "false", + "null", + "\"hello\"", + "42", + "[]", + "{}", + NULL + }; + for (const char** param(params); *param; ++param) { + SCOPED_TRACE(*param); + ConstElementPtr elem(Element::fromJSON(string("[" + "{" + " \"type\": \"t\"," + " \"cache\": \"off\"," + " \"params\": ") + *param + + "}]")); + list_->configure(*elem, true); + EXPECT_EQ(1, list_->getDataSources().size()); + checkDS(0, "t", *param); + } +} + +TEST_F(ListTest, wrongConfig) { + const char* configs[] = { + // A lot of stuff missing from there + "[{\"type\": \"test_type\", \"params\": 13}, {}]", + // Some bad types completely + "{}", + "true", + "42", + "null", + "[{\"type\": \"test_type\", \"params\": 13}, true]", + "[{\"type\": \"test_type\", \"params\": 13}, []]", + "[{\"type\": \"test_type\", \"params\": 13}, 42]", + // Bad type of type + "[{\"type\": \"test_type\", \"params\": 13}, {\"type\": 42}]", + "[{\"type\": \"test_type\", \"params\": 13}, {\"type\": true}]", + "[{\"type\": \"test_type\", \"params\": 13}, {\"type\": null}]", + "[{\"type\": \"test_type\", 
\"params\": 13}, {\"type\": []}]", + "[{\"type\": \"test_type\", \"params\": 13}, {\"type\": {}}]", + // TODO: Once cache is supported, add some invalid cache values + NULL + }; + // Put something inside to see it survives the exception + list_->configure(*config_elem_, true); + checkDS(0, "test_type", "{}"); + for (const char** config(configs); *config; ++config) { + SCOPED_TRACE(*config); + ConstElementPtr elem(Element::fromJSON(*config)); + EXPECT_THROW(list_->configure(*elem, true), + ConfigurableClientList::ConfigurationError); + // Still untouched + checkDS(0, "test_type", "{}"); + EXPECT_EQ(1, list_->getDataSources().size()); + } +} + +// The param thing defaults to null. Cache is not used yet. +TEST_F(ListTest, defaults) { + ConstElementPtr elem(Element::fromJSON("[" + "{" + " \"type\": \"type1\"" + "}]")); + list_->configure(*elem, true); + EXPECT_EQ(1, list_->getDataSources().size()); + checkDS(0, "type1", "null"); +} + +// Check we can call the configure multiple times, to change the configuration +TEST_F(ListTest, reconfigure) { + ConstElementPtr empty(new ListElement); + list_->configure(*config_elem_, true); + checkDS(0, "test_type", "{}"); + list_->configure(*empty, true); + EXPECT_TRUE(list_->getDataSources().empty()); + list_->configure(*config_elem_, true); + checkDS(0, "test_type", "{}"); +} + +// Make sure the data source error exception from the factory is propagated +TEST_F(ListTest, dataSrcError) { + ConstElementPtr elem(Element::fromJSON("[" + "{" + " \"type\": \"error\"" + "}]")); + list_->configure(*config_elem_, true); + checkDS(0, "test_type", "{}"); + EXPECT_THROW(list_->configure(*elem, true), DataSourceError); + checkDS(0, "test_type", "{}"); +} + +} diff --git a/src/lib/datasrc/tests/client_unittest.cc b/src/lib/datasrc/tests/client_unittest.cc index 64ad25f3df..87ab5e05db 100644 --- a/src/lib/datasrc/tests/client_unittest.cc +++ b/src/lib/datasrc/tests/client_unittest.cc @@ -56,4 +56,8 @@ TEST_F(ClientTest, defaultIterator) { 
EXPECT_THROW(client_.getIterator(Name(".")), isc::NotImplemented); } +TEST_F(ClientTest, defaultGetZoneCount) { + EXPECT_THROW(client_.getZoneCount(), isc::NotImplemented); +} + } diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc index 110b3f0734..c96a1d28a8 100644 --- a/src/lib/datasrc/tests/database_unittest.cc +++ b/src/lib/datasrc/tests/database_unittest.cc @@ -142,9 +142,11 @@ const char* const TEST_RECORDS[][5] = { {"delegation.example.org.", "NS", "3600", "", "ns.example.com."}, {"delegation.example.org.", "NS", "3600", "", "ns.delegation.example.org."}, - {"delegation.example.org.", "DS", "3600", "", "1 RSAMD5 2 abcd"}, + {"delegation.example.org.", "DS", "3600", "", "1 1 2 abcd"}, {"delegation.example.org.", "RRSIG", "3600", "", "NS 5 3 3600 " "20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"}, + {"delegation.example.org.", "RRSIG", "3600", "", "DS 5 3 3600 " + "20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"}, {"ns.delegation.example.org.", "A", "3600", "", "192.0.2.1"}, {"deep.below.delegation.example.org.", "A", "3600", "", "192.0.2.1"}, @@ -156,6 +158,16 @@ const char* const TEST_RECORDS[][5] = { {"below.dname.example.org.", "A", "3600", "", "192.0.2.1"}, + // Insecure delegation (i.e., no DS at the delegation point) + {"insecdelegation.example.org.", "NS", "3600", "", "ns.example.com."}, + {"insecdelegation.example.org.", "NSEC", "3600", "", + "dummy.example.org. NS NSEC"}, + // and a DS under the zone cut. Such an RR shouldn't exist in a sane zone, + // but it could by error or some malicious attempt. It shouldn't confuse + // the implementation) + {"child.insecdelegation.example.org.", "DS", "3600", "", "DS 5 3 3600 " + "20000101000000 20000201000000 12345 example.org. 
FAKEFAKEFAKE"}, + // Broken NS {"brokenns1.example.org.", "A", "3600", "", "192.0.2.1"}, {"brokenns1.example.org.", "NS", "3600", "", "ns.example.com."}, @@ -214,6 +226,15 @@ const char* const TEST_RECORDS[][5] = { {NULL, NULL, NULL, NULL, NULL}, }; +// NSEC3PARAM at the zone origin and its RRSIG. These will be added +// separately for some NSEC3 related tests. +const char* TEST_NSEC3PARAM_RECORDS[][5] = { + {"example.org.", "NSEC3PARAM", "3600", "", "1 0 12 aabbccdd"}, + {"example.org.", "RRSIG", "3600", "", "NSEC3PARAM 5 3 3600 20000101000000 " + "20000201000000 12345 example.org. FAKEFAKEFAKE"}, + {NULL, NULL, NULL, NULL, NULL} +}; + // FIXME: Taken from a different test. Fill with proper data when creating a test. const char* TEST_NSEC3_RECORDS[][5] = { {apex_hash, "NSEC3", "300", "", "1 1 12 AABBCCDD 2T7B4G4VSA5SMI47K61MV5BV1A22BOJR A RRSIG"}, @@ -1065,24 +1086,15 @@ public: // tests. Note that the NSEC3 namespace is available in other tests, but // it should not be accessed at that time. void enableNSEC3() { - // We place the signature first, so it's in the block with the other - // signatures - vector signature; - signature.push_back("RRSIG"); - signature.push_back("3600"); - signature.push_back(""); - signature.push_back("NSEC3PARAM 5 3 3600 20000101000000 20000201000000 " - "12345 example.org. 
FAKEFAKEFAKE"); - signature.push_back("exmaple.org."); - (*readonly_records_)["example.org."].push_back(signature); - // Now the NSEC3 param itself - vector param; - param.push_back("NSEC3PARAM"); - param.push_back("3600"); - param.push_back(""); - param.push_back("1 0 12 aabbccdd"); - param.push_back("example.org."); - (*readonly_records_)["example.org."].push_back(param); + for (int i = 0; TEST_NSEC3PARAM_RECORDS[i][0] != NULL; ++i) { + vector param; + param.push_back(TEST_NSEC3PARAM_RECORDS[i][1]); // RRtype + param.push_back(TEST_NSEC3PARAM_RECORDS[i][2]); // TTL + param.push_back(""); // sigtype, unused + param.push_back(TEST_NSEC3PARAM_RECORDS[i][4]); // RDATA + param.push_back(TEST_NSEC3PARAM_RECORDS[i][0]); // owner name + (*readonly_records_)[param.back()].push_back(param); + } } }; @@ -1324,6 +1336,36 @@ public: addRecordToZone(columns); } + // We don't add NSEC3s until we are explicitly told we need them + // in enableNSEC3(); these would break some non NSEC3 tests. + commit(); + } + + void enableNSEC3() { + startUpdateZone("example.org.", false); + + // Add NSECPARAM at the zone origin + for (int i = 0; TEST_NSEC3PARAM_RECORDS[i][0] != NULL; ++i) { + const string param_columns[ADD_COLUMN_COUNT] = { + TEST_NSEC3PARAM_RECORDS[i][0], // name + Name(param_columns[ADD_NAME]).reverse().toText(), // revname + TEST_NSEC3PARAM_RECORDS[i][2], // TTL + TEST_NSEC3PARAM_RECORDS[i][1], // RR type + TEST_NSEC3PARAM_RECORDS[i][3], // sigtype + TEST_NSEC3PARAM_RECORDS[i][4] }; // RDATA + addRecordToZone(param_columns); + } + + // Add NSEC3s + for (int i = 0; TEST_NSEC3_RECORDS[i][0] != NULL; ++i) { + const string nsec3_columns[ADD_NSEC3_COLUMN_COUNT] = { + Name(TEST_NSEC3_RECORDS[i][0]).split(0, 1).toText(true), + TEST_NSEC3_RECORDS[i][2], // TTL + TEST_NSEC3_RECORDS[i][1], // RR type + TEST_NSEC3_RECORDS[i][4] }; // RDATA + addNSEC3RecordToZone(nsec3_columns); + } + commit(); } }; @@ -2171,6 +2213,48 @@ TYPED_TEST(DatabaseClientTest, findDelegation) { DataSourceError); 
} +TYPED_TEST(DatabaseClientTest, findDS) { + // Type DS query is an exception to the general delegation case; the NS + // should be ignored and it should be treated just like normal + // authoritative data. + + boost::shared_ptr finder(this->getFinder()); + + // DS exists at the delegation point. It should be returned with result + // code of SUCCESS. + this->expected_rdatas_.push_back("1 1 2 abcd"), + this->expected_sig_rdatas_.push_back("DS 5 3 3600 20000101000000 " + "20000201000000 12345 example.org. " + "FAKEFAKEFAKE"); + doFindTest(*finder, Name("delegation.example.org."), + RRType::DS(), RRType::DS(), this->rrttl_, ZoneFinder::SUCCESS, + this->expected_rdatas_, this->expected_sig_rdatas_, + ZoneFinder::RESULT_DEFAULT); + + // DS doesn't exist at the delegation point. The result should be + // NXRRSET, and if DNSSEC is requested and the zone is NSEC-signed, + // the corresponding NSEC should be returned (normally with its RRSIG, + // but in this simplified test setup it's omitted in the test data). + this->expected_rdatas_.clear(); + this->expected_rdatas_.push_back("dummy.example.org. NS NSEC"); + this->expected_sig_rdatas_.clear(); + doFindTest(*finder, Name("insecdelegation.example.org."), + RRType::DS(), RRType::NSEC(), this->rrttl_, ZoneFinder::NXRRSET, + this->expected_rdatas_, this->expected_sig_rdatas_, + ZoneFinder::RESULT_NSEC_SIGNED, + Name("insecdelegation.example.org."), ZoneFinder::FIND_DNSSEC); + + // Some insane case: DS under a zone cut. It's included in the DB, but + // shouldn't be visible via finder. 
+ this->expected_rdatas_.clear(); + this->expected_rdatas_.push_back("ns.example.com"); + doFindTest(*finder, Name("child.insecdelegation.example.org"), + RRType::DS(), RRType::NS(), this->rrttl_, + ZoneFinder::DELEGATION, this->expected_rdatas_, + this->empty_rdatas_, ZoneFinder::RESULT_DEFAULT, + Name("insecdelegation.example.org."), ZoneFinder::FIND_DNSSEC); +} + TYPED_TEST(DatabaseClientTest, emptyDomain) { boost::shared_ptr finder(this->getFinder()); @@ -3538,33 +3622,6 @@ TYPED_TEST(DatabaseClientTest, compoundUpdate) { this->empty_rdatas_); } -TYPED_TEST(DatabaseClientTest, previous) { - boost::shared_ptr finder(this->getFinder()); - - EXPECT_EQ(Name("www.example.org."), - finder->findPreviousName(Name("www2.example.org."))); - // Check a name that doesn't exist there - EXPECT_EQ(Name("www.example.org."), - finder->findPreviousName(Name("www1.example.org."))); - if (this->is_mock_) { // We can't really force the DB to throw - // Check it doesn't crash or anything if the underlying DB throws - DataSourceClient::FindResult - zone(this->client_->findZone(Name("bad.example.org"))); - finder = - dynamic_pointer_cast(zone.zone_finder); - - EXPECT_THROW(finder->findPreviousName(Name("bad.example.org")), - isc::NotImplemented); - } else { - // No need to test this on mock one, because we test only that - // the exception gets through - - // A name before the origin - EXPECT_THROW(finder->findPreviousName(Name("example.com")), - isc::NotImplemented); - } -} - TYPED_TEST(DatabaseClientTest, invalidRdata) { boost::shared_ptr finder(this->getFinder()); @@ -3592,13 +3649,6 @@ TEST_F(MockDatabaseClientTest, missingNSEC) { this->expected_rdatas_, this->expected_sig_rdatas_); } -TEST_F(MockDatabaseClientTest, badName) { - boost::shared_ptr finder(this->getFinder()); - - EXPECT_THROW(finder->findPreviousName(Name("brokenname.example.org.")), - DataSourceError); -} - /* * Test correct use of the updater with a journal. 
*/ @@ -3975,11 +4025,11 @@ TEST_F(MockDatabaseClientTest, journalWithBadData) { } /// Let us test a little bit of NSEC3. -TEST_F(MockDatabaseClientTest, findNSEC3) { +TYPED_TEST(DatabaseClientTest, findNSEC3) { // Set up the faked hash calculator. - setNSEC3HashCreator(&test_nsec3_hash_creator_); + setNSEC3HashCreator(&this->test_nsec3_hash_creator_); - DataSourceClient::FindResult + const DataSourceClient::FindResult zone(this->client_->findZone(Name("example.org"))); ASSERT_EQ(result::SUCCESS, zone.code); boost::shared_ptr finder( diff --git a/src/lib/datasrc/tests/factory_unittest.cc b/src/lib/datasrc/tests/factory_unittest.cc index e98f9bcc68..2031d50842 100644 --- a/src/lib/datasrc/tests/factory_unittest.cc +++ b/src/lib/datasrc/tests/factory_unittest.cc @@ -28,6 +28,8 @@ using namespace isc::datasrc; using namespace isc::data; std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3"; +const std::string STATIC_DS_FILE = TEST_DATA_DIR "/static.zone"; +const std::string ROOT_ZONE_FILE = TEST_DATA_DIR "/root.zone"; namespace { @@ -188,8 +190,8 @@ TEST(FactoryTest, memoryClient) { DataSourceError); config->set("type", Element::create("memory")); - ASSERT_THROW(DataSourceClientContainer("memory", config), - DataSourceError); + // no config at all should result in a default empty memory client + ASSERT_NO_THROW(DataSourceClientContainer("memory", config)); config->set("class", ElementPtr()); ASSERT_THROW(DataSourceClientContainer("memory", config), @@ -204,8 +206,7 @@ TEST(FactoryTest, memoryClient) { DataSourceError); config->set("class", Element::create("IN")); - ASSERT_THROW(DataSourceClientContainer("memory", config), - DataSourceError); + ASSERT_NO_THROW(DataSourceClientContainer("memory", config)); config->set("zones", ElementPtr()); ASSERT_THROW(DataSourceClientContainer("memory", config), @@ -236,5 +237,59 @@ TEST(FactoryTest, badType) { DataSourceError); } +// Check the static data source can be loaded. 
+TEST(FactoryTest, staticDS) { + // The only configuration is the file to load. + const ConstElementPtr config(new StringElement(STATIC_DS_FILE)); + // Get the data source + DataSourceClientContainer dsc("static", config); + // And try getting something out to see if it really works. + DataSourceClient::FindResult + result(dsc.getInstance().findZone(isc::dns::Name("BIND"))); + ASSERT_EQ(result::SUCCESS, result.code); + EXPECT_EQ(isc::dns::Name("BIND"), result.zone_finder->getOrigin()); + EXPECT_EQ(isc::dns::RRClass::CH(), result.zone_finder->getClass()); + const isc::dns::ConstRRsetPtr + version(result.zone_finder->find(isc::dns::Name("VERSION.BIND"), + isc::dns::RRType::TXT())->rrset); + ASSERT_NE(isc::dns::ConstRRsetPtr(), version); + EXPECT_EQ(isc::dns::Name("VERSION.BIND"), version->getName()); + EXPECT_EQ(isc::dns::RRClass::CH(), version->getClass()); + EXPECT_EQ(isc::dns::RRType::TXT(), version->getType()); +} + +// Check that file not containing BIND./CH is rejected +// +// FIXME: This test is disabled because the InMemoryZoneFinder::load does +// not check if the data loaded correspond with the origin. The static +// factory is not the place to fix that. +TEST(FactoryTest, DISABLED_staticDSBadFile) { + // The only configuration is the file to load. 
+ const ConstElementPtr config(new StringElement(STATIC_DS_FILE)); + // See it does not want the file + EXPECT_THROW(DataSourceClientContainer("static", config), DataSourceError); +} + +// Check that some bad configs are rejected +TEST(FactoryTest, staticDSBadConfig) { + const char* configs[] = { + // The file does not exist + "\"/does/not/exist\"", + // Bad types + "null", + "42", + "{}", + "[]", + "true", + NULL + }; + for (const char** config(configs); *config; ++config) { + SCOPED_TRACE(*config); + EXPECT_THROW(DataSourceClientContainer("static", + Element::fromJSON(*config)), + DataSourceError); + } +} + } // end anonymous namespace diff --git a/src/lib/datasrc/tests/faked_nsec3.cc b/src/lib/datasrc/tests/faked_nsec3.cc index 0a1823b1be..1e37b8ebbc 100644 --- a/src/lib/datasrc/tests/faked_nsec3.cc +++ b/src/lib/datasrc/tests/faked_nsec3.cc @@ -130,7 +130,7 @@ performNSEC3Test(ZoneFinder &finder) { EXPECT_THROW(finder.findNSEC3(Name("example.com"), false), OutOfZone); EXPECT_THROW(finder.findNSEC3(Name("org"), true), OutOfZone); - Name origin("example.org"); + const Name origin("example.org"); const string apex_nsec3_text = string(apex_hash) + ".example.org." + string(nsec3_common); const string ns1_nsec3_text = string(ns1_hash) + ".example.org." + diff --git a/src/lib/datasrc/tests/memory_datasrc_unittest.cc b/src/lib/datasrc/tests/memory_datasrc_unittest.cc index 07d1fb9605..580f7ff5e7 100644 --- a/src/lib/datasrc/tests/memory_datasrc_unittest.cc +++ b/src/lib/datasrc/tests/memory_datasrc_unittest.cc @@ -373,18 +373,30 @@ protected: // expected_flags is set to either RESULT_NSEC_SIGNED or // RESULT_NSEC3_SIGNED when it's NSEC/NSEC3 signed respectively and // find() is expected to set the corresponding flags. + // find_options should be set to FIND_DNSSEC for NSEC-signed case when + // NSEC is expected to be returned. 
void findCheck(ZoneFinder::FindResultFlags expected_flags = - ZoneFinder::RESULT_DEFAULT); + ZoneFinder::RESULT_DEFAULT, + ZoneFinder::FindOptions find_options = + ZoneFinder::FIND_DEFAULT); void emptyNodeCheck(ZoneFinder::FindResultFlags expected_flags = ZoneFinder::RESULT_DEFAULT); void wildcardCheck(ZoneFinder::FindResultFlags expected_flags = - ZoneFinder::RESULT_DEFAULT); + ZoneFinder::RESULT_DEFAULT, + ZoneFinder::FindOptions find_options = + ZoneFinder::FIND_DEFAULT); void doCancelWildcardCheck(ZoneFinder::FindResultFlags expected_flags = - ZoneFinder::RESULT_DEFAULT); + ZoneFinder::RESULT_DEFAULT, + ZoneFinder::FindOptions find_options = + ZoneFinder::FIND_DEFAULT); void anyWildcardCheck(ZoneFinder::FindResultFlags expected_flags = ZoneFinder::RESULT_DEFAULT); void emptyWildcardCheck(ZoneFinder::FindResultFlags expected_flags = ZoneFinder::RESULT_DEFAULT); + void findNSECENTCheck(const Name& ent_name, + ConstRRsetPtr expected_nsec, + ZoneFinder::FindResultFlags expected_flags = + ZoneFinder::RESULT_DEFAULT); public: InMemoryZoneFinderTest() : @@ -441,8 +453,23 @@ public: {"0P9MHAVEQVM6T7VBL5LOP2U3T2RP3TOM.example.org. 300 IN " "NSEC3 1 1 12 aabbccdd 2T7B4G4VSA5SMI47K61MV5BV1A22BOJR A RRSIG", &rr_nsec3_}, - {"example.org. 300 IN NSEC cname.example.org. A NS NSEC", - &rr_nsec_}, + {"example.org. 300 IN NSEC wild.*.foo.example.org. " + "NS SOA RRSIG NSEC DNSKEY", &rr_nsec_}, + // Together with the apex NSEC, these next NSECs make a complete + // chain in the case of the zone for the emptyNonterminal tests + // (We may want to clean up this generator code and/or masterLoad + // so that we can prepare conflicting datasets better) + {"wild.*.foo.example.org. 3600 IN NSEC ns.example.org. " + "A RRSIG NSEC", &rr_ent_nsec2_}, + {"ns.example.org. 3600 IN NSEC foo.wild.example.org. A RRSIG NSEC", + &rr_ent_nsec3_}, + {"foo.wild.example.org. 3600 IN NSEC example.org. A RRSIG NSEC", + &rr_ent_nsec4_}, + // And these are NSECs used in different tests + {"ns.example.org. 
300 IN NSEC *.nswild.example.org. A AAAA NSEC", + &rr_ns_nsec_}, + {"*.wild.example.org. 300 IN NSEC foo.wild.example.org. A NSEC", + &rr_wild_nsec_}, {NULL, NULL} }; @@ -508,6 +535,11 @@ public: RRsetPtr rr_not_wild_another_; RRsetPtr rr_nsec3_; RRsetPtr rr_nsec_; + RRsetPtr rr_ent_nsec2_; + RRsetPtr rr_ent_nsec3_; + RRsetPtr rr_ent_nsec4_; + RRsetPtr rr_ns_nsec_; + RRsetPtr rr_wild_nsec_; // A faked NSEC3 hash calculator for convenience. // Tests that need to use the faked hashed values should call @@ -650,14 +682,6 @@ public: } }; -/** - * \brief Check that findPreviousName throws as it should now. - */ -TEST_F(InMemoryZoneFinderTest, findPreviousName) { - EXPECT_THROW(zone_finder_.findPreviousName(Name("www.example.org")), - isc::NotImplemented); -} - /** * \brief Test InMemoryZoneFinder::InMemoryZoneFinder constructor. * @@ -977,7 +1001,9 @@ TEST_F(InMemoryZoneFinderTest, glue) { * directly there, it just tells it doesn't exist. */ void -InMemoryZoneFinderTest::findCheck(ZoneFinder::FindResultFlags expected_flags) { +InMemoryZoneFinderTest::findCheck(ZoneFinder::FindResultFlags expected_flags, + ZoneFinder::FindOptions find_options) +{ // Fill some data inside // Now put all the data we have there. It should throw nothing EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_))); @@ -996,17 +1022,44 @@ InMemoryZoneFinderTest::findCheck(ZoneFinder::FindResultFlags expected_flags) { findTest(rr_ns_a_->getName(), RRType::A(), ZoneFinder::SUCCESS, true, rr_ns_a_); - // These domain exist but don't have the provided RRType - findTest(origin_, RRType::AAAA(), ZoneFinder::NXRRSET, true, - ConstRRsetPtr(), expected_flags); - findTest(rr_ns_a_->getName(), RRType::NS(), ZoneFinder::NXRRSET, true, - ConstRRsetPtr(), expected_flags); + // These domains don't exist. (and one is out of the zone). 
In an + // NSEC-signed zone with DNSSEC records requested, it should return the + // covering NSEC for the query name (the actual NSEC in the test data may + // not really "cover" it, but for the purpose of this test it's okay). + ConstRRsetPtr expected_nsec; // by default it's NULL + if ((expected_flags & ZoneFinder::RESULT_NSEC_SIGNED) != 0 && + (find_options & ZoneFinder::FIND_DNSSEC) != 0) { + expected_nsec = rr_nsec_; + } - // These domains don't exist (and one is out of the zone) - findTest(Name("nothere.example.org"), RRType::A(), ZoneFinder::NXDOMAIN, - true, ConstRRsetPtr(), expected_flags); + // There's no other name between this one and the origin, so when NSEC + // is to be returned it should be the origin NSEC. + findTest(Name("nothere.example.org"), RRType::A(), + ZoneFinder::NXDOMAIN, true, expected_nsec, expected_flags, + NULL, find_options); + + // The previous name in the zone is "ns.example.org", but it doesn't + // have an NSEC. It should be skipped and the origin NSEC will be + // returned as the "closest NSEC". + findTest(Name("nxdomain.example.org"), RRType::A(), + ZoneFinder::NXDOMAIN, true, expected_nsec, expected_flags, + NULL, find_options); EXPECT_THROW(zone_finder_.find(Name("example.net"), RRType::A()), OutOfZone); + + // These domain exist but don't have the provided RRType. For the latter + // one we now add its NSEC (which was delayed so that it wouldn't break + // other cases above). 
+ findTest(origin_, RRType::AAAA(), ZoneFinder::NXRRSET, true, + expected_nsec, expected_flags, NULL, find_options); + if ((expected_flags & ZoneFinder::RESULT_NSEC_SIGNED) != 0) { + EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_nsec_)); + if ((find_options & ZoneFinder::FIND_DNSSEC) != 0) { + expected_nsec = rr_ns_nsec_; + } + } + findTest(rr_ns_a_->getName(), RRType::NS(), ZoneFinder::NXRRSET, true, + expected_nsec, expected_flags, NULL, find_options); } TEST_F(InMemoryZoneFinderTest, find) { @@ -1017,10 +1070,74 @@ TEST_F(InMemoryZoneFinderTest, findNSEC3Signed) { findCheck(ZoneFinder::RESULT_NSEC3_SIGNED); } +TEST_F(InMemoryZoneFinderTest, findNSEC3SignedWithDNSSEC) { + // For NSEC3-signed zones, specifying the DNSSEC option shouldn't change + // anything (the NSEC3_SIGNED flag is always set, and no records are + // returned for negative cases regardless). + findCheck(ZoneFinder::RESULT_NSEC3_SIGNED, ZoneFinder::FIND_DNSSEC); +} + TEST_F(InMemoryZoneFinderTest, findNSECSigned) { + // NSEC-signed zone, without requesting DNSSEC (no NSEC should be provided) findCheck(ZoneFinder::RESULT_NSEC_SIGNED); } +// Generalized test for Empty Nonterminal (ENT) cases with NSEC +void +InMemoryZoneFinderTest::findNSECENTCheck(const Name& ent_name, + ConstRRsetPtr expected_nsec, + ZoneFinder::FindResultFlags expected_flags) +{ + EXPECT_EQ(SUCCESS, zone_finder_.add(rr_emptywild_)); + EXPECT_EQ(SUCCESS, zone_finder_.add(rr_under_wild_)); + + // Sanity check: Should result in NXRRSET + findTest(ent_name, RRType::A(), ZoneFinder::NXRRSET, true, + ConstRRsetPtr(), expected_flags); + // Sanity check: No NSEC added yet + findTest(ent_name, RRType::A(), ZoneFinder::NXRRSET, true, + ConstRRsetPtr(), expected_flags, + NULL, ZoneFinder::FIND_DNSSEC); + + // Now add the NSEC rrs making it a 'complete' zone (in terms of NSEC, + // there are no sigs) + EXPECT_EQ(SUCCESS, zone_finder_.add(rr_nsec_)); + EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ent_nsec2_)); + EXPECT_EQ(SUCCESS, 
zone_finder_.add(rr_ent_nsec3_)); + EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ent_nsec4_)); + + // Should result in NXRRSET, and RESULT_NSEC_SIGNED + findTest(ent_name, RRType::A(), ZoneFinder::NXRRSET, true, + ConstRRsetPtr(), + expected_flags | ZoneFinder::RESULT_NSEC_SIGNED); + + // And check for the NSEC if DNSSEC_OK + findTest(ent_name, RRType::A(), ZoneFinder::NXRRSET, true, + expected_nsec, expected_flags | ZoneFinder::RESULT_NSEC_SIGNED, + NULL, ZoneFinder::FIND_DNSSEC); +} + +TEST_F(InMemoryZoneFinderTest,findNSECEmptyNonterminal) { + // Non-wildcard case + findNSECENTCheck(Name("wild.example.org"), rr_ent_nsec3_); +} + +TEST_F(InMemoryZoneFinderTest,findNSECEmptyNonterminalWildcard) { + // Wildcard case, above actual wildcard + findNSECENTCheck(Name("foo.example.org"), rr_nsec_); +} + +TEST_F(InMemoryZoneFinderTest,findNSECEmptyNonterminalAtWildcard) { + // Wildcard case, at actual wildcard + findNSECENTCheck(Name("bar.foo.example.org"), rr_nsec_, + ZoneFinder::RESULT_WILDCARD); +} + +TEST_F(InMemoryZoneFinderTest, findNSECSignedWithDNSSEC) { + // NSEC-signed zone, requesting DNSSEC (NSEC should be provided) + findCheck(ZoneFinder::RESULT_NSEC_SIGNED, ZoneFinder::FIND_DNSSEC); +} + void InMemoryZoneFinderTest::emptyNodeCheck( ZoneFinder::FindResultFlags expected_flags) @@ -1183,7 +1300,8 @@ TEST_F(InMemoryZoneFinderTest, loadFromIterator) { */ void InMemoryZoneFinderTest::wildcardCheck( - ZoneFinder::FindResultFlags expected_flags) + ZoneFinder::FindResultFlags expected_flags, + ZoneFinder::FindOptions find_options) { /* * example.org. @@ -1195,7 +1313,6 @@ InMemoryZoneFinderTest::wildcardCheck( // If the zone is "signed" (detecting it by the NSEC/NSEC3 signed flags), // add RRSIGs to the records. - ZoneFinder::FindOptions find_options = ZoneFinder::FIND_DEFAULT; if ((expected_flags & ZoneFinder::RESULT_NSEC_SIGNED) != 0 || (expected_flags & ZoneFinder::RESULT_NSEC3_SIGNED) != 0) { // Convenience shortcut. 
The RDATA is not really validatable, but @@ -1225,10 +1342,31 @@ InMemoryZoneFinderTest::wildcardCheck( // be in the wildcard (so check the wildcard isn't matched at the parent) { SCOPED_TRACE("Search at parent"); - findTest(Name("wild.example.org"), RRType::A(), ZoneFinder::NXRRSET, - true, ConstRRsetPtr(), expected_flags, NULL, find_options); + if ((expected_flags & ZoneFinder::RESULT_NSEC_SIGNED) != 0) { + findTest(Name("wild.example.org"), RRType::A(), + ZoneFinder::NXRRSET, true, rr_nsec_, expected_flags, + NULL, find_options); + } else { + findTest(Name("wild.example.org"), RRType::A(), + ZoneFinder::NXRRSET, true, ConstRRsetPtr(), + expected_flags, NULL, find_options); + } } + // For the test setup of "NSEC-signed" zone, we might expect it will + // be returned with a negative result, either because wildcard match is + // disabled by the search option or because wildcard match is canceled + // per protocol. + ConstRRsetPtr expected_nsec; // by default it's NULL + if ((expected_flags & ZoneFinder::RESULT_NSEC_SIGNED) != 0 && + (find_options & ZoneFinder::FIND_DNSSEC) != 0) { + expected_nsec = rr_nsec_; + } + // Explicitly converting the following to const pointers; some compilers + // would complain about mixed use of const and non const in ?: below. + const ConstRRsetPtr rr_wild = rr_wild_; + const ConstRRsetPtr rr_cnamewild = rr_cnamewild_; + // Search the original name of wildcard { SCOPED_TRACE("Search directly at *"); @@ -1236,45 +1374,70 @@ InMemoryZoneFinderTest::wildcardCheck( true, rr_wild_, ZoneFinder::RESULT_DEFAULT, NULL, find_options); } + + // Below some of the test cases will normally result in a wildcard match; + // if NO_WILDCARD is specified, it should result in NXDOMAIN instead, + // and, when available and requested, the covering NSEC will be returned. + // The following are shortcut parameters to unify these cases. 
+ const bool wild_ok = ((find_options & ZoneFinder::NO_WILDCARD) == 0); + const ZoneFinder::FindResultFlags wild_expected_flags = + wild_ok ? (ZoneFinder::RESULT_WILDCARD | expected_flags) : + expected_flags; + // Search "created" name. { SCOPED_TRACE("Search at created child"); - findTest(Name("a.wild.example.org"), RRType::A(), ZoneFinder::SUCCESS, - false, rr_wild_, - ZoneFinder::RESULT_WILDCARD | expected_flags, NULL, - find_options, true); - // Wildcard match, but no data - findTest(Name("a.wild.example.org"), RRType::AAAA(), - ZoneFinder::NXRRSET, true, ConstRRsetPtr(), - ZoneFinder::RESULT_WILDCARD | expected_flags, NULL, - find_options); + findTest(Name("a.wild.example.org"), RRType::A(), + wild_ok ? ZoneFinder::SUCCESS : ZoneFinder::NXDOMAIN, false, + wild_ok ? rr_wild : expected_nsec, + wild_expected_flags, NULL, find_options, wild_ok); } // Search name that has CNAME. { SCOPED_TRACE("Matching CNAME"); findTest(Name("a.cnamewild.example.org"), RRType::A(), - ZoneFinder::CNAME, false, rr_cnamewild_, - ZoneFinder::RESULT_WILDCARD | expected_flags, NULL, - find_options, true); + wild_ok ? ZoneFinder::CNAME : ZoneFinder::NXDOMAIN, false, + wild_ok ? rr_cnamewild : expected_nsec, + wild_expected_flags, NULL, find_options, wild_ok); } // Search another created name, this time little bit lower { SCOPED_TRACE("Search at created grand-child"); findTest(Name("a.b.wild.example.org"), RRType::A(), - ZoneFinder::SUCCESS, false, rr_wild_, - ZoneFinder::RESULT_WILDCARD | expected_flags, NULL, - find_options, true); + wild_ok ? ZoneFinder::SUCCESS : ZoneFinder::NXDOMAIN, false, + wild_ok ? 
rr_wild : expected_nsec, + wild_expected_flags, NULL, find_options, wild_ok); } EXPECT_EQ(SUCCESS, zone_finder_.add(rr_under_wild_)); { SCOPED_TRACE("Search under non-wildcard"); findTest(Name("bar.foo.wild.example.org"), RRType::A(), - ZoneFinder::NXDOMAIN, true, ConstRRsetPtr(), expected_flags, + ZoneFinder::NXDOMAIN, true, expected_nsec, expected_flags, NULL, find_options); } + + // Wildcard match, but no data. We add the additional NSEC at the wildcard + // at this point so that it wouldn't break other tests above. Note also + // that in the NO_WILDCARD case the resulting NSEC is the same. Ideally + // we could use a more tricky setup so we can distinguish these cases, + // but for this purpose it's not bad; what we'd like to test here is that + // wildcard substitution doesn't happen for either case, and the + // NO_WILDCARD effect itself can be checked by the result code (NXDOMAIN). + ConstRRsetPtr expected_wild_nsec; // by default it's NULL + if ((expected_flags & ZoneFinder::RESULT_NSEC_SIGNED) != 0) { + EXPECT_EQ(SUCCESS, zone_finder_.add(rr_wild_nsec_)); + expected_wild_nsec = rr_wild_nsec_; + } + { + SCOPED_TRACE("Search at wildcard, no data"); + findTest(Name("a.wild.example.org"), RRType::AAAA(), + wild_ok ? ZoneFinder::NXRRSET : ZoneFinder::NXDOMAIN, true, + wild_ok ? expected_wild_nsec : expected_wild_nsec, + wild_expected_flags, NULL, find_options); + } } TEST_F(InMemoryZoneFinderTest, wildcard) { @@ -1288,10 +1451,22 @@ TEST_F(InMemoryZoneFinderTest, wildcardNSEC3) { } TEST_F(InMemoryZoneFinderTest, wildcardNSEC) { - // Similar to the previous one, but the zone signed with NSEC + // Similar to the previous one, but the zone is signed with NSEC wildcardCheck(ZoneFinder::RESULT_NSEC_SIGNED); } +TEST_F(InMemoryZoneFinderTest, wildcardDisabledWithNSEC) { + // Wildcard is disabled. In practice, this is used as part of query + // processing for an NSEC-signed zone, so we test that case specifically. 
+ wildcardCheck(ZoneFinder::RESULT_NSEC_SIGNED, ZoneFinder::NO_WILDCARD); +} + +TEST_F(InMemoryZoneFinderTest, wildcardDisabledWithoutNSEC) { + // Similar to the previous once, but check the behavior for a non signed + // zone just in case. + wildcardCheck(ZoneFinder::RESULT_DEFAULT, ZoneFinder::NO_WILDCARD); +} + /* * Test that we don't match a wildcard if we get under delegation. * By 4.3.3 of RFC1034: @@ -1495,15 +1670,29 @@ TEST_F(InMemoryZoneFinderTest, nestedEmptyWildcard) { // situations void InMemoryZoneFinderTest::doCancelWildcardCheck( - ZoneFinder::FindResultFlags expected_flags) + ZoneFinder::FindResultFlags expected_flags, + ZoneFinder::FindOptions find_options) { // These should be canceled { SCOPED_TRACE("Canceled under foo.wild.example.org"); + + // For an NSEC-signed zone with DNSSEC requested, the covering NSEC + // should be returned. The expected NSEC is actually just the only + // NSEC in the test data, but in this context it doesn't matter; + // it's sufficient just to check any NSEC is returned (or not). 
+ ConstRRsetPtr expected_nsec; // by default it's NULL + if ((expected_flags & ZoneFinder::RESULT_NSEC_SIGNED) != 0 && + (find_options & ZoneFinder::FIND_DNSSEC)) { + expected_nsec = rr_nsec_; + } + findTest(Name("aaa.foo.wild.example.org"), RRType::A(), - ZoneFinder::NXDOMAIN, true, ConstRRsetPtr(), expected_flags); + ZoneFinder::NXDOMAIN, true, expected_nsec, expected_flags, + NULL, find_options); findTest(Name("zzz.foo.wild.example.org"), RRType::A(), - ZoneFinder::NXDOMAIN, true, ConstRRsetPtr(), expected_flags); + ZoneFinder::NXDOMAIN, true, expected_nsec, expected_flags, + NULL, find_options); } // This is existing, non-wildcard domain, shouldn't wildcard at all @@ -1571,6 +1760,7 @@ TEST_F(InMemoryZoneFinderTest, cancelWildcard) { } } +// Same tests as cancelWildcard for NSEC3-signed zone TEST_F(InMemoryZoneFinderTest, cancelWildcardNSEC3) { EXPECT_EQ(SUCCESS, zone_finder_.add(rr_wild_)); EXPECT_EQ(SUCCESS, zone_finder_.add(rr_not_wild_)); @@ -1587,6 +1777,29 @@ TEST_F(InMemoryZoneFinderTest, cancelWildcardNSEC3) { } } +// Same tests as cancelWildcard for NSEC-signed zone. Check both cases with +// or without FIND_DNSSEC option. NSEC should be returned only when the option +// is given. 
+TEST_F(InMemoryZoneFinderTest, cancelWildcardNSEC) { + EXPECT_EQ(SUCCESS, zone_finder_.add(rr_wild_)); + EXPECT_EQ(SUCCESS, zone_finder_.add(rr_not_wild_)); + EXPECT_EQ(SUCCESS, zone_finder_.add(rr_nsec_)); + + { + SCOPED_TRACE("Runnig with single entry under foo.wild.example.org"); + doCancelWildcardCheck(ZoneFinder::RESULT_NSEC_SIGNED, + ZoneFinder::FIND_DNSSEC); + doCancelWildcardCheck(ZoneFinder::RESULT_NSEC_SIGNED); + } + EXPECT_EQ(SUCCESS, zone_finder_.add(rr_not_wild_another_)); + { + SCOPED_TRACE("Runnig with two entries under foo.wild.example.org"); + doCancelWildcardCheck(ZoneFinder::RESULT_NSEC_SIGNED, + ZoneFinder::FIND_DNSSEC); + doCancelWildcardCheck(ZoneFinder::RESULT_NSEC_SIGNED); + } +} + TEST_F(InMemoryZoneFinderTest, loadBadWildcard) { // We reject loading the zone if it contains a wildcard name for // NS or DNAME. diff --git a/src/lib/datasrc/tests/rbtree_unittest.cc b/src/lib/datasrc/tests/rbtree_unittest.cc index b26a22bc5f..a11bff517d 100644 --- a/src/lib/datasrc/tests/rbtree_unittest.cc +++ b/src/lib/datasrc/tests/rbtree_unittest.cc @@ -45,8 +45,8 @@ const size_t Name::MAX_LABELS; * c | g.h * | | * w.y i - * / | \ - * x | z + * / | \ \ + * x | z k * | | * p j * / \ @@ -59,7 +59,7 @@ protected: RBTreeTest() : rbtree_expose_empty_node(true), crbtnode(NULL) { const char* const domain_names[] = { "c", "b", "a", "x.d.e.f", "z.d.e.f", "g.h", "i.g.h", "o.w.y.d.e.f", - "j.z.d.e.f", "p.w.y.d.e.f", "q.w.y.d.e.f"}; + "j.z.d.e.f", "p.w.y.d.e.f", "q.w.y.d.e.f", "k.g.h"}; int name_count = sizeof(domain_names) / sizeof(domain_names[0]); for (int i = 0; i < name_count; ++i) { rbtree.insert(Name(domain_names[i]), &rbtnode); @@ -79,7 +79,7 @@ protected: TEST_F(RBTreeTest, getNodeCount) { - EXPECT_EQ(13, rbtree.getNodeCount()); + EXPECT_EQ(14, rbtree.getNodeCount()); } TEST_F(RBTreeTest, setGetData) { @@ -91,46 +91,46 @@ TEST_F(RBTreeTest, insertNames) { EXPECT_EQ(RBTree::ALREADYEXISTS, rbtree.insert(Name("d.e.f"), &rbtnode)); EXPECT_EQ(Name("d.e.f"), 
rbtnode->getName()); - EXPECT_EQ(13, rbtree.getNodeCount()); + EXPECT_EQ(14, rbtree.getNodeCount()); //insert not exist node EXPECT_EQ(RBTree::SUCCESS, rbtree.insert(Name("."), &rbtnode)); EXPECT_EQ(Name("."), rbtnode->getName()); - EXPECT_EQ(14, rbtree.getNodeCount()); + EXPECT_EQ(15, rbtree.getNodeCount()); EXPECT_EQ(RBTree::SUCCESS, rbtree.insert(Name("example.com"), &rbtnode)); - EXPECT_EQ(15, rbtree.getNodeCount()); + EXPECT_EQ(16, rbtree.getNodeCount()); rbtnode->setData(RBNode::NodeDataPtr(new int(12))); // return ALREADYEXISTS, since node "example.com" already has been explicitly inserted EXPECT_EQ(RBTree::ALREADYEXISTS, rbtree.insert(Name("example.com"), &rbtnode)); - EXPECT_EQ(15, rbtree.getNodeCount()); + EXPECT_EQ(16, rbtree.getNodeCount()); // split the node "d.e.f" EXPECT_EQ(RBTree::SUCCESS, rbtree.insert(Name("k.e.f"), &rbtnode)); EXPECT_EQ(Name("k"), rbtnode->getName()); - EXPECT_EQ(17, rbtree.getNodeCount()); + EXPECT_EQ(18, rbtree.getNodeCount()); // split the node "g.h" EXPECT_EQ(RBTree::ALREADYEXISTS, rbtree.insert(Name("h"), &rbtnode)); EXPECT_EQ(Name("h"), rbtnode->getName()); - EXPECT_EQ(18, rbtree.getNodeCount()); + EXPECT_EQ(19, rbtree.getNodeCount()); // add child domain EXPECT_EQ(RBTree::SUCCESS, rbtree.insert(Name("m.p.w.y.d.e.f"), &rbtnode)); EXPECT_EQ(Name("m"), rbtnode->getName()); - EXPECT_EQ(19, rbtree.getNodeCount()); + EXPECT_EQ(20, rbtree.getNodeCount()); EXPECT_EQ(RBTree::SUCCESS, rbtree.insert(Name("n.p.w.y.d.e.f"), &rbtnode)); EXPECT_EQ(Name("n"), rbtnode->getName()); - EXPECT_EQ(20, rbtree.getNodeCount()); + EXPECT_EQ(21, rbtree.getNodeCount()); EXPECT_EQ(RBTree::SUCCESS, rbtree.insert(Name("l.a"), &rbtnode)); EXPECT_EQ(Name("l"), rbtnode->getName()); - EXPECT_EQ(21, rbtree.getNodeCount()); + EXPECT_EQ(22, rbtree.getNodeCount()); EXPECT_EQ(RBTree::SUCCESS, rbtree.insert(Name("r.d.e.f"), &rbtnode)); EXPECT_EQ(RBTree::SUCCESS, rbtree.insert(Name("s.d.e.f"), &rbtnode)); - EXPECT_EQ(23, rbtree.getNodeCount()); + EXPECT_EQ(24, 
rbtree.getNodeCount()); EXPECT_EQ(RBTree::SUCCESS, rbtree.insert(Name("h.w.y.d.e.f"), &rbtnode)); @@ -180,10 +180,10 @@ TEST_F(RBTreeTest, findName) { TEST_F(RBTreeTest, findError) { // For the version that takes a node chain, the chain must be empty. RBTreeNodeChain chain; - EXPECT_EQ(RBTree::EXACTMATCH, rbtree.find(Name("a"), &crbtnode, - chain, NULL, NULL)); + EXPECT_EQ(RBTree::EXACTMATCH, rbtree.find(Name("a"), &crbtnode, + chain)); // trying to reuse the same chain. it should result in an exception. - EXPECT_THROW(rbtree.find(Name("a"), &crbtnode, chain, NULL, NULL), + EXPECT_THROW(rbtree.find(Name("a"), &crbtnode, chain), BadValue); } @@ -280,7 +280,7 @@ TEST_F(RBTreeTest, chainLevel) { Name node_name(Name::ROOT_NAME()); EXPECT_EQ(RBTree::SUCCESS, tree.insert(node_name, &rbtnode)); EXPECT_EQ(RBTree::EXACTMATCH, - tree.find(node_name, &crbtnode, chain, NULL, NULL)); + tree.find(node_name, &crbtnode, chain)); EXPECT_EQ(1, chain.getLevelCount()); /* @@ -303,8 +303,7 @@ TEST_F(RBTreeTest, chainLevel) { EXPECT_EQ(RBTree::SUCCESS, tree.insert(node_name, &rbtnode)); RBTreeNodeChain found_chain; EXPECT_EQ(RBTree::EXACTMATCH, - tree.find(node_name, &crbtnode, found_chain, - NULL, NULL)); + tree.find(node_name, &crbtnode, found_chain)); EXPECT_EQ(i, found_chain.getLevelCount()); } @@ -324,7 +323,7 @@ TEST_F(RBTreeTest, getAbsoluteNameError) { /* *the domain order should be: * a, b, c, d.e.f, x.d.e.f, w.y.d.e.f, o.w.y.d.e.f, p.w.y.d.e.f, q.w.y.d.e.f, - * z.d.e.f, j.z.d.e.f, g.h, i.g.h + * z.d.e.f, j.z.d.e.f, g.h, i.g.h, k.g.h * b * / \ * a d.e.f @@ -332,23 +331,24 @@ TEST_F(RBTreeTest, getAbsoluteNameError) { * c | g.h * | | * w.y i - * / | \ - * x | z + * / | \ \ + * x | z k * | | * p j * / \ * o q */ +const char* const names[] = { + "a", "b", "c", "d.e.f", "x.d.e.f", "w.y.d.e.f", "o.w.y.d.e.f", + "p.w.y.d.e.f", "q.w.y.d.e.f", "z.d.e.f", "j.z.d.e.f", + "g.h", "i.g.h", "k.g.h"}; +const size_t name_count(sizeof(names) / sizeof(*names)); + TEST_F(RBTreeTest, nextNode) { - 
const char* const names[] = { - "a", "b", "c", "d.e.f", "x.d.e.f", "w.y.d.e.f", "o.w.y.d.e.f", - "p.w.y.d.e.f", "q.w.y.d.e.f", "z.d.e.f", "j.z.d.e.f", "g.h", "i.g.h"}; - const int name_count = sizeof(names) / sizeof(names[0]); RBTreeNodeChain node_path; const RBNode* node = NULL; EXPECT_EQ(RBTree::EXACTMATCH, - rbtree.find(Name(names[0]), &node, node_path, NULL, - NULL)); + rbtree.find(Name(names[0]), &node, node_path)); for (int i = 0; i < name_count; ++i) { EXPECT_NE(static_cast(NULL), node); EXPECT_EQ(Name(names[i]), node_path.getAbsoluteName()); @@ -359,6 +359,201 @@ TEST_F(RBTreeTest, nextNode) { EXPECT_EQ(static_cast(NULL), node); } +// Just walk using previousNode until the beginning of the tree and check it is +// OK +// +// rbtree - the tree to walk +// node - result of previous call to find(), starting position of the walk +// node_path - the path from the previous call to find(), will be modified +// chain_length - the number of names that should be in the chain to be walked +// (0 means it should be empty, 3 means 'a', 'b' and 'c' should be there - +// this is always from the beginning of the names[] list). +// skip_first - if this is false, the node should already contain the node with +// the first name of the chain. If it is true, the node should be NULL +// (true is for finds that return no match, false for the ones that return +// match) +void +previousWalk(RBTree& rbtree, const RBNode* node, + RBTreeNodeChain& node_path, size_t chain_length, + bool skip_first) +{ + if (skip_first) { + // If the first is not found, this is supposed to be NULL and we skip + // it in our checks. 
+ EXPECT_EQ(static_cast(NULL), node); + node = rbtree.previousNode(node_path); + } + for (size_t i(chain_length); i > 0; --i) { + EXPECT_NE(static_cast(NULL), node); + EXPECT_EQ(Name(names[i - 1]), node_path.getAbsoluteName()); + // Find the node at the path and check the value is the same + // (that it really returns the correct corresponding node) + // + // The "empty" nodes can not be found + if (node->getData()) { + const RBNode* node2(NULL); + RBTreeNodeChain node_path2; + EXPECT_EQ(RBTree::EXACTMATCH, + rbtree.find(Name(names[i - 1]), &node2, node_path2)); + EXPECT_EQ(node, node2); + } + node = rbtree.previousNode(node_path); + } + + // We should have reached the start of the tree. + EXPECT_EQ(static_cast(NULL), node); + + // Calling previousNode() yet again should still return NULL without + // fail. + node = rbtree.previousNode(node_path); + EXPECT_EQ(static_cast(NULL), node); +} + +// Check the previousNode +TEST_F(RBTreeTest, previousNode) { + // First, iterate the whole tree from the end to the beginning. + RBTreeNodeChain node_path; + EXPECT_THROW(rbtree.previousNode(node_path), isc::BadValue) << + "Throw before a search was done on the path"; + const RBNode* node(NULL); + { + SCOPED_TRACE("Iterate through"); + EXPECT_EQ(RBTree::EXACTMATCH, + rbtree.find(Name(names[name_count - 1]), &node, node_path)); + previousWalk(rbtree, node, node_path, name_count, false); + node = NULL; + node_path.clear(); + } + + { + SCOPED_TRACE("Iterate from the middle"); + // Now, start somewhere in the middle, but within the real node. + EXPECT_EQ(RBTree::EXACTMATCH, + rbtree.find(Name(names[4]), &node, node_path)); + previousWalk(rbtree, node, node_path, 5, false); + node = NULL; + node_path.clear(); + } + + { + SCOPED_TRACE("Start at the first"); + // If we start at the lowest (which is "a"), we get to the beginning + // right away. 
+ EXPECT_EQ(RBTree::EXACTMATCH, + rbtree.find(Name(names[0]), &node, node_path)); + EXPECT_NE(static_cast(NULL), node); + EXPECT_EQ(static_cast(NULL), rbtree.previousNode(node_path)); + node = NULL; + node_path.clear(); + } + + { + SCOPED_TRACE("Start before the first"); + // If we start before the lowest (0 < a), we should not get a node nor + EXPECT_EQ(RBTree::NOTFOUND, + rbtree.find(Name("0"), &node, node_path, NULL, NULL)); + EXPECT_EQ(static_cast(NULL), node); + EXPECT_EQ(static_cast(NULL), rbtree.previousNode(node_path)); + node = NULL; + node_path.clear(); + } + + { + SCOPED_TRACE("Start after the last"); + EXPECT_EQ(RBTree::NOTFOUND, + rbtree.find(Name("z"), &node, node_path)); + previousWalk(rbtree, node, node_path, name_count, true); + node = NULL; + node_path.clear(); + } + + { + SCOPED_TRACE("Start below a leaf"); + // We exit a leaf by going down. We should start by the one + // we exited - 'c' (actually, we should get it by the find, as partial + // match). + EXPECT_EQ(RBTree::PARTIALMATCH, + rbtree.find(Name("b.c"), &node, node_path)); + previousWalk(rbtree, node, node_path, 3, false); + node = NULL; + node_path.clear(); + } + + { + SCOPED_TRACE("Start to the right of a leaf"); + // When searching for this, we exit the 'x' node to the right side, + // so we should go x afterwards. + + // The d.e.f is empty node, so it is hidden by find. Therefore NOTFOUND + // and not PARTIALMATCH. + EXPECT_EQ(RBTree::NOTFOUND, + rbtree.find(Name("xy.d.e.f"), &node, node_path)); + previousWalk(rbtree, node, node_path, 5, true); + node = NULL; + node_path.clear(); + } + + { + SCOPED_TRACE("Start to the left of a leaf"); + // This is similar to the previous, but we exit the 'z' leaf to the + // left side, so should not visit z at all then. + + // The d.e.f is empty node, so it is hidden by find. Therefore NOTFOUND + // and not PARTIALMATCH. 
+ EXPECT_EQ(RBTree::NOTFOUND, + rbtree.find(Name("yz.d.e.f"), &node, node_path)); + previousWalk(rbtree, node, node_path, 9, true); + node = NULL; + node_path.clear(); + } + + { + SCOPED_TRACE("Start to the right of a parent"); + // When searching for this, we exit the 'g.h' node to the right + // side, so we should go to g.h's children afterwards. + + // 'g.h' is an empty node, so we get a NOTFOUND and not + // PARTIALMATCH. + EXPECT_EQ(RBTree::NOTFOUND, + rbtree.find(Name("x.h"), &node, node_path)); + // 'g.h' is the COMMONANCESTOR. + EXPECT_EQ(node_path.getLastComparedNode()->getName(), Name("g.h")); + EXPECT_EQ(NameComparisonResult::COMMONANCESTOR, + node_path.getLastComparisonResult().getRelation()); + // find() exits to the right of 'g.h' + EXPECT_GT(node_path.getLastComparisonResult().getOrder(), 0); + // We then descend into 'i.g.h' and walk all the nodes in the + // tree. + previousWalk(rbtree, node, node_path, name_count, true); + node = NULL; + node_path.clear(); + } + + { + SCOPED_TRACE("Start inside a wrong node"); + // The d.e.f is a single node, but we want only part of it. We + // should start iterating before it. + EXPECT_EQ(RBTree::NOTFOUND, + rbtree.find(Name("e.f"), &node, node_path)); + previousWalk(rbtree, node, node_path, 3, true); + node = NULL; + node_path.clear(); + } + + { + SCOPED_TRACE("Lookup in empty tree"); + // Just check it doesn't crash, etc. + RBTree empty_tree; + EXPECT_EQ(RBTree::NOTFOUND, + empty_tree.find(Name("x"), &node, node_path)); + EXPECT_EQ(static_cast(NULL), node); + EXPECT_EQ(static_cast(NULL), + empty_tree.previousNode(node_path)); + node = NULL; + node_path.clear(); + } +} + TEST_F(RBTreeTest, nextNodeError) { // Empty chain for nextNode() is invalid. RBTreeNodeChain chain; @@ -394,7 +589,7 @@ TEST_F(RBTreeTest, getLastComparedNode) { // A search for an empty tree should result in no 'last compared', too. 
RBTree empty_tree; EXPECT_EQ(RBTree::NOTFOUND, - empty_tree.find(Name("a"), &crbtnode, chain, NULL, NULL)); + empty_tree.find(Name("a"), &crbtnode, chain)); EXPECT_EQ(static_cast(NULL), chain.getLastComparedNode()); chain.clear(); @@ -402,8 +597,7 @@ TEST_F(RBTreeTest, getLastComparedNode) { // Exact match case. The returned node should be last compared. EXPECT_EQ(RBTree::EXACTMATCH, - tree.find(Name("x.d.e.f"), &expected_node, chain, - NULL, NULL)); + tree.find(Name("x.d.e.f"), &expected_node, chain)); EXPECT_EQ(expected_node, chain.getLastComparedNode()); // 2 = # labels of "x." comparisonChecks(chain, 0, 2, NameComparisonResult::EQUAL); @@ -412,12 +606,11 @@ TEST_F(RBTreeTest, getLastComparedNode) { // Partial match, search stopped at the matching node, which should be // the last compared node. EXPECT_EQ(RBTree::EXACTMATCH, - tree.find(Name("i.g.h"), &expected_node)); + tree.find(Name("k.g.h"), &expected_node)); EXPECT_EQ(RBTree::PARTIALMATCH, - tree.find(Name("x.i.g.h"), &crbtnode, chain, - NULL, NULL)); + tree.find(Name("x.k.g.h"), &crbtnode, chain)); EXPECT_EQ(expected_node, chain.getLastComparedNode()); - // i.g.h < x.i.g.h, 2 = # labels of "i." + // k.g.h < x.k.g.h, 2 = # labels of "k." comparisonChecks(chain, 1, 2, NameComparisonResult::SUBDOMAIN); chain.clear(); @@ -426,8 +619,7 @@ TEST_F(RBTreeTest, getLastComparedNode) { EXPECT_EQ(RBTree::EXACTMATCH, tree.find(Name("x.d.e.f"), &expected_node)); EXPECT_EQ(RBTree::PARTIALMATCH, - tree.find(Name("a.d.e.f"), &crbtnode, chain, - NULL, NULL)); + tree.find(Name("a.d.e.f"), &crbtnode, chain)); EXPECT_EQ(expected_node, chain.getLastComparedNode()); // a < x, 1 = # labels of "." 
(trailing dot) comparisonChecks(chain, -1, 1, NameComparisonResult::COMMONANCESTOR); @@ -438,8 +630,7 @@ TEST_F(RBTreeTest, getLastComparedNode) { EXPECT_EQ(RBTree::EXACTMATCH, tree.find(Name("z.d.e.f"), &expected_node)); EXPECT_EQ(RBTree::PARTIALMATCH, - tree.find(Name("zz.d.e.f"), &crbtnode, chain, - NULL, NULL)); + tree.find(Name("zz.d.e.f"), &crbtnode, chain)); EXPECT_EQ(expected_node, chain.getLastComparedNode()); // zz > z, 1 = # labels of "." (trailing dot) comparisonChecks(chain, 1, 1, NameComparisonResult::COMMONANCESTOR); @@ -450,8 +641,7 @@ TEST_F(RBTreeTest, getLastComparedNode) { EXPECT_EQ(RBTree::EXACTMATCH, tree.find(Name("w.y.d.e.f"), &expected_node)); EXPECT_EQ(RBTree::PARTIALMATCH, - tree.find(Name("y.d.e.f"), &crbtnode, chain, - NULL, NULL)); + tree.find(Name("y.d.e.f"), &crbtnode, chain)); EXPECT_EQ(expected_node, chain.getLastComparedNode()); // y < w.y, 2 = # labels of "y." comparisonChecks(chain, -1, 2, NameComparisonResult::SUPERDOMAIN); @@ -461,8 +651,7 @@ TEST_F(RBTreeTest, getLastComparedNode) { // with the search name in the subtree below the matching node. // (the expected node is the same as the previous case) EXPECT_EQ(RBTree::PARTIALMATCH, - tree.find(Name("z.y.d.e.f"), &crbtnode, chain, - NULL, NULL)); + tree.find(Name("z.y.d.e.f"), &crbtnode, chain)); EXPECT_EQ(expected_node, chain.getLastComparedNode()); // z.y > w.y, 2 = # labels of "y." comparisonChecks(chain, 1, 2, NameComparisonResult::COMMONANCESTOR); @@ -471,7 +660,7 @@ TEST_F(RBTreeTest, getLastComparedNode) { // Search stops in the highest level after following a left branch. EXPECT_EQ(RBTree::EXACTMATCH, tree.find(Name("c"), &expected_node)); EXPECT_EQ(RBTree::NOTFOUND, - tree.find(Name("bb"), &crbtnode, chain, NULL, NULL)); + tree.find(Name("bb"), &crbtnode, chain)); EXPECT_EQ(expected_node, chain.getLastComparedNode()); // bb < c, 1 = # labels of "." 
(trailing dot) comparisonChecks(chain, -1, 1, NameComparisonResult::COMMONANCESTOR); @@ -480,7 +669,7 @@ TEST_F(RBTreeTest, getLastComparedNode) { // Search stops in the highest level after following a right branch. // (the expected node is the same as the previous case) EXPECT_EQ(RBTree::NOTFOUND, - tree.find(Name("d"), &crbtnode, chain, NULL, NULL)); + tree.find(Name("d"), &crbtnode, chain)); EXPECT_EQ(expected_node, chain.getLastComparedNode()); // d > c, 1 = # labels of "." (trailing dot) comparisonChecks(chain, 1, 1, NameComparisonResult::COMMONANCESTOR); @@ -491,7 +680,7 @@ TEST_F(RBTreeTest, dumpTree) { std::ostringstream str; std::ostringstream str2; rbtree.dumpTree(str); - str2 << "tree has 13 node(s)\nb. (black)\n a. (black)\n NULL\n NULL\n d.e.f. (black)[invisible] \n begin down from d.e.f.\n w.y. (black)[invisible] \n begin down from w.y.\n p. (black)\n o. (red)\n NULL\n NULL\n q. (red)\n NULL\n NULL\n end down from w.y.\n x. (red)\n NULL\n NULL\n z. (red)\n begin down from z.\n j. (black)\n NULL\n NULL\n end down from z.\n NULL\n NULL\n end down from d.e.f.\n c. (red)\n NULL\n NULL\n g.h. (red)\n begin down from g.h.\n i. (black)\n NULL\n NULL\n end down from g.h.\n NULL\n NULL\n"; + str2 << "tree has 14 node(s)\nb. (black)\n a. (black)\n NULL\n NULL\n d.e.f. (black)[invisible] \n begin down from d.e.f.\n w.y. (black)[invisible] \n begin down from w.y.\n p. (black)\n o. (red)\n NULL\n NULL\n q. (red)\n NULL\n NULL\n end down from w.y.\n x. (red)\n NULL\n NULL\n z. (red)\n begin down from z.\n j. (black)\n NULL\n NULL\n end down from z.\n NULL\n NULL\n end down from d.e.f.\n c. (red)\n NULL\n NULL\n g.h. (red)\n begin down from g.h.\n i. (black)\n NULL\n k. 
(red)\n NULL\n NULL\n end down from g.h.\n NULL\n NULL\n"; EXPECT_EQ(str.str(), str2.str()); } diff --git a/src/lib/datasrc/tests/testdata/static.zone b/src/lib/datasrc/tests/testdata/static.zone new file mode 100644 index 0000000000..5e9e8a65bd --- /dev/null +++ b/src/lib/datasrc/tests/testdata/static.zone @@ -0,0 +1,2 @@ +BIND. 3600 CH SOA BIND. BIND. 1 3600 300 36000 3600 +VERSION.BIND. 3600 CH TXT "10" diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h index c68a01c9d1..bf4b1eac76 100644 --- a/src/lib/datasrc/zone.h +++ b/src/lib/datasrc/zone.h @@ -617,31 +617,6 @@ public: /// a matching NSEC3, in the form of \c FindNSEC3Result object. virtual FindNSEC3Result findNSEC3(const isc::dns::Name& name, bool recursive) = 0; - - /// \brief Get previous name in the zone - /// - /// Gets the previous name in the DNSSEC order. This can be used - /// to find the correct NSEC records for proving nonexistence - /// of domains. - /// - /// The concrete implementation might throw anything it thinks appropriate, - /// however it is recommended to stick to the ones listed here. The user - /// of this method should be able to handle any exceptions. - /// - /// This method does not include under-zone-cut data (glue data). - /// - /// \param query The name for which one we look for a previous one. The - /// queried name doesn't have to exist in the zone. - /// \return The preceding name - /// - /// \throw NotImplemented in case the data source backend doesn't support - /// DNSSEC or there is no previous in the zone (NSEC records might be - /// missing in the DB, the queried name is less or equal to the apex). - /// \throw DataSourceError for low-level or internal datasource errors - /// (like broken connection to database, wrong data living there). - /// \throw std::bad_alloc For allocation errors. 
- virtual isc::dns::Name findPreviousName(const isc::dns::Name& query) - const = 0; //@} }; diff --git a/src/lib/dhcp/Makefile.am b/src/lib/dhcp/Makefile.am index 5eada15d7b..ec169a5410 100644 --- a/src/lib/dhcp/Makefile.am +++ b/src/lib/dhcp/Makefile.am @@ -5,6 +5,12 @@ AM_CPPFLAGS += $(BOOST_INCLUDES) AM_CXXFLAGS = $(B10_CXXFLAGS) +# Some versions of GCC warn about some versions of Boost regarding +# missing initializer for members in its posix_time. +# https://svn.boost.org/trac/boost/ticket/3477 +# But older GCC compilers don't have the flag. +AM_CXXFLAGS += $(WARNING_NO_MISSING_FIELD_INITIALIZERS_CFLAG) + CLEANFILES = *.gcno *.gcda lib_LTLIBRARIES = libdhcp++.la @@ -13,6 +19,7 @@ libdhcp___la_SOURCES += libdhcp++.cc libdhcp++.h libdhcp___la_SOURCES += iface_mgr.cc iface_mgr.h libdhcp___la_SOURCES += iface_mgr_linux.cc libdhcp___la_SOURCES += iface_mgr_bsd.cc +libdhcp___la_SOURCES += iface_mgr_sun.cc libdhcp___la_SOURCES += option.cc option.h libdhcp___la_SOURCES += option6_ia.cc option6_ia.h libdhcp___la_SOURCES += option6_iaaddr.cc option6_iaaddr.h @@ -27,4 +34,12 @@ EXTRA_DIST = README libdhcp___la_CXXFLAGS = $(AM_CXXFLAGS) libdhcp___la_CPPFLAGS = $(AM_CPPFLAGS) $(LOG4CPLUS_INCLUDES) -libdhcp___la_LIBADD = $(top_builddir)/src/lib/util/libutil.la +libdhcp___la_LIBADD = $(top_builddir)/src/lib/asiolink/libasiolink.la +libdhcp___la_LIBADD += $(top_builddir)/src/lib/util/libutil.la +libdhcp___la_LDFLAGS = -no-undefined -version-info 2:0:0 + +if USE_CLANGPP +# Disable unused parameter warning caused by some of the +# Boost headers when compiling with clang. 
+libdhcp___la_CXXFLAGS += -Wno-unused-parameter +endif diff --git a/src/lib/dhcp/iface_mgr.cc b/src/lib/dhcp/iface_mgr.cc index e070688109..508413dfe8 100644 --- a/src/lib/dhcp/iface_mgr.cc +++ b/src/lib/dhcp/iface_mgr.cc @@ -170,39 +170,48 @@ IfaceMgr::~IfaceMgr() { } void IfaceMgr::stubDetectIfaces() { - string ifaceName, linkLocal; + string ifaceName; + const string v4addr("127.0.0.1"), v6addr("::1"); // This is a stub implementation for interface detection. Actual detection - // is faked by reading a text file. It will eventually be removed once - // we have actual implementations for all supported systems. + // is faked by detecting loopback interface (lo or lo0). It will eventually + // be removed once we have actual implementations for all supported systems. - cout << "Interface detection is not implemented yet. " - << "Reading interfaces.txt file instead." << endl; - cout << "Please use format: interface-name link-local-address" << endl; + cout << "Interface detection is not implemented on this Operating System yet. " + << endl; try { - ifstream interfaces("interfaces.txt"); - - if (!interfaces.good()) { - cout << "interfaces.txt file is not available. Stub interface detection skipped." << endl; - return; + if (if_nametoindex("lo") > 0) { + ifaceName = "lo"; + // this is Linux-like OS + } else if (if_nametoindex("lo0") > 0) { + ifaceName = "lo0"; + // this is BSD-like OS + } else { + // we give up. What OS is this, anyway? Solaris? Hurd? + isc_throw(NotImplemented, + "Interface detection on this OS is not supported."); } - interfaces >> ifaceName; - interfaces >> linkLocal; - - cout << "Detected interface " << ifaceName << "/" << linkLocal << endl; Iface iface(ifaceName, if_nametoindex(ifaceName.c_str())); iface.flag_up_ = true; iface.flag_running_ = true; + + // note that we claim that this is not a loopback. iface_mgr tries to open a + // socket on all interaces that are up, running and not loopback. 
As this is + // the only interface we were able to detect, let's pretend this is a normal + // interface. iface.flag_loopback_ = false; iface.flag_multicast_ = true; iface.flag_broadcast_ = true; iface.setHWType(HWTYPE_ETHERNET); - IOAddress addr(linkLocal); - iface.addAddress(addr); + + iface.addAddress(IOAddress(v4addr)); + iface.addAddress(IOAddress(v6addr)); addInterface(iface); - interfaces.close(); + + cout << "Detected interface " << ifaceName << "/" << v4addr << "/" + << v6addr << endl; } catch (const std::exception& ex) { // TODO: deallocate whatever memory we used // not that important, since this function is going to be @@ -215,15 +224,6 @@ void IfaceMgr::stubDetectIfaces() { } } -/// @todo: Remove this once we have OS-specific interface detection -/// routines (or at least OS-specific files, like iface_mgr_solaris.cc) -/// for all OSes. -#if !defined(OS_LINUX) && !defined(OS_BSD) -void IfaceMgr::detectIfaces() { - stubDetectIfaces(); -} -#endif - bool IfaceMgr::openSockets4(const uint16_t port) { int sock; int count = 0; @@ -610,6 +610,8 @@ IfaceMgr::send(const Pkt6Ptr& pkt) { pktinfo->ipi6_ifindex = pkt->getIndex(); m.msg_controllen = cmsg->cmsg_len; + pkt->updateTimestamp(); + result = sendmsg(getSocket(*pkt), &m, 0); if (result < 0) { isc_throw(Unexpected, "Pkt6 send failed: sendmsg() returned " << result); @@ -669,6 +671,8 @@ IfaceMgr::send(const Pkt4Ptr& pkt) << " over socket " << getSocket(*pkt) << " on interface " << getIface(pkt->getIface())->getFullName() << endl; + pkt->updateTimestamp(); + int result = sendmsg(getSocket(*pkt), &m, 0); if (result < 0) { isc_throw(Unexpected, "Pkt4 send failed."); @@ -825,6 +829,8 @@ IfaceMgr::receive4(uint32_t timeout) { // We have all data let's create Pkt4 object. 
Pkt4Ptr pkt = Pkt4Ptr(new Pkt4(buf, result)); + pkt->updateTimestamp(); + unsigned int ifindex = iface->getIndex(); IOAddress from(htonl(from_addr.sin_addr.s_addr)); @@ -969,6 +975,8 @@ Pkt6Ptr IfaceMgr::receive6() { return (Pkt6Ptr()); // NULL } + pkt->updateTimestamp(); + pkt->setLocalAddr(IOAddress::from_bytes(AF_INET6, reinterpret_cast(&to_addr))); pkt->setRemoteAddr(IOAddress::from_bytes(AF_INET6, diff --git a/src/lib/dhcp/iface_mgr_bsd.cc b/src/lib/dhcp/iface_mgr_bsd.cc index aa6dd5b2d3..e3f11a1edc 100644 --- a/src/lib/dhcp/iface_mgr_bsd.cc +++ b/src/lib/dhcp/iface_mgr_bsd.cc @@ -25,6 +25,7 @@ using namespace isc::asiolink; using namespace isc::dhcp; namespace isc { +namespace dhcp { void IfaceMgr::detectIfaces() { @@ -48,6 +49,7 @@ bool IfaceMgr::os_receive4(struct msghdr& /*m*/, Pkt4Ptr& /*pkt*/) { return (true); // pretend that we have everything set up for reception. } -} +} // end of isc::dhcp namespace +} // end of dhcp namespace #endif diff --git a/src/lib/dhcp/iface_mgr_linux.cc b/src/lib/dhcp/iface_mgr_linux.cc index 3faac02897..90431dead2 100644 --- a/src/lib/dhcp/iface_mgr_linux.cc +++ b/src/lib/dhcp/iface_mgr_linux.cc @@ -410,7 +410,6 @@ void Netlink::release_list(NetlinkMessages& messages) { } // end of anonymous namespace namespace isc { - namespace dhcp { /// @brief Detect available interfaces on Linux systems. diff --git a/src/lib/dhcp/iface_mgr_sun.cc b/src/lib/dhcp/iface_mgr_sun.cc new file mode 100644 index 0000000000..5847906c47 --- /dev/null +++ b/src/lib/dhcp/iface_mgr_sun.cc @@ -0,0 +1,55 @@ +// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. 
+// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#include + +#if defined(OS_SUN) + +#include +#include + +using namespace std; +using namespace isc; +using namespace isc::asiolink; +using namespace isc::dhcp; + +namespace isc { +namespace dhcp { + +void +IfaceMgr::detectIfaces() { + /// @todo do the actual detection on Solaris. Currently just calling + /// stub implementation. + stubDetectIfaces(); +} + +void IfaceMgr::os_send4(struct msghdr& /*m*/, + boost::scoped_array& /*control_buf*/, + size_t /*control_buf_len*/, + const Pkt4Ptr& /*pkt*/) { + // @todo: Are there any specific actions required before sending IPv4 packet + // on BSDs? See iface_mgr_linux.cc for working Linux implementation. +} + +bool IfaceMgr::os_receive4(struct msghdr& /*m*/, Pkt4Ptr& /*pkt*/) { + // @todo: Are there any specific actions required before receiving IPv4 packet + // on BSDs? See iface_mgr_linux.cc for working Linux implementation. + + return (true); // pretend that we have everything set up for reception. +} + +} // end of isc::dhcp namespace +} // end of dhcp namespace + +#endif diff --git a/src/lib/dhcp/option.cc b/src/lib/dhcp/option.cc index 03b4a3d64c..0c71606b6c 100644 --- a/src/lib/dhcp/option.cc +++ b/src/lib/dhcp/option.cc @@ -270,6 +270,14 @@ void Option::setUint32(uint32_t value) { writeUint32(value, &data_[0]); } +void Option::setData(const OptionBufferConstIter first, + const OptionBufferConstIter last) { + // We will copy entire option buffer, so we have to resize data_. 
+ data_.resize(std::distance(first, last)); + std::copy(first, last, data_.begin()); +} + + Option::~Option() { } diff --git a/src/lib/dhcp/option.h b/src/lib/dhcp/option.h index c7f5d10856..066296704e 100644 --- a/src/lib/dhcp/option.h +++ b/src/lib/dhcp/option.h @@ -244,6 +244,15 @@ public: /// @param value value to be set void setUint32(uint32_t value); + /// @brief Sets content of this option from buffer. + /// + /// Option will be resized to length of buffer. + /// + /// @param first iterator pointing begining of buffer to copy. + /// @param last iterator pointing to end of buffer to copy. + void setData(const OptionBufferConstIter first, + const OptionBufferConstIter last); + /// just to force that every option has virtual dtor virtual ~Option(); diff --git a/src/lib/dhcp/pkt4.cc b/src/lib/dhcp/pkt4.cc index e4566310b8..2c3f1eb3a6 100644 --- a/src/lib/dhcp/pkt4.cc +++ b/src/lib/dhcp/pkt4.cc @@ -305,6 +305,11 @@ Pkt4::getOption(uint8_t type) { return boost::shared_ptr(); // NULL } +void +Pkt4::updateTimestamp() { + timestamp_ = boost::posix_time::microsec_clock::universal_time(); +} + } // end of namespace isc::dhcp } // end of namespace isc diff --git a/src/lib/dhcp/pkt4.h b/src/lib/dhcp/pkt4.h index a3f683f201..b72c03eb5b 100644 --- a/src/lib/dhcp/pkt4.h +++ b/src/lib/dhcp/pkt4.h @@ -16,8 +16,10 @@ #define PKT4_H #include +#include #include #include +#include #include "asiolink/io_address.h" #include "util/buffer.h" #include "dhcp/option.h" @@ -202,6 +204,11 @@ public: void setGiaddr(const isc::asiolink::IOAddress& giaddr) { giaddr_ = giaddr; }; + /// @brief Sets transaction-id value + /// + /// @param transid transaction-id to be set. + void setTransid(uint32_t transid) { transid_ = transid; } + /// @brief Returns value of transaction-id field. /// /// @return transaction-id @@ -321,6 +328,14 @@ public: /// @return interface name std::string getIface() const { return iface_; }; + /// @brief Returns packet timestamp. 
+ /// + /// Returns packet timestamp value updated when + /// packet is received or send. + /// + /// @return packet timestamp. + const boost::posix_time::ptime& getTimestamp() const { return timestamp_; } + /// @brief Sets interface name. /// /// Sets interface name over which packet was received or is @@ -387,6 +402,14 @@ public: /// @return remote port uint16_t getRemotePort() { return (remote_port_); } + /// @brief Update packet timestamp. + /// + /// Updates packet timestamp. This method is invoked + /// by interface manager just before sending or + /// just after receiving it. + /// @throw isc::Unexpected if timestamp update failed + void updateTimestamp(); + protected: /// converts DHCP message type to BOOTP op type @@ -470,12 +493,26 @@ protected: // end of real DHCPv4 fields /// output buffer (used during message transmission) + /// + /// @warning This protected member is accessed by derived + /// classes directly. One of such derived classes is + /// @ref perfdhcp::PerfPkt4. The impact on derived clasess' + /// behavior must be taken into consideration before making + /// changes to this member such as access scope restriction or + /// data format change etc. isc::util::OutputBuffer bufferOut_; /// that's the data of input buffer used in RX packet. Note that /// InputBuffer does not store the data itself, but just expects that /// data will be valid for the whole life of InputBuffer. Therefore we /// need to keep the data around. + /// + /// @warning This protected member is accessed by derived + /// classes directly. One of such derived classes is + /// @ref perfdhcp::PerfPkt4. The impact on derived clasess' + /// behavior must be taken into consideration before making + /// changes to this member such as access scope restriction or + /// data format change etc. std::vector data_; /// message type (e.g. 
1=DHCPDISCOVER) @@ -484,7 +521,17 @@ protected: uint8_t msg_type_; /// collection of options present in this message + /// + /// @warnig This protected member is accessed by derived + /// classes directly. One of such derived classes is + /// @ref perfdhcp::PerfPkt4. The impact on derived clasess' + /// behavior must be taken into consideration before making + /// changes to this member such as access scope restriction or + /// data format change etc. isc::dhcp::Option::OptionCollection options_; + + /// packet timestamp + boost::posix_time::ptime timestamp_; }; // Pkt4 class typedef boost::shared_ptr Pkt4Ptr; diff --git a/src/lib/dhcp/pkt6.cc b/src/lib/dhcp/pkt6.cc index aea3cde3fc..e869c7b834 100644 --- a/src/lib/dhcp/pkt6.cc +++ b/src/lib/dhcp/pkt6.cc @@ -202,5 +202,11 @@ void Pkt6::repack() { bufferOut_.writeData(&data_[0], data_.size()); } +void +Pkt6::updateTimestamp() { + timestamp_ = boost::posix_time::microsec_clock::universal_time(); +} + + } // end of isc::dhcp namespace } // end of isc namespace diff --git a/src/lib/dhcp/pkt6.h b/src/lib/dhcp/pkt6.h index 97ac996512..2612f27046 100644 --- a/src/lib/dhcp/pkt6.h +++ b/src/lib/dhcp/pkt6.h @@ -16,8 +16,10 @@ #define PKT6_H #include +#include #include #include +#include #include "asiolink/io_address.h" #include "dhcp/option.h" @@ -129,6 +131,11 @@ public: /// @param type message type to be set void setType(uint8_t type) { msg_type_=type; }; + /// @brief Sets transaction-id value + /// + /// @param transid transaction-id to be set. + void setTransid(uint32_t transid) { transid_ = transid; } + /// Returns value of transaction-id field /// /// @return transaction-id @@ -220,6 +227,14 @@ public: /// @return interface name std::string getIface() const { return iface_; }; + /// @brief Returns packet timestamp. + /// + /// Returns packet timestamp value updated when + /// packet is received or send. + /// + /// @return packet timestamp. 
+ const boost::posix_time::ptime& getTimestamp() const { return timestamp_; } + /// @brief Sets interface name. /// /// Sets interface name over which packet was received or is @@ -231,8 +246,23 @@ public: /// TODO Need to implement getOptions() as well /// collection of options present in this message + /// + /// @warning This protected member is accessed by derived + /// classes directly. One of such derived classes is + /// @ref perfdhcp::PerfPkt6. The impact on derived clasess' + /// behavior must be taken into consideration before making + /// changes to this member such as access scope restriction or + /// data format change etc. isc::dhcp::Option::OptionCollection options_; + /// @brief Update packet timestamp. + /// + /// Updates packet timestamp. This method is invoked + /// by interface manager just before sending or + /// just after receiving it. + /// @throw isc::Unexpected if timestamp update failed + void updateTimestamp(); + protected: /// Builds on wire packet for TCP transmission. /// @@ -278,6 +308,13 @@ protected: uint32_t transid_; /// unparsed data (in received packets) + /// + /// @warning This protected member is accessed by derived + /// classes directly. One of such derived classes is + /// @ref perfdhcp::PerfPkt6. The impact on derived clasess' + /// behavior must be taken into consideration before making + /// changes to this member such as access scope restriction or + /// data format change etc. OptionBuffer data_; /// name of the network interface the packet was received/to be sent over @@ -304,7 +341,17 @@ protected: uint16_t remote_port_; /// output buffer (used during message transmission) + /// + /// @warning This protected member is accessed by derived + /// classes directly. One of such derived classes is + /// @ref perfdhcp::PerfPkt6. The impact on derived clasess' + /// behavior must be taken into consideration before making + /// changes to this member such as access scope restriction or + /// data format change etc. 
isc::util::OutputBuffer bufferOut_; + + /// packet timestamp + boost::posix_time::ptime timestamp_; }; // Pkt6 class typedef boost::shared_ptr Pkt6Ptr; diff --git a/src/lib/dhcp/tests/Makefile.am b/src/lib/dhcp/tests/Makefile.am index e86fa24deb..13e8458896 100644 --- a/src/lib/dhcp/tests/Makefile.am +++ b/src/lib/dhcp/tests/Makefile.am @@ -2,18 +2,26 @@ SUBDIRS = . AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib AM_CPPFLAGS += $(BOOST_INCLUDES) -AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(abs_top_srcdir)/src/lib/testutils/testdata\" AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_top_builddir)/src/lib/dhcp/tests\" AM_CPPFLAGS += -DINSTALL_PROG=\"$(abs_top_srcdir)/install-sh\" AM_CXXFLAGS = $(B10_CXXFLAGS) +# Some versions of GCC warn about some versions of Boost regarding +# missing initializer for members in its posix_time. +# https://svn.boost.org/trac/boost/ticket/3477 +# But older GCC compilers don't have the flag. +AM_CXXFLAGS += $(WARNING_NO_MISSING_FIELD_INITIALIZERS_CFLAG) + if USE_STATIC_LINK AM_LDFLAGS = -static endif CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = if HAVE_GTEST TESTS += libdhcp++_unittests @@ -23,6 +31,7 @@ libdhcp___unittests_SOURCES += libdhcp++_unittest.cc libdhcp___unittests_SOURCES += ../iface_mgr.cc ../iface_mgr.h iface_mgr_unittest.cc libdhcp___unittests_SOURCES += ../iface_mgr_linux.cc libdhcp___unittests_SOURCES += ../iface_mgr_bsd.cc +libdhcp___unittests_SOURCES += ../iface_mgr_sun.cc libdhcp___unittests_SOURCES += ../option6_iaaddr.h ../option6_iaaddr.cc option6_iaaddr_unittest.cc libdhcp___unittests_SOURCES += ../option6_ia.h ../option6_ia.cc option6_ia_unittest.cc libdhcp___unittests_SOURCES += ../option6_addrlst.h ../option6_addrlst.cc option6_addrlst_unittest.cc @@ -38,8 +47,9 @@ libdhcp___unittests_CXXFLAGS = $(AM_CXXFLAGS) if USE_CLANGPP # This is to workaround unused variables tcout and tcerr in -# log4cplus's streams.h. 
-libdhcp___unittests_CXXFLAGS += -Wno-unused-variable +# log4cplus's streams.h and unused parameters from some of the +# Boost headers. +libdhcp___unittests_CXXFLAGS += -Wno-unused-variable -Wno-unused-parameter endif libdhcp___unittests_LDADD = $(GTEST_LDADD) libdhcp___unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la diff --git a/src/lib/dhcp/tests/iface_mgr_unittest.cc b/src/lib/dhcp/tests/iface_mgr_unittest.cc index c065438214..e7ccb680ba 100644 --- a/src/lib/dhcp/tests/iface_mgr_unittest.cc +++ b/src/lib/dhcp/tests/iface_mgr_unittest.cc @@ -7,7 +7,7 @@ // THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH // REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY // AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, - // INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM // LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR // PERFORMANCE OF THIS SOFTWARE. 
@@ -36,7 +36,6 @@ const size_t buf_size = 32; char LOOPBACK[buf_size] = "lo"; namespace { -const char* const INTERFACE_FILE = TEST_DATA_BUILDDIR "/interfaces.txt"; class NakedIfaceMgr: public IfaceMgr { // "naked" Interface Manager, exposes internal fields @@ -48,18 +47,11 @@ public: // dummy class for now, but this will be expanded when needed class IfaceMgrTest : public ::testing::Test { public: + // these are empty for now, but let's keep them around IfaceMgrTest() { } - void createLoInterfacesTxt() { - unlink(INTERFACE_FILE); - fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc); - fakeifaces << LOOPBACK << " ::1"; - fakeifaces.close(); - } - ~IfaceMgrTest() { - unlink(INTERFACE_FILE); } }; @@ -152,7 +144,6 @@ TEST_F(IfaceMgrTest, dhcp6Sniffer) { TEST_F(IfaceMgrTest, basic) { // checks that IfaceManager can be instantiated - createLoInterfacesTxt(); IfaceMgr & ifacemgr = IfaceMgr::instance(); ASSERT_TRUE(&ifacemgr != 0); @@ -172,16 +163,17 @@ TEST_F(IfaceMgrTest, ifaceClass) { // is implemented. TEST_F(IfaceMgrTest, getIface) { - createLoInterfacesTxt(); - cout << "Interface checks. Please ignore socket binding errors." << endl; NakedIfaceMgr* ifacemgr = new NakedIfaceMgr(); // interface name, ifindex - IfaceMgr::Iface iface1("lo1", 1); - IfaceMgr::Iface iface2("eth5", 2); - IfaceMgr::Iface iface3("en3", 5); - IfaceMgr::Iface iface4("e1000g0", 3); + IfaceMgr::Iface iface1("lo1", 100); + IfaceMgr::Iface iface2("eth9", 101); + IfaceMgr::Iface iface3("en3", 102); + IfaceMgr::Iface iface4("e1000g4", 103); + cout << "This test assumes that there are less than 100 network interfaces" + << " in the tested system and there are no lo1, eth9, en3, e1000g4" + << " or wifi15 interfaces present." 
<< endl; // note: real interfaces may be detected as well ifacemgr->getIfacesLst().push_back(iface1); @@ -199,65 +191,30 @@ TEST_F(IfaceMgrTest, getIface) { // check that interface can be retrieved by ifindex - IfaceMgr::Iface* tmp = ifacemgr->getIface(5); - // ASSERT_NE(NULL, tmp); is not supported. hmmmm. + IfaceMgr::Iface* tmp = ifacemgr->getIface(102); ASSERT_TRUE(tmp != NULL); EXPECT_EQ("en3", tmp->getName()); - EXPECT_EQ(5, tmp->getIndex()); + EXPECT_EQ(102, tmp->getIndex()); // check that interface can be retrieved by name tmp = ifacemgr->getIface("lo1"); ASSERT_TRUE(tmp != NULL); EXPECT_EQ("lo1", tmp->getName()); - EXPECT_EQ(1, tmp->getIndex()); + EXPECT_EQ(100, tmp->getIndex()); // check that non-existing interfaces are not returned - EXPECT_EQ(static_cast(NULL), ifacemgr->getIface("wifi0") ); + EXPECT_EQ(static_cast(NULL), ifacemgr->getIface("wifi15") ); delete ifacemgr; } -#if !defined(OS_LINUX) -TEST_F(IfaceMgrTest, detectIfaces_stub) { - - // test detects that interfaces can be detected - // there is no code for that now, but interfaces are - // read from file - fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc); - fakeifaces << "eth0 fe80::1234"; - fakeifaces.close(); - - // this is not usable on systems that don't have eth0 - // interfaces. Nevertheless, this fake interface should - // be on list, but if_nametoindex() will fail. 
- - NakedIfaceMgr* ifacemgr = new NakedIfaceMgr(); - - ASSERT_TRUE(ifacemgr->getIface("eth0") != NULL); - - IfaceMgr::Iface* eth0 = ifacemgr->getIface("eth0"); - - // there should be one address - IfaceMgr::AddressCollection addrs = eth0->getAddresses(); - ASSERT_EQ(1, addrs.size()); - - IOAddress addr = *addrs.begin(); - - EXPECT_STREQ("fe80::1234", addr.toText().c_str()); - - delete ifacemgr; -} -#endif - TEST_F(IfaceMgrTest, sockets6) { // testing socket operation in a portable way is tricky // without interface detection implemented - createLoInterfacesTxt(); - NakedIfaceMgr* ifacemgr = new NakedIfaceMgr(); IOAddress loAddr("::1"); @@ -319,7 +276,6 @@ TEST_F(IfaceMgrTest, sendReceive6) { // testing socket operation in a portable way is tricky // without interface detection implemented - createLoInterfacesTxt(); NakedIfaceMgr* ifacemgr = new NakedIfaceMgr(); @@ -377,7 +333,6 @@ TEST_F(IfaceMgrTest, sendReceive4) { // testing socket operation in a portable way is tricky // without interface detection implemented - createLoInterfacesTxt(); NakedIfaceMgr* ifacemgr = new NakedIfaceMgr(); @@ -468,7 +423,6 @@ TEST_F(IfaceMgrTest, sendReceive4) { TEST_F(IfaceMgrTest, socket4) { - createLoInterfacesTxt(); NakedIfaceMgr* ifacemgr = new NakedIfaceMgr(); // Let's assume that every supported OS have lo interface. 
@@ -584,7 +538,6 @@ TEST_F(IfaceMgrTest, socketInfo) { EXPECT_EQ(DHCP4_SERVER_PORT + 9, sock2.port_); // now let's test if IfaceMgr handles socket info properly - createLoInterfacesTxt(); NakedIfaceMgr* ifacemgr = new NakedIfaceMgr(); IfaceMgr::Iface* loopback = ifacemgr->getIface(LOOPBACK); ASSERT_TRUE(loopback); diff --git a/src/lib/dhcp/tests/option_unittest.cc b/src/lib/dhcp/tests/option_unittest.cc index 5daf75dcd6..9b046f0278 100644 --- a/src/lib/dhcp/tests/option_unittest.cc +++ b/src/lib/dhcp/tests/option_unittest.cc @@ -485,7 +485,7 @@ TEST_F(OptionTest, setUintX) { uint8_t exp2[] = {125, 2, 12345/256, 12345%256}; EXPECT_TRUE(0 == memcmp(exp2, outBuf_.getData(), 4)); - // verity getUint32 + // verify getUint32 outBuf_.clear(); opt4->setUint32(0x12345678); opt4->pack4(outBuf_); @@ -495,4 +495,31 @@ TEST_F(OptionTest, setUintX) { uint8_t exp4[] = {125, 4, 0x12, 0x34, 0x56, 0x78}; EXPECT_TRUE(0 == memcmp(exp4, outBuf_.getData(), 6)); } + +TEST_F(OptionTest, setData) { + // verify data override with new buffer larger than + // initial option buffer size + OptionPtr opt1(new Option(Option::V4, 125, + buf_.begin(), buf_.begin() + 10)); + buf_.resize(20, 1); + opt1->setData(buf_.begin(), buf_.end()); + opt1->pack4(outBuf_); + ASSERT_EQ(outBuf_.getLength() - opt1->getHeaderLen(), buf_.size()); + const uint8_t* test_data = static_cast(outBuf_.getData()); + EXPECT_TRUE(0 == memcmp(&buf_[0], test_data + opt1->getHeaderLen(), + buf_.size())); + + // verify data override with new buffer shorter than + // initial option buffer size + OptionPtr opt2(new Option(Option::V4, 125, + buf_.begin(), buf_.begin() + 10)); + outBuf_.clear(); + buf_.resize(5, 1); + opt2->setData(buf_.begin(), buf_.end()); + opt2->pack4(outBuf_); + ASSERT_EQ(outBuf_.getLength() - opt1->getHeaderLen(), buf_.size()); + test_data = static_cast(outBuf_.getData()); + EXPECT_TRUE(0 == memcmp(&buf_[0], test_data + opt1->getHeaderLen(), + buf_.size())); +} } diff --git a/src/lib/dhcp/tests/pkt4_unittest.cc 
b/src/lib/dhcp/tests/pkt4_unittest.cc index bed8c2f68b..9c8cc05ded 100644 --- a/src/lib/dhcp/tests/pkt4_unittest.cc +++ b/src/lib/dhcp/tests/pkt4_unittest.cc @@ -31,7 +31,9 @@ using namespace isc; using namespace isc::asiolink; using namespace isc::dhcp; using namespace isc::util; -using namespace boost; +// don't import the entire boost namespace. It will unexpectedly hide uint8_t +// for some systems. +using boost::scoped_ptr; namespace { @@ -598,4 +600,32 @@ TEST(Pkt4Test, metaFields) { delete pkt; } +TEST(Pkt4Test, Timestamp) { + scoped_ptr pkt(new Pkt4(DHCPOFFER, 1234)); + + // Just after construction timestamp is invalid + ASSERT_TRUE(pkt->getTimestamp().is_not_a_date_time()); + + // Update packet time. + pkt->updateTimestamp(); + + // Get updated packet time. + boost::posix_time::ptime ts_packet = pkt->getTimestamp(); + + // After timestamp is updated it should be date-time. + ASSERT_FALSE(ts_packet.is_not_a_date_time()); + + // Check current time. + boost::posix_time::ptime ts_now = + boost::posix_time::microsec_clock::universal_time(); + + // Calculate period between packet time and now. + boost::posix_time::time_period ts_period(ts_packet, ts_now); + + // Duration should be positive or zero. + EXPECT_TRUE(ts_period.length().total_microseconds() >= 0); +} + + + } // end of anonymous namespace diff --git a/src/lib/dhcp/tests/pkt6_unittest.cc b/src/lib/dhcp/tests/pkt6_unittest.cc index e07ea9fa80..d6ca9b106f 100644 --- a/src/lib/dhcp/tests/pkt6_unittest.cc +++ b/src/lib/dhcp/tests/pkt6_unittest.cc @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -204,4 +205,30 @@ TEST_F(Pkt6Test, addGetDelOptions) { delete parent; } +TEST_F(Pkt6Test, Timestamp) { + boost::scoped_ptr pkt(new Pkt6(DHCPV6_SOLICIT, 0x020304)); + + // Just after construction timestamp is invalid + ASSERT_TRUE(pkt->getTimestamp().is_not_a_date_time()); + + // Update packet time. + pkt->updateTimestamp(); + + // Get updated packet time. 
+ boost::posix_time::ptime ts_packet = pkt->getTimestamp(); + + // After timestamp is updated it should be date-time. + ASSERT_FALSE(ts_packet.is_not_a_date_time()); + + // Check current time. + boost::posix_time::ptime ts_now = + boost::posix_time::microsec_clock::universal_time(); + + // Calculate period between packet time and now. + boost::posix_time::time_period ts_period(ts_packet, ts_now); + + // Duration should be positive or zero. + EXPECT_TRUE(ts_period.length().total_microseconds() >= 0); +} + } diff --git a/src/lib/dns/labelsequence.cc b/src/lib/dns/labelsequence.cc index 0ec450f60a..71e0f82542 100644 --- a/src/lib/dns/labelsequence.cc +++ b/src/lib/dns/labelsequence.cc @@ -18,6 +18,8 @@ #include +#include + namespace isc { namespace dns { @@ -52,7 +54,7 @@ LabelSequence::equals(const LabelSequence& other, bool case_sensitive) const { return (false); } if (case_sensitive) { - return (strncmp(data, other_data, len) == 0); + return (std::strncmp(data, other_data, len) == 0); } // As long as the data was originally validated as (part of) a name, diff --git a/src/lib/dns/labelsequence.h b/src/lib/dns/labelsequence.h index 6b10b6782a..b17eeb43ad 100644 --- a/src/lib/dns/labelsequence.h +++ b/src/lib/dns/labelsequence.h @@ -101,7 +101,7 @@ public: /// \note No actual memory is changed, this operation merely updates the /// internal pointers based on the offsets in the Name object. /// - /// \exeption OutOfRange if i is greater than or equal to the number + /// \exception OutOfRange if i is greater than or equal to the number /// of labels currently pointed to by this LabelSequence /// /// \param i The number of labels to remove. @@ -112,7 +112,7 @@ public: /// \note No actual memory is changed, this operation merely updates the /// internal pointers based on the offsets in the Name object. 
/// - /// \exeption OutOfRange if i is greater than or equal to the number + /// \exception OutOfRange if i is greater than or equal to the number /// of labels currently pointed to by this LabelSequence /// /// \param i The number of labels to remove. diff --git a/src/lib/dns/message.cc b/src/lib/dns/message.cc index 0db68c6198..0a1625a206 100644 --- a/src/lib/dns/message.cc +++ b/src/lib/dns/message.cc @@ -130,6 +130,11 @@ public: const RRClass& rrclass, const RRType& rrtype, const RRTTL& ttl, ConstRdataPtr rdata, Message::ParseOptions options); + // There are also times where an RR needs to be added that + // represents an empty RRset. There is no Rdata in that case + void addRR(Message::Section section, const Name& name, + const RRClass& rrclass, const RRType& rrtype, + const RRTTL& ttl, Message::ParseOptions options); void addEDNS(Message::Section section, const Name& name, const RRClass& rrclass, const RRType& rrtype, const RRTTL& ttl, const Rdata& rdata); @@ -561,10 +566,18 @@ Message::removeRRset(const Section section, RRsetIterator& iterator) { void Message::clearSection(const Section section) { + if (impl_->mode_ != Message::RENDER) { + isc_throw(InvalidMessageOperation, + "clearSection performed in non-render mode"); + } if (section >= MessageImpl::NUM_SECTIONS) { isc_throw(OutOfRange, "Invalid message section: " << section); } - impl_->rrsets_[section].clear(); + if (section == Message::SECTION_QUESTION) { + impl_->questions_.clear(); + } else { + impl_->rrsets_[section].clear(); + } impl_->counts_[section] = 0; } @@ -736,6 +749,17 @@ MessageImpl::parseSection(const Message::Section section, const RRClass rrclass(buffer.readUint16()); const RRTTL ttl(buffer.readUint32()); const size_t rdlen = buffer.readUint16(); + + // If class is ANY or NONE, rdlength may be zero, to signal + // an empty RRset. 
+ // (the class check must be done to differentiate from RRTypes + // that can have zero length rdata + if ((rrclass == RRClass::ANY() || rrclass == RRClass::NONE()) && + rdlen == 0) { + addRR(section, name, rrclass, rrtype, ttl, options); + ++added; + continue; + } ConstRdataPtr rdata = createRdata(rrtype, rrclass, buffer, rdlen); if (rrtype == RRType::OPT()) { @@ -773,6 +797,24 @@ MessageImpl::addRR(Message::Section section, const Name& name, rrsets_[section].push_back(rrset); } +void +MessageImpl::addRR(Message::Section section, const Name& name, + const RRClass& rrclass, const RRType& rrtype, + const RRTTL& ttl, Message::ParseOptions options) +{ + if ((options & Message::PRESERVE_ORDER) == 0) { + vector::iterator it = + find_if(rrsets_[section].begin(), rrsets_[section].end(), + MatchRR(name, rrtype, rrclass)); + if (it != rrsets_[section].end()) { + (*it)->setTTL(min((*it)->getTTL(), ttl)); + return; + } + } + RRsetPtr rrset(new RRset(name, rrclass, rrtype, ttl)); + rrsets_[section].push_back(rrset); +} + void MessageImpl::addEDNS(Message::Section section, const Name& name, const RRClass& rrclass, const RRType& rrtype, diff --git a/src/lib/dns/message.h b/src/lib/dns/message.h index 33551c099e..73d0c6e843 100644 --- a/src/lib/dns/message.h +++ b/src/lib/dns/message.h @@ -513,6 +513,12 @@ public: /// \brief Remove all RRSets from the given Section /// + /// This method is only allowed in the \c RENDER mode, and the given + /// section must be valid. 
+ /// + /// \throw InvalidMessageOperation Message is not in the \c RENDER mode + /// \throw OutOfRange The specified section is not valid + /// /// \param section Section to remove all rrsets from void clearSection(const Section section); diff --git a/src/lib/dns/python/message_python.cc b/src/lib/dns/python/message_python.cc index c7ad2ff5dc..f08f62c11d 100644 --- a/src/lib/dns/python/message_python.cc +++ b/src/lib/dns/python/message_python.cc @@ -76,6 +76,7 @@ PyObject* Message_getSection(PyObject* self, PyObject* args); PyObject* Message_addQuestion(s_Message* self, PyObject* args); PyObject* Message_addRRset(s_Message* self, PyObject* args); PyObject* Message_clear(s_Message* self, PyObject* args); +PyObject* Message_clearSection(PyObject* pyself, PyObject* args); PyObject* Message_makeResponse(s_Message* self); PyObject* Message_toText(s_Message* self); PyObject* Message_str(PyObject* self); @@ -149,6 +150,8 @@ PyMethodDef Message_methods[] = { "Clears the message content (if any) and reinitialize the " "message in the given mode\n" "The argument must be either Message.PARSE or Message.RENDER"}, + { "clear_section", Message_clearSection, METH_VARARGS, + Message_clearSection_doc }, { "make_response", reinterpret_cast(Message_makeResponse), METH_NOARGS, "Prepare for making a response from a request.\n" "This will clear the DNS header except those fields that should be kept " @@ -206,12 +209,24 @@ Message_getHeaderFlag(s_Message* self, PyObject* args) { return (NULL); } - if (self->cppobj->getHeaderFlag( + try { + if (self->cppobj->getHeaderFlag( static_cast(messageflag))) { - Py_RETURN_TRUE; - } else { - Py_RETURN_FALSE; + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } + } catch (const isc::InvalidParameter& ip) { + PyErr_Clear(); + PyErr_SetString(po_InvalidParameter, ip.what()); + } catch (const exception& ex) { + const string ex_what = "Error in Message.get_header_flag(): " + string(ex.what()); + PyErr_SetString(po_IscException, ex_what.c_str()); + } 
catch (...) { + PyErr_SetString(po_IscException, + "Unexpected exception in Message.get_header_flag()"); } + return (NULL); } PyObject* @@ -237,12 +252,17 @@ Message_setHeaderFlag(s_Message* self, PyObject* args) { } catch (const InvalidMessageOperation& imo) { PyErr_Clear(); PyErr_SetString(po_InvalidMessageOperation, imo.what()); - return (NULL); } catch (const isc::InvalidParameter& ip) { PyErr_Clear(); PyErr_SetString(po_InvalidParameter, ip.what()); - return (NULL); + } catch (const exception& ex) { + const string ex_what = "Error in Message.set_header_flag(): " + string(ex.what()); + PyErr_SetString(po_IscException, ex_what.c_str()); + } catch (...) { + PyErr_SetString(po_IscException, + "Unexpected exception in Message.set_header_flag()"); } + return (NULL); } PyObject* @@ -270,8 +290,14 @@ Message_setQid(s_Message* self, PyObject* args) { Py_RETURN_NONE; } catch (const InvalidMessageOperation& imo) { PyErr_SetString(po_InvalidMessageOperation, imo.what()); - return (NULL); + } catch (const exception& ex) { + const string ex_what = "Error in Message.get_qid(): " + string(ex.what()); + PyErr_SetString(po_IscException, ex_what.c_str()); + } catch (...) { + PyErr_SetString(po_IscException, + "Unexpected exception in Message.set_qid()"); } + return (NULL); } PyObject* @@ -280,11 +306,14 @@ Message_getRcode(s_Message* self) { return (createRcodeObject(self->cppobj->getRcode())); } catch (const InvalidMessageOperation& imo) { PyErr_SetString(po_InvalidMessageOperation, imo.what()); - return (NULL); + } catch (const exception& ex) { + const string ex_what = "Error in Message.get_rcode(): " + string(ex.what()); + PyErr_SetString(po_IscException, ex_what.c_str()); } catch (...) 
{ - PyErr_SetString(po_IscException, "Unexpected exception"); - return (NULL); + PyErr_SetString(po_IscException, + "Unexpected exception in Message.get_rcode()"); } + return (NULL); } PyObject* @@ -298,8 +327,14 @@ Message_setRcode(s_Message* self, PyObject* args) { Py_RETURN_NONE; } catch (const InvalidMessageOperation& imo) { PyErr_SetString(po_InvalidMessageOperation, imo.what()); - return (NULL); + } catch (const exception& ex) { + const string ex_what = "Error in Message.set_rcode(): " + string(ex.what()); + PyErr_SetString(po_IscException, ex_what.c_str()); + } catch (...) { + PyErr_SetString(po_IscException, + "Unexpected exception in Message.set_rcode()"); } + return (NULL); } PyObject* @@ -308,17 +343,14 @@ Message_getOpcode(s_Message* self) { return (createOpcodeObject(self->cppobj->getOpcode())); } catch (const InvalidMessageOperation& imo) { PyErr_SetString(po_InvalidMessageOperation, imo.what()); - return (NULL); } catch (const exception& ex) { - const string ex_what = - "Failed to get message opcode: " + string(ex.what()); + const string ex_what = "Error in Message.get_opcode(): " + string(ex.what()); PyErr_SetString(po_IscException, ex_what.c_str()); - return (NULL); } catch (...) { PyErr_SetString(po_IscException, - "Unexpected exception getting opcode from message"); - return (NULL); + "Unexpected exception in Message.get_opcode()"); } + return (NULL); } PyObject* @@ -332,8 +364,14 @@ Message_setOpcode(s_Message* self, PyObject* args) { Py_RETURN_NONE; } catch (const InvalidMessageOperation& imo) { PyErr_SetString(po_InvalidMessageOperation, imo.what()); - return (NULL); + } catch (const exception& ex) { + const string ex_what = "Error in Message.set_opcode(): " + string(ex.what()); + PyErr_SetString(po_IscException, ex_what.c_str()); + } catch (...) 
{ + PyErr_SetString(po_IscException, + "Unexpected exception in Message.set_opcode()"); } + return (NULL); } PyObject* @@ -345,12 +383,11 @@ Message_getEDNS(s_Message* self) { try { return (createEDNSObject(*src)); } catch (const exception& ex) { - const string ex_what = - "Failed to get EDNS from message: " + string(ex.what()); + const string ex_what = "Error in Message.get_edns(): " + string(ex.what()); PyErr_SetString(po_IscException, ex_what.c_str()); } catch (...) { PyErr_SetString(PyExc_SystemError, - "Unexpected failure getting EDNS from message"); + "Unexpected exception in Message.get_edns()"); } return (NULL); } @@ -366,8 +403,14 @@ Message_setEDNS(s_Message* self, PyObject* args) { Py_RETURN_NONE; } catch (const InvalidMessageOperation& imo) { PyErr_SetString(po_InvalidMessageOperation, imo.what()); - return (NULL); + } catch (const exception& ex) { + const string ex_what = "Error in Message.set_edns(): " + string(ex.what()); + PyErr_SetString(po_IscException, ex_what.c_str()); + } catch (...) { + PyErr_SetString(po_IscException, + "Unexpected exception in Message.set_edns()"); } + return (NULL); } PyObject* @@ -383,13 +426,11 @@ Message_getTSIGRecord(s_Message* self) { } catch (const InvalidMessageOperation& ex) { PyErr_SetString(po_InvalidMessageOperation, ex.what()); } catch (const exception& ex) { - const string ex_what = - "Unexpected failure in getting TSIGRecord from message: " + - string(ex.what()); + const string ex_what = "Error in Message.get_tsig_record(): " + string(ex.what()); PyErr_SetString(po_IscException, ex_what.c_str()); } catch (...) 
{ - PyErr_SetString(PyExc_SystemError, "Unexpected failure in " - "getting TSIGRecord from message"); + PyErr_SetString(po_IscException, + "Unexpected exception in Message.get_tsig_record()"); } return (NULL); } @@ -408,8 +449,14 @@ Message_getRRCount(s_Message* self, PyObject* args) { static_cast(section)))); } catch (const isc::OutOfRange& ex) { PyErr_SetString(PyExc_OverflowError, ex.what()); - return (NULL); + } catch (const exception& ex) { + const string ex_what = "Error in Message.get_rr_count(): " + string(ex.what()); + PyErr_SetString(po_IscException, ex_what.c_str()); + } catch (...) { + PyErr_SetString(po_IscException, + "Unexpected exception in Message.get_rr_count()"); } + return (NULL); } // This is a helper templated class commonly used for getQuestion and @@ -450,13 +497,11 @@ Message_getQuestion(PyObject* po_self, PyObject*) { } catch (const InvalidMessageSection& ex) { PyErr_SetString(po_InvalidMessageSection, ex.what()); } catch (const exception& ex) { - const string ex_what = - "Unexpected failure in Message.get_question: " + - string(ex.what()); + const string ex_what = "Error in Message.get_question(): " + string(ex.what()); PyErr_SetString(po_IscException, ex_what.c_str()); } catch (...) { - PyErr_SetString(PyExc_SystemError, - "Unexpected failure in Message.get_question"); + PyErr_SetString(po_IscException, + "Unexpected exception in Message.get_question()"); } return (NULL); } @@ -486,13 +531,11 @@ Message_getSection(PyObject* po_self, PyObject* args) { } catch (const InvalidMessageSection& ex) { PyErr_SetString(po_InvalidMessageSection, ex.what()); } catch (const exception& ex) { - const string ex_what = - "Unexpected failure in Message.get_section: " + - string(ex.what()); + const string ex_what = "Error in Message.get_section(): " + string(ex.what()); PyErr_SetString(po_IscException, ex_what.c_str()); } catch (...) 
{ - PyErr_SetString(PyExc_SystemError, - "Unexpected failure in Message.get_section"); + PyErr_SetString(po_IscException, + "Unexpected exception in Message.get_section()"); } return (NULL); } @@ -510,9 +553,20 @@ Message_addQuestion(s_Message* self, PyObject* args) { return (NULL); } - self->cppobj->addQuestion(PyQuestion_ToQuestion(question)); - - Py_RETURN_NONE; + try { + self->cppobj->addQuestion(PyQuestion_ToQuestion(question)); + Py_RETURN_NONE; + } catch (const InvalidMessageOperation& imo) { + PyErr_Clear(); + PyErr_SetString(po_InvalidMessageOperation, imo.what()); + } catch (const exception& ex) { + const string ex_what = "Error in Message.add_question(): " + string(ex.what()); + PyErr_SetString(po_IscException, ex_what.c_str()); + } catch (...) { + PyErr_SetString(po_IscException, + "Unexpected exception in Message.add_question()"); + } + return (NULL); } PyObject* @@ -531,42 +585,88 @@ Message_addRRset(s_Message* self, PyObject* args) { Py_RETURN_NONE; } catch (const InvalidMessageOperation& imo) { PyErr_SetString(po_InvalidMessageOperation, imo.what()); - return (NULL); } catch (const isc::OutOfRange& ex) { PyErr_SetString(PyExc_OverflowError, ex.what()); - return (NULL); + } catch (const exception& ex) { + const string ex_what = "Error in Message.add_rrset(): " + string(ex.what()); + PyErr_SetString(po_IscException, ex_what.c_str()); } catch (...) 
{ PyErr_SetString(po_IscException, - "Unexpected exception in adding RRset"); - return (NULL); + "Unexpected exception in Message.add_rrset()"); } + return (NULL); } PyObject* Message_clear(s_Message* self, PyObject* args) { int i; - if (PyArg_ParseTuple(args, "i", &i)) { - PyErr_Clear(); - if (i == Message::PARSE) { - self->cppobj->clear(Message::PARSE); - Py_RETURN_NONE; - } else if (i == Message::RENDER) { - self->cppobj->clear(Message::RENDER); - Py_RETURN_NONE; - } else { - PyErr_SetString(PyExc_TypeError, - "Message mode must be Message.PARSE or Message.RENDER"); - return (NULL); + + try { + if (PyArg_ParseTuple(args, "i", &i)) { + PyErr_Clear(); + if (i == Message::PARSE) { + self->cppobj->clear(Message::PARSE); + Py_RETURN_NONE; + } else if (i == Message::RENDER) { + self->cppobj->clear(Message::RENDER); + Py_RETURN_NONE; + } else { + PyErr_SetString(PyExc_TypeError, + "Message mode must be Message.PARSE or Message.RENDER"); + return (NULL); + } } - } else { + } catch (const exception& ex) { + const string ex_what = "Error in Message.clear(): " + string(ex.what()); + PyErr_SetString(po_IscException, ex_what.c_str()); + } catch (...) { + PyErr_SetString(po_IscException, + "Unexpected exception in Message.clear()"); + } + return (NULL); +} + +PyObject* +Message_clearSection(PyObject* pyself, PyObject* args) { + s_Message* const self = static_cast(pyself); + int section; + + if (!PyArg_ParseTuple(args, "i", §ion)) { return (NULL); } + try { + self->cppobj->clearSection(static_cast(section)); + Py_RETURN_NONE; + } catch (const InvalidMessageOperation& imo) { + PyErr_SetString(po_InvalidMessageOperation, imo.what()); + } catch (const isc::OutOfRange& ex) { + PyErr_SetString(PyExc_OverflowError, ex.what()); + } catch (const exception& ex) { + const string ex_what = "Error in Message.clear_section(): " + string(ex.what()); + PyErr_SetString(po_IscException, ex_what.c_str()); + } catch (...) 
{ + PyErr_SetString(po_IscException, + "Unexpected exception in Message.clear_section()"); + } + return (NULL); } PyObject* Message_makeResponse(s_Message* self) { - self->cppobj->makeResponse(); - Py_RETURN_NONE; + try { + self->cppobj->makeResponse(); + Py_RETURN_NONE; + } catch (const InvalidMessageOperation& imo) { + PyErr_Clear(); + PyErr_SetString(po_InvalidMessageOperation, imo.what()); + } catch (const exception& ex) { + const string ex_what = "Error in Message.make_response(): " + string(ex.what()); + PyErr_SetString(po_IscException, ex_what.c_str()); + } catch (...) { + PyErr_SetString(po_IscException, + "Unexpected exception in Message.make_response()"); + } + return (NULL); } PyObject* @@ -577,11 +677,14 @@ Message_toText(s_Message* self) { } catch (const InvalidMessageOperation& imo) { PyErr_Clear(); PyErr_SetString(po_InvalidMessageOperation, imo.what()); - return (NULL); + } catch (const exception& ex) { + const string ex_what = "Error in Message.to_text(): " + string(ex.what()); + PyErr_SetString(po_IscException, ex_what.c_str()); } catch (...) { - PyErr_SetString(po_IscException, "Unexpected exception"); - return (NULL); + PyErr_SetString(po_IscException, + "Unexpected exception in Message.to_text()"); } + return (NULL); } PyObject* @@ -612,22 +715,18 @@ Message_toWire(s_Message* self, PyObject* args) { } catch (const InvalidMessageOperation& imo) { PyErr_Clear(); PyErr_SetString(po_InvalidMessageOperation, imo.what()); - return (NULL); } catch (const TSIGContextError& ex) { // toWire() with a TSIG context can fail due to this if the // python program has a bug. 
PyErr_SetString(po_TSIGContextError, ex.what()); - return (NULL); - } catch (const std::exception& ex) { - // Other exceptions should be rare (most likely an implementation - // bug) - PyErr_SetString(po_TSIGContextError, ex.what()); - return (NULL); + } catch (const exception& ex) { + const string ex_what = "Error in Message.to_wire(): " + string(ex.what()); + PyErr_SetString(po_TSIGContextError, ex_what.c_str()); } catch (...) { - PyErr_SetString(PyExc_RuntimeError, - "Unexpected C++ exception in Message.to_wire"); - return (NULL); + PyErr_SetString(po_IscException, + "Unexpected exception in Message.to_wire()"); } + return (NULL); } PyErr_Clear(); PyErr_SetString(PyExc_TypeError, @@ -655,29 +754,22 @@ Message_fromWire(PyObject* pyself, PyObject* args) { Py_RETURN_NONE; } catch (const InvalidMessageOperation& imo) { PyErr_SetString(po_InvalidMessageOperation, imo.what()); - return (NULL); } catch (const DNSMessageFORMERR& dmfe) { PyErr_SetString(po_DNSMessageFORMERR, dmfe.what()); - return (NULL); } catch (const DNSMessageBADVERS& dmfe) { PyErr_SetString(po_DNSMessageBADVERS, dmfe.what()); - return (NULL); } catch (const MessageTooShort& mts) { PyErr_SetString(po_MessageTooShort, mts.what()); - return (NULL); } catch (const InvalidBufferPosition& ex) { PyErr_SetString(po_DNSMessageFORMERR, ex.what()); - return (NULL); } catch (const exception& ex) { - const string ex_what = - "Error in Message.from_wire: " + string(ex.what()); - PyErr_SetString(PyExc_RuntimeError, ex_what.c_str()); - return (NULL); + const string ex_what = "Error in Message.from_wire(): " + string(ex.what()); + PyErr_SetString(po_IscException, ex_what.c_str()); } catch (...) 
{ - PyErr_SetString(PyExc_RuntimeError, - "Unexpected exception in Message.from_wire"); - return (NULL); + PyErr_SetString(po_IscException, + "Unexpected exception in Message.from_wire()"); } + return (NULL); } PyErr_SetString(PyExc_TypeError, diff --git a/src/lib/dns/python/message_python_inc.cc b/src/lib/dns/python/message_python_inc.cc index 561c494436..e1fd23d2ab 100644 --- a/src/lib/dns/python/message_python_inc.cc +++ b/src/lib/dns/python/message_python_inc.cc @@ -38,4 +38,21 @@ Parameters:\n\ options Parse options\n\ \n\ "; + +const char* const Message_clearSection_doc = "\ +clear_section(section) -> void\n\ +\n\ +Remove all RRSets from the given Section.\n\ +\n\ +This method is only allowed in the RENDER mode, and the given section\n\ +must be valid.\n\ +\n\ +Exceptions:\n\ + InvalidMessageOperation Message is not in the RENDER mode\n\ + OverflowError The specified section is not valid\n\ +\n\ +Parameters:\n\ + section Section to remove all rrsets from\n\ +\n\ +"; } // unnamed namespace diff --git a/src/lib/dns/python/name_python.cc b/src/lib/dns/python/name_python.cc index ce556df74c..c24d24d51b 100644 --- a/src/lib/dns/python/name_python.cc +++ b/src/lib/dns/python/name_python.cc @@ -20,6 +20,7 @@ #include #include #include +#include #include "pydnspp_common.h" #include "messagerenderer_python.h" @@ -114,6 +115,7 @@ PyObject* Name_reverse(s_Name* self); PyObject* Name_concatenate(s_Name* self, PyObject* args); PyObject* Name_downcase(s_Name* self); PyObject* Name_isWildCard(s_Name* self); +Py_hash_t Name_hash(PyObject* py_self); PyMethodDef Name_methods[] = { { "at", reinterpret_cast(Name_at), METH_VARARGS, @@ -518,6 +520,12 @@ Name_isWildCard(s_Name* self) { } } +Py_hash_t +Name_hash(PyObject* pyself) { + s_Name* const self = static_cast(pyself); + return (LabelSequence(*self->cppobj).getHash(false)); +} + } // end of unnamed namespace namespace isc { @@ -615,7 +623,7 @@ PyTypeObject name_type = { NULL, // tp_as_number NULL, // tp_as_sequence NULL, // 
tp_as_mapping - NULL, // tp_hash + Name_hash, // tp_hash NULL, // tp_call Name_str, // tp_str NULL, // tp_getattro diff --git a/src/lib/dns/python/pydnspp.cc b/src/lib/dns/python/pydnspp.cc index 64e3cae5c3..23ed46367c 100644 --- a/src/lib/dns/python/pydnspp.cc +++ b/src/lib/dns/python/pydnspp.cc @@ -221,6 +221,15 @@ initModulePart_Name(PyObject* mod) { NameComparisonResult::COMMONANCESTOR, "COMMONANCESTOR"); addClassVariable(name_comparison_result_type, "NameRelation", po_NameRelation); + // Add the constants themselves too + addClassVariable(name_comparison_result_type, "SUPERDOMAIN", + Py_BuildValue("I", NameComparisonResult::SUPERDOMAIN)); + addClassVariable(name_comparison_result_type, "SUBDOMAIN", + Py_BuildValue("I", NameComparisonResult::SUBDOMAIN)); + addClassVariable(name_comparison_result_type, "EQUAL", + Py_BuildValue("I", NameComparisonResult::EQUAL)); + addClassVariable(name_comparison_result_type, "COMMONANCESTOR", + Py_BuildValue("I", NameComparisonResult::COMMONANCESTOR)); PyModule_AddObject(mod, "NameComparisonResult", reinterpret_cast(&name_comparison_result_type)); diff --git a/src/lib/dns/python/pydnspp_common.h b/src/lib/dns/python/pydnspp_common.h index 8092b086d4..e9e935977e 100644 --- a/src/lib/dns/python/pydnspp_common.h +++ b/src/lib/dns/python/pydnspp_common.h @@ -43,6 +43,11 @@ extern PyObject* po_DNSMessageBADVERS; int readDataFromSequence(uint8_t *data, size_t len, PyObject* sequence); int addClassVariable(PyTypeObject& c, const char* name, PyObject* obj); + +// Short term workaround for unifying the return type of tp_hash +#if PY_MINOR_VERSION < 2 +typedef long Py_hash_t; +#endif } // namespace python } // namespace dns } // namespace isc diff --git a/src/lib/dns/python/rdata_python.cc b/src/lib/dns/python/rdata_python.cc index e4ff89057f..20f67c8d7e 100644 --- a/src/lib/dns/python/rdata_python.cc +++ b/src/lib/dns/python/rdata_python.cc @@ -116,6 +116,7 @@ Rdata_init(PyObject* self_p, PyObject* args, PyObject*) { return (0); } else 
if (PyArg_ParseTuple(args, "O!O!y#", &rrtype_type, &rrtype, &rrclass_type, &rrclass, &data, &len)) { + PyErr_Clear(); InputBuffer input_buffer(data, len); self->cppobj = createRdata(PyRRType_ToRRType(rrtype), PyRRClass_ToRRClass(rrclass), diff --git a/src/lib/dns/python/rrclass_python.cc b/src/lib/dns/python/rrclass_python.cc index 00141872e9..b94dc0231c 100644 --- a/src/lib/dns/python/rrclass_python.cc +++ b/src/lib/dns/python/rrclass_python.cc @@ -52,6 +52,7 @@ PyObject* RRClass_str(PyObject* self); PyObject* RRClass_toWire(s_RRClass* self, PyObject* args); PyObject* RRClass_getCode(s_RRClass* self); PyObject* RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op); +Py_hash_t RRClass_hash(PyObject* pyself); // Static function for direct class creation PyObject* RRClass_IN(s_RRClass *self); @@ -264,6 +265,12 @@ PyObject* RRClass_ANY(s_RRClass*) { return (RRClass_createStatic(RRClass::ANY())); } +Py_hash_t +RRClass_hash(PyObject* pyself) { + s_RRClass* const self = static_cast(pyself); + return (self->cppobj->getCode()); +} + } // end anonymous namespace namespace isc { @@ -296,7 +303,7 @@ PyTypeObject rrclass_type = { NULL, // tp_as_number NULL, // tp_as_sequence NULL, // tp_as_mapping - NULL, // tp_hash + RRClass_hash, // tp_hash NULL, // tp_call RRClass_str, // tp_str NULL, // tp_getattro diff --git a/src/lib/dns/python/tests/message_python_test.py b/src/lib/dns/python/tests/message_python_test.py index 86574fb815..6f32b113bf 100644 --- a/src/lib/dns/python/tests/message_python_test.py +++ b/src/lib/dns/python/tests/message_python_test.py @@ -118,6 +118,11 @@ class MessageTest(unittest.TestCase): self.assertFalse(self.r.get_header_flag(Message.HEADERFLAG_AD)) self.assertFalse(self.r.get_header_flag(Message.HEADERFLAG_CD)) + # 0 passed as flag should raise + self.assertRaises(InvalidParameter, self.r.get_header_flag, 0) + # unused bit + self.assertRaises(InvalidParameter, self.r.get_header_flag, 0x80000000) + self.r.set_header_flag(Message.HEADERFLAG_QR) 
self.assertTrue(self.r.get_header_flag(Message.HEADERFLAG_QR)) @@ -267,6 +272,15 @@ class MessageTest(unittest.TestCase): self.assertEqual(1, sys.getrefcount(self.r.get_question())) self.assertEqual(1, sys.getrefcount(self.r.get_question()[0])) + # Message.add_question() called in non-RENDER mode should assert + self.r.clear(Message.PARSE) + self.assertRaises(InvalidMessageOperation, self.r.add_question, q) + + def test_make_response(self): + # Message.make_response() called in non-PARSE mode should assert + self.r.clear(Message.RENDER) + self.assertRaises(InvalidMessageOperation, self.r.make_response) + def test_add_rrset(self): self.assertRaises(TypeError, self.r.add_rrset, "wrong") self.assertRaises(TypeError, self.r.add_rrset) @@ -289,6 +303,27 @@ class MessageTest(unittest.TestCase): self.assertRaises(TypeError, self.r.clear, "wrong") self.assertRaises(TypeError, self.r.clear, 3) + def test_clear_question_section(self): + self.r.add_question(Question(Name("www.example.com"), RRClass.IN(), + RRType.A())) + self.assertEqual(1, self.r.get_rr_count(Message.SECTION_QUESTION)) + self.r.clear_section(Message.SECTION_QUESTION) + self.assertEqual(0, self.r.get_rr_count(Message.SECTION_QUESTION)) + self.assertEqual(0, len(self.r.get_question())) + + def test_clear_section(self): + for section in [Message.SECTION_ANSWER, Message.SECTION_AUTHORITY, + Message.SECTION_ADDITIONAL]: + self.r.add_rrset(section, self.rrset_a) + self.assertEqual(2, self.r.get_rr_count(section)) + self.r.clear_section(section) + self.assertEqual(0, self.r.get_rr_count(section)) + + self.assertRaises(InvalidMessageOperation, self.p.clear_section, + Message.SECTION_ANSWER) + self.assertRaises(OverflowError, self.r.clear_section, + self.bogus_section) + def test_to_wire(self): self.assertRaises(TypeError, self.r.to_wire, 1) self.assertRaises(InvalidMessageOperation, diff --git a/src/lib/dns/python/tests/name_python_test.py b/src/lib/dns/python/tests/name_python_test.py index 5263412af7..8ea2e3520a 
100644 --- a/src/lib/dns/python/tests/name_python_test.py +++ b/src/lib/dns/python/tests/name_python_test.py @@ -218,5 +218,27 @@ class NameTest(unittest.TestCase): self.assertTrue(self.name4 <= self.name1) self.assertFalse(self.name2 >= self.name1) + def test_hash(self): + # The same name should have the same hash value. + self.assertEqual(hash(Name('example.com')), hash(Name('example.com'))) + # Hash is case insensitive. + self.assertEqual(hash(Name('example.com')), hash(Name('EXAMPLE.COM'))) + + # These pairs happen to be known to have different hashes. + # It may be naive to assume the hash value is always the same (we use + # an external library and it depends on its internal details). If + # it turns out that this assumption isn't always held, we should + # disable this test. + self.assertNotEqual(hash(Name('example.com')), + hash(Name('example.org'))) + + # Check insensitiveness for the case of inequality. + # Based on the assumption above, this 'if' should be true and + # we'll always test the case inside it. We'll still keep the if in + # case we end up disabling the above test. + if hash(Name('example.com')) != hash(Name('example.org')): + self.assertNotEqual(hash(Name('example.com')), + hash(Name('EXAMPLE.ORG'))) + if __name__ == '__main__': unittest.main() diff --git a/src/lib/dns/python/tests/rrclass_python_test.py b/src/lib/dns/python/tests/rrclass_python_test.py index 38d8c8c34d..a048c4c0f3 100644 --- a/src/lib/dns/python/tests/rrclass_python_test.py +++ b/src/lib/dns/python/tests/rrclass_python_test.py @@ -78,6 +78,14 @@ class RRClassTest(unittest.TestCase): self.assertTrue(self.c1 <= self.c2) self.assertFalse(self.c1 != other_rrclass) + def test_hash(self): + # Exploiting the knowledge that the hash value is the numeric class + # value, we can predict the comparison result. 
+ self.assertEqual(hash(RRClass.IN()), hash(RRClass("IN"))) + self.assertEqual(hash(RRClass("in")), hash(RRClass("IN"))) + self.assertNotEqual(hash(RRClass.IN()), hash(RRClass.CH())) + self.assertNotEqual(hash(RRClass.IN()), hash(RRClass("CLASS65535"))) + def test_statics(self): self.assertEqual(RRClass.IN(), RRClass("IN")) self.assertEqual(RRClass.CH(), RRClass("CH")) diff --git a/src/lib/dns/python/tests/rrset_python_test.py b/src/lib/dns/python/tests/rrset_python_test.py index de475a766d..0544872d34 100644 --- a/src/lib/dns/python/tests/rrset_python_test.py +++ b/src/lib/dns/python/tests/rrset_python_test.py @@ -30,6 +30,7 @@ class TestModuleSpec(unittest.TestCase): self.test_nsname = Name("ns.example.com") self.rrset_a = RRset(self.test_name, RRClass("IN"), RRType("A"), RRTTL(3600)) self.rrset_a_empty = RRset(self.test_name, RRClass("IN"), RRType("A"), RRTTL(3600)) + self.rrset_any_a_empty = RRset(self.test_name, RRClass("ANY"), RRType("A"), RRTTL(3600)) self.rrset_ns = RRset(self.test_domain, RRClass("IN"), RRType("NS"), RRTTL(86400)) self.rrset_ch_txt = RRset(self.test_domain, RRClass("CH"), RRType("TXT"), RRTTL(0)) self.MAX_RDATA_COUNT = 100 @@ -90,6 +91,9 @@ class TestModuleSpec(unittest.TestCase): self.assertRaises(EmptyRRset, self.rrset_a_empty.to_text) + self.assertEqual("test.example.com. 
3600 ANY A\n", + self.rrset_any_a_empty.to_text()) + def test_to_wire_buffer(self): exp_buffer = bytearray(b'\x04test\x07example\x03com\x00\x00\x01\x00\x01\x00\x00\x0e\x10\x00\x04\xc0\x00\x02\x01\x04test\x07example\x03com\x00\x00\x01\x00\x01\x00\x00\x0e\x10\x00\x04\xc0\x00\x02\x02') buffer = bytearray() @@ -99,6 +103,11 @@ class TestModuleSpec(unittest.TestCase): self.assertRaises(EmptyRRset, self.rrset_a_empty.to_wire, buffer); self.assertRaises(TypeError, self.rrset_a.to_wire, 1) + exp_buffer = bytearray(b'\x04test\x07example\x03com\x00\x00\x01\x00\xff\x00\x00\x0e\x10\x00\x00') + buffer = bytearray() + self.rrset_any_a_empty.to_wire(buffer) + self.assertEqual(exp_buffer, buffer) + def test_to_wire_renderer(self): exp_buffer = bytearray(b'\x04test\x07example\x03com\x00\x00\x01\x00\x01\x00\x00\x0e\x10\x00\x04\xc0\x00\x02\x01\xc0\x00\x00\x01\x00\x01\x00\x00\x0e\x10\x00\x04\xc0\x00\x02\x02') mr = MessageRenderer() diff --git a/src/lib/dns/python/tests/testutil.py b/src/lib/dns/python/tests/testutil.py index 679f827888..6a1397f075 100644 --- a/src/lib/dns/python/tests/testutil.py +++ b/src/lib/dns/python/tests/testutil.py @@ -28,14 +28,14 @@ def read_wire_data(filename): data = bytes() for path in testdata_path.split(":"): try: - file = open(path + os.sep + filename, "r") - for line in file: - line = line.strip() - if line == "" or line.startswith("#"): - pass - else: - cur_data = bytes.fromhex(line) - data += cur_data + with open(path + os.sep + filename, "r") as f: + for line in f: + line = line.strip() + if line == "" or line.startswith("#"): + pass + else: + cur_data = bytes.fromhex(line) + data += cur_data return data except IOError: diff --git a/src/lib/dns/rdata.cc b/src/lib/dns/rdata.cc index c1ece52970..59a58871b8 100644 --- a/src/lib/dns/rdata.cc +++ b/src/lib/dns/rdata.cc @@ -119,7 +119,7 @@ Generic::Generic(isc::util::InputBuffer& buffer, size_t rdata_len) { impl_ = new GenericImpl(data); } -Generic::Generic(const string& rdata_string) { 
+Generic::Generic(const std::string& rdata_string) { istringstream iss(rdata_string); string unknown_mark; iss >> unknown_mark; diff --git a/src/lib/dns/rdata/any_255/tsig_250.cc b/src/lib/dns/rdata/any_255/tsig_250.cc index 4eb72bcf0b..9ef887fa89 100644 --- a/src/lib/dns/rdata/any_255/tsig_250.cc +++ b/src/lib/dns/rdata/any_255/tsig_250.cc @@ -74,25 +74,28 @@ struct TSIG::TSIGImpl { /// \code

    | ] +/// message [-v | -h | -p | -d <dir> | ] /// /// It reads the message file and writes out two files of the same /// name in the current working directory (unless -d is used) but /// with extensions of .h and .cc, or .py if -p is used. /// /// -v causes it to print the version number and exit. -h prints a help -/// message (and exits). -p sets the output to python. -d will make +/// message (and exits). -p sets the output to python. -d <dir> will make /// it write the output file(s) to dir instead of current working /// directory @@ -119,9 +119,9 @@ currentTime() { /// \brief Create Header Sentinel /// -/// Given the name of a file, create an #ifdef sentinel name. The name is -/// ___, where is the name of the file, and is the -/// extension less the leading period. The sentinel will be upper-case. +/// Given the name of a file, create an \#ifdef sentinel name. The name is +/// ___, where <name> is the name of the file, and <ext> +/// is the extension less the leading period. The sentinel will be upper-case. /// /// \param file Filename object representing the file. /// diff --git a/src/lib/log/log_formatter.cc b/src/lib/log/log_formatter.cc index c728cb5dd3..9cd5cc77b4 100644 --- a/src/lib/log/log_formatter.cc +++ b/src/lib/log/log_formatter.cc @@ -17,6 +17,10 @@ #include +#ifdef ENABLE_LOGGER_CHECKS +#include +#endif + using namespace std; using namespace boost; @@ -59,6 +63,9 @@ checkExcessPlaceholders(string* message, unsigned int placeholder) { // but we can't at least for now because this function is called from // the Formatter's destructor. #ifdef ENABLE_LOGGER_CHECKS + // Also, make sure we print the message so we can identify which + // identifier has the problem. 
+ cerr << "Message " << *message << endl; assert("Excess logger placeholders still exist in message" == NULL); #else message->append(" @@Excess logger placeholders still exist@@"); diff --git a/src/lib/log/log_formatter.h b/src/lib/log/log_formatter.h index fc6020308b..eebdb1a026 100644 --- a/src/lib/log/log_formatter.h +++ b/src/lib/log/log_formatter.h @@ -197,7 +197,9 @@ public: try { return (arg(boost::lexical_cast(value))); } catch (const boost::bad_lexical_cast& ex) { - + // The formatting of the log message got wrong, we don't want + // to output it. + deactivate(); // A bad_lexical_cast during a conversion to a string is // *extremely* unlikely to fail. However, there is nothing // in the documentation that rules it out, so we need to handle @@ -229,10 +231,35 @@ public: // occurrences of "%2" with 42. (Conversely, the sequence // .arg(42).arg("%1") would return "42 %1" - there are no recursive // replacements). - replacePlaceholder(message_, arg, ++nextPlaceholder_ ); + try { + replacePlaceholder(message_, arg, ++nextPlaceholder_ ); + } + catch (...) { + // Something went wrong here, the log message is broken, so + // we don't want to output it, nor we want to check all the + // placeholders were used (because they won't be). + deactivate(); + throw; + } } return (*this); } + + /// \brief Turn off the output of this logger. + /// + /// If the logger would output anything at the end, now it won't. + /// Also, this turns off the strict checking of placeholders, if + /// it is compiled in. + /// + /// The expected use is when there was an exception processing + /// the arguments for the message. 
+ void deactivate() { + if (logger_) { + delete message_; + message_ = NULL; + logger_ = NULL; + } + } }; } diff --git a/src/lib/log/logger.cc b/src/lib/log/logger.cc index d10e9793f4..fef5627c8a 100644 --- a/src/lib/log/logger.cc +++ b/src/lib/log/logger.cc @@ -179,6 +179,13 @@ Logger::fatal(const isc::log::MessageID& ident) { } } +// Replace the interprocess synchronization object + +void +Logger::setInterprocessSync(isc::util::InterprocessSync* sync) { + getLoggerPtr()->setInterprocessSync(sync); +} + // Comparison (testing only) bool diff --git a/src/lib/log/logger.h b/src/lib/log/logger.h index 5715bc4165..6405488b47 100644 --- a/src/lib/log/logger.h +++ b/src/lib/log/logger.h @@ -25,6 +25,7 @@ #include #include +#include namespace isc { namespace log { @@ -98,6 +99,17 @@ public: {} }; +/// \brief Bad Interprocess Sync +/// +/// Exception thrown if a bad InterprocessSync object (such as NULL) is +/// used. +class BadInterprocessSync : public isc::Exception { +public: + BadInterprocessSync(const char* file, size_t line, const char* what) : + isc::Exception(file, line, what) + {} +}; + /// \brief Logger Class /// /// This class is the main class used for logging. Use comprises: @@ -237,6 +249,17 @@ public: /// \param ident Message identification. Formatter fatal(const MessageID& ident); + /// \brief Replace the interprocess synchronization object + /// + /// If this method is called with NULL as the argument, it throws a + /// BadInterprocessSync exception. + /// + /// \param sync The logger uses this synchronization object for + /// synchronizing output of log messages. It should be deletable and + /// the ownership is transferred to the logger. If NULL is passed, + /// a BadInterprocessSync exception is thrown. + void setInterprocessSync(isc::util::InterprocessSync* sync); + /// \brief Equality /// /// Check if two instances of this logger refer to the same stream. 
diff --git a/src/lib/log/logger_impl.cc b/src/lib/log/logger_impl.cc index 046da13c86..2d6c0f4a27 100644 --- a/src/lib/log/logger_impl.cc +++ b/src/lib/log/logger_impl.cc @@ -32,12 +32,14 @@ #include #include +#include // Note: as log4cplus and the BIND 10 logger have many concepts in common, and // thus many similar names, to disambiguate types we don't "use" the log4cplus // namespace: instead, all log4cplus types are explicitly qualified. using namespace std; +using namespace isc::util; namespace isc { namespace log { @@ -47,14 +49,17 @@ namespace log { // one compiler requires that all member variables be constructed before the // constructor is run, but log4cplus::Logger (the type of logger_) has no // default constructor. -LoggerImpl::LoggerImpl(const string& name) : name_(expandLoggerName(name)), - logger_(log4cplus::Logger::getInstance(name_)) +LoggerImpl::LoggerImpl(const string& name) : + name_(expandLoggerName(name)), + logger_(log4cplus::Logger::getInstance(name_)), + sync_(new InterprocessSyncFile("logger")) { } // Destructor. (Here because of virtual declaration.) LoggerImpl::~LoggerImpl() { + delete sync_; } // Set the severity for logging. @@ -102,8 +107,30 @@ LoggerImpl::lookupMessage(const MessageID& ident) { MessageDictionary::globalDictionary().getText(ident))); } +// Replace the interprocess synchronization object + +void +LoggerImpl::setInterprocessSync(isc::util::InterprocessSync* sync) { + if (sync == NULL) { + isc_throw(BadInterprocessSync, + "NULL was passed to setInterprocessSync()"); + } + + delete sync_; + sync_ = sync; +} + void LoggerImpl::outputRaw(const Severity& severity, const string& message) { + // Use an interprocess sync locker for mutual exclusion from other + // processes to avoid log messages getting interspersed. 
+ + InterprocessSyncLocker locker(*sync_); + + if (!locker.lock()) { + LOG4CPLUS_ERROR(logger_, "Unable to lock logger lockfile"); + } + switch (severity) { case DEBUG: LOG4CPLUS_DEBUG(logger_, message); @@ -124,6 +151,10 @@ LoggerImpl::outputRaw(const Severity& severity, const string& message) { case FATAL: LOG4CPLUS_FATAL(logger_, message); } + + if (!locker.unlock()) { + LOG4CPLUS_ERROR(logger_, "Unable to unlock logger lockfile"); + } } } // namespace log diff --git a/src/lib/log/logger_impl.h b/src/lib/log/logger_impl.h index 90bd41a573..10d3db4b58 100644 --- a/src/lib/log/logger_impl.h +++ b/src/lib/log/logger_impl.h @@ -32,6 +32,8 @@ #include #include +#include + namespace isc { namespace log { @@ -167,6 +169,17 @@ public: /// This gets you the unformatted text of message for given ID. std::string* lookupMessage(const MessageID& id); + /// \brief Replace the interprocess synchronization object + /// + /// If this method is called with NULL as the argument, it throws a + /// BadInterprocessSync exception. + /// + /// \param sync The logger uses this synchronization object for + /// synchronizing output of log messages. It should be deletable and + /// the ownership is transferred to the logger implementation. + /// If NULL is passed, a BadInterprocessSync exception is thrown. + void setInterprocessSync(isc::util::InterprocessSync* sync); + /// \brief Equality /// /// Check if two instances of this logger refer to the same stream. 
@@ -178,8 +191,9 @@ public: } private: - std::string name_; ///< Full name of this logger - log4cplus::Logger logger_; ///< Underlying log4cplus logger + std::string name_; ///< Full name of this logger + log4cplus::Logger logger_; ///< Underlying log4cplus logger + isc::util::InterprocessSync* sync_; }; } // namespace log diff --git a/src/lib/log/logger_manager.cc b/src/lib/log/logger_manager.cc index 8a8a36bbfc..8431c2ea53 100644 --- a/src/lib/log/logger_manager.cc +++ b/src/lib/log/logger_manager.cc @@ -28,6 +28,7 @@ #include #include #include +#include "util/interprocess_sync_null.h" using namespace std; @@ -148,6 +149,13 @@ LoggerManager::readLocalMessageFile(const char* file) { MessageDictionary& dictionary = MessageDictionary::globalDictionary(); MessageReader reader(&dictionary); + + // Turn off use of any lock files. This is because this logger can + // be used by standalone programs which may not have write access to + // the local state directory (to create lock files). So we switch to + // using a null interprocess sync object here. 
+ logger.setInterprocessSync(new isc::util::InterprocessSyncNull("logger")); + try { logger.info(LOG_READING_LOCAL_FILE).arg(file); diff --git a/src/lib/log/logger_unittest_support.cc b/src/lib/log/logger_unittest_support.cc index a0969be6bc..4f02b077ea 100644 --- a/src/lib/log/logger_unittest_support.cc +++ b/src/lib/log/logger_unittest_support.cc @@ -160,6 +160,9 @@ void initLogger(isc::log::Severity severity, int dbglevel) { // Set the local message file const char* localfile = getenv("B10_LOGGER_LOCALMSG"); + // Set a directory for creating lockfiles when running tests + setenv("B10_LOCKFILE_DIR_FROM_BUILD", TOP_BUILDDIR, 1); + // Initialize logging initLogger(root, isc::log::DEBUG, isc::log::MAX_DEBUG_LEVEL, localfile); diff --git a/src/lib/log/message_dictionary.cc b/src/lib/log/message_dictionary.cc index deb82328fc..3bfc56cd82 100644 --- a/src/lib/log/message_dictionary.cc +++ b/src/lib/log/message_dictionary.cc @@ -29,7 +29,7 @@ MessageDictionary::~MessageDictionary() { // Add message and note if ID already exists bool -MessageDictionary::add(const string& ident, const string& text) { +MessageDictionary::add(const std::string& ident, const std::string& text) { Dictionary::iterator i = dictionary_.find(ident); bool not_found = (i == dictionary_.end()); if (not_found) { @@ -44,7 +44,7 @@ MessageDictionary::add(const string& ident, const string& text) { // Add message and note if ID does not already exist bool -MessageDictionary::replace(const string& ident, const string& text) { +MessageDictionary::replace(const std::string& ident, const std::string& text) { Dictionary::iterator i = dictionary_.find(ident); bool found = (i != dictionary_.end()); if (found) { @@ -87,7 +87,7 @@ MessageDictionary::load(const char* messages[]) { // output. 
const string& -MessageDictionary::getText(const string& ident) const { +MessageDictionary::getText(const std::string& ident) const { static const string empty(""); Dictionary::const_iterator i = dictionary_.find(ident); if (i == dictionary_.end()) { diff --git a/src/lib/log/message_exception.h b/src/lib/log/message_exception.h index cd6caf210e..8b9d58a060 100644 --- a/src/lib/log/message_exception.h +++ b/src/lib/log/message_exception.h @@ -38,6 +38,9 @@ public: /// \brief Constructor /// + /// \param file Filename where the exception occurred. + /// \param line Line where exception occurred. + /// \param what Text description of the problem. /// \param id Message identification. /// \param lineno Line number on which error occurred (if > 0). MessageException(const char* file, size_t line, const char* what, @@ -51,6 +54,9 @@ public: /// \brief Constructor /// + /// \param file Filename where the exception occurred. + /// \param line Line where exception occurred. + /// \param what Text description of the problem. /// \param id Message identification. /// \param arg1 First message argument. /// \param lineno Line number on which error occurred (if > 0). @@ -66,6 +72,9 @@ public: /// \brief Constructor /// + /// \param file Filename where the exception occurred. + /// \param line Line where exception occurred. + /// \param what Text description of the problem. /// \param id Message identification. /// \param arg1 First message argument. /// \param arg2 Second message argument. 
diff --git a/src/lib/log/tests/.gitignore b/src/lib/log/tests/.gitignore index 41b863bbef..b0e45b9150 100644 --- a/src/lib/log/tests/.gitignore +++ b/src/lib/log/tests/.gitignore @@ -6,6 +6,10 @@ /initializer_unittests_2 /local_file_test.sh /logger_example +/logger_lock_test +/logger_lock_test.sh +/log_test_messages.cc +/log_test_messages.h /run_unittests /severity_test.sh /tempdir.h diff --git a/src/lib/log/tests/Makefile.am b/src/lib/log/tests/Makefile.am index 6f3d7686dd..3212daaa2e 100644 --- a/src/lib/log/tests/Makefile.am +++ b/src/lib/log/tests/Makefile.am @@ -12,6 +12,13 @@ endif CLEANFILES = *.gcno *.gcda +EXTRA_DIST = log_test_messages.mes +BUILT_SOURCES = log_test_messages.h log_test_messages.cc +log_test_messages.h log_test_messages.cc: log_test_messages.mes + $(AM_V_GEN) $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/log/tests/log_test_messages.mes + +CLEANFILES += log_test_messages.h log_test_messages.cc + noinst_PROGRAMS = logger_example logger_example_SOURCES = logger_example.cc logger_example_CPPFLAGS = $(AM_CPPFLAGS) @@ -30,6 +37,19 @@ init_logger_test_LDADD += $(top_builddir)/src/lib/util/libutil.la init_logger_test_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la init_logger_test_LDADD += $(AM_LDADD) $(LOG4CPLUS_LIBS) +noinst_PROGRAMS += logger_lock_test +logger_lock_test_SOURCES = logger_lock_test.cc +nodist_logger_lock_test_SOURCES = log_test_messages.cc log_test_messages.h +logger_lock_test_CPPFLAGS = $(AM_CPPFLAGS) +logger_lock_test_LDFLAGS = $(AM_LDFLAGS) +logger_lock_test_LDADD = $(top_builddir)/src/lib/log/liblog.la +logger_lock_test_LDADD += $(top_builddir)/src/lib/util/libutil.la +logger_lock_test_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la +logger_lock_test_LDADD += $(AM_LDADD) $(LOG4CPLUS_LIBS) + +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + if HAVE_GTEST TESTS = @@ -41,9 +61,9 @@ endif AM_CPPFLAGS += $(GTEST_INCLUDES) $(LOG4CPLUS_INCLUDES) AM_LDFLAGS += 
$(GTEST_LDFLAGS) +AM_LDADD += $(top_builddir)/src/lib/util/libutil.la AM_LDADD += $(top_builddir)/src/lib/log/liblog.la AM_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la -AM_LDADD += $(top_builddir)/src/lib/util/libutil.la AM_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la AM_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la AM_LDADD += $(GTEST_LDADD) @@ -62,6 +82,7 @@ run_unittests_SOURCES += logger_specification_unittest.cc run_unittests_SOURCES += message_dictionary_unittest.cc run_unittests_SOURCES += message_reader_unittest.cc run_unittests_SOURCES += output_option_unittest.cc +nodist_run_unittests_SOURCES = log_test_messages.cc log_test_messages.h run_unittests_CPPFLAGS = $(AM_CPPFLAGS) run_unittests_CXXFLAGS = $(AM_CXXFLAGS) @@ -104,4 +125,5 @@ check-local: $(SHELL) $(abs_builddir)/destination_test.sh $(SHELL) $(abs_builddir)/init_logger_test.sh $(SHELL) $(abs_builddir)/local_file_test.sh + $(SHELL) $(abs_builddir)/logger_lock_test.sh $(SHELL) $(abs_builddir)/severity_test.sh diff --git a/src/lib/log/tests/log_formatter_unittest.cc b/src/lib/log/tests/log_formatter_unittest.cc index 83fc0629f8..435b200979 100644 --- a/src/lib/log/tests/log_formatter_unittest.cc +++ b/src/lib/log/tests/log_formatter_unittest.cc @@ -81,6 +81,14 @@ TEST_F(FormatterTest, stringArg) { } } +// Test the .deactivate() method +TEST_F(FormatterTest, deactivate) { + Formatter(isc::log::INFO, s("Text of message"), this).deactivate(); + // If there was no .deactivate, it should have output it. + // But not now. 
+ ASSERT_EQ(0, outputs.size()); +} + // Can convert to string TEST_F(FormatterTest, intArg) { Formatter(isc::log::INFO, s("The answer is %1"), this).arg(42); @@ -117,15 +125,12 @@ TEST_F(FormatterTest, mismatchedPlaceholders) { arg("only one"); }, ".*"); - // Mixed case of above two: the exception will be thrown due to the missing - // placeholder, but before even it's caught the program will be aborted - // due to the unused placeholder as a result of the exception. - EXPECT_DEATH({ - isc::util::unittests::dontCreateCoreDumps(); - Formatter(isc::log::INFO, s("Missing the first %2"), this). - arg("missing").arg("argument"); - }, ".*"); #endif /* EXPECT_DEATH */ + // Mixed case of above two: the exception will be thrown due to the missing + // placeholder. The other check is disabled due to that. + EXPECT_THROW(Formatter(isc::log::INFO, s("Missing the first %2"), this). + arg("missing").arg("argument"), + isc::log::MismatchedPlaceholders); } #else diff --git a/src/lib/log/tests/log_test_messages.mes b/src/lib/log/tests/log_test_messages.mes new file mode 100644 index 0000000000..ed4940cd8f --- /dev/null +++ b/src/lib/log/tests/log_test_messages.mes @@ -0,0 +1,26 @@ +# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +# PERFORMANCE OF THIS SOFTWARE. 
+ +# \brief Message Utility Message File +# +# This is the source of the set of messages generated by the message and +# logging components. The associated .h and .cc files are created by hand from +# this file though and are not built during the build process; this is to avoid +# the chicken-and-egg situation where we need the files to build the message +# compiler, yet we need the compiler to build the files. + +$NAMESPACE isc::log + +% LOG_LOCK_TEST_MESSAGE this is a test message. +This is a log message used in testing. diff --git a/src/lib/log/tests/logger_example.cc b/src/lib/log/tests/logger_example.cc index d3f08f37d5..853d48a73f 100644 --- a/src/lib/log/tests/logger_example.cc +++ b/src/lib/log/tests/logger_example.cc @@ -41,6 +41,7 @@ // Include a set of message definitions. #include +#include "util/interprocess_sync_null.h" using namespace isc::log; using namespace std; @@ -280,10 +281,17 @@ int main(int argc, char** argv) { LoggerManager::readLocalMessageFile(argv[optind]); } - // Log a few messages to different loggers. + // Log a few messages to different loggers. Here, we switch to using + // null interprocess sync objects for the loggers below as the + // logger example can be used as a standalone program (which may not + // have write access to a local state directory to create + // lockfiles). 
isc::log::Logger logger_ex(ROOT_NAME); + logger_ex.setInterprocessSync(new isc::util::InterprocessSyncNull("logger")); isc::log::Logger logger_alpha("alpha"); + logger_alpha.setInterprocessSync(new isc::util::InterprocessSyncNull("logger")); isc::log::Logger logger_beta("beta"); + logger_beta.setInterprocessSync(new isc::util::InterprocessSyncNull("logger")); LOG_FATAL(logger_ex, LOG_WRITE_ERROR).arg("test1").arg("42"); LOG_ERROR(logger_ex, LOG_READING_LOCAL_FILE).arg("dummy/file"); diff --git a/src/lib/log/tests/logger_lock_test.cc b/src/lib/log/tests/logger_lock_test.cc new file mode 100644 index 0000000000..d63989c8fb --- /dev/null +++ b/src/lib/log/tests/logger_lock_test.cc @@ -0,0 +1,64 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. 
+ +#include +#include +#include +#include "util/interprocess_sync.h" +#include "log_test_messages.h" +#include + +using namespace std; +using namespace isc::log; + +class MockLoggingSync : public isc::util::InterprocessSync { +public: + /// \brief Constructor + MockLoggingSync(const std::string& component_name) : + InterprocessSync(component_name) + {} + +protected: + virtual bool lock() { + cout << "FIELD1 FIELD2 LOGGER_LOCK_TEST: LOCK\n"; + return (true); + } + + virtual bool tryLock() { + cout << "FIELD1 FIELD2 LOGGER_LOCK_TEST: TRYLOCK\n"; + return (true); + } + + virtual bool unlock() { + cout << "FIELD1 FIELD2 LOGGER_LOCK_TEST: UNLOCK\n"; + return (true); + } +}; + +/// \brief Test logger lock sequence +/// +/// A program used in testing the logger. It verifies that (1) an +/// interprocess sync lock is first acquired by the logger, (2) the +/// message is logged by the logger, and (3) the lock is released in +/// that sequence. +int +main(int, char**) { + initLogger(); + Logger logger("log"); + logger.setInterprocessSync(new MockLoggingSync("log")); + + LOG_INFO(logger, LOG_LOCK_TEST_MESSAGE); + + return (0); +} diff --git a/src/lib/log/tests/logger_lock_test.sh.in b/src/lib/log/tests/logger_lock_test.sh.in new file mode 100755 index 0000000000..0324499329 --- /dev/null +++ b/src/lib/log/tests/logger_lock_test.sh.in @@ -0,0 +1,46 @@ +#!/bin/sh +# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS. 
IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +# PERFORMANCE OF THIS SOFTWARE. + +# Checks that the locker interprocess sync locks are acquired and +# released correctly. + +failcount=0 +tempfile=@abs_builddir@/logger_lock_test_tempfile_$$ +destfile=@abs_builddir@/logger_lock_test_destfile_$$ + +passfail() { + if [ $1 -eq 0 ]; then + echo " pass" + else + echo " FAIL" + failcount=`expr $failcount + $1` + fi +} + +echo -n "Testing that logger acquires and releases locks correctly:" +cat > $tempfile << . +LOGGER_LOCK_TEST: LOCK +INFO [bind10.log] LOG_LOCK_TEST_MESSAGE this is a test message. +LOGGER_LOCK_TEST: UNLOCK +. +rm -f $destfile +B10_LOGGER_SEVERITY=INFO B10_LOGGER_DESTINATION=stdout ./logger_lock_test > $destfile +cut -d' ' -f3- $destfile | diff $tempfile - +passfail $? + +# Tidy up. 
+rm -f $tempfile $destfile + +exit $failcount diff --git a/src/lib/log/tests/logger_unittest.cc b/src/lib/log/tests/logger_unittest.cc index 069205e1e0..a9330a9a64 100644 --- a/src/lib/log/tests/logger_unittest.cc +++ b/src/lib/log/tests/logger_unittest.cc @@ -23,6 +23,9 @@ #include #include #include +#include "log/tests/log_test_messages.h" + +#include using namespace isc; using namespace isc::log; @@ -379,3 +382,66 @@ TEST_F(LoggerTest, LoggerNameLength) { }, ".*"); #endif } + +TEST_F(LoggerTest, setInterprocessSync) { + // Create a logger + Logger logger("alpha"); + + EXPECT_THROW(logger.setInterprocessSync(NULL), BadInterprocessSync); +} + +class MockSync : public isc::util::InterprocessSync { +public: + /// \brief Constructor + MockSync(const std::string& component_name) : + InterprocessSync(component_name), was_locked_(false), + was_unlocked_(false) + {} + + bool wasLocked() const { + return (was_locked_); + } + + bool wasUnlocked() const { + return (was_unlocked_); + } + +protected: + bool lock() { + was_locked_ = true; + return (true); + } + + bool tryLock() { + return (true); + } + + bool unlock() { + was_unlocked_ = true; + return (true); + } + +private: + bool was_locked_; + bool was_unlocked_; +}; + +// Checks that the logger logs exclusively and other BIND 10 components +// are locked out. + +TEST_F(LoggerTest, Lock) { + // Create a logger + Logger logger("alpha"); + + // Setup our own mock sync object so that we can intercept the lock + // call and check if a lock has been taken. + MockSync* sync = new MockSync("logger"); + logger.setInterprocessSync(sync); + + // Log a message and put things into play. 
+ logger.setSeverity(isc::log::INFO, 100); + logger.info(LOG_LOCK_TEST_MESSAGE); + + EXPECT_TRUE(sync->wasLocked()); + EXPECT_TRUE(sync->wasUnlocked()); +} diff --git a/src/lib/log/tests/message_dictionary_unittest.cc b/src/lib/log/tests/message_dictionary_unittest.cc index 394fea0a73..b8bded364c 100644 --- a/src/lib/log/tests/message_dictionary_unittest.cc +++ b/src/lib/log/tests/message_dictionary_unittest.cc @@ -28,16 +28,17 @@ using namespace std; // global dictionary is loaded, the former should be marked as a duplicate // and the latter should be present. -static const char* values[] = { - "LOG_DUPLICATE_NAMESPACE", "duplicate $NAMESPACE directive found", +namespace { +const char* values[] = { + // This message for DUPLICATE_NAMESPACE must be copied from + // ../log_messages.mes; otherwise logger check might fail. + "LOG_DUPLICATE_NAMESPACE", "line %1: duplicate $NAMESPACE directive found", "NEWSYM", "new symbol added", NULL }; MessageInitializer init(values); - - - +} class MessageDictionaryTest : public ::testing::Test { protected: diff --git a/src/lib/log/tests/run_initializer_unittests.cc b/src/lib/log/tests/run_initializer_unittests.cc index 54ee120a80..6660215678 100644 --- a/src/lib/log/tests/run_initializer_unittests.cc +++ b/src/lib/log/tests/run_initializer_unittests.cc @@ -12,6 +12,7 @@ // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR // PERFORMANCE OF THIS SOFTWARE. +#include #include #include diff --git a/src/lib/log/tests/run_unittests.cc b/src/lib/log/tests/run_unittests.cc index 8a9d1e5821..019a548a76 100644 --- a/src/lib/log/tests/run_unittests.cc +++ b/src/lib/log/tests/run_unittests.cc @@ -12,6 +12,7 @@ // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR // PERFORMANCE OF THIS SOFTWARE. 
+#include #include #include diff --git a/src/lib/nsas/nsas_messages.mes b/src/lib/nsas/nsas_messages.mes index 6c35172f85..0b19a9e404 100644 --- a/src/lib/nsas/nsas_messages.mes +++ b/src/lib/nsas/nsas_messages.mes @@ -54,8 +54,8 @@ This message indicates an internal error in the NSAS. Please raise a bug report. % NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1 -A debug message output when a call is made to the NSAS (nameserver -address store - part of the resolver) to obtain the nameservers for +A debug message output when a call is made to the NSAS (nameserver +address store - part of the resolver) to obtain the nameservers for the specified zone. % NSAS_UPDATE_RTT update RTT for %1: was %2 ms, is now %3 ms @@ -72,5 +72,5 @@ A NSAS (nameserver address store - part of the resolver) made a query for a resource record of a particular type and class, but instead received an answer with a different given type and class. -This message indicates an internal error in the NSAS. Please raise a +This message indicates an internal error in the NSAS. Please raise a bug report. 
diff --git a/src/lib/nsas/tests/Makefile.am b/src/lib/nsas/tests/Makefile.am index afd91f63e4..3557c82960 100644 --- a/src/lib/nsas/tests/Makefile.am +++ b/src/lib/nsas/tests/Makefile.am @@ -25,6 +25,9 @@ endif CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = if HAVE_GTEST TESTS += run_unittests diff --git a/src/lib/python/isc/Makefile.am b/src/lib/python/isc/Makefile.am index aef5dc3d93..80fd22214a 100644 --- a/src/lib/python/isc/Makefile.am +++ b/src/lib/python/isc/Makefile.am @@ -1,5 +1,5 @@ SUBDIRS = datasrc cc config dns log net notify util testutils acl bind10 -SUBDIRS += xfrin log_messages server_common +SUBDIRS += xfrin log_messages server_common ddns python_PYTHON = __init__.py diff --git a/src/lib/python/isc/bind10/special_component.py b/src/lib/python/isc/bind10/special_component.py index ebdc07f547..688ccf5f85 100644 --- a/src/lib/python/isc/bind10/special_component.py +++ b/src/lib/python/isc/bind10/special_component.py @@ -37,6 +37,7 @@ class SockCreator(BaseComponent): BaseComponent.__init__(self, boss, kind) self.__creator = None self.__uid = boss.uid + self.__gid = boss.gid def _start_internal(self): self._boss.curproc = 'b10-sockcreator' @@ -45,6 +46,9 @@ class SockCreator(BaseComponent): self._boss.register_process(self.pid(), self) self._boss.set_creator(self.__creator) self._boss.log_started(self.pid()) + if self.__gid is not None: + logger.info(BIND10_SETGID, self.__gid) + posix.setgid(self.__gid) if self.__uid is not None: logger.info(BIND10_SETUID, self.__uid) posix.setuid(self.__uid) diff --git a/src/lib/python/isc/bind10/tests/Makefile.am b/src/lib/python/isc/bind10/tests/Makefile.am index 658db1e1da..196a8b93c7 100644 --- a/src/lib/python/isc/bind10/tests/Makefile.am +++ b/src/lib/python/isc/bind10/tests/Makefile.am @@ -23,6 +23,7 @@ endif echo Running test: $$pytest ; \ $(LIBRARY_PATH_PLACEHOLDER) \ 
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \ + B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \ BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \ done diff --git a/src/lib/python/isc/bind10/tests/component_test.py b/src/lib/python/isc/bind10/tests/component_test.py index ec0e8af70d..af529f867e 100644 --- a/src/lib/python/isc/bind10/tests/component_test.py +++ b/src/lib/python/isc/bind10/tests/component_test.py @@ -104,6 +104,8 @@ class ComponentTests(BossUtils, unittest.TestCase): self.__stop_process_params = None self.__start_simple_params = None # Pretending to be boss + self.gid = None + self.__gid_set = None self.uid = None self.__uid_set = None @@ -609,6 +611,9 @@ class ComponentTests(BossUtils, unittest.TestCase): self.assertTrue(process.killed) self.assertFalse(process.terminated) + def setgid(self, gid): + self.__gid_set = gid + def setuid(self, uid): self.__uid_set = uid @@ -637,7 +642,9 @@ class ComponentTests(BossUtils, unittest.TestCase): """ component = isc.bind10.special_component.SockCreator(None, self, 'needed', None) + orig_setgid = isc.bind10.special_component.posix.setgid orig_setuid = isc.bind10.special_component.posix.setuid + isc.bind10.special_component.posix.setgid = self.setgid isc.bind10.special_component.posix.setuid = self.setuid orig_creator = \ isc.bind10.special_component.isc.bind10.sockcreator.Creator @@ -645,18 +652,22 @@ class ComponentTests(BossUtils, unittest.TestCase): isc.bind10.special_component.isc.bind10.sockcreator.Creator = \ lambda path: self.FakeCreator() component.start() - # No uid set in boss, nothing called. + # No gid/uid set in boss, nothing called. 
+ self.assertIsNone(self.__gid_set) self.assertIsNone(self.__uid_set) # Doesn't do anything, but doesn't crash component.stop() component.kill() component.kill(True) + self.gid = 4200 self.uid = 42 component = isc.bind10.special_component.SockCreator(None, self, 'needed', None) component.start() # This time, it get's called + self.assertEqual(4200, self.__gid_set) self.assertEqual(42, self.__uid_set) + isc.bind10.special_component.posix.setgid = orig_setgid isc.bind10.special_component.posix.setuid = orig_setuid isc.bind10.special_component.isc.bind10.sockcreator.Creator = \ orig_creator diff --git a/src/lib/python/isc/bind10/tests/sockcreator_test.py b/src/lib/python/isc/bind10/tests/sockcreator_test.py index d97d21b7e8..f67781c6d3 100644 --- a/src/lib/python/isc/bind10/tests/sockcreator_test.py +++ b/src/lib/python/isc/bind10/tests/sockcreator_test.py @@ -303,6 +303,7 @@ class WrapTests(unittest.TestCase): # Transfer the descriptor send_fd(t1.fileno(), p1.fileno()) + p1.close() p1 = socket.fromfd(t2.read_fd(), socket.AF_UNIX, socket.SOCK_STREAM) # Now, pass some data trough the socket @@ -318,6 +319,14 @@ class WrapTests(unittest.TestCase): data = t1.recv(1) self.assertEqual(b'C', data) + # Explicitly close temporary socket pair as the Python + # interpreter expects it. It may not be 100% exception safe, + # but since this is only for tests we prefer brevity. + p1.close() + p2.close() + t1.close() + t2.close() + if __name__ == '__main__': isc.log.init("bind10") # FIXME Should this be needed? isc.log.resetUnitTestRootLogger() diff --git a/src/lib/python/isc/config/cfgmgr.py b/src/lib/python/isc/config/cfgmgr.py index 9f9ce684c4..aa0547b25c 100644 --- a/src/lib/python/isc/config/cfgmgr.py +++ b/src/lib/python/isc/config/cfgmgr.py @@ -81,6 +81,7 @@ class ConfigManagerData: and stop loading the system. 
""" config = ConfigManagerData(data_path, file_name) + logger.info(CFGMGR_CONFIG_FILE, config.db_filename) file = None try: file = open(config.db_filename, 'r') @@ -166,7 +167,7 @@ class ConfigManagerData: i += 1 new_file_name = new_file_name + "." + str(i) if os.path.exists(old_file_name): - logger.info(CFGMGR_RENAMED_CONFIG_FILE, old_file_name, new_file_name) + logger.info(CFGMGR_BACKED_UP_CONFIG_FILE, old_file_name, new_file_name) os.rename(old_file_name, new_file_name) def __eq__(self, other): diff --git a/src/lib/python/isc/config/cfgmgr_messages.mes b/src/lib/python/isc/config/cfgmgr_messages.mes index ad78be0be0..8701db3df3 100644 --- a/src/lib/python/isc/config/cfgmgr_messages.mes +++ b/src/lib/python/isc/config/cfgmgr_messages.mes @@ -20,6 +20,12 @@ An older version of the configuration database has been found, from which there was an automatic upgrade path to the current version. These changes are now applied, and no action from the administrator is necessary. +% CFGMGR_BACKED_UP_CONFIG_FILE Config file %1 was removed; a backup was made at %2 +BIND 10 has been started with the command to clear the configuration +file. The existing file has been backed up (moved) to the given file +name. A new configuration file will be created in the original location +when necessary. + % CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE Unable to parse response from module %1: %2 The configuration manager sent a configuration update to a module, but the module responded with an answer that could not be parsed. The answer @@ -31,6 +37,10 @@ assumed to have failed, and will not be stored. The configuration manager daemon was unable to connect to the messaging system. The most likely cause is that msgq is not running. +% CFGMGR_CONFIG_FILE Configuration manager starting with configuration file: %1 +The configuration manager is starting, reading and saving the configuration +settings to the shown file. 
+ % CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1 There was a problem reading the persistent configuration data as stored on disk. The file may be corrupted, or it is of a version from where @@ -51,11 +61,6 @@ error is given. The most likely cause is that the system does not have write access to the configuration database file. The updated configuration is not stored. -% CFGMGR_RENAMED_CONFIG_FILE renamed configuration file %1 to %2, will create new %1 -BIND 10 has been started with the command to clear the configuration file. -The existing file is backed up to the given file name, so that data is not -immediately lost if this was done by accident. - % CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down There was a keyboard interrupt signal to stop the cfgmgr daemon. The daemon will now shut down. diff --git a/src/lib/python/isc/config/config_messages.mes b/src/lib/python/isc/config/config_messages.mes index 9e93ca347f..1fcf59738d 100644 --- a/src/lib/python/isc/config/config_messages.mes +++ b/src/lib/python/isc/config/config_messages.mes @@ -21,16 +21,16 @@ # have that at this moment. So when adding a message, make sure that # the name is not already used in src/lib/config/config_messages.mes -% CONFIG_LOG_CONFIG_ERRORS error(s) in logging configuration: %1 -There was a logging configuration update, but the internal validator -for logging configuration found that it contained errors. The errors -are shown, and the update is ignored. - % CONFIG_GET_FAILED error getting configuration from cfgmgr: %1 The configuration manager returned an error response when the module requested its configuration. The full error message answer from the configuration manager is appended to the log error. +% CONFIG_LOG_CONFIG_ERRORS error(s) in logging configuration: %1 +There was a logging configuration update, but the internal validator +for logging configuration found that it contained errors. The errors +are shown, and the update is ignored. 
+ % CONFIG_SESSION_STOPPING_FAILED error sending stopping message: %1 There was a problem when sending a message signaling that the module using this CCSession is stopping. This message is sent so that the rest of the diff --git a/src/lib/python/isc/config/tests/Makefile.am b/src/lib/python/isc/config/tests/Makefile.am index 6670ee7254..cb59e6fa18 100644 --- a/src/lib/python/isc/config/tests/Makefile.am +++ b/src/lib/python/isc/config/tests/Makefile.am @@ -22,6 +22,7 @@ endif echo Running test: $$pytest ; \ $(LIBRARY_PATH_PLACEHOLDER) \ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/config \ + B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \ B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \ CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \ CONFIG_WR_TESTDATA_PATH=$(abs_top_builddir)/src/lib/config/tests/testdata \ diff --git a/src/lib/python/isc/config/tests/module_spec_test.py b/src/lib/python/isc/config/tests/module_spec_test.py index fc53d23221..bb2bcdac17 100644 --- a/src/lib/python/isc/config/tests/module_spec_test.py +++ b/src/lib/python/isc/config/tests/module_spec_test.py @@ -46,8 +46,8 @@ class TestModuleSpec(unittest.TestCase): self.spec1(dd) def test_open_file_obj(self): - file1 = open(self.spec_file("spec1.spec")) - dd = isc.config.module_spec_from_file(file1) + with open(self.spec_file("spec1.spec")) as file1: + dd = isc.config.module_spec_from_file(file1) self.spec1(dd) def test_open_bad_file_obj(self): @@ -89,8 +89,8 @@ class TestModuleSpec(unittest.TestCase): def validate_data(self, specfile_name, datafile_name): dd = self.read_spec_file(specfile_name); - data_file = open(self.spec_file(datafile_name)) - data_str = data_file.read() + with open(self.spec_file(datafile_name)) as data_file: + data_str = data_file.read() data = isc.cc.data.parse_value_str(data_str) return dd.validate_config(True, data) @@ -109,8 +109,8 @@ class TestModuleSpec(unittest.TestCase): def 
validate_command_params(self, specfile_name, datafile_name, cmd_name): dd = self.read_spec_file(specfile_name); - data_file = open(self.spec_file(datafile_name)) - data_str = data_file.read() + with open(self.spec_file(datafile_name)) as data_file: + data_str = data_file.read() params = isc.cc.data.parse_value_str(data_str) return dd.validate_command(cmd_name, params) @@ -131,8 +131,8 @@ class TestModuleSpec(unittest.TestCase): def test_statistics_validation(self): def _validate_stat(specfile_name, datafile_name): dd = self.read_spec_file(specfile_name); - data_file = open(self.spec_file(datafile_name)) - data_str = data_file.read() + with open(self.spec_file(datafile_name)) as data_file: + data_str = data_file.read() data = isc.cc.data.parse_value_str(data_str) return dd.validate_statistics(True, data, []) self.assertFalse(self.read_spec_file("spec1.spec").validate_statistics(True, None, None)); diff --git a/src/lib/python/isc/datasrc/finder_inc.cc b/src/lib/python/isc/datasrc/finder_inc.cc index 7caa144fa6..467c6adf67 100644 --- a/src/lib/python/isc/datasrc/finder_inc.cc +++ b/src/lib/python/isc/datasrc/finder_inc.cc @@ -225,22 +225,4 @@ In the second element a single RRset is returned for cases where the\n\ result is some kind of delegation, CNAME or similar; in other cases\n\ a list of RRsets is returned, containing all the results.\n\ "; - -const char* const ZoneFinder_find_previous_name_doc = "\ -find_previous_name(isc.dns.Name) -> isc.dns.Name\n\ -\n\ -Gets the previous name in the DNSSEC order. 
This can be used\n\ -to find the correct NSEC records for proving nonexistence\n\ -of domains.\n\ -\n\ -This method does not include under-zone-cut data (glue data).\n\ -\n\ -Raises isc.datasrc.NotImplemented in case the data source backend\n\ -doesn't support DNSSEC or there is no previous in the zone (NSEC\n\ -records might be missing in the DB, the queried name is less or\n\ -equal to the apex).\n\ -\n\ -Raises isc.datasrc.Error for low-level or internal datasource errors\n\ -(like broken connection to database, wrong data living there).\n\ -"; } // unnamed namespace diff --git a/src/lib/python/isc/datasrc/finder_python.cc b/src/lib/python/isc/datasrc/finder_python.cc index ed05fdb21b..1b0e3d1ca9 100644 --- a/src/lib/python/isc/datasrc/finder_python.cc +++ b/src/lib/python/isc/datasrc/finder_python.cc @@ -147,7 +147,8 @@ PyObject* ZoneFinder_helper_all(ZoneFinder* finder, PyObject* args) { // increases the refcount and the container decreases it // later. This way, it feels safer in case the build function // would fail. - return (Py_BuildValue("IO", r, list_container.get())); + return (Py_BuildValue("IOI", r, list_container.get(), + result_flags)); } else { if (rrsp) { // Use N instead of O so the refcount isn't increased twice @@ -253,31 +254,6 @@ ZoneFinder_find_all(PyObject* po_self, PyObject* args) { args)); } -PyObject* -ZoneFinder_findPreviousName(PyObject* po_self, PyObject* args) { - s_ZoneFinder* const self = static_cast(po_self); - PyObject* name_obj; - if (PyArg_ParseTuple(args, "O!", &name_type, &name_obj)) { - try { - return (createNameObject( - self->cppobj->findPreviousName(PyName_ToName(name_obj)))); - } catch (const isc::NotImplemented& nie) { - PyErr_SetString(getDataSourceException("NotImplemented"), - nie.what()); - return (NULL); - } catch (const std::exception& exc) { - PyErr_SetString(getDataSourceException("Error"), exc.what()); - return (NULL); - } catch (...) 
{ - PyErr_SetString(getDataSourceException("Error"), - "Unexpected exception"); - return (NULL); - } - } else { - return (NULL); - } -} - // This list contains the actual set of functions we have in // python. Each entry has // 1. Python method name @@ -290,8 +266,6 @@ PyMethodDef ZoneFinder_methods[] = { { "get_class", ZoneFinder_getClass, METH_NOARGS, ZoneFinder_getClass_doc }, { "find", ZoneFinder_find, METH_VARARGS, ZoneFinder_find_doc }, { "find_all", ZoneFinder_find_all, METH_VARARGS, ZoneFinder_findAll_doc }, - { "find_previous_name", ZoneFinder_findPreviousName, METH_VARARGS, - ZoneFinder_find_previous_name_doc }, { NULL, NULL, 0, NULL } }; diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py index c7bf6b4b50..74f822f61a 100644 --- a/src/lib/python/isc/datasrc/tests/datasrc_test.py +++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py @@ -83,7 +83,7 @@ def test_findall_common(self, tested): # A success. It should return the list now. 
# This also tests we can ommit the options parameter - result, rrsets = tested.find_all(isc.dns.Name("mix.example.com.")) + result, rrsets, _ = tested.find_all(isc.dns.Name("mix.example.com.")) self.assertEqual(ZoneFinder.SUCCESS, result) self.assertEqual(2, len(rrsets)) rrsets.sort(key=lambda rrset: rrset.get_type().to_text()) @@ -426,25 +426,6 @@ class DataSrcClient(unittest.TestCase): isc.dns.RRType.A(), "foo") - def test_find_previous(self): - dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG) - - result, finder = dsc.find_zone(isc.dns.Name("example.com")) - self.assertEqual(finder.SUCCESS, result) - - prev = finder.find_previous_name(isc.dns.Name("bbb.example.com")) - self.assertEqual("example.com.", prev.to_text()) - - prev = finder.find_previous_name(isc.dns.Name("zzz.example.com")) - self.assertEqual("www.example.com.", prev.to_text()) - - prev = finder.find_previous_name(prev) - self.assertEqual("*.wild.example.com.", prev.to_text()) - - self.assertRaises(isc.datasrc.NotImplemented, - finder.find_previous_name, - isc.dns.Name("com")) - class DataSrcUpdater(unittest.TestCase): def setUp(self): diff --git a/src/lib/python/isc/ddns/Makefile.am b/src/lib/python/isc/ddns/Makefile.am new file mode 100644 index 0000000000..1b9b6df960 --- /dev/null +++ b/src/lib/python/isc/ddns/Makefile.am @@ -0,0 +1,23 @@ +SUBDIRS = . 
tests + +python_PYTHON = __init__.py session.py logger.py zone_config.py +BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/libddns_messages.py +nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/libddns_messages.py +pylogmessagedir = $(pyexecdir)/isc/log_messages/ + +EXTRA_DIST = libddns_messages.mes + +CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/libddns_messages.py +CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/libddns_messages.pyc + +# Define rule to build logging source files from message file +$(PYTHON_LOGMSGPKG_DIR)/work/libddns_messages.py: libddns_messages.mes + $(top_builddir)/src/lib/log/compiler/message \ + -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/libddns_messages.mes + +pythondir = $(pyexecdir)/isc/ddns + +CLEANDIRS = __pycache__ + +clean-local: + rm -rf $(CLEANDIRS) diff --git a/src/lib/python/isc/ddns/__init__.py b/src/lib/python/isc/ddns/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/lib/python/isc/ddns/libddns_messages.mes b/src/lib/python/isc/ddns/libddns_messages.mes new file mode 100644 index 0000000000..7e34e70c18 --- /dev/null +++ b/src/lib/python/isc/ddns/libddns_messages.mes @@ -0,0 +1,214 @@ +# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +# PERFORMANCE OF THIS SOFTWARE. 
+ +# No namespace declaration - these constants go in the global namespace +# of the libddns_messages python module. + +% LIBDDNS_DATASRC_ERROR update client %1 failed due to data source error: %2 +An update attempt failed due to some error in the corresponding data +source. This is generally an unexpected event, but can still happen +for various reasons such as DB lock contention or a failure of the +backend DB server. The cause of the error is also logged. It's +advisable to check the message, and, if necessary, take an appropriate +action (e.g., restarting the DB server if it dies). If this message +is logged the data source isn't modified due to the +corresponding update request. When used by the b10-ddns, the server +will return a response with an RCODE of SERVFAIL. + +% LIBDDNS_PREREQ_FORMERR update client %1 for zone %2: Format error in prerequisite (%3). Non-zero TTL. +The prerequisite with the given name, class and type is not well-formed. +The specific prerequisite is shown. In this case, it has a non-zero TTL value. +A FORMERR error response is sent to the client. + +% LIBDDNS_PREREQ_FORMERR_ANY update client %1 for zone %2: Format error in prerequisite (%3). Non-zero TTL or rdata found. +The prerequisite with the given name, class and type is not well-formed. +The specific prerequisite is shown. In this case, it either has a non-zero +TTL value, or has rdata fields. A FORMERR error response is sent to the client. + +% LIBDDNS_PREREQ_FORMERR_CLASS update client %1 for zone %2: Format error in prerequisite (%3). Bad class. +The prerequisite with the given name, class and type is not well-formed. +The specific prerequisite is shown. In this case, the class of the +prerequisite should either match the class of the zone in the Zone Section, +or it should be ANY or NONE, and it is not. A FORMERR error response is sent +to the client. + +% LIBDDNS_PREREQ_FORMERR_NONE update client %1 for zone %2: Format error in prerequisite (%3). Non-zero TTL or rdata found. 
+The prerequisite with the given name, class and type is not well-formed. +The specific prerequisite is shown. In this case, it either has a non-zero +TTL value, or has rdata fields. A FORMERR error response is sent to the client. + +% LIBDDNS_PREREQ_NAME_IN_USE_FAILED update client %1 for zone %2: 'Name is in use' prerequisite not satisfied (%3), rcode: %4 +A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that +was not satisfied is shown. The client is sent an error response with the +given rcode. +In this case, the specific prerequisite is 'Name is in use'. From RFC2136: +Name is in use. At least one RR with a specified NAME (in +the zone and class specified by the Zone Section) must exist. +Note that this prerequisite is NOT satisfied by empty +nonterminals. + +% LIBDDNS_PREREQ_NAME_NOT_IN_USE_FAILED update client %1 for zone %2: 'Name is not in use' (%3) prerequisite not satisfied, rcode: %4 +A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that +was not satisfied is shown. The client is sent an error response with the +given rcode. +In this case, the specific prerequisite is 'Name is not in use'. +From RFC2136: +Name is not in use. No RR of any type is owned by a +specified NAME. Note that this prerequisite IS satisfied by +empty nonterminals. + +% LIBDDNS_PREREQ_NOTZONE update client %1 for zone %2: prerequisite not in zone (%3) +A DDNS UPDATE prerequisite has a name that does not appear to be inside +the zone specified in the Zone section of the UPDATE message. +The specific prerequisite is shown. A NOTZONE error response is sent to +the client. + +% LIBDDNS_PREREQ_RRSET_DOES_NOT_EXIST_FAILED update client %1 for zone %2: 'RRset does not exist' (%3) prerequisite not satisfied, rcode: %4 +A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that +was not satisfied is shown. The client is sent an error response with the +given rcode. 
+In this case, the specific prerequisite is 'RRset does not exist'.
+From RFC2136:
+RRset does not exist. No RRs with a specified NAME and TYPE
+(in the zone and class denoted by the Zone Section) can exist.
+
+% LIBDDNS_PREREQ_RRSET_EXISTS_FAILED update client %1 for zone %2: 'RRset exists (value independent)' (%3) prerequisite not satisfied, rcode: %4
+A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that
+was not satisfied is shown. The client is sent an error response with the
+given rcode.
+In this case, the specific prerequisite is 'RRset exists (value independent)'.
+From RFC2136:
+RRset exists (value independent). At least one RR with a
+specified NAME and TYPE (in the zone and class specified by
+the Zone Section) must exist.
+
+% LIBDDNS_PREREQ_RRSET_EXISTS_VAL_FAILED update client %1 for zone %2: 'RRset exists (value dependent)' (%3) prerequisite not satisfied, rcode: %4
+A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that
+was not satisfied is shown. The client is sent an error response with the
+given rcode.
+In this case, the specific prerequisite is 'RRset exists (value dependent)'.
+From RFC2136:
+RRset exists (value dependent). A set of RRs with a
+specified NAME and TYPE exists and has the same members
+with the same RDATAs as the RRset specified here in this
+Section.
+
+% LIBDDNS_UPDATE_ADD_BAD_TYPE update client %1 for zone %2: update addition RR bad type: %3
+The Update section of a DDNS update message contains a statement
+that tries to add a record of an invalid type. Most likely the
+record has an RRType that is considered a 'meta' type, which
+cannot be zone content data. The specific record is shown.
+A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_APPROVED update client %1 for zone %2 approved
+Debug message. An update request was approved in terms of the zone's
+update ACL.
+
+% LIBDDNS_UPDATE_BAD_CLASS update client %1 for zone %2: bad class in update RR: %3
+The Update section of a DDNS update message contains an RRset with
+a bad class. The class of the update RRset must be either the same
+as the class in the Zone Section, ANY, or NONE.
+A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_DATASRC_ERROR error in datasource during DDNS update: %1
+An error occurred while committing the DDNS update changes to the
+datasource. The specific error is printed. A SERVFAIL response is sent
+back to the client.
+
+% LIBDDNS_UPDATE_DELETE_BAD_TYPE update client %1 for zone %2: update deletion RR bad type: %3
+The Update section of a DDNS update message contains a statement
+that tries to delete an rrset of an invalid type. Most likely the
+record has an RRType that is considered a 'meta' type, which
+cannot be zone content data. The specific record is shown.
+A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_DELETE_NONZERO_TTL update client %1 for zone %2: update deletion RR has non-zero TTL: %3
+The Update section of a DDNS update message contains a 'delete rrset'
+statement with a non-zero TTL. This is not allowed by the protocol.
+A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_DELETE_RRSET_NOT_EMPTY update client %1 for zone %2: update deletion RR contains data %3
+The Update section of a DDNS update message contains a 'delete rrset'
+statement with a non-empty RRset. This is not allowed by the protocol.
+A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_DELETE_RR_BAD_TYPE update client %1 for zone %2: update deletion RR bad type: %3
+The Update section of a DDNS update message contains a statement
+that tries to delete one or more rrs of an invalid type. Most
+likely the records have an RRType that is considered a 'meta'
+type, which cannot be zone content data. The specific record is
+shown. A FORMERR response is sent back to the client.
+ +% LIBDDNS_UPDATE_DELETE_RR_NONZERO_TTL update client %1 for zone %2: update deletion RR has non-zero TTL: %3 +The Update section of a DDNS update message contains a 'delete rrs' +statement with a non-zero TTL. This is not allowed by the protocol. +A FORMERR response is sent back to the client. + +% LIBDDNS_UPDATE_DENIED update client %1 for zone %2 denied +Informational message. An update request was denied because it was +rejected by the zone's update ACL. When this library is used by +b10-ddns, the server will respond to the request with an RCODE of +REFUSED as described in Section 3.3 of RFC2136. + +% LIBDDNS_UPDATE_DROPPED update client %1 for zone %2 dropped +Informational message. An update request was denied because it was +rejected by the zone's update ACL. When this library is used by +b10-ddns, the server will then completely ignore the request; no +response will be sent. + +% LIBDDNS_UPDATE_ERROR update client %1 for zone %2: %3 +Debug message. An error is found in processing a dynamic update +request. This log message is used for general errors that are not +normally expected to happen. So, in general, it would mean some +problem in the client implementation or an interoperability issue +with this implementation. The client's address, the zone name and +class, and description of the error are logged. + +% LIBDDNS_UPDATE_FORWARD_FAIL update client %1 for zone %2: update forwarding not supported +Debug message. An update request is sent to a secondary server. This +is not necessarily invalid, but this implementation does not yet +support update forwarding as specified in Section 6 of RFC2136 and it +will simply return a response with an RCODE of NOTIMP to the client. +The client's address and the zone name/class are logged. + +% LIBDDNS_UPDATE_NOTAUTH update client %1 for zone %2: not authoritative for update zone +Debug message. An update request was received for a zone for which +the receiving server doesn't have authority. 
In theory this is an +unexpected event, but there are client implementations that could send +update requests carelessly, so it may not necessarily be so uncommon +in practice. If possible, you may want to check the implementation or +configuration of those clients to suppress the requests. As specified +in Section 3.1 of RFC2136, the receiving server will return a response +with an RCODE of NOTAUTH. + +% LIBDDNS_UPDATE_NOTZONE update client %1 for zone %2: update RR out of zone %3 +A DDNS UPDATE record has a name that does not appear to be inside +the zone specified in the Zone section of the UPDATE message. +The specific update record is shown. A NOTZONE error response is +sent to the client. + +% LIBDDNS_UPDATE_PREREQUISITE_FAILED prerequisite failed in update client %1 for zone %2: result code %3 +The handling of the prerequisite section (RFC2136 Section 3.2) found +that one of the prerequisites was not satisfied. The result code +should give more information on what prerequisite type failed. +If the result code is FORMERR, the prerequisite section was not well-formed. +An error response with the given result code is sent back to the client. + +% LIBDDNS_UPDATE_UNCAUGHT_EXCEPTION update client %1 for zone %2: uncaught exception while processing update section: %3 +An uncaught exception was encountered while processing the Update +section of a DDNS message. The specific exception is shown in the log message. +To make sure DDNS service is not interrupted, this problem is caught instead +of reraised; The update is aborted, and a SERVFAIL is sent back to the client. +This is most probably a bug in the DDNS code, but *could* be caused by +the data source. diff --git a/src/lib/python/isc/ddns/logger.py b/src/lib/python/isc/ddns/logger.py new file mode 100644 index 0000000000..0f95bd7273 --- /dev/null +++ b/src/lib/python/isc/ddns/logger.py @@ -0,0 +1,121 @@ +# Copyright (C) 2012 Internet Systems Consortium. 
+# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM +# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING +# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" This is a logging utility module for other modules of the ddns library +package. + +""" + +import isc.log + +# The logger for this package +logger = isc.log.Logger('libddns') + +class ClientFormatter: + """A utility class to convert a client address to string. + + This class is constructed with a Python standard socket address tuple. + If it's 2-element tuple, it's assumed to be an IPv4 socket address + and will be converted to the form of ':(/key=)'. + If it's 4-element tuple, it's assumed to be an IPv6 socket address. + and will be converted to the form of '[]:(/key=)'. + The optional key= will be added if a TSIG record is given + on construction. tsig-key is the TSIG key name in that case. + + This class is designed to delay the conversion until it's explicitly + requested, so the conversion doesn't happen if the corresponding log + message is suppressed because of its log level (which is often the case + for debug messages). + + Note: this optimization comes with the cost of instantiating the + formatter object itself. It's not really clear which overhead is + heavier, and we may conclude it's actually better to just generate + the strings unconditionally. 
Alternatively, we can make the stored + address of this object replaceable so that this object can be reused. + Right now this is an open issue. + + """ + def __init__(self, addr, tsig_record=None): + self.__addr = addr + self.__tsig_record = tsig_record + + def __format_addr(self): + if len(self.__addr) == 2: + return self.__addr[0] + ':' + str(self.__addr[1]) + elif len(self.__addr) == 4: + return '[' + self.__addr[0] + ']:' + str(self.__addr[1]) + return None + + def __str__(self): + format = self.__format_addr() + if format is not None and self.__tsig_record is not None: + format += '/key=' + self.__tsig_record.get_name().to_text(True) + return format + +class ZoneFormatter: + """A utility class to convert zone name and class to string. + + This class is constructed with a name of a zone (isc.dns.Name object) + and its RR class (isc.dns.RRClass object). Its text conversion method + (__str__) converts them into a string in the form of + '/' where the trailing dot of the zone name + is omitted. + + If the given zone name on construction is None, it's assumed to be + the zone isn't identified but needs to be somehow logged. The conversion + method returns a special string to indicate this case. + + This class is designed to delay the conversion until it's explicitly + requested, so the conversion doesn't happen if the corresponding log + message is suppressed because of its log level (which is often the case + for debug messages). + + See the note for the ClientFormatter class about overhead tradeoff. + This class shares the same discussion. + + """ + def __init__(self, zname, zclass): + self.__zname = zname + self.__zclass = zclass + + def __str__(self): + if self.__zname is None: + return '(zone unknown/not determined)' + return self.__zname.to_text(True) + '/' + self.__zclass.to_text() + +class RRsetFormatter: + """A utility class to convert rrsets to a short descriptive string. + + This class is constructed with an rrset (isc.dns.RRset object). 
+ Its text conversion method (__str__) converts it into a string + with only the name, class and type of the rrset. + This is used in logging so that the RRset can be identified, without + being completely printed, which would result in an unnecessary + multi-line message. + + This class is designed to delay the conversion until it's explicitly + requested, so the conversion doesn't happen if the corresponding log + message is suppressed because of its log level. + + See the note for the ClientFormatter class about overhead tradeoff. + This class shares the same discussion. + """ + def __init__(self, rrset): + self.__rrset = rrset + + def __str__(self): + return self.__rrset.get_name().to_text() + " " +\ + self.__rrset.get_class().to_text() + " " +\ + self.__rrset.get_type().to_text() diff --git a/src/lib/python/isc/ddns/session.py b/src/lib/python/isc/ddns/session.py new file mode 100644 index 0000000000..366bc8b75f --- /dev/null +++ b/src/lib/python/isc/ddns/session.py @@ -0,0 +1,864 @@ +# Copyright (C) 2012 Internet Systems Consortium. +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM +# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING +# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from isc.dns import * +import isc.ddns.zone_config +from isc.log import * +from isc.ddns.logger import logger, ClientFormatter, ZoneFormatter,\ + RRsetFormatter +from isc.log_messages.libddns_messages import * +from isc.datasrc import ZoneFinder +import isc.xfrin.diff +from isc.acl.acl import ACCEPT, REJECT, DROP +import copy + +# Result codes for UpdateSession.handle() +UPDATE_SUCCESS = 0 +UPDATE_ERROR = 1 +UPDATE_DROP = 2 + +# Convenient aliases of update-specific section names +SECTION_ZONE = Message.SECTION_QUESTION +SECTION_PREREQUISITE = Message.SECTION_ANSWER +SECTION_UPDATE = Message.SECTION_AUTHORITY + +# Shortcut +DBGLVL_TRACE_BASIC = logger.DBGLVL_TRACE_BASIC + +class UpdateError(Exception): + '''Exception for general error in update request handling. + + This exception is intended to be used internally within this module. + When UpdateSession.handle() encounters an error in handling an update + request it can raise this exception to terminate the handling. + + This class is constructed with some information that may be useful for + subsequent possible logging: + - msg (string) A string explaining the error. + - zname (isc.dns.Name) The zone name. Can be None when not identified. + - zclass (isc.dns.RRClass) The zone class. Like zname, can be None. + - rcode (isc.dns.RCode or None) The RCODE to be set in the response + message; this can be None if the response is not expected to be sent. + - nolog (bool) If True, it indicates there's no more need for logging. + + ''' + def __init__(self, msg, zname, zclass, rcode, nolog=False): + Exception.__init__(self, msg) + self.zname = zname + self.zclass = zclass + self.rcode = rcode + self.nolog = nolog + +def foreach_rr(rrset): + ''' + Generator that creates a new RRset with one RR from + the given RRset upon each iteration, usable in calls that + need to loop over an RRset and perform an action with each + of the individual RRs in it. 
+ Example: + for rr in foreach_rr(rrset): + print(str(rr)) + ''' + for rdata in rrset.get_rdata(): + rr = isc.dns.RRset(rrset.get_name(), + rrset.get_class(), + rrset.get_type(), + rrset.get_ttl()) + rr.add_rdata(rdata) + yield rr + +def convert_rrset_class(rrset, rrclass): + '''Returns a (new) rrset with the data from the given rrset, + but of the given class. Useful to convert from NONE and ANY to + a real class. + Note that the caller should be careful what to convert; + and DNS error that could happen during wire-format reading + could technically occur here, and is not caught by this helper. + ''' + new_rrset = isc.dns.RRset(rrset.get_name(), rrclass, + rrset.get_type(), rrset.get_ttl()) + for rdata in rrset.get_rdata(): + # Rdata class is nof modifiable, and must match rrset's + # class, so we need to to some ugly conversion here. + # And we cannot use to_text() (since the class may be unknown) + wire = rdata.to_wire(bytes()) + new_rrset.add_rdata(isc.dns.Rdata(rrset.get_type(), rrclass, wire)) + return new_rrset + +def collect_rrsets(collection, rrset): + ''' + Helper function to collect similar rrsets. + Collect all rrsets with the same name, class, and type + collection is the currently collected list of RRsets, + rrset is the RRset to add; + if an RRset with the same name, class and type as the + given rrset exists in the collection, its rdata fields + are added to that RRset. Otherwise, the rrset is added + to the given collection. + TTL is ignored. + This method does not check rdata contents for duplicate + values. + + The collection and its rrsets are modified in-place, + this method does not return anything. 
+ ''' + found = False + for existing_rrset in collection: + if existing_rrset.get_name() == rrset.get_name() and\ + existing_rrset.get_class() == rrset.get_class() and\ + existing_rrset.get_type() == rrset.get_type(): + for rdata in rrset.get_rdata(): + existing_rrset.add_rdata(rdata) + found = True + if not found: + collection.append(rrset) + +class DDNS_SOA: + '''Class to handle the SOA in the DNS update ''' + + def __get_serial_internal(self, origin_soa): + '''Get serial number from soa''' + return Serial(int(origin_soa.get_rdata()[0].to_text().split()[2])) + + def __write_soa_internal(self, origin_soa, soa_num): + '''Write back serial number to soa''' + new_soa = RRset(origin_soa.get_name(), origin_soa.get_class(), + RRType.SOA(), origin_soa.get_ttl()) + soa_rdata_parts = origin_soa.get_rdata()[0].to_text().split() + soa_rdata_parts[2] = str(soa_num.get_value()) + new_soa.add_rdata(Rdata(origin_soa.get_type(), origin_soa.get_class(), + " ".join(soa_rdata_parts))) + return new_soa + + def soa_update_check(self, origin_soa, new_soa): + '''Check whether the new soa is valid. If the serial number is bigger + than the old one, it is valid, then return True, otherwise, return + False. Make sure the origin_soa and new_soa parameters are not none + before invoke soa_update_check. + Parameters: + origin_soa, old SOA resource record. + new_soa, new SOA resource record. + Output: + if the serial number of new soa is bigger than the old one, return + True, otherwise return False. + ''' + old_serial = self.__get_serial_internal(origin_soa) + new_serial = self.__get_serial_internal(new_soa) + if(new_serial > old_serial): + return True + else: + return False + + def update_soa(self, origin_soa, inc_number = 1): + ''' Update the soa number incrementally as RFC 2136. Please make sure + that the origin_soa exists and not none before invoke this function. + Parameters: + origin_soa, the soa resource record which will be updated. 
+ inc_number, the number which will be added into the serial number of + origin_soa, the default value is one. + Output: + The new origin soa whoes serial number has been updated. + ''' + soa_num = self.__get_serial_internal(origin_soa) + soa_num = soa_num + inc_number + if soa_num.get_value() == 0: + soa_num = soa_num + 1 + return self.__write_soa_internal(origin_soa, soa_num) + +class UpdateSession: + '''Protocol handling for a single dynamic update request. + + This class is instantiated with a request message and some other + information that will be used for handling the request. Its main + method, handle(), will process the request, and normally build + a response message according to the result. The application of this + class can use the message to send a response to the client. + + ''' + def __init__(self, req_message, client_addr, zone_config): + '''Constructor. + + Parameters: + - req_message (isc.dns.Message) The request message. This must be + in the PARSE mode, its Opcode must be UPDATE, and must have been + TSIG validatd if it's TSIG signed. + - client_addr (socket address) The address/port of the update client + in the form of Python socket address object. This is mainly for + logging and access control. + - zone_config (ZoneConfig) A tentative container that encapsulates + the server's zone configuration. See zone_config.py. + - req_data (binary) Wire format data of the request message. + It will be used for TSIG verification if necessary. + + ''' + self.__message = req_message + self.__tsig = req_message.get_tsig_record() + self.__client_addr = client_addr + self.__zone_config = zone_config + self.__added_soa = None + + def get_message(self): + '''Return the update message. + + After handle() is called, it's generally transformed to the response + to be returned to the client. If the request has been dropped, + this method returns None. 
If this method is called before handle() + the return value would be identical to the request message passed on + construction, although it's of no practical use. + + ''' + return self.__message + + def handle(self): + '''Handle the update request according to RFC2136. + + This method returns a tuple of the following three elements that + indicate the result of the request. + - Result code of the request processing, which are: + UPDATE_SUCCESS Update request granted and succeeded. + UPDATE_ERROR Some error happened to be reported in the response. + UPDATE_DROP Error happened and no response should be sent. + Except the case of UPDATE_DROP, the UpdateSession object will have + created a response that is to be returned to the request client, + which can be retrieved by get_message(). If it's UPDATE_DROP, + subsequent call to get_message() returns None. + - The name of the updated zone (isc.dns.Name object) in case of + UPDATE_SUCCESS; otherwise None. + - The RR class of the updated zone (isc.dns.RRClass object) in case + of UPDATE_SUCCESS; otherwise None. + + ''' + try: + self._get_update_zone() + # Contrary to what RFC2136 specifies, we do ACL checks before + # prerequisites. It's now generally considered to be a bad + # idea, and actually does harm such as information + # leak. It should make more sense to prevent any security issues + # by performing ACL check as early as possible. 
+ self.__check_update_acl(self.__zname, self.__zclass) + self._create_diff() + prereq_result = self.__check_prerequisites() + if prereq_result != Rcode.NOERROR(): + self.__make_response(prereq_result) + return UPDATE_ERROR, self.__zname, self.__zclass + update_result = self.__do_update() + if update_result != Rcode.NOERROR(): + self.__make_response(update_result) + return UPDATE_ERROR, self.__zname, self.__zclass + self.__make_response(Rcode.NOERROR()) + return UPDATE_SUCCESS, self.__zname, self.__zclass + except UpdateError as e: + if not e.nolog: + logger.debug(logger.DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_ERROR, + ClientFormatter(self.__client_addr, self.__tsig), + ZoneFormatter(e.zname, e.zclass), e) + # If RCODE is specified, create a corresponding resonse and return + # ERROR; otherwise clear the message and return DROP. + if e.rcode is not None: + self.__make_response(e.rcode) + return UPDATE_ERROR, None, None + self.__message = None + return UPDATE_DROP, None, None + except isc.datasrc.Error as e: + logger.error(LIBDDNS_DATASRC_ERROR, + ClientFormatter(self.__client_addr, self.__tsig), e) + self.__make_response(Rcode.SERVFAIL()) + return UPDATE_ERROR, None, None + + def _get_update_zone(self): + '''Parse the zone section and find the zone to be updated. + + If the zone section is valid and the specified zone is found in + the configuration, sets private member variables for this session: + __datasrc_client: A matching data source that contains the specified + zone + __zname: The zone name as a Name object + __zclass: The zone class as an RRClass object + If this method raises an exception, these members are not set. + + Note: This method is protected for ease of use in tests, where + methods are tested that need the setup done here without calling + the full handle() method. + ''' + # Validation: the zone section must contain exactly one question, + # and it must be of type SOA. 
+ n_zones = self.__message.get_rr_count(SECTION_ZONE) + if n_zones != 1: + raise UpdateError('Invalid number of records in zone section: ' + + str(n_zones), None, None, Rcode.FORMERR()) + zrecord = self.__message.get_question()[0] + if zrecord.get_type() != RRType.SOA(): + raise UpdateError('update zone section contains non-SOA', + None, None, Rcode.FORMERR()) + + # See if we're serving a primary zone specified in the zone section. + zname = zrecord.get_name() + zclass = zrecord.get_class() + zone_type, datasrc_client = self.__zone_config.find_zone(zname, zclass) + if zone_type == isc.ddns.zone_config.ZONE_PRIMARY: + self.__datasrc_client = datasrc_client + self.__zname = zname + self.__zclass = zclass + return + elif zone_type == isc.ddns.zone_config.ZONE_SECONDARY: + # We are a secondary server; since we don't yet support update + # forwarding, we return 'not implemented'. + logger.debug(DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_FORWARD_FAIL, + ClientFormatter(self.__client_addr, self.__tsig), + ZoneFormatter(zname, zclass)) + raise UpdateError('forward', zname, zclass, Rcode.NOTIMP(), True) + # zone wasn't found + logger.debug(DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_NOTAUTH, + ClientFormatter(self.__client_addr, self.__tsig), + ZoneFormatter(zname, zclass)) + raise UpdateError('notauth', zname, zclass, Rcode.NOTAUTH(), True) + + def _create_diff(self): + ''' + Initializes the internal data structure used for searching current + data and for adding and deleting data. This is supposed to be called + after ACL checks but before prerequisite checks (since the latter + needs the find calls provided by the Diff class). + Adds the private member: + __diff: A buffer of changes made against the zone by this update + This object also contains find() calls, see documentation + of the Diff class. + + Note: This method is protected for ease of use in tests, where + methods are tested that need the setup done here without calling + the full handle() method. 
+ ''' + self.__diff = isc.xfrin.diff.Diff(self.__datasrc_client, + self.__zname, + journaling=True, + single_update_mode=True) + + def __check_update_acl(self, zname, zclass): + '''Apply update ACL for the zone to be updated.''' + acl = self.__zone_config.get_update_acl(zname, zclass) + action = acl.execute(isc.acl.dns.RequestContext( + (self.__client_addr[0], self.__client_addr[1]), self.__tsig)) + if action == REJECT: + logger.info(LIBDDNS_UPDATE_DENIED, + ClientFormatter(self.__client_addr, self.__tsig), + ZoneFormatter(zname, zclass)) + raise UpdateError('rejected', zname, zclass, Rcode.REFUSED(), True) + if action == DROP: + logger.info(LIBDDNS_UPDATE_DROPPED, + ClientFormatter(self.__client_addr, self.__tsig), + ZoneFormatter(zname, zclass)) + raise UpdateError('dropped', zname, zclass, None, True) + logger.debug(logger.DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_APPROVED, + ClientFormatter(self.__client_addr, self.__tsig), + ZoneFormatter(zname, zclass)) + + def __make_response(self, rcode): + '''Transform the internal message to the update response. + + According RFC2136 Section 3.8, the zone section will be cleared + as well as other sections. The response Rcode will be set to the + given value. + + ''' + self.__message.make_response() + self.__message.clear_section(SECTION_ZONE) + self.__message.set_rcode(rcode) + + def __prereq_rrset_exists(self, rrset): + '''Check whether an rrset with the given name and type exists. Class, + TTL, and Rdata (if any) of the given RRset are ignored. + RFC2136 Section 2.4.1. + Returns True if the prerequisite is satisfied, False otherwise. + + Note: the only thing used in the call to find() here is the + result status. The actual data is immediately dropped. As + a future optimization, we may want to add a find() option to + only return what the result code would be (and not read/copy + any actual data). 
+ ''' + result, _, _ = self.__diff.find(rrset.get_name(), rrset.get_type()) + return result == ZoneFinder.SUCCESS + + def __prereq_rrset_exists_value(self, rrset): + '''Check whether an rrset that matches name, type, and rdata(s) of the + given rrset exists. + RFC2136 Section 2.4.2 + Returns True if the prerequisite is satisfied, False otherwise. + ''' + result, found_rrset, _ = self.__diff.find(rrset.get_name(), + rrset.get_type()) + if result == ZoneFinder.SUCCESS and\ + rrset.get_name() == found_rrset.get_name() and\ + rrset.get_type() == found_rrset.get_type(): + # We need to match all actual RRs, unfortunately there is no + # direct order-independent comparison for rrsets, so this + # a slightly inefficient way to handle that. + + # shallow copy of the rdata list, so we are sure that this + # loop does not mess with actual data. + found_rdata = copy.copy(found_rrset.get_rdata()) + for rdata in rrset.get_rdata(): + if rdata in found_rdata: + found_rdata.remove(rdata) + else: + return False + return len(found_rdata) == 0 + return False + + def __prereq_rrset_does_not_exist(self, rrset): + '''Check whether no rrsets with the same name and type as the given + rrset exist. + RFC2136 Section 2.4.3. + Returns True if the prerequisite is satisfied, False otherwise. + ''' + return not self.__prereq_rrset_exists(rrset) + + def __prereq_name_in_use(self, rrset): + '''Check whether the name of the given RRset is in use (i.e. has + 1 or more RRs). + RFC2136 Section 2.4.4 + Returns True if the prerequisite is satisfied, False otherwise. + + Note: the only thing used in the call to find_all() here is + the result status. The actual data is immediately dropped. As + a future optimization, we may want to add a find_all() option + to only return what the result code would be (and not read/copy + any actual data). 
+ ''' + result, rrsets, flags = self.__diff.find_all(rrset.get_name()) + if result == ZoneFinder.SUCCESS and\ + (flags & ZoneFinder.RESULT_WILDCARD == 0): + return True + return False + + def __prereq_name_not_in_use(self, rrset): + '''Check whether the name of the given RRset is not in use (i.e. does + not exist at all, or is an empty nonterminal. + RFC2136 Section 2.4.5. + Returns True if the prerequisite is satisfied, False otherwise. + ''' + return not self.__prereq_name_in_use(rrset) + + def __check_in_zone(self, rrset): + '''Returns true if the name of the given rrset is equal to + or a subdomain of the zname from the Zone Section.''' + relation = rrset.get_name().compare(self.__zname).get_relation() + return relation == NameComparisonResult.SUBDOMAIN or\ + relation == NameComparisonResult.EQUAL + + def __check_prerequisites(self): + '''Check the prerequisites section of the UPDATE Message. + RFC2136 Section 2.4. + Returns a dns Rcode signaling either no error (Rcode.NOERROR()) + or that one of the prerequisites failed (any other Rcode). 
+ ''' + + # Temporary array to store exact-match RRsets + exact_match_rrsets = [] + + for rrset in self.__message.get_section(SECTION_PREREQUISITE): + # First check if the name is in the zone + if not self.__check_in_zone(rrset): + logger.info(LIBDDNS_PREREQ_NOTZONE, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset)) + return Rcode.NOTZONE() + + # Algorithm taken from RFC2136 Section 3.2 + if rrset.get_class() == RRClass.ANY(): + if rrset.get_ttl().get_value() != 0 or\ + rrset.get_rdata_count() != 0: + logger.info(LIBDDNS_PREREQ_FORMERR_ANY, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset)) + return Rcode.FORMERR() + elif rrset.get_type() == RRType.ANY(): + if not self.__prereq_name_in_use(rrset): + rcode = Rcode.NXDOMAIN() + logger.info(LIBDDNS_PREREQ_NAME_IN_USE_FAILED, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset), rcode) + return rcode + else: + if not self.__prereq_rrset_exists(rrset): + rcode = Rcode.NXRRSET() + logger.info(LIBDDNS_PREREQ_RRSET_EXISTS_FAILED, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset), rcode) + return rcode + elif rrset.get_class() == RRClass.NONE(): + if rrset.get_ttl().get_value() != 0 or\ + rrset.get_rdata_count() != 0: + logger.info(LIBDDNS_PREREQ_FORMERR_NONE, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset)) + return Rcode.FORMERR() + elif rrset.get_type() == RRType.ANY(): + if not self.__prereq_name_not_in_use(rrset): + rcode = Rcode.YXDOMAIN() + logger.info(LIBDDNS_PREREQ_NAME_NOT_IN_USE_FAILED, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset), rcode) + return rcode + else: + if not self.__prereq_rrset_does_not_exist(rrset): + rcode = Rcode.YXRRSET() + 
logger.info(LIBDDNS_PREREQ_RRSET_DOES_NOT_EXIST_FAILED, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset), rcode) + return rcode + elif rrset.get_class() == self.__zclass: + if rrset.get_ttl().get_value() != 0: + logger.info(LIBDDNS_PREREQ_FORMERR, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset)) + return Rcode.FORMERR() + else: + collect_rrsets(exact_match_rrsets, rrset) + else: + logger.info(LIBDDNS_PREREQ_FORMERR_CLASS, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset)) + return Rcode.FORMERR() + + for collected_rrset in exact_match_rrsets: + if not self.__prereq_rrset_exists_value(collected_rrset): + rcode = Rcode.NXRRSET() + logger.info(LIBDDNS_PREREQ_RRSET_EXISTS_VAL_FAILED, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(collected_rrset), rcode) + return rcode + + # All prerequisites are satisfied + return Rcode.NOERROR() + + def __set_soa_rrset(self, rrset): + '''Sets the given rrset to the member __added_soa (which + is used by __do_update for updating the SOA record''' + self.__added_soa = rrset + + def __do_prescan(self): + '''Perform the prescan as defined in RFC2136 section 3.4.1. + This method has a side-effect; it sets self._new_soa if + it encounters the addition of a SOA record in the update + list (so serial can be checked by update later, etc.). + It puts the added SOA in self.__added_soa. 
+ ''' + for rrset in self.__message.get_section(SECTION_UPDATE): + if not self.__check_in_zone(rrset): + logger.info(LIBDDNS_UPDATE_NOTZONE, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset)) + return Rcode.NOTZONE() + if rrset.get_class() == self.__zclass: + # In fact, all metatypes are in a specific range, + # so one check can test TKEY to ANY + # (some value check is needed anyway, since we do + # not have defined RRtypes for MAILA and MAILB) + if rrset.get_type().get_code() >= 249: + logger.info(LIBDDNS_UPDATE_ADD_BAD_TYPE, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset)) + return Rcode.FORMERR() + if rrset.get_type() == RRType.SOA(): + # In case there's multiple soa records in the update + # somehow, just take the last + for rr in foreach_rr(rrset): + self.__set_soa_rrset(rr) + elif rrset.get_class() == RRClass.ANY(): + if rrset.get_ttl().get_value() != 0: + logger.info(LIBDDNS_UPDATE_DELETE_NONZERO_TTL, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset)) + return Rcode.FORMERR() + if rrset.get_rdata_count() > 0: + logger.info(LIBDDNS_UPDATE_DELETE_RRSET_NOT_EMPTY, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset)) + return Rcode.FORMERR() + if rrset.get_type().get_code() >= 249 and\ + rrset.get_type().get_code() <= 254: + logger.info(LIBDDNS_UPDATE_DELETE_BAD_TYPE, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset)) + return Rcode.FORMERR() + elif rrset.get_class() == RRClass.NONE(): + if rrset.get_ttl().get_value() != 0: + logger.info(LIBDDNS_UPDATE_DELETE_RR_NONZERO_TTL, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset)) + return Rcode.FORMERR() + if rrset.get_type().get_code() >= 249: + 
logger.info(LIBDDNS_UPDATE_DELETE_RR_BAD_TYPE, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset)) + return Rcode.FORMERR() + else: + logger.info(LIBDDNS_UPDATE_BAD_CLASS, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + RRsetFormatter(rrset)) + return Rcode.FORMERR() + return Rcode.NOERROR() + + def __do_update_add_single_rr(self, rr, existing_rrset): + '''Helper for __do_update_add_rrs_to_rrset: only add the + rr if it is not present yet + (note that rr here should already be a single-rr rrset) + ''' + if existing_rrset is None: + self.__diff.add_data(rr) + else: + rr_rdata = rr.get_rdata()[0] + if not rr_rdata in existing_rrset.get_rdata(): + self.__diff.add_data(rr) + + def __do_update_add_rrs_to_rrset(self, rrset): + '''Add the rrs from the given rrset to the internal diff. + There is handling for a number of special cases mentioned + in RFC2136; + - If the addition is a CNAME, but existing data at its + name is not, the addition is ignored, and vice versa. + - If it is a CNAME, and existing data is too, it is + replaced (existing data is deleted) + An additional restriction is that SOA data is ignored as + well (it is handled separately by the __do_update method). + + Note that in the (near) future, this method may have + addition special-cases processing. 
+ ''' + # For a number of cases, we may need to remove data in the zone + # (note: SOA is handled separately by __do_update, so that one + # is explicitly ignored here) + if rrset.get_type() == RRType.SOA(): + return + result, orig_rrset, _ = self.__diff.find(rrset.get_name(), + rrset.get_type()) + if result == ZoneFinder.CNAME: + # Ignore non-cname rrs that try to update CNAME records + # (if rrset itself is a CNAME, the finder result would be + # SUCCESS, see next case) + return + elif result == ZoneFinder.SUCCESS: + # if update is cname, and zone rr is not, ignore + if rrset.get_type() == RRType.CNAME(): + # Remove original CNAME record (the new one + # is added below) + self.__diff.delete_data(orig_rrset) + # We do not have WKS support at this time, but if there + # are special Update equality rules such as for WKS, and + # we do have support for the type, this is where the check + # (and potential delete) would go. + elif result == ZoneFinder.NXRRSET: + # There is data present, but not for this type. + # If this type is CNAME, ignore the update + if rrset.get_type() == RRType.CNAME(): + return + for rr in foreach_rr(rrset): + self.__do_update_add_single_rr(rr, orig_rrset) + + def __do_update_delete_rrset(self, rrset): + '''Deletes the rrset with the name and type of the given + rrset from the zone data (by putting all existing data + in the internal diff as delete statements). 
+ Special cases: if the delete statement is for the + zone's apex, and the type is either SOA or NS, it + is ignored.''' + # find the rrset with local updates + result, to_delete, _ = self.__diff.find_updated(rrset.get_name(), + rrset.get_type()) + if result == ZoneFinder.SUCCESS: + if to_delete.get_name() == self.__zname and\ + (to_delete.get_type() == RRType.SOA() or\ + to_delete.get_type() == RRType.NS()): + # ignore + return + for rr in foreach_rr(to_delete): + self.__diff.delete_data(rr) + + def __ns_deleter_helper(self, rrset): + '''Special case helper for deleting NS resource records + at the zone apex. In that scenario, the last NS record + may never be removed (and any action that would do so + should be ignored). + ''' + # Find the current NS rrset, including local additions and deletions + result, orig_rrset, _ = self.__diff.find_updated(rrset.get_name(), + rrset.get_type()) + + # Even a real rrset comparison wouldn't help here... + # The goal is to make sure that after deletion of the + # given rrset, at least 1 NS record is left (at the apex). + # So we make a (shallow) copy of the existing rrset, + # and for each rdata in the to_delete set, we check if it wouldn't + # delete the last one. If it would, that specific one is ignored. + # If it would not, the rdata is removed from the temporary list + orig_rrset_rdata = copy.copy(orig_rrset.get_rdata()) + for rdata in rrset.get_rdata(): + if len(orig_rrset_rdata) == 1 and rdata == orig_rrset_rdata[0]: + # ignore + continue + else: + # create an individual RRset for deletion + to_delete = isc.dns.RRset(rrset.get_name(), + rrset.get_class(), + rrset.get_type(), + rrset.get_ttl()) + to_delete.add_rdata(rdata) + orig_rrset_rdata.remove(rdata) + self.__diff.delete_data(to_delete) + + def __do_update_delete_name(self, rrset): + '''Delete all data at the name of the given rrset, + by adding all data found by find_all as delete statements + to the internal diff. 
+ Special case: if the name is the zone's apex, SOA and + NS records are kept. + ''' + # Find everything with the name, including local additions + result, rrsets, flags = self.__diff.find_all_updated(rrset.get_name()) + if result == ZoneFinder.SUCCESS and\ + (flags & ZoneFinder.RESULT_WILDCARD == 0): + for to_delete in rrsets: + # if name == self.__zname and type is soa or ns, don't delete! + if to_delete.get_name() == self.__zname and\ + (to_delete.get_type() == RRType.SOA() or + to_delete.get_type() == RRType.NS()): + continue + else: + for rr in foreach_rr(to_delete): + self.__diff.delete_data(rr) + + def __do_update_delete_rrs_from_rrset(self, rrset): + '''Deletes all resource records in the given rrset from the + zone. Resource records that do not exist are ignored. + If the rrset is of type SOA, it is ignored. + Uses the __ns_deleter_helper if the rrset's name is the + zone's apex, and the type is NS. + ''' + # Delete all rrs in the rrset, except if name=self.__zname and type=soa, or + # type = ns and there is only one left (...) + + # The delete does not want class NONE, we would not have gotten here + # if it wasn't, but now is a good time to change it to the zclass. + to_delete = convert_rrset_class(rrset, self.__zclass) + + if rrset.get_name() == self.__zname: + if rrset.get_type() == RRType.SOA(): + # ignore + return + elif rrset.get_type() == RRType.NS(): + # hmm. okay. annoying. There must be at least one left, + # delegate to helper method + self.__ns_deleter_helper(to_delete) + return + for rr in foreach_rr(to_delete): + self.__diff.delete_data(rr) + + def __update_soa(self): + '''Checks the member value __added_soa, and depending on + whether it has been set and what its value is, creates + a new SOA if necessary. 
+ Then removes the original SOA and adds the new one, + by adding the needed operations to the internal diff.''' + # Get the existing SOA + # if a new soa was specified, add that one, otherwise, do the + # serial magic and add the newly created one + + # get it from DS and to increment and stuff + result, old_soa, _ = self.__diff.find(self.__zname, RRType.SOA(), + ZoneFinder.NO_WILDCARD | + ZoneFinder.FIND_GLUE_OK) + # We may implement recovering from missing SOA data at some point, but + # for now servfail on such a broken state + if result != ZoneFinder.SUCCESS: + raise UpdateError("Error finding SOA record in datasource.", + self.__zname, self.__zclass, Rcode.SERVFAIL()) + serial_operation = DDNS_SOA() + if self.__added_soa is not None and\ + serial_operation.soa_update_check(old_soa, self.__added_soa): + new_soa = self.__added_soa + else: + # increment goes here + new_soa = serial_operation.update_soa(old_soa) + + self.__diff.delete_data(old_soa) + self.__diff.add_data(new_soa) + + def __do_update(self): + '''Scan, check, and execute the Update section in the + DDNS Update message. + Returns an Rcode to signal the result (NOERROR upon success, + any error result otherwise). + ''' + # prescan + prescan_result = self.__do_prescan() + if prescan_result != Rcode.NOERROR(): + return prescan_result + + # update + try: + # Do special handling for SOA first + self.__update_soa() + + # Algorithm from RFC2136 Section 3.4 + # Note that this works on full rrsets, not individual RRs. + # Some checks might be easier with individual RRs, but only if we + # would use the ZoneUpdater directly (so we can query the + # 'zone-as-it-would-be-so-far'. 
However, due to the current use + # of the Diff class, this is not the case, and therefore it + # is easier to work with full rrsets for the most parts + # (less lookups needed; conversion to individual rrs is + # the same effort whether it is done here or in the several + # do_update statements) + for rrset in self.__message.get_section(SECTION_UPDATE): + if rrset.get_class() == self.__zclass: + self.__do_update_add_rrs_to_rrset(rrset) + elif rrset.get_class() == RRClass.ANY(): + if rrset.get_type() == RRType.ANY(): + self.__do_update_delete_name(rrset) + else: + self.__do_update_delete_rrset(rrset) + elif rrset.get_class() == RRClass.NONE(): + self.__do_update_delete_rrs_from_rrset(rrset) + + self.__diff.commit() + return Rcode.NOERROR() + except isc.datasrc.Error as dse: + logger.info(LIBDDNS_UPDATE_DATASRC_ERROR, dse) + return Rcode.SERVFAIL() + except Exception as uce: + logger.error(LIBDDNS_UPDATE_UNCAUGHT_EXCEPTION, + ClientFormatter(self.__client_addr), + ZoneFormatter(self.__zname, self.__zclass), + uce) + return Rcode.SERVFAIL() diff --git a/src/lib/python/isc/ddns/tests/Makefile.am b/src/lib/python/isc/ddns/tests/Makefile.am new file mode 100644 index 0000000000..4235a2bebf --- /dev/null +++ b/src/lib/python/isc/ddns/tests/Makefile.am @@ -0,0 +1,28 @@ +PYCOVERAGE_RUN = @PYCOVERAGE_RUN@ +PYTESTS = session_tests.py zone_config_tests.py +EXTRA_DIST = $(PYTESTS) +CLEANFILES = $(builddir)/rwtest.sqlite3.copied + +# If necessary (rare cases), explicitly specify paths to dynamic libraries +# required by loadable python modules. 
+if SET_ENV_LIBRARY_PATH +LIBRARY_PATH_PLACEHOLDER = $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH) +endif + +# test using command-line arguments, so use check-local target instead of TESTS +# B10_FROM_BUILD is necessary to load data source backend from the build tree. +check-local: +if ENABLE_PYTHON_COVERAGE + touch $(abs_top_srcdir)/.coverage + rm -f .coverage + ${LN_S} $(abs_top_srcdir)/.coverage .coverage +endif + for pytest in $(PYTESTS) ; do \ + echo Running test: $$pytest ; \ + $(LIBRARY_PATH_PLACEHOLDER) \ + TESTDATA_PATH=$(abs_top_srcdir)/src/lib/testutils/testdata \ + TESTDATA_WRITE_PATH=$(builddir) \ + B10_FROM_BUILD=$(abs_top_builddir) \ + PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \ + $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \ + done diff --git a/src/lib/python/isc/ddns/tests/session_tests.py b/src/lib/python/isc/ddns/tests/session_tests.py new file mode 100644 index 0000000000..f7c2d3cbde --- /dev/null +++ b/src/lib/python/isc/ddns/tests/session_tests.py @@ -0,0 +1,1544 @@ +# Copyright (C) 2012 Internet Systems Consortium. +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM +# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL +# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING +# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +import os +import shutil +import isc.log +import unittest +from isc.dns import * +from isc.datasrc import DataSourceClient, ZoneFinder +from isc.ddns.session import * +from isc.ddns.zone_config import * + +# Some common test parameters +TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep +READ_ZONE_DB_FILE = TESTDATA_PATH + "rwtest.sqlite3" # original, to be copied +TESTDATA_WRITE_PATH = os.environ['TESTDATA_WRITE_PATH'] + os.sep +WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "rwtest.sqlite3.copied" +WRITE_ZONE_DB_CONFIG = "{ \"database_file\": \"" + WRITE_ZONE_DB_FILE + "\"}" + +TEST_ZONE_NAME = Name('example.org') +UPDATE_RRTYPE = RRType.SOA() +TEST_RRCLASS = RRClass.IN() +TEST_ZONE_RECORD = Question(TEST_ZONE_NAME, TEST_RRCLASS, UPDATE_RRTYPE) +TEST_CLIENT6 = ('2001:db8::1', 53, 0, 0) +TEST_CLIENT4 = ('192.0.2.1', 53) +# TSIG key for tests when needed. The key name is TEST_ZONE_NAME. 
+TEST_TSIG_KEY = TSIGKey("example.org:SFuWd/q99SzF8Yzd1QbB9g==") + +def create_update_msg(zones=[TEST_ZONE_RECORD], prerequisites=[], + updates=[], tsig_key=None): + msg = Message(Message.RENDER) + msg.set_qid(5353) # arbitrary chosen + msg.set_opcode(Opcode.UPDATE()) + msg.set_rcode(Rcode.NOERROR()) + for z in zones: + msg.add_question(z) + for p in prerequisites: + msg.add_rrset(SECTION_PREREQUISITE, p) + for u in updates: + msg.add_rrset(SECTION_UPDATE, u) + + renderer = MessageRenderer() + if tsig_key is not None: + msg.to_wire(renderer, TSIGContext(tsig_key)) + else: + msg.to_wire(renderer) + + # re-read the created data in the parse mode + msg.clear(Message.PARSE) + msg.from_wire(renderer.get_data(), Message.PRESERVE_ORDER) + + return msg + +def add_rdata(rrset, rdata): + ''' + Helper function for easily adding Rdata fields to RRsets. + This function assumes the given rdata is of type string or bytes, + and corresponds to the given rrset + ''' + rrset.add_rdata(isc.dns.Rdata(rrset.get_type(), + rrset.get_class(), + rdata)) + +def create_rrset(name, rrclass, rrtype, ttl, rdatas = []): + ''' + Helper method to easily create RRsets, auto-converts + name, rrclass, rrtype, and ttl (if possibly through their + respective constructors) + rdatas is a list of rr data strings, or bytestrings, which + should match the RRType of the rrset to create + ''' + if type(name) != Name: + name = Name(name) + if type(rrclass) != RRClass: + rrclass = RRClass(rrclass) + if type(rrtype) != RRType: + rrtype = RRType(rrtype) + if type(ttl) != RRTTL: + ttl = RRTTL(ttl) + rrset = isc.dns.RRset(name, rrclass, rrtype, ttl) + for rdata in rdatas: + add_rdata(rrset, rdata) + return rrset + +class SessionModuleTests(unittest.TestCase): + '''Tests for module-level functions in the session.py module''' + + def test_foreach_rr_in_rrset(self): + rrset = create_rrset("www.example.org", TEST_RRCLASS, + RRType.A(), 3600, [ "192.0.2.1" ]) + + l = [] + for rr in foreach_rr(rrset): + l.append(str(rr)) 
+ self.assertEqual(["www.example.org. 3600 IN A 192.0.2.1\n"], l) + + add_rdata(rrset, "192.0.2.2") + add_rdata(rrset, "192.0.2.3") + + # but through the generator, there should be several 1-line entries + l = [] + for rr in foreach_rr(rrset): + l.append(str(rr)) + self.assertEqual(["www.example.org. 3600 IN A 192.0.2.1\n", + "www.example.org. 3600 IN A 192.0.2.2\n", + "www.example.org. 3600 IN A 192.0.2.3\n", + ], l) + + def test_convert_rrset_class(self): + # Converting an RRSET to a different class should work + # if the rdata types can be converted + rrset = create_rrset("www.example.org", RRClass.NONE(), RRType.A(), + 3600, [ b'\xc0\x00\x02\x01', b'\xc0\x00\x02\x02']) + + rrset2 = convert_rrset_class(rrset, RRClass.IN()) + self.assertEqual("www.example.org. 3600 IN A 192.0.2.1\n" + + "www.example.org. 3600 IN A 192.0.2.2\n", + str(rrset2)) + + rrset3 = convert_rrset_class(rrset2, RRClass.NONE()) + self.assertEqual("www.example.org. 3600 CLASS254 A \\# 4 " + + "c0000201\nwww.example.org. 
3600 CLASS254 " + + "A \\# 4 c0000202\n", + str(rrset3)) + + # depending on what type of bad data is given, a number + # of different exceptions could be raised (TODO: i recall + # there was a ticket about making a better hierarchy for + # dns/parsing related exceptions) + self.assertRaises(InvalidRdataLength, convert_rrset_class, + rrset, RRClass.CH()) + add_rdata(rrset, b'\xc0\x00') + self.assertRaises(DNSMessageFORMERR, convert_rrset_class, + rrset, RRClass.IN()) + + def test_collect_rrsets(self): + ''' + Tests the 'rrset collector' method, which collects rrsets + with the same name and type + ''' + collected = [] + + collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(), + RRType.A(), 0, [ "192.0.2.1" ])) + # Same name and class, different type + collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(), + RRType.TXT(), 0, [ "one" ])) + collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(), + RRType.A(), 0, [ "192.0.2.2" ])) + collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(), + RRType.TXT(), 0, [ "two" ])) + # Same class and type as an existing one, different name + collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(), + RRType.A(), 0, [ "192.0.2.3" ])) + # Same name and type as an existing one, different class + collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(), + RRType.TXT(), 0, [ "one" ])) + collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(), + RRType.A(), 0, [ "192.0.2.4" ])) + collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(), + RRType.TXT(), 0, [ "two" ])) + + strings = [ rrset.to_text() for rrset in collected ] + # note + vs , in this list + expected = ['a.example.org. 0 IN A 192.0.2.1\n' + + 'a.example.org. 0 IN A 192.0.2.2\n', + 'a.example.org. 0 IN TXT "one"\n' + + 'a.example.org. 0 IN TXT "two"\n', + 'b.example.org. 0 IN A 192.0.2.3\n' + + 'b.example.org. 0 IN A 192.0.2.4\n', + 'a.example.org. 
0 CH TXT "one"\n' + + 'a.example.org. 0 CH TXT "two"\n'] + + self.assertEqual(expected, strings) + +class SessionTestBase(unittest.TestCase): + '''Base class for all session related tests. + + It just initializes common test parameters in its setUp() and defines + some common utility method(s). + + ''' + def setUp(self): + shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE) + self._datasrc_client = DataSourceClient("sqlite3", + WRITE_ZONE_DB_CONFIG) + self._update_msg = create_update_msg() + self._acl_map = {(TEST_ZONE_NAME, TEST_RRCLASS): + REQUEST_LOADER.load([{"action": "ACCEPT"}])} + self._session = UpdateSession(self._update_msg, TEST_CLIENT4, + ZoneConfig(set(), TEST_RRCLASS, + self._datasrc_client, + self._acl_map)) + self._session._get_update_zone() + self._session._create_diff() + + def tearDown(self): + # With the Updater created in _get_update_zone, and tests + # doing all kinds of crazy stuff, one might get database locked + # errors if it doesn't clean up explicitly after each test + self._session = None + + def check_response(self, msg, expected_rcode): + '''Perform common checks on update response message.''' + self.assertTrue(msg.get_header_flag(Message.HEADERFLAG_QR)) + # note: we convert opcode to text so it'd be more helpful on failure. + self.assertEqual(Opcode.UPDATE().to_text(), msg.get_opcode().to_text()) + self.assertEqual(expected_rcode.to_text(), msg.get_rcode().to_text()) + # All sections should be cleared + self.assertEqual(0, msg.get_rr_count(SECTION_ZONE)) + self.assertEqual(0, msg.get_rr_count(SECTION_PREREQUISITE)) + self.assertEqual(0, msg.get_rr_count(SECTION_UPDATE)) + self.assertEqual(0, msg.get_rr_count(Message.SECTION_ADDITIONAL)) + +class TestDDNSSOA(unittest.TestCase): + '''unittest for the DDNS_SOA''' + def test_update_soa(self): + '''unittest for update_soa function''' + soa_update = DDNS_SOA() + soa_rr = create_rrset("example.org", TEST_RRCLASS, + RRType.SOA(), 3600, ["ns1.example.org. " + + "admin.example.org. 
" + + "1233 3600 1800 2419200 7200"]) + expected_soa_rr = create_rrset("example.org", TEST_RRCLASS, + RRType.SOA(), 3600, ["ns1.example.org. " + + "admin.example.org. " + + "1234 3600 1800 2419200 7200"]) + self.assertEqual(soa_update.update_soa(soa_rr).get_rdata()[0].to_text(), + expected_soa_rr.get_rdata()[0].to_text()) + max_serial = 2 ** 32 - 1 + soa_rdata = "%d %s"%(max_serial,"3600 1800 2419200 7200") + soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(), 3600, + ["ns1.example.org. " + "admin.example.org. " + + soa_rdata]) + expected_soa_rr = create_rrset("example.org", TEST_RRCLASS, + RRType.SOA(), 3600, ["ns1.example.org. " + + "admin.example.org. " + + "1 3600 1800 2419200 7200"]) + self.assertEqual(soa_update.update_soa(soa_rr).get_rdata()[0].to_text(), + expected_soa_rr.get_rdata()[0].to_text()) + + def test_soa_update_check(self): + '''unittest for soa_update_check function''' + small_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(), + 3600, ["ns1.example.org. " + + "admin.example.org. " + + "1233 3600 1800 2419200 7200"]) + large_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(), + 3600, ["ns1.example.org. " + + "admin.example.org. " + + "1234 3600 1800 2419200 7200"]) + soa_update = DDNS_SOA() + # The case of (i1 < i2 and i2 - i1 < 2^(SERIAL_BITS - 1)) in rfc 1982 + self.assertTrue(soa_update.soa_update_check(small_soa_rr, + large_soa_rr)) + self.assertFalse(soa_update.soa_update_check(large_soa_rr, + small_soa_rr)) + small_serial = 1235 + 2 ** 31 + soa_rdata = "%d %s"%(small_serial,"3600 1800 2419200 7200") + small_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(), + 3600, ["ns1.example.org. " + + "admin.example.org. " + + soa_rdata]) + large_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(), + 3600, ["ns1.example.org. " + + "admin.example.org. 
" + + "1234 3600 1800 2419200 7200"]) + # The case of (i1 > i2 and i1 - i2 > 2^(SERIAL_BITS - 1)) in rfc 1982 + self.assertTrue(soa_update.soa_update_check(small_soa_rr, + large_soa_rr)) + self.assertFalse(soa_update.soa_update_check(large_soa_rr, + small_soa_rr)) + +class SessionTest(SessionTestBase): + '''Basic session tests''' + + def test_handle(self): + '''Basic update case''' + result, zname, zclass = self._session.handle() + self.assertEqual(UPDATE_SUCCESS, result) + self.assertEqual(TEST_ZONE_NAME, zname) + self.assertEqual(TEST_RRCLASS, zclass) + + # Just checking these are different from the success code. + self.assertNotEqual(UPDATE_ERROR, result) + self.assertNotEqual(UPDATE_DROP, result) + + def test_broken_request(self): + # Zone section is empty + msg = create_update_msg(zones=[]) + session = UpdateSession(msg, TEST_CLIENT6, None) + result, zname, zclass = session.handle() + self.assertEqual(UPDATE_ERROR, result) + self.assertEqual(None, zname) + self.assertEqual(None, zclass) + self.check_response(session.get_message(), Rcode.FORMERR()) + + # Zone section contains multiple records + msg = create_update_msg(zones=[TEST_ZONE_RECORD, TEST_ZONE_RECORD]) + session = UpdateSession(msg, TEST_CLIENT4, None) + self.assertEqual(UPDATE_ERROR, session.handle()[0]) + self.check_response(session.get_message(), Rcode.FORMERR()) + + # Zone section's type is not SOA + msg = create_update_msg(zones=[Question(TEST_ZONE_NAME, TEST_RRCLASS, + RRType.A())]) + session = UpdateSession(msg, TEST_CLIENT4, None) + self.assertEqual(UPDATE_ERROR, session.handle()[0]) + self.check_response(session.get_message(), Rcode.FORMERR()) + + def test_update_secondary(self): + # specified zone is configured as a secondary. Since this + # implementation doesn't support update forwarding, the result + # should be NOTIMP. 
+ msg = create_update_msg(zones=[Question(TEST_ZONE_NAME, TEST_RRCLASS, + RRType.SOA())]) + session = UpdateSession(msg, TEST_CLIENT4, + ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)}, + TEST_RRCLASS, self._datasrc_client)) + self.assertEqual(UPDATE_ERROR, session.handle()[0]) + self.check_response(session.get_message(), Rcode.NOTIMP()) + + def check_notauth(self, zname, zclass=TEST_RRCLASS): + '''Common test sequence for the 'notauth' test''' + msg = create_update_msg(zones=[Question(zname, zclass, RRType.SOA())]) + session = UpdateSession(msg, TEST_CLIENT4, + ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)}, + TEST_RRCLASS, self._datasrc_client)) + self.assertEqual(UPDATE_ERROR, session.handle()[0]) + self.check_response(session.get_message(), Rcode.NOTAUTH()) + + def test_update_notauth(self): + '''Update attempt for non authoritative zones''' + # zone name doesn't match + self.check_notauth(Name('example.com')) + # zone name is a subdomain of the actual authoritative zone + # (match must be exact) + self.check_notauth(Name('sub.example.org')) + # zone class doesn't match + self.check_notauth(Name('example.org'), RRClass.CH()) + + def test_update_datasrc_error(self): + # if the data source client raises an exception, it should result in + # a SERVFAIL. + class BadDataSourceClient: + def find_zone(self, name): + raise isc.datasrc.Error('faked exception') + msg = create_update_msg(zones=[Question(TEST_ZONE_NAME, TEST_RRCLASS, + RRType.SOA())]) + session = UpdateSession(msg, TEST_CLIENT4, + ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)}, + TEST_RRCLASS, + BadDataSourceClient())) + self.assertEqual(UPDATE_ERROR, session.handle()[0]) + self.check_response(session.get_message(), Rcode.SERVFAIL()) + + def test_foreach_rr_in_rrset(self): + rrset = create_rrset("www.example.org", TEST_RRCLASS, + RRType.A(), 3600, [ "192.0.2.1" ]) + + l = [] + for rr in foreach_rr(rrset): + l.append(str(rr)) + self.assertEqual(["www.example.org. 
3600 IN A 192.0.2.1\n"], l) + + add_rdata(rrset, "192.0.2.2") + add_rdata(rrset, "192.0.2.3") + + # but through the generator, there should be several 1-line entries + l = [] + for rr in foreach_rr(rrset): + l.append(str(rr)) + self.assertEqual(["www.example.org. 3600 IN A 192.0.2.1\n", + "www.example.org. 3600 IN A 192.0.2.2\n", + "www.example.org. 3600 IN A 192.0.2.3\n", + ], l) + + def test_convert_rrset_class(self): + # Converting an RRSET to a different class should work + # if the rdata types can be converted + rrset = create_rrset("www.example.org", RRClass.NONE(), RRType.A(), + 3600, [ b'\xc0\x00\x02\x01', b'\xc0\x00\x02\x02']) + + rrset2 = convert_rrset_class(rrset, RRClass.IN()) + self.assertEqual("www.example.org. 3600 IN A 192.0.2.1\n" + + "www.example.org. 3600 IN A 192.0.2.2\n", + str(rrset2)) + + rrset3 = convert_rrset_class(rrset2, RRClass.NONE()) + self.assertEqual("www.example.org. 3600 CLASS254 A \\# 4 " + + "c0000201\nwww.example.org. 3600 CLASS254 " + + "A \\# 4 c0000202\n", + str(rrset3)) + + # depending on what type of bad data is given, a number + # of different exceptions could be raised (TODO: i recall + # there was a ticket about making a better hierarchy for + # dns/parsing related exceptions) + self.assertRaises(InvalidRdataLength, convert_rrset_class, + rrset, RRClass.CH()) + add_rdata(rrset, b'\xc0\x00') + self.assertRaises(DNSMessageFORMERR, convert_rrset_class, + rrset, RRClass.IN()) + + def test_collect_rrsets(self): + ''' + Tests the 'rrset collector' method, which collects rrsets + with the same name and type + ''' + collected = [] + + collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(), + RRType.A(), 0, [ "192.0.2.1" ])) + # Same name and class, different type + collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(), + RRType.TXT(), 0, [ "one" ])) + collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(), + RRType.A(), 0, [ "192.0.2.2" ])) + collect_rrsets(collected, 
create_rrset("a.example.org", RRClass.IN(), + RRType.TXT(), 0, [ "two" ])) + # Same class and type as an existing one, different name + collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(), + RRType.A(), 0, [ "192.0.2.3" ])) + # Same name and type as an existing one, different class + collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(), + RRType.TXT(), 0, [ "one" ])) + collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(), + RRType.A(), 0, [ "192.0.2.4" ])) + collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(), + RRType.TXT(), 0, [ "two" ])) + + strings = [ rrset.to_text() for rrset in collected ] + # note + vs , in this list + expected = ['a.example.org. 0 IN A 192.0.2.1\n' + + 'a.example.org. 0 IN A 192.0.2.2\n', + 'a.example.org. 0 IN TXT "one"\n' + + 'a.example.org. 0 IN TXT "two"\n', + 'b.example.org. 0 IN A 192.0.2.3\n' + + 'b.example.org. 0 IN A 192.0.2.4\n', + 'a.example.org. 0 CH TXT "one"\n' + + 'a.example.org. 0 CH TXT "two"\n'] + + self.assertEqual(expected, strings) + + def __prereq_helper(self, method, expected, rrset): + '''Calls the given method with self._datasrc_client + and the given rrset, and compares the return value. + Function does not do much but makes the code look nicer''' + self.assertEqual(expected, method(rrset)) + + def __check_prerequisite_exists_combined(self, method, rrclass, expected): + '''shared code for the checks for the very similar (but reversed + in behaviour) methods __prereq_rrset_exists and + __prereq_rrset_does_not_exist. + For rrset_exists, rrclass should be ANY, for rrset_does_not_exist, + it should be NONE. 
+ ''' + # Basic existence checks + # www.example.org should have an A, but not an MX + rrset = create_rrset("www.example.org", rrclass, RRType.A(), 0) + self.__prereq_helper(method, expected, rrset) + rrset = create_rrset("www.example.org", rrclass, RRType.MX(), 0) + self.__prereq_helper(method, not expected, rrset) + + # example.org should have an MX, but not an A + rrset = create_rrset("example.org", rrclass, RRType.MX(), 0) + self.__prereq_helper(method, expected, rrset) + rrset = create_rrset("example.org", rrclass, RRType.A(), 0) + self.__prereq_helper(method, not expected, rrset) + + # Also check the case where the name does not even exist + rrset = create_rrset("doesnotexist.example.org", rrclass, RRType.A(), 0) + self.__prereq_helper(method, not expected, rrset) + + # Wildcard expansion should not be applied, but literal matches + # should work + rrset = create_rrset("foo.wildcard.example.org", rrclass, RRType.A(), 0) + self.__prereq_helper(method, not expected, rrset) + + rrset = create_rrset("*.wildcard.example.org", rrclass, RRType.A(), 0) + self.__prereq_helper(method, expected, rrset) + + # Likewise, CNAME directly should match, but what it points to should + # not + rrset = create_rrset("cname.example.org", rrclass, RRType.A(), 0) + self.__prereq_helper(method, not expected, rrset) + + rrset = create_rrset("cname.example.org", rrclass, RRType.CNAME(), 0) + self.__prereq_helper(method, expected, rrset) + + # And also make sure a delegation (itself) is not treated as existing + # data + rrset = create_rrset("foo.sub.example.org", rrclass, RRType.A(), 0) + self.__prereq_helper(method, not expected, rrset) + # But the delegation data itself should match + rrset = create_rrset("sub.example.org", rrclass, RRType.NS(), 0) + self.__prereq_helper(method, expected, rrset) + # As should glue + rrset = create_rrset("ns.sub.example.org", rrclass, RRType.A(), 0) + self.__prereq_helper(method, expected, rrset) + + def test_check_prerequisite_exists(self): + method = 
self._session._UpdateSession__prereq_rrset_exists + self.__check_prerequisite_exists_combined(method, + RRClass.ANY(), + True) + + def test_check_prerequisite_does_not_exist(self): + method = self._session._UpdateSession__prereq_rrset_does_not_exist + self.__check_prerequisite_exists_combined(method, + RRClass.NONE(), + False) + + def test_check_prerequisite_exists_value(self): + method = self._session._UpdateSession__prereq_rrset_exists_value + + rrset = create_rrset("www.example.org", RRClass.IN(), RRType.A(), 0) + # empty one should not match + self.__prereq_helper(method, False, rrset) + + # When the rdata is added, it should match + add_rdata(rrset, "192.0.2.1") + self.__prereq_helper(method, True, rrset) + + # But adding more should not + add_rdata(rrset, "192.0.2.2") + self.__prereq_helper(method, False, rrset) + + # Also test one with more than one RR + rrset = create_rrset("example.org", RRClass.IN(), RRType.NS(), 0) + self.__prereq_helper(method, False, rrset) + add_rdata(rrset, "ns1.example.org.") + self.__prereq_helper(method, False, rrset) + add_rdata(rrset, "ns2.example.org") + self.__prereq_helper(method, False, rrset) + add_rdata(rrset, "ns3.example.org.") + self.__prereq_helper(method, True, rrset) + add_rdata(rrset, "ns4.example.org.") + self.__prereq_helper(method, False, rrset) + + # Repeat that, but try a different order of Rdata addition + rrset = create_rrset("example.org", RRClass.IN(), RRType.NS(), 0) + self.__prereq_helper(method, False, rrset) + add_rdata(rrset, "ns3.example.org.") + self.__prereq_helper(method, False, rrset) + add_rdata(rrset, "ns2.example.org.") + self.__prereq_helper(method, False, rrset) + add_rdata(rrset, "ns1.example.org.") + self.__prereq_helper(method, True, rrset) + add_rdata(rrset, "ns4.example.org.") + self.__prereq_helper(method, False, rrset) + + # and test one where the name does not even exist + rrset = create_rrset("doesnotexist.example.org", RRClass.IN(), + RRType.A(), 0, [ "192.0.2.1" ]) + 
self.__prereq_helper(method, False, rrset) + + def __check_prerequisite_name_in_use_combined(self, method, rrclass, + expected): + '''shared code for the checks for the very similar (but reversed + in behaviour) methods __prereq_name_in_use and + __prereq_name_not_in_use + ''' + rrset = create_rrset("example.org", rrclass, RRType.ANY(), 0) + self.__prereq_helper(method, expected, rrset) + + rrset = create_rrset("www.example.org", rrclass, RRType.ANY(), 0) + self.__prereq_helper(method, expected, rrset) + + rrset = create_rrset("doesnotexist.example.org", rrclass, + RRType.ANY(), 0) + self.__prereq_helper(method, not expected, rrset) + + rrset = create_rrset("belowdelegation.sub.example.org", rrclass, + RRType.ANY(), 0) + self.__prereq_helper(method, not expected, rrset) + + rrset = create_rrset("foo.wildcard.example.org", rrclass, + RRType.ANY(), 0) + self.__prereq_helper(method, not expected, rrset) + + # empty nonterminal should not match + rrset = create_rrset("nonterminal.example.org", rrclass, + RRType.ANY(), 0) + self.__prereq_helper(method, not expected, rrset) + rrset = create_rrset("empty.nonterminal.example.org", rrclass, + RRType.ANY(), 0) + self.__prereq_helper(method, expected, rrset) + + def test_check_prerequisite_name_in_use(self): + method = self._session._UpdateSession__prereq_name_in_use + self.__check_prerequisite_name_in_use_combined(method, + RRClass.ANY(), + True) + + def test_check_prerequisite_name_not_in_use(self): + method = self._session._UpdateSession__prereq_name_not_in_use + self.__check_prerequisite_name_in_use_combined(method, + RRClass.NONE(), + False) + + def check_prerequisite_result(self, expected, prerequisites): + '''Helper method for checking the result of a prerequisite check; + creates an update session, and fills it with the list of rrsets + from 'prerequisites'. 
Then checks if __check_prerequisites() + returns the Rcode specified in 'expected'.''' + msg = create_update_msg([TEST_ZONE_RECORD], prerequisites) + zconfig = ZoneConfig(set(), TEST_RRCLASS, self._datasrc_client, + self._acl_map) + session = UpdateSession(msg, TEST_CLIENT4, zconfig) + session._get_update_zone() + session._create_diff() + # compare the to_text output of the rcodes (nicer error messages) + # This call itself should also be done by handle(), + # but just for better failures, it is first called on its own + self.assertEqual(expected.to_text(), + session._UpdateSession__check_prerequisites().to_text()) + # Now see if handle finds the same result + (result, _, _) = session.handle() + self.assertEqual(expected.to_text(), + session._UpdateSession__message.get_rcode().to_text()) + # And that the result looks right + if expected == Rcode.NOERROR(): + self.assertEqual(UPDATE_SUCCESS, result) + else: + self.assertEqual(UPDATE_ERROR, result) + + def check_prescan_result(self, expected, updates, expected_soa = None): + '''Helper method for checking the result of a prerequisite check; + creates an update session, and fills it with the list of rrsets + from 'updates'. 
Then checks if __do_prescan() + returns the Rcode specified in 'expected'.''' + msg = create_update_msg([TEST_ZONE_RECORD], [], updates) + zconfig = ZoneConfig(set(), TEST_RRCLASS, self._datasrc_client, + self._acl_map) + session = UpdateSession(msg, TEST_CLIENT4, zconfig) + session._get_update_zone() + session._create_diff() + # compare the to_text output of the rcodes (nicer error messages) + # This call itself should also be done by handle(), + # but just for better failures, it is first called on its own + self.assertEqual(expected.to_text(), + session._UpdateSession__do_prescan().to_text()) + # If there is an expected soa, check it + self.assertEqual(str(expected_soa), + str(session._UpdateSession__added_soa)) + + def check_full_handle_result(self, expected, updates, prerequisites=[]): + '''Helper method for checking the result of a full handle; + creates an update session, and fills it with the list of rrsets + from 'updates'. Then checks if __handle() + results in a response with rcode 'expected'.''' + msg = create_update_msg([TEST_ZONE_RECORD], prerequisites, updates) + zconfig = ZoneConfig(set(), TEST_RRCLASS, self._datasrc_client, + self._acl_map) + session = UpdateSession(msg, TEST_CLIENT4, zconfig) + + # Now see if handle finds the same result + (result, _, _) = session.handle() + self.assertEqual(expected.to_text(), + session._UpdateSession__message.get_rcode().to_text()) + # And that the result looks right + if expected == Rcode.NOERROR(): + self.assertEqual(UPDATE_SUCCESS, result) + else: + self.assertEqual(UPDATE_ERROR, result) + + def test_check_prerequisites(self): + # This test checks if the actual prerequisite-type-specific + # methods are called. 
+ # It does test all types of prerequisites, but it does not test + # every possible result for those types (those are tested above, + # in the specific prerequisite type tests) + + # Let's first define a number of prereq's that should succeed + rrset_exists_yes = create_rrset("example.org", RRClass.ANY(), + RRType.SOA(), 0) + + rrset_exists_value_yes = create_rrset("www.example.org", RRClass.IN(), + RRType.A(), 0, [ "192.0.2.1" ]) + + rrset_does_not_exist_yes = create_rrset("foo.example.org", + RRClass.NONE(), RRType.SOA(), + 0) + + name_in_use_yes = create_rrset("www.example.org", RRClass.ANY(), + RRType.ANY(), 0) + + name_not_in_use_yes = create_rrset("foo.example.org", RRClass.NONE(), + RRType.ANY(), 0) + + rrset_exists_value_1 = create_rrset("example.org", RRClass.IN(), + RRType.NS(), 0, + [ "ns1.example.org" ]) + rrset_exists_value_2 = create_rrset("example.org", RRClass.IN(), + RRType.NS(), 0, + [ "ns2.example.org" ]) + rrset_exists_value_3 = create_rrset("example.org", RRClass.IN(), + RRType.NS(), 0, + [ "ns3.example.org" ]) + + # and a number that should not + rrset_exists_no = create_rrset("foo.example.org", RRClass.ANY(), + RRType.SOA(), 0) + + rrset_exists_value_no = create_rrset("www.example.org", RRClass.IN(), + RRType.A(), 0, [ "192.0.2.2" ]) + + rrset_does_not_exist_no = create_rrset("example.org", RRClass.NONE(), + RRType.SOA(), 0) + + name_in_use_no = create_rrset("foo.example.org", RRClass.ANY(), + RRType.ANY(), 0) + + name_not_in_use_no = create_rrset("www.example.org", RRClass.NONE(), + RRType.ANY(), 0) + # check 'no' result codes + self.check_prerequisite_result(Rcode.NXRRSET(), + [ rrset_exists_no ]) + self.check_prerequisite_result(Rcode.NXRRSET(), + [ rrset_exists_value_no ]) + self.check_prerequisite_result(Rcode.YXRRSET(), + [ rrset_does_not_exist_no ]) + self.check_prerequisite_result(Rcode.NXDOMAIN(), + [ name_in_use_no ]) + self.check_prerequisite_result(Rcode.YXDOMAIN(), + [ name_not_in_use_no ]) + + # the 'yes' codes should result in 
ok + # individually + self.check_prerequisite_result(Rcode.NOERROR(), + [ rrset_exists_yes ] ) + self.check_prerequisite_result(Rcode.NOERROR(), + [ rrset_exists_value_yes ]) + self.check_prerequisite_result(Rcode.NOERROR(), + [ rrset_does_not_exist_yes ]) + self.check_prerequisite_result(Rcode.NOERROR(), + [ name_in_use_yes ]) + self.check_prerequisite_result(Rcode.NOERROR(), + [ name_not_in_use_yes ]) + self.check_prerequisite_result(Rcode.NOERROR(), + [ rrset_exists_value_1, + rrset_exists_value_2, + rrset_exists_value_3]) + + # and together + self.check_prerequisite_result(Rcode.NOERROR(), + [ rrset_exists_yes, + rrset_exists_value_yes, + rrset_does_not_exist_yes, + name_in_use_yes, + name_not_in_use_yes, + rrset_exists_value_1, + rrset_exists_value_2, + rrset_exists_value_3]) + + # try out a permutation, note that one rrset is split up, + # and the order of the RRs should not matter + self.check_prerequisite_result(Rcode.NOERROR(), + [ rrset_exists_value_3, + rrset_exists_yes, + rrset_exists_value_2, + name_in_use_yes, + rrset_exists_value_1]) + + # Should fail on the first error, even if most of the + # prerequisites are ok + self.check_prerequisite_result(Rcode.NXDOMAIN(), + [ rrset_exists_value_3, + rrset_exists_yes, + rrset_exists_value_2, + name_in_use_yes, + name_in_use_no, + rrset_exists_value_1]) + + def test_prerequisite_notzone(self): + rrset = create_rrset("some.other.zone.", RRClass.ANY(), RRType.SOA(), 0) + self.check_prerequisite_result(Rcode.NOTZONE(), [ rrset ]) + + def test_prerequisites_formerr(self): + # test for form errors in the prerequisite section + + # Class ANY, non-zero TTL + rrset = create_rrset("example.org", RRClass.ANY(), RRType.SOA(), 1) + self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ]) + + # Class ANY, but with rdata + rrset = create_rrset("example.org", RRClass.ANY(), RRType.A(), 0, + [ b'\x00\x00\x00\x00' ]) + self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ]) + + # Class NONE, non-zero TTL + rrset = 
create_rrset("example.org", RRClass.NONE(), RRType.SOA(), 1) + self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ]) + + # Class NONE, but with rdata + rrset = create_rrset("example.org", RRClass.NONE(), RRType.A(), 0, + [ b'\x00\x00\x00\x00' ]) + self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ]) + + # Matching class and type, but non-zero TTL + rrset = create_rrset("www.example.org", RRClass.IN(), RRType.A(), 1, + [ "192.0.2.1" ]) + self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ]) + + # Completely different class + rrset = create_rrset("example.org", RRClass.CH(), RRType.TXT(), 0, + [ "foo" ]) + self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ]) + + def __prereq_helper(self, method, expected, rrset): + '''Calls the given method with self._datasrc_client + and the given rrset, and compares the return value. + Function does not do much but makes the code look nicer''' + self.assertEqual(expected, method(rrset)) + + def __initialize_update_rrsets(self): + '''Prepare a number of RRsets to be used in several update tests + The rrsets are stored in self''' + orig_a_rrset = create_rrset("www.example.org", TEST_RRCLASS, + RRType.A(), 3600, [ "192.0.2.1" ]) + self.orig_a_rrset = orig_a_rrset + + rrset_update_a = create_rrset("www.example.org", TEST_RRCLASS, + RRType.A(), 3600, + [ "192.0.2.2", "192.0.2.3" ]) + self.rrset_update_a = rrset_update_a + + rrset_update_soa = create_rrset("example.org", TEST_RRCLASS, + RRType.SOA(), 3600, + [ "ns1.example.org. " + + "admin.example.org. " + + "1233 3600 1800 2419200 7200" ]) + self.rrset_update_soa = rrset_update_soa + + rrset_update_soa_del = create_rrset("example.org", RRClass.NONE(), + RRType.SOA(), 0, + [ "ns1.example.org. " + + "admin.example.org. " + + "1233 3600 1800 2419200 7200" ]) + self.rrset_update_soa_del = rrset_update_soa_del + + rrset_update_soa2 = create_rrset("example.org", TEST_RRCLASS, + RRType.SOA(), 3600, + [ "ns1.example.org. " + + "admin.example.org. 
" + + "4000 3600 1800 2419200 7200" ]) + self.rrset_update_soa2 = rrset_update_soa2 + + rrset_update_del_name = create_rrset("www.example.org", RRClass.ANY(), + RRType.ANY(), 0) + self.rrset_update_del_name = rrset_update_del_name + + rrset_update_del_name_apex = create_rrset("example.org", RRClass.ANY(), + RRType.ANY(), 0) + self.rrset_update_del_name_apex = rrset_update_del_name_apex + + rrset_update_del_rrset = create_rrset("www.example.org", RRClass.ANY(), + RRType.A(), 0) + self.rrset_update_del_rrset = rrset_update_del_rrset + + rrset_update_del_mx_apex = create_rrset("example.org", RRClass.ANY(), + RRType.MX(), 0) + self.rrset_update_del_mx_apex = rrset_update_del_mx_apex + + rrset_update_del_soa_apex = create_rrset("example.org", RRClass.ANY(), + RRType.SOA(), 0) + self.rrset_update_del_soa_apex = rrset_update_del_soa_apex + + rrset_update_del_ns_apex = create_rrset("example.org", RRClass.ANY(), + RRType.NS(), 0) + self.rrset_update_del_ns_apex = rrset_update_del_ns_apex + + rrset_update_del_rrset_part = create_rrset("www.example.org", + RRClass.NONE(), RRType.A(), + 0, + [ b'\xc0\x00\x02\x02', + b'\xc0\x00\x02\x03' ]) + self.rrset_update_del_rrset_part = rrset_update_del_rrset_part + + rrset_update_del_rrset_ns = create_rrset("example.org", RRClass.NONE(), + RRType.NS(), 0, + [ b'\x03ns1\x07example\x03org\x00', + b'\x03ns2\x07example\x03org\x00', + b'\x03ns3\x07example\x03org\x00' ]) + self.rrset_update_del_rrset_ns = rrset_update_del_rrset_ns + + rrset_update_del_rrset_mx = create_rrset("example.org", RRClass.NONE(), + RRType.MX(), 0, + [ b'\x00\x0a\x04mail\x07example\x03org\x00' ]) + self.rrset_update_del_rrset_mx = rrset_update_del_rrset_mx + + def test_acl_before_prereq(self): + name_in_use_no = create_rrset("foo.example.org", RRClass.ANY(), + RRType.ANY(), 0) + + # Test a prerequisite that would fail + self.check_full_handle_result(Rcode.NXDOMAIN(), [], [ name_in_use_no ]) + + # Change ACL so that it would be denied + self._acl_map = {(TEST_ZONE_NAME, 
TEST_RRCLASS): + REQUEST_LOADER.load([{"action": "REJECT"}])} + + # The prerequisite should now not be reached; it should fail on the + # ACL + self.check_full_handle_result(Rcode.REFUSED(), [], [ name_in_use_no ]) + + def test_prescan(self): + '''Test whether the prescan succeeds on data that is ok, and whether + if notices the SOA if present''' + # prepare a set of correct update statements + self.__initialize_update_rrsets() + + self.check_prescan_result(Rcode.NOERROR(), [ self.rrset_update_a ]) + + # check if soa is noticed + self.check_prescan_result(Rcode.NOERROR(), [ self.rrset_update_soa ], + self.rrset_update_soa) + + # Other types of succesful prechecks + self.check_prescan_result(Rcode.NOERROR(), [ self.rrset_update_soa2 ], + self.rrset_update_soa2) + self.check_prescan_result(Rcode.NOERROR(), + [ self.rrset_update_del_name ]) + self.check_prescan_result(Rcode.NOERROR(), + [ self.rrset_update_del_name_apex ]) + self.check_prescan_result(Rcode.NOERROR(), + [ self.rrset_update_del_rrset ]) + self.check_prescan_result(Rcode.NOERROR(), + [ self.rrset_update_del_mx_apex ]) + self.check_prescan_result(Rcode.NOERROR(), + [ self.rrset_update_del_rrset_part ]) + + # and check a few permutations of the above + # all of them (with one of the soas) + self.check_prescan_result(Rcode.NOERROR(), + [ + self.rrset_update_a, + self.rrset_update_soa, + self.rrset_update_del_name, + self.rrset_update_del_name_apex, + self.rrset_update_del_rrset, + self.rrset_update_del_mx_apex, + self.rrset_update_del_rrset_part + ], + self.rrset_update_soa) + + # Two soas. Should we reject or simply use the last? 
+ # (RFC is not really explicit on this, but between the lines I read + # use the last) + self.check_prescan_result(Rcode.NOERROR(), + [ self.rrset_update_soa, + self.rrset_update_soa2 ], + self.rrset_update_soa2) + self.check_prescan_result(Rcode.NOERROR(), + [ self.rrset_update_soa2, + self.rrset_update_soa ], + self.rrset_update_soa) + + self.check_prescan_result(Rcode.NOERROR(), + [ + self.rrset_update_del_mx_apex, + self.rrset_update_del_name, + self.rrset_update_del_name_apex, + self.rrset_update_del_rrset_part, + self.rrset_update_a, + self.rrset_update_del_rrset, + self.rrset_update_soa + ], + self.rrset_update_soa) + + def test_prescan_failures(self): + '''Test whether prescan fails on bad data''' + # out of zone data + rrset = create_rrset("different.zone", RRClass.ANY(), RRType.TXT(), 0) + self.check_prescan_result(Rcode.NOTZONE(), [ rrset ]) + + # forbidden type, zone class + rrset = create_rrset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.ANY(), 0, + [ b'\x00' ]) + self.check_prescan_result(Rcode.FORMERR(), [ rrset ]) + + # non-zero TTL, class ANY + rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY(), RRType.TXT(), 1) + self.check_prescan_result(Rcode.FORMERR(), [ rrset ]) + + # non-zero Rdata, class ANY + rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY(), RRType.TXT(), 0, + [ "foo" ]) + self.check_prescan_result(Rcode.FORMERR(), [ rrset ]) + + # forbidden type, class ANY + rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY(), RRType.AXFR(), 0, + [ b'\x00' ]) + self.check_prescan_result(Rcode.FORMERR(), [ rrset ]) + + # non-zero TTL, class NONE + rrset = create_rrset(TEST_ZONE_NAME, RRClass.NONE(), RRType.TXT(), 1) + self.check_prescan_result(Rcode.FORMERR(), [ rrset ]) + + # forbidden type, class NONE + rrset = create_rrset(TEST_ZONE_NAME, RRClass.NONE(), RRType.AXFR(), 0, + [ b'\x00' ]) + self.check_prescan_result(Rcode.FORMERR(), [ rrset ]) + + def __check_inzone_data(self, expected_result, name, rrtype, + expected_rrset = None): + '''Does a find on 
TEST_ZONE for the given rrset's name and type, + then checks if the result matches the expected result. + If so, and if expected_rrset is given, they are compared as + well.''' + _, finder = self._datasrc_client.find_zone(TEST_ZONE_NAME) + result, found_rrset, _ = finder.find(name, rrtype, + finder.NO_WILDCARD | + finder.FIND_GLUE_OK) + self.assertEqual(expected_result, result) + # Sigh. Need rrsets.compare() again. + # To be sure, compare name, class, type, and ttl + if expected_rrset is not None: + self.assertEqual(expected_rrset.get_name(), found_rrset.get_name()) + self.assertEqual(expected_rrset.get_class(), found_rrset.get_class()) + self.assertEqual(expected_rrset.get_type(), found_rrset.get_type()) + self.assertEqual(expected_rrset.get_ttl().to_text(), + found_rrset.get_ttl().to_text()) + expected_rdata =\ + [ rdata.to_text() for rdata in expected_rrset.get_rdata() ] + found_rdata =\ + [ rdata.to_text() for rdata in found_rrset.get_rdata() ] + expected_rdata.sort() + found_rdata.sort() + self.assertEqual(expected_rdata, found_rdata) + + def test_update_add_delete_rrset(self): + ''' + Tests a sequence of related add and delete updates. Some other + cases are tested by later tests. 
+ ''' + self.__initialize_update_rrsets() + + # initially, the www should only contain one rr + # (set to self.orig_a_rrset) + + # during this test, we will extend it at some point + extended_a_rrset = create_rrset("www.example.org", TEST_RRCLASS, + RRType.A(), 3600, + [ "192.0.2.1", + "192.0.2.2", + "192.0.2.3" ]) + + # Sanity check, make sure original data is really there before updates + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("www.example.org"), + RRType.A(), + self.orig_a_rrset) + + # Add two rrs + self.check_full_handle_result(Rcode.NOERROR(), [ self.rrset_update_a ]) + + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("www.example.org"), + RRType.A(), + extended_a_rrset) + + # Adding the same RRsets should not make a difference. + self.check_full_handle_result(Rcode.NOERROR(), [ self.rrset_update_a ]) + + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("www.example.org"), + RRType.A(), + extended_a_rrset) + + # Now delete those two, and we should end up with the original RRset + self.check_full_handle_result(Rcode.NOERROR(), + [ self.rrset_update_del_rrset_part ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("www.example.org"), + RRType.A(), + self.orig_a_rrset) + + # 'Deleting' them again should make no difference + self.check_full_handle_result(Rcode.NOERROR(), + [ self.rrset_update_del_rrset_part ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("www.example.org"), + RRType.A(), + self.orig_a_rrset) + + # But deleting the entire rrset, independent of its contents, should + # work + self.check_full_handle_result(Rcode.NOERROR(), + [ self.rrset_update_del_rrset ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN, + isc.dns.Name("www.example.org"), + RRType.A()) + + # Check that if we update the SOA, it is updated to our value + self.check_full_handle_result(Rcode.NOERROR(), + [ self.rrset_update_soa2 ]) + 
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("example.org"), + RRType.SOA(), + self.rrset_update_soa2) + + def test_glue_deletions(self): + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("sub.example.org."), + RRType.NS()) + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("ns.sub.example.org."), + RRType.A()) + + # See that we can delete glue + rrset_delete_glue = create_rrset("ns.sub.example.org.", + RRClass.ANY(), + RRType.A(), + 0) + self.check_full_handle_result(Rcode.NOERROR(), + [ rrset_delete_glue ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("sub.example.org."), + RRType.NS()) + self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN, + isc.dns.Name("ns.sub.example.org."), + RRType.A()) + + # Check that we don't accidentally delete a delegation if we + # try to delete non-existent glue + rrset_delete_nonexistent_glue = create_rrset("foo.sub.example.org.", + RRClass.ANY(), + RRType.A(), + 0) + self.check_full_handle_result(Rcode.NOERROR(), + [ rrset_delete_nonexistent_glue ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("sub.example.org."), + RRType.NS()) + + def test_update_add_new_data(self): + ''' + This tests adds data where none is present + ''' + # Add data at a completely new name + self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN, + isc.dns.Name("new.example.org"), + RRType.A()) + rrset = create_rrset("new.example.org", TEST_RRCLASS, RRType.A(), + 3600, [ "192.0.2.1", "192.0.2.2" ]) + self.check_full_handle_result(Rcode.NOERROR(), [ rrset ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("new.example.org"), + RRType.A(), + rrset) + + # Also try a name where data is present, but none of this + # specific type + self.__check_inzone_data(isc.datasrc.ZoneFinder.NXRRSET, + isc.dns.Name("new.example.org"), + RRType.TXT()) + rrset = create_rrset("new.example.org", TEST_RRCLASS, 
RRType.TXT(), + 3600, [ "foo" ]) + self.check_full_handle_result(Rcode.NOERROR(), [ rrset ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("new.example.org"), + RRType.TXT(), + rrset) + + def test_update_add_new_data_interspersed(self): + ''' + This tests adds data where none is present, similar to + test_update_add_new_data, but this time the second RRset + is put into the record between the two RRs of the first + RRset. + ''' + # Add data at a completely new name + self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN, + isc.dns.Name("new_a.example.org"), + RRType.A()) + self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN, + isc.dns.Name("new_txt.example.org"), + RRType.TXT()) + + rrset1 = create_rrset("new_a.example.org", TEST_RRCLASS, RRType.A(), + 3600, [ "192.0.2.1" ]) + + rrset2 = create_rrset("new_txt.example.org", TEST_RRCLASS, RRType.TXT(), + 3600, [ "foo" ]) + + rrset3 = create_rrset("new_a.example.org", TEST_RRCLASS, RRType.A(), + 3600, [ "192.0.2.2" ]) + + self.check_full_handle_result(Rcode.NOERROR(), + [ rrset1, rrset2, rrset3 ]) + + # The update should have merged rrset1 and rrset3 + rrset_merged = create_rrset("new_a.example.org", TEST_RRCLASS, + RRType.A(), 3600, + [ "192.0.2.1", "192.0.2.2" ]) + + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("new_a.example.org"), + RRType.A(), + rrset_merged) + + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("new_txt.example.org"), + RRType.TXT(), + rrset2) + + def test_update_delete_name(self): + ''' + Tests whether deletion of every RR for a name works + ''' + self.__initialize_update_rrsets() + + # First check it is there + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("www.example.org"), + RRType.A()) + + # Delete the entire name + self.check_full_handle_result(Rcode.NOERROR(), + [ self.rrset_update_del_name ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN, + 
isc.dns.Name("www.example.org"), + RRType.A()) + + # Should still be gone after pointless second delete + self.check_full_handle_result(Rcode.NOERROR(), + [ self.rrset_update_del_name ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN, + isc.dns.Name("www.example.org"), + RRType.A()) + + def test_update_apex_special_cases(self): + ''' + Tests a few special cases when deleting data from the apex + ''' + self.__initialize_update_rrsets() + + # the original SOA + orig_soa_rrset = create_rrset("example.org", TEST_RRCLASS, + RRType.SOA(), 3600, + [ "ns1.example.org. " + + "admin.example.org. " + + "1234 3600 1800 2419200 7200" ]) + # At some point, the SOA SERIAL will be auto-incremented + incremented_soa_rrset_01 = create_rrset("example.org", TEST_RRCLASS, + RRType.SOA(), 3600, ["ns1.example.org. " + + "admin.example.org. " + + "1235 3600 1800 2419200 7200" ]) + incremented_soa_rrset_02 = create_rrset("example.org", TEST_RRCLASS, + RRType.SOA(), 3600, ["ns1.example.org. " + + "admin.example.org. " + + "1236 3600 1800 2419200 7200" ]) + + # We will delete some of the NS records + orig_ns_rrset = create_rrset("example.org", TEST_RRCLASS, + RRType.NS(), 3600, + [ "ns1.example.org.", + "ns2.example.org.", + "ns3.example.org." 
]) + + # Sanity check, make sure original data is really there before updates + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("example.org"), + RRType.NS(), + orig_ns_rrset) + # We will delete the MX record later in this test, so let's make + # sure that it exists (we do not care about its value) + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("example.org"), + RRType.MX()) + + # Check that we cannot delete the SOA record by direct deletion + # both by name+type and by full rrset + self.check_full_handle_result(Rcode.NOERROR(), + [ self.rrset_update_del_soa_apex, + self.rrset_update_soa_del ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("example.org"), + RRType.SOA(), + incremented_soa_rrset_01) + + # If we delete everything at the apex, the SOA and NS rrsets should be + # untouched (but serial will be incremented) + self.check_full_handle_result(Rcode.NOERROR(), + [ self.rrset_update_del_name_apex ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("example.org"), + RRType.SOA(), + incremented_soa_rrset_02) + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("example.org"), + RRType.NS(), + orig_ns_rrset) + # but the MX should be gone + self.__check_inzone_data(isc.datasrc.ZoneFinder.NXRRSET, + isc.dns.Name("example.org"), + RRType.MX()) + + # Deleting the NS rrset by name and type only, it should also be left + # untouched + self.check_full_handle_result(Rcode.NOERROR(), + [ self.rrset_update_del_ns_apex ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("example.org"), + RRType.NS(), + orig_ns_rrset) + + def test_update_apex_special_case_ns_rrset(self): + # If we delete the NS at the apex specifically, it should still + # keep one record + self.__initialize_update_rrsets() + # When we are done, we should have a reduced NS rrset + short_ns_rrset = create_rrset("example.org", TEST_RRCLASS, + RRType.NS(), 3600, + 
[ "ns3.example.org." ]) + self.check_full_handle_result(Rcode.NOERROR(), + [ self.rrset_update_del_rrset_ns ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("example.org"), + RRType.NS(), + short_ns_rrset) + + def test_update_apex_special_case_ns_rrset2(self): + # If we add new NS records, then delete all existing ones, it + # should not keep any + self.__initialize_update_rrsets() + new_ns = create_rrset("example.org", TEST_RRCLASS, RRType.NS(), 3600, + [ "newns1.example.org", "newns2.example.org" ]) + + self.check_full_handle_result(Rcode.NOERROR(), + [ new_ns, + self.rrset_update_del_rrset_ns ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("example.org"), + RRType.NS(), + new_ns) + + def test_update_delete_normal_rrset_at_apex(self): + ''' + Tests a number of 'normal rrset' deletes at the apex + ''' + + # MX should simply be deleted + self.__initialize_update_rrsets() + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("example.org"), + RRType.MX()) + self.check_full_handle_result(Rcode.NOERROR(), + [ self.rrset_update_del_rrset_mx ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.NXRRSET, + isc.dns.Name("example.org"), + RRType.MX()) + + def test_update_add_then_delete_rrset(self): + # If we add data, then delete the whole rrset, added data should + # be gone as well + self.__initialize_update_rrsets() + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("www.example.org"), + RRType.A()) + self.check_full_handle_result(Rcode.NOERROR(), + [ self.rrset_update_a, + self.rrset_update_del_rrset ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN, + isc.dns.Name("www.example.org"), + RRType.A()) + + def test_update_add_then_delete_name(self): + # If we add data, then delete the entire name, added data should + # be gone as well + self.__initialize_update_rrsets() + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("www.example.org"), 
+                                 RRType.A())
+        self.check_full_handle_result(Rcode.NOERROR(),
+                                      [ self.rrset_update_a,
+                                        self.rrset_update_del_name ])
+        self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+                                 isc.dns.Name("www.example.org"),
+                                 RRType.A())
+
+    def test_update_delete_then_add_rrset(self):
+        # If we delete an entire rrset, then add something there again,
+        # the addition should be done
+        self.__initialize_update_rrsets()
+        self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+                                 isc.dns.Name("www.example.org"),
+                                 RRType.A())
+        self.check_full_handle_result(Rcode.NOERROR(),
+                                      [ self.rrset_update_del_rrset,
+                                        self.rrset_update_a ])
+        self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+                                 isc.dns.Name("www.example.org"),
+                                 RRType.A(),
+                                 self.rrset_update_a)
+
+    def test_update_delete_then_add_name(self):
+        # If we delete an entire name, then add something there again,
+        # the addition should be done
+        self.__initialize_update_rrsets()
+        self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+                                 isc.dns.Name("www.example.org"),
+                                 RRType.A())
+        self.check_full_handle_result(Rcode.NOERROR(),
+                                      [ self.rrset_update_del_name,
+                                        self.rrset_update_a ])
+        self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+                                 isc.dns.Name("www.example.org"),
+                                 RRType.A(),
+                                 self.rrset_update_a)
+
+    def test_update_cname_special_cases(self):
+        self.__initialize_update_rrsets()
+
+        # Sanity check
+        orig_cname_rrset = create_rrset("cname.example.org", TEST_RRCLASS,
+                                        RRType.CNAME(), 3600,
+                                        [ "www.example.org."
]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.CNAME, + isc.dns.Name("cname.example.org"), + RRType.A(), + orig_cname_rrset) + + # If we try to add data where a cname is preset + rrset = create_rrset("cname.example.org", TEST_RRCLASS, RRType.A(), + 3600, [ "192.0.2.1" ]) + + self.check_full_handle_result(Rcode.NOERROR(), [ rrset ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.CNAME, + isc.dns.Name("cname.example.org"), + RRType.A(), + orig_cname_rrset) + + # But updating the cname itself should work + new_cname_rrset = create_rrset("cname.example.org", TEST_RRCLASS, + RRType.CNAME(), 3600, + [ "mail.example.org." ]) + self.check_full_handle_result(Rcode.NOERROR(), [ new_cname_rrset ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.CNAME, + isc.dns.Name("cname.example.org"), + RRType.A(), + new_cname_rrset) + + self.__initialize_update_rrsets() + + # Likewise, adding a cname where other data is + # present should do nothing either + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("www.example.org"), + RRType.A(), + self.orig_a_rrset) + new_cname_rrset = create_rrset("www.example.org", TEST_RRCLASS, + RRType.CNAME(), 3600, + [ "mail.example.org." ]) + self.check_full_handle_result(Rcode.NOERROR(), [ new_cname_rrset ]) + self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS, + isc.dns.Name("www.example.org"), + RRType.A(), + self.orig_a_rrset) + + def test_update_bad_class(self): + rrset = create_rrset("example.org.", RRClass.CH(), RRType.TXT(), 0, + [ "foo" ]) + self.check_full_handle_result(Rcode.FORMERR(), [ rrset ]) + + def test_uncaught_exception(self): + def my_exc(): + raise Exception("foo") + self._session._UpdateSession__update_soa = my_exc + self.assertEqual(Rcode.SERVFAIL().to_text(), + self._session._UpdateSession__do_update().to_text()) + +class SessionACLTest(SessionTestBase): + '''ACL related tests for update session.''' + def test_update_acl_check(self): + '''Test for various ACL checks. 
+ + Note that accepted cases are covered in the basic tests. + + ''' + # create a separate session, with default (empty) ACL map. + session = UpdateSession(self._update_msg, + TEST_CLIENT4, ZoneConfig(set(), TEST_RRCLASS, + self._datasrc_client)) + # then the request should be rejected. + self.assertEqual((UPDATE_ERROR, None, None), session.handle()) + + # recreate the request message, and test with an ACL that would result + # in 'DROP'. get_message() should return None. + msg = create_update_msg() + acl_map = {(TEST_ZONE_NAME, TEST_RRCLASS): + REQUEST_LOADER.load([{"action": "DROP", "from": + TEST_CLIENT4[0]}])} + session = UpdateSession(msg, TEST_CLIENT4, + ZoneConfig([], TEST_RRCLASS, + self._datasrc_client, acl_map)) + self.assertEqual((UPDATE_DROP, None, None), session.handle()) + self.assertEqual(None, session.get_message()) + + def test_update_tsigacl_check(self): + '''Test for various ACL checks using TSIG.''' + # This ACL will accept requests from TEST_CLIENT4 (any port) *and* + # has TSIG signed by TEST_ZONE_NAME; all others will be rejected. + acl_map = {(TEST_ZONE_NAME, TEST_RRCLASS): + REQUEST_LOADER.load([{"action": "ACCEPT", + "from": TEST_CLIENT4[0], + "key": TEST_ZONE_NAME.to_text()}])} + + # If the message doesn't contain TSIG, it doesn't match the ACCEPT + # ACL entry, and the request should be rejected. + session = UpdateSession(self._update_msg, + TEST_CLIENT4, ZoneConfig(set(), TEST_RRCLASS, + self._datasrc_client, + acl_map)) + self.assertEqual((UPDATE_ERROR, None, None), session.handle()) + self.check_response(session.get_message(), Rcode.REFUSED()) + + # If the message contains TSIG, it should match the ACCEPT + # ACL entry, and the request should be granted. 
+ session = UpdateSession(create_update_msg(tsig_key=TEST_TSIG_KEY), + TEST_CLIENT4, ZoneConfig(set(), TEST_RRCLASS, + self._datasrc_client, + acl_map)) + self.assertEqual((UPDATE_SUCCESS, TEST_ZONE_NAME, TEST_RRCLASS), + session.handle()) + +if __name__ == "__main__": + isc.log.init("bind10") + isc.log.resetUnitTestRootLogger() + unittest.main() diff --git a/src/lib/python/isc/ddns/tests/zone_config_tests.py b/src/lib/python/isc/ddns/tests/zone_config_tests.py new file mode 100644 index 0000000000..7facb48eb5 --- /dev/null +++ b/src/lib/python/isc/ddns/tests/zone_config_tests.py @@ -0,0 +1,159 @@ +# Copyright (C) 2012 Internet Systems Consortium. +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM +# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING +# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+
+import isc.log
+from isc.dns import *
+from isc.datasrc import DataSourceClient
+from isc.ddns.zone_config import *
+import isc.acl.dns
+from isc.acl.acl import ACCEPT, REJECT, DROP, LoaderError
+
+import unittest
+import socket
+
+# Some common test parameters
+TEST_ZONE_NAME = Name('example.org')
+TEST_SECONDARY_ZONE_NAME = Name('example.com')
+TEST_RRCLASS = RRClass.IN()
+TEST_TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
+TEST_ACL_CONTEXT = isc.acl.dns.RequestContext(
+    socket.getaddrinfo("192.0.2.1", 1234, 0, socket.SOCK_DGRAM,
+                       socket.IPPROTO_UDP, socket.AI_NUMERICHOST)[0][4])
+
+class FakeDataSourceClient:
+    '''Faked data source client used in the ZoneConfigTest.
+
+    It emulates isc.datasrc.DataSourceClient, but only has to provide
+    the find_zone() interface (and only the first element of the return
+    value matters). By default it returns 'SUCCESS' (exact match) for
+    any input. It can be dynamically customized via the set_find_result()
+    method.
+
+    '''
+    def __init__(self):
+        self.__find_result = DataSourceClient.SUCCESS
+
+    def find_zone(self, zname):
+        return (self.__find_result, None)
+
+    def set_find_result(self, result):
+        self.__find_result = result
+
+class ZoneConfigTest(unittest.TestCase):
+    '''Some basic tests for the ZoneConfig class.'''
+    def setUp(self):
+        self.__datasrc_client = FakeDataSourceClient()
+        self.zconfig = ZoneConfig({(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS)},
+                                  TEST_RRCLASS, self.__datasrc_client)
+
+    def test_find_zone(self):
+        # Primary zone case: zone is in the data source, and not in secondaries
+        self.assertEqual((ZONE_PRIMARY, self.__datasrc_client),
+                         (self.zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS)))
+
+        # Secondary zone case: zone is in the data source and in secondaries.
+        self.assertEqual((ZONE_SECONDARY, None),
+                         (self.zconfig.find_zone(TEST_SECONDARY_ZONE_NAME,
+                                                 TEST_RRCLASS)))
+
+        # 'not found' case: zone not in the data source.
+ self.__datasrc_client.set_find_result(DataSourceClient.NOTFOUND) + self.assertEqual((ZONE_NOTFOUND, None), + (self.zconfig.find_zone(Name('example'), + TEST_RRCLASS))) + # same for the partial match + self.__datasrc_client.set_find_result(DataSourceClient.PARTIALMATCH) + self.assertEqual((ZONE_NOTFOUND, None), + (self.zconfig.find_zone(Name('example'), + TEST_RRCLASS))) + # a bit unusual case: zone not in the data source, but in secondaries. + # this is probably a configuration error, but ZoneConfig doesn't do + # this level check. + self.__datasrc_client.set_find_result(DataSourceClient.NOTFOUND) + self.assertEqual((ZONE_NOTFOUND, None), + (self.zconfig.find_zone(TEST_ZONE_NAME, + TEST_RRCLASS))) + # zone class doesn't match (but zone name matches) + self.__datasrc_client.set_find_result(DataSourceClient.SUCCESS) + zconfig = ZoneConfig({(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS)}, + RRClass.CH(), self.__datasrc_client) + self.assertEqual((ZONE_NOTFOUND, None), + (zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS))) + # similar to the previous case, but also in the secondary list + zconfig = ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)}, + RRClass.CH(), self.__datasrc_client) + self.assertEqual((ZONE_NOTFOUND, None), + (zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS))) + + # check some basic tests varying the secondary list. + # empty secondary list doesn't cause any disruption. 
+        zconfig = ZoneConfig(set(), TEST_RRCLASS, self.__datasrc_client)
+        self.assertEqual((ZONE_PRIMARY, self.__datasrc_client),
+                         zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS))
+        # adding multiple tuples, including subdomain of the test zone
+        # name, and the same zone name but a different class
+        zconfig = ZoneConfig({(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS),
+                              (Name('example'), TEST_RRCLASS),
+                              (Name('sub.example.org'), TEST_RRCLASS),
+                              (TEST_ZONE_NAME, RRClass.CH())},
+                             TEST_RRCLASS, self.__datasrc_client)
+        self.assertEqual((ZONE_PRIMARY, self.__datasrc_client),
+                         zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS))
+
+class ACLConfigTest(unittest.TestCase):
+    def setUp(self):
+        self.__datasrc_client = FakeDataSourceClient()
+        self.__zconfig = ZoneConfig({(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS)},
+                                    TEST_RRCLASS, self.__datasrc_client)
+
+    def test_get_update_acl(self):
+        # By default, no ACL is set, and the default ACL is "reject all"
+        acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, TEST_RRCLASS)
+        self.assertEqual(REJECT, acl.execute(TEST_ACL_CONTEXT))
+
+        # Add a map entry that would match the request, and it should now be
+        # accepted.
+        acl_map = {(TEST_ZONE_NAME, TEST_RRCLASS):
+                       REQUEST_LOADER.load([{"action": "ACCEPT"}])}
+        self.__zconfig.set_update_acl_map(acl_map)
+        acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, TEST_RRCLASS)
+        self.assertEqual(ACCEPT, acl.execute(TEST_ACL_CONTEXT))
+
+        # 'All reject' ACL will still apply for any other zones
+        acl = self.__zconfig.get_update_acl(Name('example.com'), TEST_RRCLASS)
+        self.assertEqual(REJECT, acl.execute(TEST_ACL_CONTEXT))
+        acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, RRClass.CH())
+        self.assertEqual(REJECT, acl.execute(TEST_ACL_CONTEXT))
+
+        # Test with a map with a few more ACL entries. Should be nothing
+        # special.
+ acl_map = {(Name('example.com'), TEST_RRCLASS): + REQUEST_LOADER.load([{"action": "REJECT"}]), + (TEST_ZONE_NAME, TEST_RRCLASS): + REQUEST_LOADER.load([{"action": "ACCEPT"}]), + (TEST_ZONE_NAME, RRClass.CH()): + REQUEST_LOADER.load([{"action": "DROP"}])} + self.__zconfig.set_update_acl_map(acl_map) + acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, TEST_RRCLASS) + self.assertEqual(ACCEPT, acl.execute(TEST_ACL_CONTEXT)) + acl = self.__zconfig.get_update_acl(Name('example.com'), TEST_RRCLASS) + self.assertEqual(REJECT, acl.execute(TEST_ACL_CONTEXT)) + acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, RRClass.CH()) + self.assertEqual(DROP, acl.execute(TEST_ACL_CONTEXT)) + +if __name__ == "__main__": + isc.log.init("bind10") + isc.log.resetUnitTestRootLogger() + unittest.main() diff --git a/src/lib/python/isc/ddns/zone_config.py b/src/lib/python/isc/ddns/zone_config.py new file mode 100644 index 0000000000..848eac1c9c --- /dev/null +++ b/src/lib/python/isc/ddns/zone_config.py @@ -0,0 +1,102 @@ +# Copyright (C) 2012 Internet Systems Consortium. +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM +# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING +# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+
+from isc.acl.dns import REQUEST_LOADER
+import isc.dns
+from isc.datasrc import DataSourceClient
+
+# Constants representing zone types
+ZONE_NOTFOUND = -1 # Zone isn't found in find_zone()
+ZONE_PRIMARY = 0 # Primary zone
+ZONE_SECONDARY = 1 # Secondary zone
+
+# The default ACL if unspecified on construction of ZoneConfig.
+DEFAULT_ACL = REQUEST_LOADER.load([{"action": "REJECT"}])
+
+class ZoneConfig:
+    '''A temporary helper class to encapsulate zone related configuration.
+
+    Its find_zone method will search the conceptual configuration for a
+    given zone, and return a tuple of zone type (primary or secondary) and
+    the client object to access the data source storing the zone.
+    It's very likely that details of zone related configurations like this
+    will change in near future, so the main purpose of this class is to
+    provide an independent interface for the main DDNS session module
+    until the details are fixed.
+
+    '''
+    def __init__(self, secondaries, datasrc_class, datasrc_client, acl_map={}):
+        '''Constructor.
+
+        Parameters:
+        - secondaries: a set of 2-element tuples. Each element is a pair
+          of isc.dns.Name and isc.dns.RRClass, and identifies a single
+          secondary zone.
+        - datasrc_class: isc.dns.RRClass object. Specifies the RR class
+          of datasrc_client.
+        - datasrc_client: isc.datasrc.DataSourceClient object. A data source
+          class for the RR class of datasrc_class. It's expected to contain
+          a zone that is eventually updated in the ddns package.
+        - acl_map: a dictionary that maps a tuple of
+          (isc.dns.Name, isc.dns.RRClass) to an isc.acl.dns.RequestACL
+          object. It defines an ACL to be applied to the zone defined
+          by the tuple. If unspecified, or the map is empty, the default
+          ACL will be applied to all zones, which is to reject any requests.
+ + ''' + self.__secondaries = secondaries + self.__datasrc_class = datasrc_class + self.__datasrc_client = datasrc_client + self.__default_acl = DEFAULT_ACL + self.__acl_map = acl_map + + def find_zone(self, zone_name, zone_class): + '''Return the type and accessor client object for given zone.''' + if self.__datasrc_class == zone_class and \ + self.__datasrc_client.find_zone(zone_name)[0] == \ + DataSourceClient.SUCCESS: + if (zone_name, zone_class) in self.__secondaries: + return ZONE_SECONDARY, None + return ZONE_PRIMARY, self.__datasrc_client + return ZONE_NOTFOUND, None + + def get_update_acl(self, zone_name, zone_class): + '''Return the update ACL for the given zone. + + This method searches the internally stored ACL map to see if + there's an ACL to be applied to the given zone. If found, that + ACL will be returned; otherwise the default ACL (see the constructor + description) will be returned. + + Parameters: + zone_name (isc.dns.Name): The zone name. + zone_class (isc.dns.RRClass): The zone class. + ''' + acl = self.__acl_map.get((zone_name, zone_class)) + if acl is not None: + return acl + return self.__default_acl + + def set_update_acl_map(self, new_map): + '''Set a new ACL map. + + This replaces any stored ACL map, either at construction or + by a previous call to this method, with the given new one. + + Parameter: + new_map: same as the acl_map parameter of the constructor. + + ''' + self.__acl_map = new_map diff --git a/src/lib/python/isc/log/log.cc b/src/lib/python/isc/log/log.cc index ed053984d2..69e70b727e 100644 --- a/src/lib/python/isc/log/log.cc +++ b/src/lib/python/isc/log/log.cc @@ -541,8 +541,14 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) { // into the formatter. It will print itself in the end. 
for (size_t i(start); i < number; ++ i) { PyObjectContainer param_container(PySequence_GetItem(args, i)); - formatter = formatter.arg(objectToStr(param_container.get(), - true)); + try { + formatter = formatter.arg(objectToStr(param_container.get(), + true)); + } + catch (...) { + formatter.deactivate(); + throw; + } } Py_RETURN_NONE; } diff --git a/src/lib/python/isc/log/tests/Makefile.am b/src/lib/python/isc/log/tests/Makefile.am index 170eee6cb0..ec29b7ad49 100644 --- a/src/lib/python/isc/log/tests/Makefile.am +++ b/src/lib/python/isc/log/tests/Makefile.am @@ -17,6 +17,7 @@ check-local: chmod +x $(abs_builddir)/log_console.py $(LIBRARY_PATH_PLACEHOLDER) \ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log \ + B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \ $(abs_srcdir)/check_output.sh $(abs_builddir)/log_console.py $(abs_srcdir)/console.out if ENABLE_PYTHON_COVERAGE touch $(abs_top_srcdir)/.coverage @@ -28,6 +29,7 @@ endif $(LIBRARY_PATH_PLACEHOLDER) \ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \ B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \ + B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \ done ; \ for pytest in $(PYTESTS_GEN) ; do \ @@ -36,5 +38,6 @@ endif $(LIBRARY_PATH_PLACEHOLDER) \ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \ B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \ + B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \ $(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \ done diff --git a/src/lib/python/isc/log_messages/Makefile.am b/src/lib/python/isc/log_messages/Makefile.am index 6b4be9469b..6d23df3b5d 100644 --- a/src/lib/python/isc/log_messages/Makefile.am +++ b/src/lib/python/isc/log_messages/Makefile.am @@ -12,6 +12,7 @@ EXTRA_DIST += zonemgr_messages.py EXTRA_DIST += 
cfgmgr_messages.py EXTRA_DIST += config_messages.py EXTRA_DIST += notify_out_messages.py +EXTRA_DIST += libddns_messages.py EXTRA_DIST += libxfrin_messages.py EXTRA_DIST += server_common_messages.py EXTRA_DIST += dbutil_messages.py @@ -28,6 +29,7 @@ CLEANFILES += zonemgr_messages.pyc CLEANFILES += cfgmgr_messages.pyc CLEANFILES += config_messages.pyc CLEANFILES += notify_out_messages.pyc +CLEANFILES += libddns_messages.pyc CLEANFILES += libxfrin_messages.pyc CLEANFILES += server_common_messages.pyc CLEANFILES += dbutil_messages.pyc diff --git a/src/lib/python/isc/log_messages/libddns_messages.py b/src/lib/python/isc/log_messages/libddns_messages.py new file mode 100644 index 0000000000..58d886d52c --- /dev/null +++ b/src/lib/python/isc/log_messages/libddns_messages.py @@ -0,0 +1 @@ +from work.libddns_messages import * diff --git a/src/lib/python/isc/notify/notify_out.py b/src/lib/python/isc/notify/notify_out.py index bfa7167309..83ac1d0014 100644 --- a/src/lib/python/isc/notify/notify_out.py +++ b/src/lib/python/isc/notify/notify_out.py @@ -34,7 +34,9 @@ logger = isc.log.Logger("notify_out") # initialized yet. see trac ticket #1103 from isc.dns import * -ZONE_NEW_DATA_READY_CMD = 'notify' +ZONE_NEW_DATA_READY_CMD = 'zone_new_data_ready' +ZONE_XFRIN_FAILED = 'zone_xfrin_failed' + _MAX_NOTIFY_NUM = 30 _MAX_NOTIFY_TRY_NUM = 5 _EVENT_NONE = 0 diff --git a/src/lib/python/isc/notify/notify_out_messages.mes b/src/lib/python/isc/notify/notify_out_messages.mes index b77a60c068..3bc0f3855a 100644 --- a/src/lib/python/isc/notify/notify_out_messages.mes +++ b/src/lib/python/isc/notify/notify_out_messages.mes @@ -15,6 +15,18 @@ # No namespace declaration - these constants go in the global namespace # of the notify_out_messages python module. +% NOTIFY_OUT_DATASRC_ACCESS_FAILURE failed to get access to data source: %1 +notify_out failed to get access to one of configured data sources. +Detailed error is shown in the log message. 
This can be either a +configuration error or installation setup failure. + +% NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND Zone %1 is not found +notify_out attempted to get slave information of a zone but the zone +isn't found in the expected data source. This shouldn't happen, +because notify_out first identifies a list of available zones before +this process. So this means some critical inconsistency in the data +source or software bug. + % NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3 The notify_out library tried to send a notify message to the given address, but it appears to be an invalid address. The configuration @@ -48,6 +60,16 @@ given address, but the reply did not have the QR bit set to one. Since there was a response, no more notifies will be sent to this server for this notification event. +% NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION uncaught exception: %1 +There was an uncaught exception in the handling of a notify reply +message, either in the message parser, or while trying to extract data +from the parsed message. The error is printed, and notify_out will +treat the response as a bad message, but this does point to a +programming error, since all exceptions should have been caught +explicitly. Please file a bug report. Since there was a response, +no more notifies will be sent to this server for this notification +event. + % NOTIFY_OUT_RETRY_EXCEEDED notify to %1#%2: number of retries (%3) exceeded The maximum number of retries for the notify target has been exceeded. Either the address of the secondary nameserver is wrong, or it is not @@ -72,33 +94,11 @@ The notify message to the given address (noted as address#port) has timed out, and the message will be resent until the max retry limit is reached. -% NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION uncaught exception: %1 -There was an uncaught exception in the handling of a notify reply -message, either in the message parser, or while trying to extract data -from the parsed message. 
The error is printed, and notify_out will -treat the response as a bad message, but this does point to a -programming error, since all exceptions should have been caught -explicitly. Please file a bug report. Since there was a response, -no more notifies will be sent to this server for this notification -event. - -% NOTIFY_OUT_DATASRC_ACCESS_FAILURE failed to get access to data source: %1 -notify_out failed to get access to one of configured data sources. -Detailed error is shown in the log message. This can be either a -configuration error or installation setup failure. - -% NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND Zone %1 is not found -notify_out attempted to get slave information of a zone but the zone -isn't found in the expected data source. This shouldn't happen, -because notify_out first identifies a list of available zones before -this process. So this means some critical inconsistency in the data -source or software bug. - -% NOTIFY_OUT_ZONE_NO_NS Zone %1 doesn't have NS RR -This is a warning issued when the notify_out module finds a zone that -doesn't have an NS RR. Notify message won't be sent to such a zone. - % NOTIFY_OUT_ZONE_BAD_SOA Zone %1 is invalid in terms of SOA This is a warning issued when the notify_out module finds a zone that doesn't have an SOA RR or has multiple SOA RRs. Notify message won't be sent to such a zone. + +% NOTIFY_OUT_ZONE_NO_NS Zone %1 doesn't have NS RR +This is a warning issued when the notify_out module finds a zone that +doesn't have an NS RR. Notify message won't be sent to such a zone. 
diff --git a/src/lib/python/isc/server_common/Makefile.am b/src/lib/python/isc/server_common/Makefile.am index a9eca2eac3..d89df2f156 100644 --- a/src/lib/python/isc/server_common/Makefile.am +++ b/src/lib/python/isc/server_common/Makefile.am @@ -1,6 +1,7 @@ SUBDIRS = tests -python_PYTHON = __init__.py tsig_keyring.py +python_PYTHON = __init__.py tsig_keyring.py auth_command.py dns_tcp.py +python_PYTHON += logger.py pythondir = $(pyexecdir)/isc/server_common diff --git a/src/lib/python/isc/server_common/auth_command.py b/src/lib/python/isc/server_common/auth_command.py new file mode 100644 index 0000000000..eb9c892fe5 --- /dev/null +++ b/src/lib/python/isc/server_common/auth_command.py @@ -0,0 +1,90 @@ +# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM +# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING +# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +'''This module is a utility to create some intermodule command(s) for Auth.''' + +from isc.dns import * +import isc.log +from isc.config.ccsession import create_command +from isc.log_messages.server_common_messages import * +from isc.server_common.logger import logger + +AUTH_MODULE_NAME = 'Auth' + +def auth_loadzone_command(module_cc, zone_name, zone_class): + '''Create a 'loadzone' command with a given zone for Auth server. 
+ + This function checks the Auth module configuration to see if it + servers a given zone via an in-memory data source on top of SQLite3 + data source, and, if so, generate an inter-module command for Auth + to force it to reload the zone. + + Parameters: + module_cc (CCSession): a CC session that can get access to auth module + configuration as a remote configuration + zone_name (isc.dns.Name): the zone name to be possibly reloaded + zone_class (isc.dns.RRClass): the RR class of the zone to be possibly + reloaded. + + Return: a CC command message for the reload if the zone is found; + otherwise None. + + ''' + # Note: this function was originally a dedicated subroutine of xfrin, + # but was moved here so it can be shared by some other modules + # (specifically, by ddns). It's expected that we'll soon fundamentally + # revisit the whole data source related configuration, at which point + # this function should be substantially modified if not completely + # deprecated (which is a more likely scenario). For this reason, the + # corresponding tests were still kept in xfrin. 
+ + datasources, is_default =\ + module_cc.get_remote_config_value(AUTH_MODULE_NAME, "datasources") + if is_default: + return None + for d in datasources: + if "type" not in d: + continue + try: + if "class" in d: + dclass = RRClass(d["class"]) + else: + dclass = RRClass("IN") + except InvalidRRClass as err: + logger.info(PYSERVER_COMMON_AUTH_CONFIG_RRCLASS_ERROR, err) + continue + + if d["type"].lower() == "memory" and dclass == zone_class: + for zone in d["zones"]: + if "filetype" not in zone: + continue + if "origin" not in zone: + continue + if "filetype" not in zone: + continue + try: + name = Name(zone["origin"]) + except (EmptyLabel, TooLongLabel, BadLabelType, BadEscape, + TooLongName, IncompleteName): + logger.info(PYSERVER_COMMON_AUTH_CONFIG_NAME_PARSER_ERROR, + err) + continue + + if zone["filetype"].lower() == "sqlite3" and name == zone_name: + param = {"origin": zone_name.to_text(), + "class": zone_class.to_text(), + "datasrc": d["type"]} + return create_command("loadzone", param) + return None diff --git a/src/lib/python/isc/server_common/dns_tcp.py b/src/lib/python/isc/server_common/dns_tcp.py new file mode 100644 index 0000000000..3b78d0d698 --- /dev/null +++ b/src/lib/python/isc/server_common/dns_tcp.py @@ -0,0 +1,280 @@ +# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM +# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL +# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING +# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +"""Utility for handling DNS transactions over TCP. + +This module defines a few convenient utility classes for handling DNS +transactions via a TCP socket. + +""" + +import isc.log +from isc.server_common.logger import logger +from isc.log_messages.server_common_messages import * +from isc.ddns.logger import ClientFormatter +import errno +import socket +import struct + +class DNSTCPSendBuffer: + '''A composite buffer for a DNS message sent over TCP. + + This class encapsulates binary data supposed to be a complete DNS + message, taking into account the 2-byte length field preceeding the + actual data. + + An object of this class is constructed with a binary object for the + DNS message data (in wire-format), conceptually "appended" to the + 2-byte length field. The length is automatically calculated and + converted to the wire-format data in the network byte order. + + Its get_data() method returns a binary object corresponding to the + consecutive region of the conceptual buffer starting from the specified + position. The returned region may not necessarily contain all remaining + data from the specified position; this class can internally hold multiple + separate binary objects to represent the conceptual buffer, and, + in that case, get_data() identifies the object that contains the + specified position of data, and returns the longest consecutive region + from that position. So the caller must call get_data(), incrementing + the position as it transmits the data, until it gets None. 
+ + This class is primarily intended to be a private utility for the + DNSTCPContext class, but can be used by other general applications + that need to send DNS messages over TCP in their own way. + + ''' + def __init__(self, data): + '''Consructor. + + Parameter: + data (binary): A binary sequence that is supposed to be a + complete DNS message in the wire format. It must not + exceed 65535 bytes in length; otherwise ValueError will be + raised. This class does not check any further validity on + the data as a DNS message. + + ''' + self.__data_size = len(data) + self.__len_size = 2 # fixed length + if self.__data_size > 0xffff: + raise ValueError('Too large data for DNS/TCP, size: ' + + str(self.__data_size)) + self.__lenbuf = struct.pack('H', socket.htons(self.__data_size)) + self.__databuf = data + + def get_total_len(self): + '''Return the total length of the buffer, including the length field. + + ''' + return self.__data_size + self.__len_size + + def get_data(self, pos): + '''Return a portion of data from a specified position. + + Parameter: + pos (int): The position in the TCP DNS message data (including + the 2-byte length field) from which the data are to be returned. + + Return: + A Python binary object that corresponds to a part of the TCP + DNS message data starting at the specified position. It may + or may not contain all remaining data from that position. + If the given position is beyond the end of the enrire data, + None will be returned. + + ''' + if pos >= self.__len_size: + pos -= self.__len_size + if pos >= self.__data_size: + return None + return self.__databuf[pos:] + return self.__lenbuf[pos:] + +class DNSTCPContextError(Exception): + '''An exception raised against logic errors in DNSTCPContext. + + This is raised only when the context class is used in an unexpected way, + that is for a caller's bug. + + ''' + pass + +class DNSTCPContext: + '''Context of a TCP connection used for DNS transactions. 
+ + This class offers the following services: + - Handle the initial 2-byte length field internally. The user of + this class only has to deal with the bare DNS message (just like + the one transmiited over UDP). + - Asynchronous I/O. It supports the non blocking operation mode, + where method calls never block. The caller is told whether it's + ongoing and it should watch the socket or it's fully completed. + - Socket error handling: it internally catches socket related exceptions + and handle them in an appropriate way. A fatal error will be reported + to the caller in the form of a normal return value. The application + of this class can therefore assume it's basically exception free. + + Notes: + - the initial implementation only supports non blocking mode, but + it's intended to be extended so it can work in both blocking or + non blocking mode as we see the need for it. + - the initial implementation only supports send operations on an + already connected socket, but the intent is to extend this class + so it can handle receive or connect operations. + + ''' + + # Result codes used in send()/send_ready() methods. + SEND_DONE = 1 + SENDING = 2 + CLOSED = 3 + + def __init__(self, sock): + '''Constructor. + + Parameter: + sock (Python socket): the socket to be used for the transaction. + It must represent a TCP socket; otherwise DNSTCPContextError + will be raised. It's also expected to be connected, but it's + not checked on construction; a subsequent send operation would + fail. + + ''' + if sock.proto != socket.IPPROTO_TCP: + raise DNSTCPContextError('not a TCP socket, proto: ' + + str(sock.proto)) + sock.setblocking(False) + self.__sock = sock + self.__send_buffer = None + self.__remote_addr = sock.getpeername() # record it for logging + + def send(self, data): + '''Send a DNS message. 
+ + In the non blocking mode, it sends as much data as possible via + the underlying TCP socket until it would block or all data are sent + out, and returns the corresponding result code. This method + therefore doesn't block in this mode. + + Note: the initial implementation only works in the non blocking + mode. + + This method must not be called once an error is detected and + CLOSED is returned or a prior send attempt is ongoing (with + the result code of SENDING); otherwise DNSTCPContextError is + raised. + + Parameter: + data (binary): A binary sequence that is supposed to be a + complete DNS message in the wire format. It must meet + the assumption that DNSTCPSendBuffer requires. + + Return: + An integer constant representing the result: + - SEND_DONE All data have been sent out successfully. + - SENDING All writable data has been sent out, and further + attempt would block at the moment. The caller is expected + to detect it when the underlying socket is writable again + and call send_ready() to continue the send. + - CLOSED A network error happened before the send operation is + completed. The underlying socket has been closed, and this + context object will be unusable. + + ''' + if self.__sock is None: + raise DNSTCPContextError('send() called after close') + if self.__send_buffer is not None: + raise DNSTCPContextError('duplicate send()') + + self.__send_buffer = DNSTCPSendBuffer(data) + self.__send_marker = 0 + return self.__do_send() + + def send_ready(self): + '''Resume sending a DNS message. + + This method is expected to be called followed by a send() call or + another send_ready() call that resulted in SENDING, when the caller + detects the underlying socket becomes writable. It works as + send() except that it continues the send operation from the suspended + position of the data at the time of the previous call. 
+ + This method must not be called once an error is detected and + CLOSED is returned or a send() method hasn't been called to + start the operation; otherwise DNSTCPContextError is raised. + + Return: see send(). + + ''' + if self.__sock is None: + raise DNSTCPContextError('send() called after close') + if self.__send_buffer is None: + raise DNSTCPContextError('send_ready() called before send') + + return self.__do_send() + + def __do_send(self): + while True: + data = self.__send_buffer.get_data(self.__send_marker) + if data is None: + # send complete; clear the internal buffer for next possible + # send. + logger.debug(logger.DBGLVL_TRACE_DETAIL, + PYSERVER_COMMON_DNS_TCP_SEND_DONE, + ClientFormatter(self.__remote_addr), + self.__send_marker) + self.__send_buffer = None + self.__send_marker = 0 + return self.SEND_DONE + try: + cc = self.__sock.send(data) + except socket.error as ex: + total_len = self.__send_buffer.get_total_len() + if ex.errno == errno.EAGAIN: + logger.debug(logger.DBGLVL_TRACE_DETAIL, + PYSERVER_COMMON_DNS_TCP_SEND_PENDING, + ClientFormatter(self.__remote_addr), + self.__send_marker, total_len) + return self.SENDING + logger.warn(PYSERVER_COMMON_DNS_TCP_SEND_ERROR, + ClientFormatter(self.__remote_addr), + self.__send_marker, total_len, ex) + self.__sock.close() + self.__sock = None + return self.CLOSED + self.__send_marker += cc + + def close(self): + '''Close the socket. + + This method closes the underlying socket. Once called, the context + object is effectively useless; any further method call would result + in a DNSTCPContextError exception. + + The underlying socket will be automatically (and implicitly) closed + when this object is deallocated, but Python seems to expect socket + objects should be explicitly closed before deallocation. So it's + generally advisable for the user of this object to call this method + explictily when it doesn't need the context. 
+ + This method can be called more than once or can be called after + other I/O related methods have returned CLOSED; it's compatible + with the close() method of the Python socket class. + + ''' + if self.__sock is None: + return + self.__sock.close() + self.__sock = None # prevent furhter operation diff --git a/src/lib/python/isc/server_common/logger.py b/src/lib/python/isc/server_common/logger.py new file mode 100644 index 0000000000..7451e05ffb --- /dev/null +++ b/src/lib/python/isc/server_common/logger.py @@ -0,0 +1,20 @@ +# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM +# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING +# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +'''Common definitions regarding logging for the server_common package.''' + +import isc.log + +logger = isc.log.Logger("server_common") diff --git a/src/lib/python/isc/server_common/server_common_messages.mes b/src/lib/python/isc/server_common/server_common_messages.mes index b32205c4a3..9eab129d76 100644 --- a/src/lib/python/isc/server_common/server_common_messages.mes +++ b/src/lib/python/isc/server_common/server_common_messages.mes @@ -21,6 +21,35 @@ # have that at this moment. 
So when adding a message, make sure that # the name is not already used in src/lib/config/config_messages.mes +% PYSERVER_COMMON_AUTH_CONFIG_NAME_PARSER_ERROR Invalid name when parsing Auth configuration: %1 +There was an invalid name when parsing Auth configuration. + +% PYSERVER_COMMON_AUTH_CONFIG_RRCLASS_ERROR Invalid RRClass when parsing Auth configuration: %1 +There was an invalid RR class when parsing Auth configuration. + +% PYSERVER_COMMON_DNS_TCP_SEND_DONE completed sending TCP message to %1 (%2 bytes in total) +Debug message. A complete DNS message has been successfully +transmitted over a TCP connection, possibly after multiple send +operations. The destination address and the total size of the message +(including the 2-byte length field) are shown in the log message. + +% PYSERVER_COMMON_DNS_TCP_SEND_ERROR failed to send TCP message to %1 (%2/%3 bytes sent): %4 +A DNS message has been attempted to be sent out over a TCP connection, +but it failed due to some network error. Although it's not expected +to happen too often, it can still happen for various reasons. The +administrator may want to examine the cause of the failure, which is +included in the log message, to see if it requires some action to +be taken at the server side. When this message is logged, the +corresponding TCP connection was closed immediately after the error +was detected. + +% PYSERVER_COMMON_DNS_TCP_SEND_PENDING sent part TCP message to %1 (up to %2/%3 bytes) +Debug message. A part of DNS message has been transmitted over a TCP +connection, and it's suspended because further attempt would block. +The destination address and the total size of the message that has +been transmitted so far (including the 2-byte length field) are shown +in the log message. + % PYSERVER_COMMON_TSIG_KEYRING_DEINIT Deinitializing global TSIG keyring A debug message noting that the global TSIG keyring is being removed from memory. Most programs don't do that, they just exit, which is OK. 
diff --git a/src/lib/python/isc/server_common/tests/Makefile.am b/src/lib/python/isc/server_common/tests/Makefile.am index 4829edc508..82cd854f57 100644 --- a/src/lib/python/isc/server_common/tests/Makefile.am +++ b/src/lib/python/isc/server_common/tests/Makefile.am @@ -1,5 +1,5 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@ -PYTESTS = tsig_keyring_test.py +PYTESTS = tsig_keyring_test.py dns_tcp_test.py EXTRA_DIST = $(PYTESTS) # If necessary (rare cases), explicitly specify paths to dynamic libraries @@ -20,5 +20,6 @@ endif echo Running test: $$pytest ; \ $(LIBRARY_PATH_PLACEHOLDER) \ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \ + B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \ done diff --git a/src/lib/python/isc/server_common/tests/dns_tcp_test.py b/src/lib/python/isc/server_common/tests/dns_tcp_test.py new file mode 100644 index 0000000000..7e74c047cb --- /dev/null +++ b/src/lib/python/isc/server_common/tests/dns_tcp_test.py @@ -0,0 +1,246 @@ +# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM +# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING +# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +'''Tests for isc.server_common.dns_tcp''' + +import isc.log +from isc.server_common.dns_tcp import * +import socket +import errno +import unittest + +def check_length_field(assert_eq, len_data, expected_len): + # Examine the "length field" part of the data. It should be 2-byte field, + # and (in our implementation) always given as a separate chunk of data. + # The 16-bit length value of the actual data should be stored in the + # network byte order. + len_high = (expected_len >> 8) & 0x00ff + len_low = (expected_len & 0x00ff) + assert_eq(2, len(len_data)) + assert_eq(len_high, len_data[0]) + assert_eq(len_low, len_data[1]) + +class BufferTest(unittest.TestCase): + def check_length_field(self, buf, expected_len): + '''Common subtest for the main tests that checks the length buffer.''' + check_length_field(self.assertEqual, buf.get_data(0), expected_len) + + # Confirm the get_data(1) returns the latter half of the (partial) + # buffer. + self.assertEqual(1, len(buf.get_data(1))) + self.assertEqual(expected_len & 0x00ff, buf.get_data(1)[0]) + + def test_small_data(self): + # The smallest size (in practice) of data: that of the header field. + expected_data = b'x' * 12 + buf = DNSTCPSendBuffer(expected_data) + self.check_length_field(buf, 12) + + self.assertEqual(expected_data, buf.get_data(2)) + self.assertEqual(b'x' * 11, buf.get_data(3)) + self.assertEqual(None, buf.get_data(14)) + + def test_large_data(self): + # Test with an arbitrarily large size of data. + buf = DNSTCPSendBuffer(b'x' * 65534) + self.check_length_field(buf, 65534) + self.assertEqual(b'x' * 65534, buf.get_data(2)) + self.assertEqual(b'x' * 2, buf.get_data(65534)) + self.assertEqual(None, buf.get_data(65536)) + + def test_largest_data(self): + # This is the largest possible size of DNS message. 
+ buf = DNSTCPSendBuffer(b'y' * 65535) + self.check_length_field(buf, 65535) + self.assertEqual(b'y', buf.get_data(65536)) + self.assertEqual(None, buf.get_data(65537)) + + def test_too_large_data(self): + # The maximum possible size of a valid DNS message is 65535. + # Beyond that, the buffer construction should result in an exception. + self.assertRaises(ValueError, DNSTCPSendBuffer, b'0' * 65536) + + def test_empty_data(self): + # Unusual, but it's not rejected + buf = DNSTCPSendBuffer(b'') + self.check_length_field(buf, 0) + self.assertEqual(None, buf.get_data(2)) + + def test_get_total_len(self): + self.assertEqual(14, DNSTCPSendBuffer(b'x' * 12).get_total_len()) + self.assertEqual(2, DNSTCPSendBuffer(b'').get_total_len()) + self.assertEqual(65537, DNSTCPSendBuffer(b'X' * 65535).get_total_len()) + +class FakeSocket: + '''Emulating python socket w/o involving IO while allowing inspection.''' + def __init__(self, proto=socket.IPPROTO_TCP): + self._setblocking_val = None # record the latest value of setblocking() + self._closed = False # set to True on close() + self._sent_data = [] # record the transmitted data in send() + self._send_buflen = None # capacity of the faked "send buffer"; + # None means infinity, -1 means "closed" + self._send_cc = 0 # waterline of the send buffer + self.proto = proto # protocol (should be TCP, but can be faked) + + def setblocking(self, on): + self._setblocking_val = on + + def close(self): + self._closed = True + + def send(self, data): + # Calculate the available space in the "send buffer" + if self._send_buflen == -1: + raise socket.error(errno.EPIPE, "Broken pipe") + elif self._send_buflen is None: + available_space = len(data) + else: + available_space = self._send_buflen - self._send_cc + if available_space == 0: + # if there's no space, (assuming it's nonblocking mode) raise + # EAGAIN. 
+ raise socket.error(errno.EAGAIN, + "Resource temporarily unavailable") + # determine the sendable part of the data, record it, update "buffer". + cc = min(available_space, len(data)) + self._sent_data.append(data[:cc]) + self._send_cc += cc + return cc + + def make_send_ready(self): + # pretend that the accrued data has been cleared, making room in + # the send buffer. + self._send_cc = 0 + + def getpeername(self): + '''Return faked remote address''' + return ('2001:db8::1', 53000, 0, 0) + +class ContextTest(unittest.TestCase): + def setUp(self): + self.__sock = FakeSocket() + # there should be no setblocking value on the fake socket by default. + self.assertEqual(None, self.__sock._setblocking_val) + self.__ctx = DNSTCPContext(self.__sock) + # dummy data that has the same length as the DNS header section: + self.__test_data = b'x' * 12 + + def test_initialization(self): + # Creating a context (in setUp) should make the socket non-blocking. + self.assertFalse(self.__sock._setblocking_val) + + # Only a TCP socket is accepted. + self.assertRaises(DNSTCPContextError, DNSTCPContext, + FakeSocket(proto=socket.IPPROTO_UDP)) + + def test_send_all(self): + # By default, a single send() call will send out all data by 2 + # send() calls: one for the 2-byte length data and the other for the + # actual data. + self.assertEqual(DNSTCPContext.SEND_DONE, + self.__ctx.send(self.__test_data)) + self.assertEqual(2, len(self.__sock._sent_data)) + check_length_field(self.assertEqual, self.__sock._sent_data[0], + len(self.__test_data)) + self.assertEqual(self.__test_data, self.__sock._sent_data[1]) + + def test_send_divided(self): + # set the "send buffer" of the fake socket to 7 (half of the size of + # len + data). 
+ self.__sock._send_buflen = 7 + + # The initial send() can only send out the half of the data in + # two calls to socket.send(): the first one for the length field, + # and the other is for the first 5 bytes of the data + self.assertEqual(DNSTCPContext.SENDING, + self.__ctx.send(self.__test_data)) + self.assertEqual(2, len(self.__sock._sent_data)) + check_length_field(self.assertEqual, self.__sock._sent_data[0], + len(self.__test_data)) + self.assertEqual(self.__test_data[:5], self.__sock._sent_data[1]) + + # "flush" the send buffer of the fake socket + self.__sock.make_send_ready() + + # send_ready() can now complete the send. The remaining data should + # have been passed. + self.assertEqual(DNSTCPContext.SEND_DONE, self.__ctx.send_ready()) + self.assertEqual(3, len(self.__sock._sent_data)) + self.assertEqual(self.__test_data[5:], self.__sock._sent_data[2]) + + def test_send_multi(self): + # On a successful completion of send, another send can be done. + for i in (0, 2): + self.assertEqual(DNSTCPContext.SEND_DONE, + self.__ctx.send(self.__test_data)) + self.assertEqual(i + 2, len(self.__sock._sent_data)) + check_length_field(self.assertEqual, self.__sock._sent_data[i], + len(self.__test_data)) + self.assertEqual(self.__test_data, self.__sock._sent_data[i + 1]) + + def test_send_reset(self): + # the connection will be "reset" before the initial send. + # send() should return CLOSED, and the underlying socket should be + # closed. + self.__sock._send_buflen = -1 + self.assertEqual(DNSTCPContext.CLOSED, + self.__ctx.send(self.__test_data)) + self.assertTrue(self.__sock._closed) + + # Once closed, send() cannot be called any more + self.assertRaises(DNSTCPContextError, self.__ctx.send, + self.__test_data) + # Calling close() is okay (it's NO-OP) + self.__ctx.close() + + def test_send_divided_reset(self): + # Similar to send_reset, but send() succeeds, and then the connection + # will be "reset". 
+ self.__sock._send_buflen = 7 + self.assertEqual(DNSTCPContext.SENDING, + self.__ctx.send(self.__test_data)) + self.__sock._send_buflen = -1 + self.assertEqual(DNSTCPContext.CLOSED, self.__ctx.send_ready()) + self.assertTrue(self.__sock._closed) + + # Once closed, send_ready() cannot be called any more + self.assertRaises(DNSTCPContextError, self.__ctx.send_ready) + + def test_duplicate_send(self): + # send() cannot be called until it's completed + self.__sock._send_buflen = 7 + self.assertEqual(DNSTCPContext.SENDING, + self.__ctx.send(self.__test_data)) + self.assertRaises(DNSTCPContextError, self.__ctx.send, + self.__test_data) + + def test_skip_send(self): + # send_ready() cannot be called before send(). + self.assertRaises(DNSTCPContextError, self.__ctx.send_ready) + + def test_close(self): + self.assertEqual(DNSTCPContext.SEND_DONE, + self.__ctx.send(self.__test_data)) + self.__ctx.close() + self.assertTrue(self.__sock._closed) + + # Duplicate close is just ignored, and the socket is still closed. + self.__ctx.close() + self.assertTrue(self.__sock._closed) + +if __name__ == "__main__": + isc.log.init("bind10") + isc.log.resetUnitTestRootLogger() + unittest.main() diff --git a/src/lib/python/isc/server_common/tsig_keyring.py b/src/lib/python/isc/server_common/tsig_keyring.py index 308cfd451f..de3b759ac6 100644 --- a/src/lib/python/isc/server_common/tsig_keyring.py +++ b/src/lib/python/isc/server_common/tsig_keyring.py @@ -20,10 +20,10 @@ tsig_keys module. 
import isc.dns import isc.log +from isc.server_common.logger import logger from isc.log_messages.server_common_messages import * updater = None -logger = isc.log.Logger("server_common") class Unexpected(Exception): """ diff --git a/src/lib/python/isc/util/cio/tests/socketsession_test.py b/src/lib/python/isc/util/cio/tests/socketsession_test.py index 66b43d59e7..d492f6d904 100644 --- a/src/lib/python/isc/util/cio/tests/socketsession_test.py +++ b/src/lib/python/isc/util/cio/tests/socketsession_test.py @@ -22,6 +22,8 @@ TESTDATA_OBJDIR = os.getenv("TESTDATAOBJDIR") TEST_UNIX_FILE = TESTDATA_OBJDIR + '/ssessiontest.unix' TEST_DATA = b'BIND10 test' TEST_PORT = 53535 +TEST_PORT2 = 53536 +TEST_PORT3 = 53537 class TestForwarder(unittest.TestCase): '''In general, this is a straightforward port of the C++ counterpart. @@ -31,12 +33,15 @@ class TestForwarder(unittest.TestCase): ''' def setUp(self): + self.listen_sock = None self.forwarder = SocketSessionForwarder(TEST_UNIX_FILE) if os.path.exists(TEST_UNIX_FILE): os.unlink(TEST_UNIX_FILE) self.large_text = b'a' * 65535 def tearDown(self): + if self.listen_sock is not None: + self.listen_sock.close() if os.path.exists(TEST_UNIX_FILE): os.unlink(TEST_UNIX_FILE) @@ -172,15 +177,22 @@ class TestForwarder(unittest.TestCase): sock.settimeout(10) self.assertEqual(TEST_DATA, sock.recvfrom(len(TEST_DATA))[0]) else: - server_sock.close() self.assertEqual(len(TEST_DATA), passed_sock.send(TEST_DATA)) client_sock.setblocking(True) client_sock.settimeout(10) self.assertEqual(TEST_DATA, client_sock.recv(len(TEST_DATA))) + server_sock.close() + client_sock.close() + + passed_sock.close() + sock.close() def test_push_and_pop(self): - # This is a straightforward port of C++ pushAndPop test. + # This is a straightforward port of C++ pushAndPop test. See the + # C++ version why we use multiple ports for "local". 
local6 = ('::1', TEST_PORT, 0, 0) + local6_alt = ('::1', TEST_PORT2, 0, 0) + local6_alt2 = ('::1', TEST_PORT3, 0, 0) remote6 = ('2001:db8::1', 5300, 0, 0) self.check_push_and_pop(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, local6, remote6, TEST_DATA, True) @@ -188,6 +200,7 @@ class TestForwarder(unittest.TestCase): local6, remote6, TEST_DATA, False) local4 = ('127.0.0.1', TEST_PORT) + local4_alt = ('127.0.0.1', TEST_PORT2) remote4 = ('192.0.2.2', 5300) self.check_push_and_pop(AF_INET, SOCK_DGRAM, IPPROTO_UDP, local4, remote4, TEST_DATA, False) @@ -195,11 +208,11 @@ class TestForwarder(unittest.TestCase): local4, remote4, TEST_DATA, False) self.check_push_and_pop(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, - local6, remote6, self.large_text, False) + local6_alt, remote6, self.large_text, False) self.check_push_and_pop(AF_INET6, SOCK_STREAM, IPPROTO_TCP, local6, remote6, self.large_text, False) self.check_push_and_pop(AF_INET, SOCK_DGRAM, IPPROTO_UDP, - local4, remote4, self.large_text, False) + local4_alt, remote4, self.large_text, False) self.check_push_and_pop(AF_INET, SOCK_STREAM, IPPROTO_TCP, local4, remote4, self.large_text, False) @@ -207,7 +220,7 @@ class TestForwarder(unittest.TestCase): # scope (zone) ID scope6 = ('fe80::1', TEST_PORT, 0, 1) self.check_push_and_pop(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, - local6, scope6, TEST_DATA, False) + local6_alt2, scope6, TEST_DATA, False) def test_push_too_fast(self): # A straightforward port of C++ pushTooFast test. @@ -235,6 +248,7 @@ class TestForwarder(unittest.TestCase): receiver = SocketSessionReceiver(accept_sock) s.close() self.assertRaises(SocketSessionError, receiver.pop) + accept_sock.close() class TestReceiver(unittest.TestCase): # We only check a couple of failure cases on construction. 
Valid cases diff --git a/src/lib/python/isc/xfrin/diff.py b/src/lib/python/isc/xfrin/diff.py index 80fa909990..ea51967c30 100644 --- a/src/lib/python/isc/xfrin/diff.py +++ b/src/lib/python/isc/xfrin/diff.py @@ -15,15 +15,18 @@ """ This helps the XFR in process with accumulating parts of diff and applying -it to the datasource. +it to the datasource. It also has a 'single update mode' which is useful +for DDNS. The name of the module is not yet fully decided. We might want to move it -under isc.datasrc or somewhere else, because we might want to reuse it with -future DDNS process. But until then, it lives here. +under isc.datasrc or somewhere else, because we are reusing it with DDNS. +But for now, it lives here. """ import isc.dns +from isc.datasrc import ZoneFinder import isc.log +from isc.datasrc import ZoneFinder from isc.log_messages.libxfrin_messages import * class NoSuchZone(Exception): @@ -59,7 +62,8 @@ class Diff: the changes to underlying data source right away, but keeps them for a while. """ - def __init__(self, ds_client, zone, replace=False, journaling=False): + def __init__(self, ds_client, zone, replace=False, journaling=False, + single_update_mode=False): """ Initializes the diff to a ready state. It checks the zone exists in the datasource and if not, NoSuchZone is raised. This also creates @@ -76,6 +80,25 @@ class Diff: incoming updates but does not support journaling, the Diff object will still continue applying the diffs with disabling journaling. + If single_update_mode is true, the update is expected to only contain + 1 set of changes (i.e. one set of additions, and one set of deletions). + If so, the additions and deletions are kept separately, and applied + in one go upon commit() or apply(). In this mode, additions and + deletions can be done in any order. The first addition and the + first deletion still have to be the new and old SOA records, + respectively. 
Once apply() or commit() has been called, this + requirement is renewed (since the diff object is essentially reset). + + In this single_update_mode, upon commit, the deletions are performed + first, and then the additions. With the previously mentioned + restrictions, this means that the actual update looks like a single + IXFR changeset (which can then be journaled). Apart from those + restrictions, this class does not do any checking of data; it is + the caller's responsibility to keep the data 'sane', and this class + does not presume to have any knowledge of DNS zone content sanity. + For instance, though it enforces the SOA to be deleted first, and + added first, it does no checks on the SERIAL value. + + You can also expect isc.datasrc.Error or isc.datasrc.NotImplemented + exceptions. """ @@ -91,9 +114,14 @@ raise NoSuchZone("Zone " + str(zone) + " does not exist in the data source " + str(ds_client)) - self.__buffer = [] + self.__single_update_mode = single_update_mode + if single_update_mode: + self.__additions = [] + self.__deletions = [] + else: + self.__buffer = [] - def __check_commited(self): + def __check_committed(self): """ This checks if the diff is already commited or broken. If it is, it raises ValueError. This check is for methods that need to work only on @@ -103,14 +131,47 @@ raise ValueError("The diff is already commited or it has raised " + "an exception, you come late") + def __append_with_soa_check(self, buf, operation, rr): + """ + Helper method for __data_common(). + Add the given rr to the given buffer, but with a SOA check; + - if the buffer is empty, the RRType of the rr must be SOA + - if the buffer is not empty, the RRType must not be SOA + Raises a ValueError if these rules are not satisfied. + If they are, the RR is appended to the buffer. 
+ Arguments: + buf: buffer to add to + operation: operation to perform (either 'add' or 'delete') + rr: RRset to add to the buffer + """ + # first add or delete must be of type SOA + if len(buf) == 0 and\ + rr.get_type() != isc.dns.RRType.SOA(): + raise ValueError("First " + operation + + " in single update mode must be of type SOA") + # And later adds or deletes may not + elif len(buf) != 0 and\ + rr.get_type() == isc.dns.RRType.SOA(): + raise ValueError("Multiple SOA records in single " + + "update mode " + operation) + buf.append((operation, rr)) + def __data_common(self, rr, operation): """ Schedules an operation with rr. It does all the real work of add_data and delete_data, including all checks. + + Raises a ValueError in several cases: + - if the rrset contains multiple rrs + - if the class of the rrset does not match that of the update + - in single_update_mode if the first rr is not of type SOA (both + for addition and deletion) + - in single_update_mode if any later rr is of type SOA (both for + addition and deletion) """ - self.__check_commited() + self.__check_committed() if rr.get_rdata_count() != 1: raise ValueError('The rrset must contain exactly 1 Rdata, but ' + 'it holds ' + str(rr.get_rdata_count())) @@ -118,10 +179,21 @@ class Diff: raise ValueError("The rrset's class " + str(rr.get_class()) + " does not match updater's " + str(self.__updater.get_class())) - self.__buffer.append((operation, rr)) - if len(self.__buffer) >= DIFF_APPLY_TRESHOLD: - # Time to auto-apply, so the data don't accumulate too much - self.apply() + if self.__single_update_mode: + if operation == 'add': + if not self._remove_rr_from_deletions(rr): + self.__append_with_soa_check(self.__additions, operation, + rr) + elif operation == 'delete': + if not self._remove_rr_from_additions(rr): + self.__append_with_soa_check(self.__deletions, operation, + rr) + else: + self.__buffer.append((operation, rr)) + if len(self.__buffer) >= DIFF_APPLY_TRESHOLD: + # Time to auto-apply, so the 
data don't accumulate too much + # This is not done for DDNS type data + self.apply() def add_data(self, rr): """ @@ -175,23 +247,34 @@ class Diff: sigdata2 = rrset2.get_rdata()[0].to_text().split()[0] return sigdata1 == sigdata2 - buf = [] - for (op, rrset) in self.__buffer: - old = buf[-1][1] if len(buf) > 0 else None - if old is None or op != buf[-1][0] or \ - rrset.get_name() != old.get_name() or \ - (not same_type(rrset, old)): - buf.append((op, isc.dns.RRset(rrset.get_name(), - rrset.get_class(), - rrset.get_type(), - rrset.get_ttl()))) - if rrset.get_ttl() != buf[-1][1].get_ttl(): - logger.warn(LIBXFRIN_DIFFERENT_TTL, rrset.get_ttl(), - buf[-1][1].get_ttl(), rrset.get_name(), - rrset.get_class(), rrset.get_type()) - for rdatum in rrset.get_rdata(): - buf[-1][1].add_rdata(rdatum) - self.__buffer = buf + def compact_buffer(buffer_to_compact): + '''Internal helper function for compacting buffers, compacts the + given buffer. + Returns the compacted buffer. + ''' + buf = [] + for (op, rrset) in buffer_to_compact: + old = buf[-1][1] if len(buf) > 0 else None + if old is None or op != buf[-1][0] or \ + rrset.get_name() != old.get_name() or \ + (not same_type(rrset, old)): + buf.append((op, isc.dns.RRset(rrset.get_name(), + rrset.get_class(), + rrset.get_type(), + rrset.get_ttl()))) + if rrset.get_ttl() != buf[-1][1].get_ttl(): + logger.warn(LIBXFRIN_DIFFERENT_TTL, rrset.get_ttl(), + buf[-1][1].get_ttl(), rrset.get_name(), + rrset.get_class(), rrset.get_type()) + for rdatum in rrset.get_rdata(): + buf[-1][1].add_rdata(rdatum) + return buf + + if self.__single_update_mode: + self.__additions = compact_buffer(self.__additions) + self.__deletions = compact_buffer(self.__deletions) + else: + self.__buffer = compact_buffer(self.__buffer) def apply(self): """ @@ -209,25 +292,41 @@ class Diff: It also can raise isc.datasrc.Error. If that happens, you should stop using this object and abort the modification. 
""" - self.__check_commited() - # First, compact the data - self.compact() - try: - # Then pass the data inside the data source - for (operation, rrset) in self.__buffer: + def apply_buffer(buf): + ''' + Helper method to apply all operations in the given buffer + ''' + for (operation, rrset) in buf: if operation == 'add': self.__updater.add_rrset(rrset) elif operation == 'delete': self.__updater.delete_rrset(rrset) else: raise ValueError('Unknown operation ' + operation) + + self.__check_committed() + # First, compact the data + self.compact() + try: + # Then pass the data inside the data source + if self.__single_update_mode: + apply_buffer(self.__deletions) + apply_buffer(self.__additions) + else: + apply_buffer(self.__buffer) + # As everything is already in, drop the buffer except: # If there's a problem, we can't continue. self.__updater = None raise - self.__buffer = [] + # all went well, reset state of buffers + if self.__single_update_mode: + self.__additions = [] + self.__deletions = [] + else: + self.__buffer = [] def commit(self): """ @@ -237,7 +336,7 @@ class Diff: This might raise isc.datasrc.Error. """ - self.__check_commited() + self.__check_committed() # Push the data inside the data source self.apply() # Make sure they are visible. @@ -259,5 +358,229 @@ class Diff: Probably useful only for testing and introspection purposes. Don't modify the list. + + Raises a ValueError if the buffer is in single_update_mode. """ - return self.__buffer + if self.__single_update_mode: + raise ValueError("Compound buffer requested in single-update mode") + else: + return self.__buffer + + def get_single_update_buffers(self): + """ + Returns the current buffers of changes not yet passed into the data + source. It is a tuple of the current deletions and additions, which + each are in a form like [('delete', rrset), ('delete', rrset), ...], + and [('add', rrset), ('add', rrset), ..]. + + Probably useful only for testing and introspection purposes. 
Don't + modify the lists. + + Raises a ValueError if the buffer is not in single_update_mode. + """ + if not self.__single_update_mode: + raise ValueError("Separate buffers requested in single-update mode") + else: + return (self.__deletions, self.__additions) + + def find(self, name, rrtype, + options=(ZoneFinder.NO_WILDCARD | ZoneFinder.FIND_GLUE_OK)): + """ + Calls the find() method in the ZoneFinder associated with this + Diff's ZoneUpdater, i.e. the find() on the zone as it was on the + moment this Diff object got created. + See the ZoneFinder documentation for a full description. + Note that the result does not include changes made in this Diff + instance so far. + Options default to NO_WILDCARD and FIND_GLUE_OK. + Raises a ValueError if the Diff has been committed already + """ + self.__check_committed() + return self.__updater.find(name, rrtype, options) + + def find_all(self, name, + options=(ZoneFinder.NO_WILDCARD | ZoneFinder.FIND_GLUE_OK)): + """ + Calls the find_all() method in the ZoneFinder associated with this + Diff's ZoneUpdater, i.e. the find_all() on the zone as it was on the + moment this Diff object got created. + See the ZoneFinder documentation for a full description. + Note that the result does not include changes made in this Diff + instance so far. + Options default to NO_WILDCARD and FIND_GLUE_OK. 
+ Raises a ValueError if the Diff has been committed already + """ + self.__check_committed() + return self.__updater.find_all(name, options) + + def __remove_rr_from_buffer(self, buf, rr): + '''Helper for common code in remove_rr_from_deletions() and + remove_rr_from_additions(); + returns the result of the removal operation on the given buffer + ''' + def same_rr(a, b): + # Consider two rr's the same if name, type, and rdata match + # Note that at this point it should have been checked that + # the rr in the buffer and the given rr have exactly one rdata + return a.get_name() == b.get_name() and\ + a.get_type() == b.get_type() and\ + a.get_rdata()[0] == b.get_rdata()[0] + if rr.get_type() == isc.dns.RRType.SOA(): + return buf + else: + return [ op for op in buf if not same_rr(op[1], rr)] + + def _remove_rr_from_deletions(self, rr): + ''' + Removes the given rr from the currently buffered deletions; + returns True if anything is removed, False if the RR was not present. + This method is protected; it is not meant to be called from anywhere + but the add_data() method. It is not private for easier testing. + ''' + orig_size = len(self.__deletions) + self.__deletions = self.__remove_rr_from_buffer(self.__deletions, rr) + return len(self.__deletions) != orig_size + + def _remove_rr_from_additions(self, rr): + ''' + Removes the given rr from the currently buffered additions; + returns True if anything is removed, False if the RR was not present. + This method is protected; it is not meant to be called from anywhere + but the delete_data() method. It is not private for easier testing. + ''' + orig_size = len(self.__additions) + self.__additions = self.__remove_rr_from_buffer(self.__additions, rr) + return len(self.__additions) != orig_size + + def __get_name_from_additions(self, name): + ''' + Returns a list of all rrs in the additions queue that have the given + Name. 
+ This method is protected; it is not meant to be called from anywhere + but the find_all_updated() method. It is not private for easier + testing. + ''' + return [ rr for (_, rr) in self.__additions if rr.get_name() == name ] + + def __get_name_from_deletions(self, name): + ''' + Returns a list of all rrs in the deletions queue that have the given + Name + This method is protected; it is not meant to be called from anywhere + but the find_all_updated() method. It is not private for easier + testing. + ''' + return [ rr for (_, rr) in self.__deletions if rr.get_name() == name ] + + def __get_name_type_from_additions(self, name, rrtype): + ''' + Returns a list of the rdatas of the rrs in the additions queue that + have the given name and type + This method is protected; it is not meant to be called from anywhere + but the find_updated() method. It is not private for easier testing. + ''' + return [ rr for (_, rr) in self.__additions\ + if rr.get_name() == name and rr.get_type() == rrtype ] + + def __get_name_type_from_deletions(self, name, rrtype): + ''' + Returns a list of the rdatas of the rrs in the deletions queue that + have the given name and type + This method is protected; it is not meant to be called from anywhere + but the find_updated() method. It is not private for easier testing. + ''' + return [ rr.get_rdata()[0] for (_, rr) in self.__deletions\ + if rr.get_name() == name and rr.get_type() == rrtype ] + + def find_updated(self, name, rrtype): + ''' + Returns the result of find(), but with current updates applied, i.e. + as if this diff has been committed. Only performs additional + processing in the case find() returns SUCCESS, NXDOMAIN, or NXRRSET; + in all other cases, the results are returned directly. + Any RRs in the current deletions buffer are removed from the result, + and RRs in the current additions buffer are added to the result. 
+ If the result was SUCCESS, but every RR in it is removed due to + deletions, and there is nothing in the additions, the rcode is changed + to NXRRSET. + If the result was NXDOMAIN or NXRRSET, and there are rrs in the + additions buffer, the result is changed to SUCCESS. + ''' + if not self.__single_update_mode: + raise ValueError("find_updated() can only be used in " + + "single-update mode") + result, rrset, flags = self.find(name, rrtype) + + added_rrs = self.__get_name_type_from_additions(name, rrtype) + deleted_rrs = self.__get_name_type_from_deletions(name, rrtype) + + if result == ZoneFinder.SUCCESS: + new_rrset = isc.dns.RRset(name, self.__updater.get_class(), + rrtype, rrset.get_ttl()) + for rdata in rrset.get_rdata(): + if rdata not in deleted_rrs: + new_rrset.add_rdata(rdata) + # If all data has been deleted, and there is nothing to add + # we cannot really know whether it is NXDOMAIN or NXRRSET, + # NXRRSET seems safest (we could find out, but it would require + # another search on the name which is probably not worth the + # trouble + if new_rrset.get_rdata_count() == 0 and len(added_rrs) == 0: + result = ZoneFinder.NXRRSET + new_rrset = None + elif (result == ZoneFinder.NXDOMAIN or result == ZoneFinder.NXRRSET)\ + and len(added_rrs) > 0: + new_rrset = isc.dns.RRset(name, self.__updater.get_class(), + rrtype, added_rrs[0].get_ttl()) + # There was no data in the zone, but there is data now + result = ZoneFinder.SUCCESS + else: + # Can't reliably handle other cases, just return the original + # data + return result, rrset, flags + + for rr in added_rrs: + # Can only be 1-rr RRsets at this point + new_rrset.add_rdata(rr.get_rdata()[0]) + + return result, new_rrset, flags + + def find_all_updated(self, name): + ''' + Returns the result of find_all(), but with current updates applied, + i.e. as if this diff has been committed. 
Only performs additional + processing in the case find() returns SUCCESS or NXDOMAIN; + in all other cases, the results are returned directly. + Any RRs in the current deletions buffer are removed from the result, + and RRs in the current additions buffer are added to the result. + If the result was SUCCESS, but every RR in it is removed due to + deletions, and there is nothing in the additions, the rcode is changed + to NXDOMAIN. + If the result was NXDOMAIN, and there are rrs in the additions buffer, + the result is changed to SUCCESS. + ''' + if not self.__single_update_mode: + raise ValueError("find_all_updated can only be used in " + + "single-update mode") + result, rrsets, flags = self.find_all(name) + new_rrsets = [] + added_rrs = self.__get_name_from_additions(name) + if result == ZoneFinder.SUCCESS and\ + (flags & ZoneFinder.RESULT_WILDCARD == 0): + deleted_rrs = self.__get_name_from_deletions(name) + for rr in rrsets: + if rr not in deleted_rrs: + new_rrsets.append(rr) + if len(new_rrsets) == 0 and len(added_rrs) == 0: + result = ZoneFinder.NXDOMAIN + elif result == ZoneFinder.NXDOMAIN and\ + len(added_rrs) > 0: + result = ZoneFinder.SUCCESS + else: + # Can't reliably handle other cases, just return the original + # data + return result, rrsets, flags + for rr in added_rrs: + if rr.get_name() == name: + new_rrsets.append(rr) + return result, new_rrsets, flags diff --git a/src/lib/python/isc/xfrin/tests/Makefile.am b/src/lib/python/isc/xfrin/tests/Makefile.am index 416d62b45e..459efc3851 100644 --- a/src/lib/python/isc/xfrin/tests/Makefile.am +++ b/src/lib/python/isc/xfrin/tests/Makefile.am @@ -20,5 +20,6 @@ endif echo Running test: $$pytest ; \ $(LIBRARY_PATH_PLACEHOLDER) \ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \ + B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \ done diff --git a/src/lib/python/isc/xfrin/tests/diff_tests.py 
b/src/lib/python/isc/xfrin/tests/diff_tests.py index 7c1158af92..906406f26b 100644 --- a/src/lib/python/isc/xfrin/tests/diff_tests.py +++ b/src/lib/python/isc/xfrin/tests/diff_tests.py @@ -15,7 +15,7 @@ import isc.log import unittest -import isc.datasrc +from isc.datasrc import ZoneFinder from isc.dns import Name, RRset, RRClass, RRType, RRTTL, Rdata from isc.xfrin.diff import Diff, NoSuchZone @@ -48,6 +48,13 @@ class DiffTest(unittest.TestCase): self.__broken_called = False self.__warn_called = False self.__should_replace = False + self.__find_called = False + self.__find_name = None + self.__find_type = None + self.__find_options = None + self.__find_all_called = False + self.__find_all_name = None + self.__find_all_options = None # Some common values self.__rrclass = RRClass.IN() self.__type = RRType.A() @@ -70,6 +77,31 @@ class DiffTest(unittest.TestCase): self.__rrset_multi.add_rdata(Rdata(self.__type, self.__rrclass, '192.0.2.2')) + # Also create a few other (valid) rrsets + # A SOA record + self.__rrset_soa = RRset(Name('example.org.'), self.__rrclass, + RRType.SOA(), RRTTL(3600)) + self.__rrset_soa.add_rdata(Rdata(RRType.SOA(), self.__rrclass, + "ns1.example.org. " + + "admin.example.org. 
" + + "1233 3600 1800 2419200 7200")) + # A few single-rr rrsets that together would form a multi-rr rrset + self.__rrset3 = RRset(Name('c.example.org.'), self.__rrclass, + RRType.TXT(), self.__ttl) + self.__rrset3.add_rdata(Rdata(RRType.TXT(), self.__rrclass, "one")) + self.__rrset4 = RRset(Name('c.example.org.'), self.__rrclass, + RRType.TXT(), self.__ttl) + self.__rrset4.add_rdata(Rdata(RRType.TXT(), self.__rrclass, "two")) + self.__rrset5 = RRset(Name('c.example.org.'), self.__rrclass, + RRType.TXT(), self.__ttl) + self.__rrset5.add_rdata(Rdata(RRType.TXT(), self.__rrclass, "three")) + self.__rrset6 = RRset(Name('d.example.org.'), self.__rrclass, + RRType.A(), self.__ttl) + self.__rrset6.add_rdata(Rdata(RRType.A(), self.__rrclass, "192.0.2.1")) + self.__rrset7 = RRset(Name('d.example.org.'), self.__rrclass, + RRType.A(), self.__ttl) + self.__rrset7.add_rdata(Rdata(RRType.A(), self.__rrclass, "192.0.2.2")) + def __mock_compact(self): """ This can be put into the diff to hook into its compact method and see @@ -156,6 +188,23 @@ return self + def find(self, name, rrtype, options=None): + self.__find_called = True + self.__find_name = name + self.__find_type = rrtype + self.__find_options = options + # Doesn't really matter what is returned, as long + # as the test can check that it's passed along + return "find_return" + + def find_all(self, name, options=None): + self.__find_all_called = True + self.__find_all_name = name + self.__find_all_options = options + # Doesn't really matter what is returned, as long + # as the test can check that it's passed along + return "find_all_return" + + def test_create(self): """ This test the case when the diff is successfuly created. 
It just @@ -265,6 +314,9 @@ class DiffTest(unittest.TestCase): self.assertRaises(ValueError, diff.commit) self.assertRaises(ValueError, diff.add_data, self.__rrset2) self.assertRaises(ValueError, diff.delete_data, self.__rrset1) + self.assertRaises(ValueError, diff.find, Name('foo.example.org.'), + RRType.A()) + self.assertRaises(ValueError, diff.find_all, Name('foo.example.org.')) diff.apply = orig_apply self.assertRaises(ValueError, diff.apply) # This one does not state it should raise, so check it doesn't @@ -478,14 +530,563 @@ class DiffTest(unittest.TestCase): diff.compact() self.assertEqual(2, len(diff.get_buffer())) - def test_relpace(self): - """ + def test_replace(self): + ''' Test that when we want to replace the whole zone, it is propagated. - """ + ''' self.__should_replace = True diff = Diff(self, "example.org.", True) self.assertTrue(self.__updater_requested) + def test_get_buffer(self): + ''' + Test that the getters raise when used in the wrong mode + ''' + diff_multi = Diff(self, Name('example.org.'), single_update_mode=False) + self.assertRaises(ValueError, diff_multi.get_single_update_buffers) + self.assertEqual([], diff_multi.get_buffer()) + + diff_single = Diff(self, Name('example.org.'), single_update_mode=True) + self.assertRaises(ValueError, diff_single.get_buffer) + self.assertEqual(([], []), diff_single.get_single_update_buffers()) + + def test_finds_single(self): + ''' + Test that find_updated() and find_all_updated() can only be used + in single-update-mode. + ''' + diff_multi = Diff(self, Name('example.org.'), single_update_mode=False) + self.assertRaises(ValueError, diff_multi.find_updated, + Name('example.org.'), RRType.A()) + self.assertRaises(ValueError, diff_multi.find_all_updated, + Name('example.org.')) + + def test_single_update_mode(self): + ''' + Test single-update mode. In this mode, updates and deletes can + be done in any order, but there may only be one changeset. 
+ For both updates and deletes, exactly one SOA rr must be given, + and it must be the first change. + ''' + + # full rrset for A (to check compact()) + txt = RRset(Name('c.example.org.'), self.__rrclass, RRType.TXT(), + RRTTL(3600)) + txt.add_rdata(Rdata(txt.get_type(), txt.get_class(), "one")) + txt.add_rdata(Rdata(txt.get_type(), txt.get_class(), "two")) + txt.add_rdata(Rdata(txt.get_type(), txt.get_class(), "three")) + a = RRset(Name('d.example.org.'), self.__rrclass, RRType.A(), + RRTTL(3600)) + a.add_rdata(Rdata(a.get_type(), a.get_class(), "192.0.2.1")) + a.add_rdata(Rdata(a.get_type(), a.get_class(), "192.0.2.2")) + + diff = Diff(self, Name('example.org.'), single_update_mode=True) + + # adding a first should fail + self.assertRaises(ValueError, diff.add_data, a) + # But soa should work + diff.add_data(self.__rrset_soa) + # And then A should as well + diff.add_data(self.__rrset3) + diff.add_data(self.__rrset4) + diff.add_data(self.__rrset5) + # But another SOA should fail again + self.assertRaises(ValueError, diff.add_data, self.__rrset_soa) + + # Same for delete + self.assertRaises(ValueError, diff.delete_data, self.__rrset6) + diff.delete_data(self.__rrset_soa) + diff.delete_data(self.__rrset6) + diff.delete_data(self.__rrset7) + self.assertRaises(ValueError, diff.delete_data, self.__rrset_soa) + + # Not compacted yet, so the buffers should be as we + # filled them + (delbuf, addbuf) = diff.get_single_update_buffers() + self.assertEqual([('delete', self.__rrset_soa), + ('delete', self.__rrset6), + ('delete', self.__rrset7)], delbuf) + self.assertEqual([('add', self.__rrset_soa), + ('add', self.__rrset3), + ('add', self.__rrset4), + ('add', self.__rrset5)], addbuf) + + # Compact should compact the A records in both buffers + diff.compact() + (delbuf, addbuf) = diff.get_single_update_buffers() + # need rrset equality again :/ + self.assertEqual(2, len(delbuf)) + self.assertEqual(2, len(delbuf[0])) + self.assertEqual('delete', delbuf[0][0]) + 
self.assertEqual(self.__rrset_soa.to_text(), delbuf[0][1].to_text()) + self.assertEqual(2, len(delbuf[1])) + self.assertEqual('delete', delbuf[1][0]) + self.assertEqual(a.to_text(), delbuf[1][1].to_text()) + + self.assertEqual(2, len(addbuf)) + self.assertEqual(2, len(addbuf[0])) + self.assertEqual('add', addbuf[0][0]) + self.assertEqual(self.__rrset_soa.to_text(), addbuf[0][1].to_text()) + self.assertEqual(2, len(addbuf[1])) + self.assertEqual('add', addbuf[1][0]) + self.assertEqual(txt.to_text(), addbuf[1][1].to_text()) + + # Apply should reset the buffers + diff.apply() + (delbuf, addbuf) = diff.get_single_update_buffers() + self.assertEqual([], delbuf) + self.assertEqual([], addbuf) + + # Now the change has been applied, and the buffers are cleared, + # Adding non-SOA records should fail again. + self.assertRaises(ValueError, diff.add_data, a) + self.assertRaises(ValueError, diff.delete_data, a) + + def test_add_delete_same(self): + ''' + Test that if a record is added, then deleted, it is not added to + both buffers, but remove from the addition, and vice versa + ''' + diff = Diff(self, Name('example.org.'), single_update_mode=True) + # Need SOA records first + diff.delete_data(self.__rrset_soa) + diff.add_data(self.__rrset_soa) + + deletions, additions = diff.get_single_update_buffers() + self.assertEqual(1, len(deletions)) + self.assertEqual(1, len(additions)) + + diff.add_data(self.__rrset1) + deletions, additions = diff.get_single_update_buffers() + self.assertEqual(1, len(deletions)) + self.assertEqual(2, len(additions)) + + diff.delete_data(self.__rrset1) + deletions, additions = diff.get_single_update_buffers() + self.assertEqual(1, len(deletions)) + self.assertEqual(1, len(additions)) + + diff.delete_data(self.__rrset2) + deletions, additions = diff.get_single_update_buffers() + self.assertEqual(2, len(deletions)) + self.assertEqual(1, len(additions)) + + diff.add_data(self.__rrset2) + deletions, additions = diff.get_single_update_buffers() + 
self.assertEqual(1, len(deletions)) + self.assertEqual(1, len(additions)) + + def test_find(self): + diff = Diff(self, Name('example.org.')) + name = Name('www.example.org.') + rrtype = RRType.A() + + self.assertFalse(self.__find_called) + self.assertEqual(None, self.__find_name) + self.assertEqual(None, self.__find_type) + self.assertEqual(None, self.__find_options) + + self.assertEqual("find_return", diff.find(name, rrtype)) + + self.assertTrue(self.__find_called) + self.assertEqual(name, self.__find_name) + self.assertEqual(rrtype, self.__find_type) + self.assertEqual(ZoneFinder.NO_WILDCARD | ZoneFinder.FIND_GLUE_OK, + self.__find_options) + + def test_find_options(self): + diff = Diff(self, Name('example.org.')) + name = Name('foo.example.org.') + rrtype = RRType.TXT() + options = ZoneFinder.NO_WILDCARD + + self.assertEqual("find_return", diff.find(name, rrtype, options)) + + self.assertTrue(self.__find_called) + self.assertEqual(name, self.__find_name) + self.assertEqual(rrtype, self.__find_type) + self.assertEqual(options, self.__find_options) + + def test_find_all(self): + diff = Diff(self, Name('example.org.')) + name = Name('www.example.org.') + + self.assertFalse(self.__find_all_called) + self.assertEqual(None, self.__find_all_name) + self.assertEqual(None, self.__find_all_options) + + self.assertEqual("find_all_return", diff.find_all(name)) + + self.assertTrue(self.__find_all_called) + self.assertEqual(name, self.__find_all_name) + self.assertEqual(ZoneFinder.NO_WILDCARD | ZoneFinder.FIND_GLUE_OK, + self.__find_all_options) + + def test_find_all_options(self): + diff = Diff(self, Name('example.org.')) + name = Name('www.example.org.') + options = isc.datasrc.ZoneFinder.NO_WILDCARD + + self.assertFalse(self.__find_all_called) + self.assertEqual(None, self.__find_all_name) + self.assertEqual(None, self.__find_all_options) + + self.assertEqual("find_all_return", diff.find_all(name, options)) + + self.assertTrue(self.__find_all_called) + 
self.assertEqual(name, self.__find_all_name) + self.assertEqual(options, self.__find_all_options) + + def __common_remove_rr_from_buffer(self, diff, add_method, remove_method, + op_str, buf_nr): + add_method(self.__rrset_soa) + add_method(self.__rrset2) + add_method(self.__rrset3) + add_method(self.__rrset4) + + # sanity check + buf = diff.get_single_update_buffers()[buf_nr] + expected = [ (op_str, str(rr)) for rr in [ self.__rrset_soa, + self.__rrset2, + self.__rrset3, + self.__rrset4 ] ] + result = [ (op, str(rr)) for (op, rr) in buf ] + self.assertEqual(expected, result) + + # remove one + self.assertTrue(remove_method(self.__rrset2)) + buf = diff.get_single_update_buffers()[buf_nr] + expected = [ (op_str, str(rr)) for rr in [ self.__rrset_soa, + self.__rrset3, + self.__rrset4 ] ] + result = [ (op, str(rr)) for (op, rr) in buf ] + self.assertEqual(expected, result) + + # SOA should not be removed + self.assertFalse(remove_method(self.__rrset_soa)) + buf = diff.get_single_update_buffers()[buf_nr] + expected = [ (op_str, str(rr)) for rr in [ self.__rrset_soa, + self.__rrset3, + self.__rrset4 ] ] + result = [ (op, str(rr)) for (op, rr) in buf ] + self.assertEqual(expected, result) + + # remove another + self.assertTrue(remove_method(self.__rrset4)) + buf = diff.get_single_update_buffers()[buf_nr] + expected = [ (op_str, str(rr)) for rr in [ self.__rrset_soa, + self.__rrset3 ] ] + result = [ (op, str(rr)) for (op, rr) in buf ] + self.assertEqual(expected, result) + + # remove nonexistent should return False + self.assertFalse(remove_method(self.__rrset4)) + buf = diff.get_single_update_buffers()[buf_nr] + expected = [ (op_str, str(rr)) for rr in [ self.__rrset_soa, + self.__rrset3 ] ] + result = [ (op, str(rr)) for (op, rr) in buf ] + self.assertEqual(expected, result) + + def test_remove_rr_from_additions(self): + diff = Diff(self, Name('example.org'), single_update_mode=True) + self.__common_remove_rr_from_buffer(diff, diff.add_data, + 
diff._remove_rr_from_additions, + 'add', 1) + + def test_remove_rr_from_deletions(self): + diff = Diff(self, Name('example.org'), single_update_mode=True) + self.__common_remove_rr_from_buffer(diff, diff.delete_data, + diff._remove_rr_from_deletions, + 'delete', 0) + + def __create_find(self, result, rrset, flags): + ''' + Overwrites the local find() method with a method that returns + the tuple (result, rrset, flags) + ''' + def new_find(name, rrtype, fflags): + return (result, rrset, flags) + self.find = new_find + + def __create_find_all(self, result, rrsets, flags): + ''' + Overwrites the local find() method with a method that returns + the tuple (result, rrsets, flags) + ''' + def new_find_all(name, fflags): + return (result, rrsets, flags) + self.find_all = new_find_all + + def __check_find_call(self, method, query_rrset, expected_rcode, + expected_rdatas=None): + ''' + Helper for find tests; calls the given method with the name and + type of the given rrset. Checks for the given rcode. + If expected_rdatas is not none, the result name, and type are + checked to match the given rrset ones, and the rdatas are checked + to be equal. + The given method must have the same arguments and return type + as find() + ''' + result, rrset, _ = method(query_rrset.get_name(), + query_rrset.get_type()) + self.assertEqual(expected_rcode, result) + if expected_rdatas is not None: + self.assertEqual(query_rrset.get_name(), rrset.get_name()) + self.assertEqual(query_rrset.get_type(), rrset.get_type()) + if expected_rdatas is not None: + self.assertEqual(expected_rdatas, rrset.get_rdata()) + else: + self.assertEqual(None, rrset) + + def __check_find_all_call(self, method, query_rrset, expected_rcode, + expected_rrs=[]): + ''' + Helper for find tests; calls the given method with the name and + type of the given rrset. Checks for the given rcode. 
+ If expected_rdatas is not none, the result name, and type are + checked to match the given rrset ones, and the rdatas are checked + to be equal. + The given method must have the same arguments and return type + as find() + ''' + result, rrsets, _ = method(query_rrset.get_name()) + self.assertEqual(expected_rcode, result) + # We have no real equality function for rrsets, but since + # the rrsets in question are themselves returns, pointer equality + # works as well + self.assertEqual(expected_rrs, rrsets) + + def test_find_updated_existing_data(self): + ''' + Tests whether existent data is updated with the additions and + deletions from the Diff + ''' + diff = Diff(self, Name('example.org'), single_update_mode=True) + diff.add_data(self.__rrset_soa) + diff.delete_data(self.__rrset_soa) + + # override the actual find method + self.__create_find(ZoneFinder.SUCCESS, self.__rrset3, 0) + + # sanity check + self.__check_find_call(diff.find_updated, self.__rrset3, + ZoneFinder.SUCCESS, self.__rrset3.get_rdata()) + + # check that normal find also returns the original data + self.__check_find_call(diff.find, self.__rrset3, + ZoneFinder.SUCCESS, self.__rrset3.get_rdata()) + + # Adding another should have it returned in the find_updated + diff.add_data(self.__rrset4) + self.__check_find_call(diff.find_updated, self.__rrset3, + ZoneFinder.SUCCESS, self.__rrset3.get_rdata() + + self.__rrset4.get_rdata()) + + # check that normal find still returns the original data + self.__check_find_call(diff.find, self.__rrset3, + ZoneFinder.SUCCESS, self.__rrset3.get_rdata()) + + # Adding a different type should have no effect + diff.add_data(self.__rrset2) + self.__check_find_call(diff.find_updated, self.__rrset3, + ZoneFinder.SUCCESS, self.__rrset3.get_rdata() + + self.__rrset4.get_rdata()) + + # check that normal find still returns the original data + self.__check_find_call(diff.find, self.__rrset3, + ZoneFinder.SUCCESS, self.__rrset3.get_rdata()) + + # Deleting 3 now should result in 
only 4 being updated + diff.delete_data(self.__rrset3) + self.__check_find_call(diff.find_updated, self.__rrset3, + ZoneFinder.SUCCESS, self.__rrset4.get_rdata()) + + # check that normal find still returns the original data + self.__check_find_call(diff.find, self.__rrset3, + ZoneFinder.SUCCESS, self.__rrset3.get_rdata()) + + # Deleting 4 now should result in empty rrset + diff.delete_data(self.__rrset4) + self.__check_find_call(diff.find_updated, self.__rrset3, + ZoneFinder.NXRRSET) + + # check that normal find still returns the original data + self.__check_find_call(diff.find, self.__rrset3, + ZoneFinder.SUCCESS, self.__rrset3.get_rdata()) + + def test_find_updated_nonexistent_data(self): + ''' + Test whether added data for a query that would originally result + in NXDOMAIN works + ''' + diff = Diff(self, Name('example.org'), single_update_mode=True) + diff.add_data(self.__rrset_soa) + diff.delete_data(self.__rrset_soa) + + # override the actual find method + self.__create_find(ZoneFinder.NXDOMAIN, None, 0) + + # Sanity check + self.__check_find_call(diff.find_updated, self.__rrset3, + ZoneFinder.NXDOMAIN) + self.__check_find_call(diff.find, self.__rrset3, + ZoneFinder.NXDOMAIN) + + # Add data and see it is returned + diff.add_data(self.__rrset3) + self.__check_find_call(diff.find_updated, self.__rrset3, + ZoneFinder.SUCCESS, self.__rrset3.get_rdata()) + self.__check_find_call(diff.find, self.__rrset3, + ZoneFinder.NXDOMAIN) + + # Add unrelated data, result should be the same + diff.add_data(self.__rrset2) + self.__check_find_call(diff.find_updated, self.__rrset3, + ZoneFinder.SUCCESS, self.__rrset3.get_rdata()) + self.__check_find_call(diff.find, self.__rrset3, + ZoneFinder.NXDOMAIN) + + # Remove, result should now be NXDOMAIN again + diff.delete_data(self.__rrset3) + result, rrset, _ = diff.find_updated(self.__rrset3.get_name(), + self.__rrset3.get_type()) + self.__check_find_call(diff.find_updated, self.__rrset3, + ZoneFinder.NXDOMAIN) + 
self.__check_find_call(diff.find, self.__rrset3, + ZoneFinder.NXDOMAIN) + + def test_find_updated_other(self): + ''' + Test that any other ZoneFinder.result code is directly + passed on. + ''' + diff = Diff(self, Name('example.org'), single_update_mode=True) + + # Add and delete some data to make sure it's not used + diff.add_data(self.__rrset_soa) + diff.add_data(self.__rrset3) + diff.delete_data(self.__rrset_soa) + diff.delete_data(self.__rrset2) + + for rcode in [ ZoneFinder.DELEGATION, + ZoneFinder.CNAME, + ZoneFinder.DNAME ]: + # override the actual find method + self.__create_find(rcode, None, 0) + self.__check_find_call(diff.find, self.__rrset3, rcode) + self.__check_find_call(diff.find_updated, self.__rrset3, rcode) + + def test_find_all_existing_data(self): + diff = Diff(self, Name('example.org'), single_update_mode=True) + diff.add_data(self.__rrset_soa) + diff.delete_data(self.__rrset_soa) + + # override the actual find method + self.__create_find_all(ZoneFinder.SUCCESS, [ self.__rrset3 ], 0) + + # Sanity check + result, rrsets, _ = diff.find_all_updated(self.__rrset3.get_name()) + self.assertEqual(ZoneFinder.SUCCESS, result) + self.assertEqual([self.__rrset3], rrsets) + + self.__check_find_all_call(diff.find_all_updated, self.__rrset3, + ZoneFinder.SUCCESS, [self.__rrset3]) + self.__check_find_all_call(diff.find_all, self.__rrset3, + ZoneFinder.SUCCESS, [self.__rrset3]) + + # Add a second rr with different type at same name + add_rrset = RRset(self.__rrset3.get_name(), self.__rrclass, + RRType.A(), self.__ttl) + add_rrset.add_rdata(Rdata(RRType.A(), self.__rrclass, "192.0.2.2")) + diff.add_data(add_rrset) + + self.__check_find_all_call(diff.find_all_updated, self.__rrset3, + ZoneFinder.SUCCESS, + [self.__rrset3, add_rrset]) + self.__check_find_all_call(diff.find_all, self.__rrset3, + ZoneFinder.SUCCESS, [self.__rrset3]) + + # Remove original one + diff.delete_data(self.__rrset3) + self.__check_find_all_call(diff.find_all_updated, self.__rrset3, + 
ZoneFinder.SUCCESS, [add_rrset]) + self.__check_find_all_call(diff.find_all, self.__rrset3, + ZoneFinder.SUCCESS, [self.__rrset3]) + + # And remove new one, result should then become NXDOMAIN + diff.delete_data(add_rrset) + result, rrsets, _ = diff.find_all_updated(self.__rrset3.get_name()) + + self.assertEqual(ZoneFinder.NXDOMAIN, result) + self.assertEqual([ ], rrsets) + self.__check_find_all_call(diff.find_all_updated, self.__rrset3, + ZoneFinder.NXDOMAIN) + self.__check_find_all_call(diff.find_all, self.__rrset3, + ZoneFinder.SUCCESS, [self.__rrset3]) + + def test_find_all_nonexistent_data(self): + diff = Diff(self, Name('example.org'), single_update_mode=True) + diff.add_data(self.__rrset_soa) + diff.delete_data(self.__rrset_soa) + + self.__create_find_all(ZoneFinder.NXDOMAIN, [], 0) + + # Sanity check + self.__check_find_all_call(diff.find_all_updated, self.__rrset2, + ZoneFinder.NXDOMAIN) + self.__check_find_all_call(diff.find_all, self.__rrset2, + ZoneFinder.NXDOMAIN) + + # Adding data should change the result + diff.add_data(self.__rrset2) + self.__check_find_all_call(diff.find_all_updated, self.__rrset2, + ZoneFinder.SUCCESS, [ self.__rrset2 ]) + self.__check_find_all_call(diff.find_all, self.__rrset2, + ZoneFinder.NXDOMAIN) + + # Adding data at other name should not + diff.add_data(self.__rrset3) + self.__check_find_all_call(diff.find_all_updated, self.__rrset2, + ZoneFinder.SUCCESS, [ self.__rrset2 ]) + self.__check_find_all_call(diff.find_all, self.__rrset2, + ZoneFinder.NXDOMAIN) + + # Deleting it should revert to original + diff.delete_data(self.__rrset2) + self.__check_find_all_call(diff.find_all_updated, self.__rrset2, + ZoneFinder.NXDOMAIN) + self.__check_find_all_call(diff.find_all, self.__rrset2, + ZoneFinder.NXDOMAIN) + + def test_find_all_other_results(self): + ''' + Any result code other than SUCCESS and NXDOMAIN should cause + the results to be passed on directly + ''' + diff = Diff(self, Name('example.org'), single_update_mode=True) + + # 
Add and delete some data to make sure it's not used + diff.add_data(self.__rrset_soa) + diff.add_data(self.__rrset3) + diff.delete_data(self.__rrset_soa) + diff.delete_data(self.__rrset2) + + for rcode in [ ZoneFinder.NXRRSET, + ZoneFinder.DELEGATION, + ZoneFinder.CNAME, + ZoneFinder.DNAME ]: + # override the actual find method + self.__create_find_all(rcode, [], 0) + self.__check_find_all_call(diff.find_all_updated, self.__rrset2, + rcode) + self.__check_find_all_call(diff.find_all_updated, self.__rrset3, + rcode) + self.__check_find_all_call(diff.find_all, self.__rrset2, + rcode) + self.__check_find_all_call(diff.find_all, self.__rrset3, + rcode) + if __name__ == "__main__": isc.log.init("bind10") isc.log.resetUnitTestRootLogger() diff --git a/src/lib/resolve/resolve_messages.mes b/src/lib/resolve/resolve_messages.mes index b59fd8cce5..1df544a02e 100644 --- a/src/lib/resolve/resolve_messages.mes +++ b/src/lib/resolve/resolve_messages.mes @@ -84,11 +84,11 @@ the specified name contained multiple RRsets in the answer and not all were of the same class. This is a violation of the standard and so a SERVFAIL will be returned. -% RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1> -A debug message, this indicates that a response was received for the specified -query and was categorized as a referral. However, the received message did -not contain any NS RRsets. This may indicate a programming error in the -response classification code. +% RESLIB_NOTSINGLE_RESPONSE response to query for <%1> was not a response +A debug message, the response to the specified query from an upstream +nameserver was a CNAME that had mutiple RRs in the RRset. This is +an invalid response according to the standards so a SERVFAIL will be +returned to the system making the original query. 
% RESLIB_NOT_ONE_QNAME_RESPONSE not one question in response to query for <%1> A debug message, the response to the specified query from an upstream @@ -102,16 +102,21 @@ nameserver (as identified by the ID of the response) did not have the QR bit set (thus indicating that the packet was a query, not a response). A SERVFAIL will be returned to the system making the original query. -% RESLIB_NOTSINGLE_RESPONSE response to query for <%1> was not a response -A debug message, the response to the specified query from an upstream -nameserver was a CNAME that had mutiple RRs in the RRset. This is -an invalid response according to the standards so a SERVFAIL will be -returned to the system making the original query. +% RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1> +A debug message, this indicates that a response was received for the specified +query and was categorized as a referral. However, the received message did +not contain any NS RRsets. This may indicate a programming error in the +response classification code. % RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS A debug message, the RunningQuery object is querying the NSAS for the nameservers for the specified zone. +% RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1> +A debug message recording that either a NXDOMAIN or an NXRRSET response has +been received to an upstream query for the specified question. Previous debug +messages will have indicated the server to which the question was sent. + % RESLIB_OPCODE_RESPONSE response to query for <%1> did not have query opcode A debug message, the response to the specified query from an upstream nameserver was a response that did not have the opcode set to that of @@ -119,11 +124,6 @@ a query. According to the standards, this is an invalid response to the query that was made, so a SERVFAIL will be returned to the system making the original query. 
-% RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1> -A debug message recording that either a NXDOMAIN or an NXRRSET response has -been received to an upstream query for the specified question. Previous debug -messages will have indicated the server to which the question was sent. - % RESLIB_PROTOCOL protocol error in answer for %1: %3 A debug message indicating that a protocol error was received. As there are no retries left, an error will be reported. diff --git a/src/lib/resolve/tests/Makefile.am b/src/lib/resolve/tests/Makefile.am index e7c59f45a1..4ee38118d5 100644 --- a/src/lib/resolve/tests/Makefile.am +++ b/src/lib/resolve/tests/Makefile.am @@ -8,6 +8,9 @@ endif CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = if HAVE_GTEST TESTS += run_unittests diff --git a/src/lib/resolve/tests/recursive_query_unittest.cc b/src/lib/resolve/tests/recursive_query_unittest.cc index a8b805712e..02721f1d5d 100644 --- a/src/lib/resolve/tests/recursive_query_unittest.cc +++ b/src/lib/resolve/tests/recursive_query_unittest.cc @@ -175,6 +175,10 @@ protected: resolver_.reset(); } + void SetUp() { + callback_.reset(new ASIOCallBack(this)); + } + // Send a test UDP packet to a mock server void sendUDP(const int family) { ScopedAddrInfo sai(resolveAddress(family, IPPROTO_UDP, false)); @@ -190,7 +194,7 @@ protected: if (cc != sizeof(test_data)) { isc_throw(IOError, "unexpected sendto result: " << cc); } - io_service_->run(); + io_service_.run(); } // Send a test TCP packet to a mock server @@ -210,7 +214,7 @@ protected: if (cc != sizeof(test_data)) { isc_throw(IOError, "unexpected send result: " << cc); } - io_service_->run(); + io_service_.run(); } // Receive a UDP packet from a mock server; used for testing @@ -233,10 +237,10 @@ protected: // The IO service queue should have a RecursiveQuery object scheduled // to run at this point. This call will cause it to begin an // async send, then return. 
- io_service_->run_one(); + io_service_.run_one(); // ... and this one will block until the send has completed - io_service_->run_one(); + io_service_.run_one(); // Now we attempt to recv() whatever was sent. // XXX: there's no guarantee the receiving socket can immediately get @@ -326,9 +330,7 @@ protected: // Set up empty DNS Service // Set up an IO Service queue without any addresses void setDNSService() { - io_service_.reset(new IOService()); - callback_.reset(new ASIOCallBack(this)); - dns_service_.reset(new DNSService(*io_service_, callback_.get(), NULL, + dns_service_.reset(new DNSService(io_service_, callback_.get(), NULL, NULL)); } @@ -491,12 +493,10 @@ private: static_cast(io_message.getData()), static_cast(io_message.getData()) + io_message.getDataSize()); - io_service_->stop(); + io_service_.stop(); } protected: - // We use a pointer for io_service_, because for some tests we - // need to recreate a new one within one onstance of this class - scoped_ptr io_service_; + IOService io_service_; scoped_ptr dns_service_; scoped_ptr nsas_; isc::cache::ResolverCache cache_; @@ -513,24 +513,26 @@ RecursiveQueryTest::RecursiveQueryTest() : dns_service_(NULL), callback_(NULL), callback_protocol_(0), callback_native_(-1), resolver_(new isc::util::unittests::TestResolver()) { - io_service_.reset(new IOService()); - setDNSService(true, true); nsas_.reset(new isc::nsas::NameserverAddressStore(resolver_)); } TEST_F(RecursiveQueryTest, v6UDPSend) { + setDNSService(true, true); doTest(AF_INET6, IPPROTO_UDP); } TEST_F(RecursiveQueryTest, v6TCPSend) { + setDNSService(true, true); doTest(AF_INET6, IPPROTO_TCP); } TEST_F(RecursiveQueryTest, v4UDPSend) { + setDNSService(true, true); doTest(AF_INET, IPPROTO_UDP); } TEST_F(RecursiveQueryTest, v4TCPSend) { + setDNSService(true, true); doTest(AF_INET, IPPROTO_TCP); } @@ -643,7 +645,7 @@ TEST_F(RecursiveQueryTest, forwarderSend) { // to the same port as the actual server uint16_t port = boost::lexical_cast(TEST_CLIENT_PORT); - 
MockServer server(*io_service_); + MockServer server(io_service_); RecursiveQuery rq(*dns_service_, *nsas_, cache_, singleAddress(TEST_IPV4_ADDR, port), @@ -766,7 +768,7 @@ TEST_F(RecursiveQueryTest, forwardQueryTimeout) { // Prepare the server bool done(true); - MockServerStop server(*io_service_, &done); + MockServerStop server(io_service_, &done); // Do the answer const uint16_t port = boost::lexical_cast(TEST_CLIENT_PORT); @@ -784,7 +786,7 @@ TEST_F(RecursiveQueryTest, forwardQueryTimeout) { boost::shared_ptr callback(new MockResolverCallback(&server)); query.forward(ConstMessagePtr(&query_message), answer, buffer, &server, callback); // Run the test - io_service_->run(); + io_service_.run(); EXPECT_EQ(callback->result, MockResolverCallback::FAILURE); } @@ -800,7 +802,7 @@ TEST_F(RecursiveQueryTest, forwardClientTimeout) { // Prepare the server bool done1(true); - MockServerStop server(*io_service_, &done1); + MockServerStop server(io_service_, &done1); MessagePtr answer(new Message(Message::RENDER)); @@ -819,7 +821,7 @@ TEST_F(RecursiveQueryTest, forwardClientTimeout) { boost::shared_ptr callback(new MockResolverCallback(&server)); query.forward(ConstMessagePtr(&query_message), answer, buffer, &server, callback); // Run the test - io_service_->run(); + io_service_.run(); EXPECT_EQ(callback->result, MockResolverCallback::FAILURE); } @@ -834,7 +836,7 @@ TEST_F(RecursiveQueryTest, forwardLookupTimeout) { // Prepare the server bool done(true); - MockServerStop server(*io_service_, &done); + MockServerStop server(io_service_, &done); MessagePtr answer(new Message(Message::RENDER)); @@ -854,7 +856,7 @@ TEST_F(RecursiveQueryTest, forwardLookupTimeout) { boost::shared_ptr callback(new MockResolverCallback(&server)); query.forward(ConstMessagePtr(&query_message), answer, buffer, &server, callback); // Run the test - io_service_->run(); + io_service_.run(); EXPECT_EQ(callback->result, MockResolverCallback::FAILURE); } @@ -869,7 +871,7 @@ TEST_F(RecursiveQueryTest, 
lowtimeouts) { // Prepare the server bool done(true); - MockServerStop server(*io_service_, &done); + MockServerStop server(io_service_, &done); MessagePtr answer(new Message(Message::RENDER)); @@ -889,7 +891,7 @@ TEST_F(RecursiveQueryTest, lowtimeouts) { boost::shared_ptr callback(new MockResolverCallback(&server)); query.forward(ConstMessagePtr(&query_message), answer, buffer, &server, callback); // Run the test - io_service_->run(); + io_service_.run(); EXPECT_EQ(callback->result, MockResolverCallback::FAILURE); } @@ -903,7 +905,7 @@ TEST_F(RecursiveQueryTest, DISABLED_recursiveSendOk) { setDNSService(true, false); bool done; - MockServerStop server(*io_service_, &done); + MockServerStop server(io_service_, &done); vector > empty_vector; RecursiveQuery rq(*dns_service_, *nsas_, cache_, empty_vector, empty_vector, 10000, 0); @@ -912,7 +914,7 @@ TEST_F(RecursiveQueryTest, DISABLED_recursiveSendOk) { OutputBufferPtr buffer(new OutputBuffer(0)); MessagePtr answer(new Message(Message::RENDER)); rq.resolve(q, answer, buffer, &server); - io_service_->run(); + io_service_.run(); // Check that the answer we got matches the one we wanted EXPECT_EQ(Rcode::NOERROR(), answer->getRcode()); @@ -929,7 +931,7 @@ TEST_F(RecursiveQueryTest, DISABLED_recursiveSendNXDOMAIN) { setDNSService(true, false); bool done; - MockServerStop server(*io_service_, &done); + MockServerStop server(io_service_, &done); vector > empty_vector; RecursiveQuery rq(*dns_service_, *nsas_, cache_, empty_vector, empty_vector, 10000, 0); @@ -938,7 +940,7 @@ TEST_F(RecursiveQueryTest, DISABLED_recursiveSendNXDOMAIN) { OutputBufferPtr buffer(new OutputBuffer(0)); MessagePtr answer(new Message(Message::RENDER)); rq.resolve(q, answer, buffer, &server); - io_service_->run(); + io_service_.run(); // Check that the answer we got matches the one we wanted EXPECT_EQ(Rcode::NXDOMAIN(), answer->getRcode()); @@ -1012,7 +1014,7 @@ TEST_F(RecursiveQueryTest, CachedNS) { OutputBufferPtr buffer(new OutputBuffer(0)); 
MessagePtr answer(new Message(Message::RENDER)); // The server is here so we have something to pass there - MockServer server(*io_service_); + MockServer server(io_service_); rq.resolve(q, answer, buffer, &server); // We don't need to run the service in this test. We are interested only // in the place it starts resolving at diff --git a/src/lib/server_common/server_common_messages.mes b/src/lib/server_common/server_common_messages.mes index 08179281bf..0e4efa5197 100644 --- a/src/lib/server_common/server_common_messages.mes +++ b/src/lib/server_common/server_common_messages.mes @@ -89,11 +89,6 @@ This is probably a bug in the code, but it could be caused by other unusual conditions (like insufficient memory, deleted socket file used for communication). -% SRVCOMM_UNKNOWN_EXCEPTION_ALLOC unknown exception when allocating a socket -The situation is the same as in the SRVCOMM_EXCEPTION_ALLOC case, but further -details about the error are unknown, because it was signaled by throwing -something not being an exception. This is definitely a bug. - % SRVCOMM_KEYS_DEINIT deinitializing TSIG keyring Debug message indicating that the server is deinitializing the TSIG keyring. @@ -112,3 +107,8 @@ specification is outside the valid range of 0 to 65535. % SRVCOMM_SET_LISTEN setting addresses to listen to Debug message, noting that the server is about to start listening on a different set of IP addresses and ports than before. + +% SRVCOMM_UNKNOWN_EXCEPTION_ALLOC unknown exception when allocating a socket +The situation is the same as in the SRVCOMM_EXCEPTION_ALLOC case, but further +details about the error are unknown, because it was signaled by throwing +something not being an exception. This is definitely a bug. 
diff --git a/src/lib/server_common/tests/Makefile.am b/src/lib/server_common/tests/Makefile.am index b059d4766b..366c68e5fd 100644 --- a/src/lib/server_common/tests/Makefile.am +++ b/src/lib/server_common/tests/Makefile.am @@ -22,6 +22,9 @@ endif CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = if HAVE_GTEST TESTS += run_unittests diff --git a/src/lib/server_common/tests/socket_requestor_test.cc b/src/lib/server_common/tests/socket_requestor_test.cc index 829b6d974d..9adf84d076 100644 --- a/src/lib/server_common/tests/socket_requestor_test.cc +++ b/src/lib/server_common/tests/socket_requestor_test.cc @@ -144,6 +144,7 @@ createExpectedRequest(const std::string& address, packet->add(Element::create("Boss")); packet->add(Element::create("*")); packet->add(createCommand("get_socket", command_args)); + packet->add(Element::create(-1)); return (packet); } @@ -284,6 +285,7 @@ createExpectedRelease(const std::string& token) { packet->add(Element::create("Boss")); packet->add(Element::create("*")); packet->add(createCommand("drop_socket", command_args)); + packet->add(Element::create(-1)); return (packet); } diff --git a/src/lib/statistics/counter.cc b/src/lib/statistics/counter.cc index 9cb1a6f4a7..53dc58e862 100644 --- a/src/lib/statistics/counter.cc +++ b/src/lib/statistics/counter.cc @@ -1,3 +1,17 @@ +// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. 
IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + #include #include diff --git a/src/lib/statistics/counter.h b/src/lib/statistics/counter.h index b077616fb6..9e467ce9c3 100644 --- a/src/lib/statistics/counter.h +++ b/src/lib/statistics/counter.h @@ -1,3 +1,17 @@ +// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + #ifndef __COUNTER_H #define __COUNTER_H 1 diff --git a/src/lib/statistics/counter_dict.cc b/src/lib/statistics/counter_dict.cc index da6aacebe4..55353b29b9 100644 --- a/src/lib/statistics/counter_dict.cc +++ b/src/lib/statistics/counter_dict.cc @@ -1,3 +1,17 @@ +// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. 
+// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + #include #include #include diff --git a/src/lib/statistics/counter_dict.h b/src/lib/statistics/counter_dict.h index 4a4cab179b..e32211990e 100644 --- a/src/lib/statistics/counter_dict.h +++ b/src/lib/statistics/counter_dict.h @@ -1,3 +1,17 @@ +// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. 
+ #ifndef __COUNTER_DICT_H #define __COUNTER_DICT_H 1 diff --git a/src/lib/statistics/tests/Makefile.am b/src/lib/statistics/tests/Makefile.am index d66acdf857..c0f0295473 100644 --- a/src/lib/statistics/tests/Makefile.am +++ b/src/lib/statistics/tests/Makefile.am @@ -15,6 +15,9 @@ AM_CXXFLAGS += $(WARNING_NO_MISSING_FIELD_INITIALIZERS_CFLAG) CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = if HAVE_GTEST TESTS += run_unittests diff --git a/src/lib/testutils/socket_request.h b/src/lib/testutils/socket_request.h index 5c76d3064a..0ae15f3310 100644 --- a/src/lib/testutils/socket_request.h +++ b/src/lib/testutils/socket_request.h @@ -55,7 +55,7 @@ public: /// \param expect_port The port which is expected to be requested. If /// the application requests a different port, it is considered /// a failure. - /// \param expeted_app The share name for which all the requests should + /// \param expected_app The share name for which all the requests should /// be made. 
This is not the usual app_name - the requestSocket does /// not fall back to this value if its share_name is left empty, if /// you want to check the code relies on the requestor to use the diff --git a/src/lib/testutils/srv_test.cc b/src/lib/testutils/srv_test.cc index 03aec01cdc..d686da69a8 100644 --- a/src/lib/testutils/srv_test.cc +++ b/src/lib/testutils/srv_test.cc @@ -34,6 +34,7 @@ using namespace isc::asiolink; namespace isc { namespace testutils { const char* const DEFAULT_REMOTE_ADDRESS = "192.0.2.1"; +const uint16_t DEFAULT_REMOTE_PORT = 53210; SrvTestBase::SrvTestBase() : request_message(Message::RENDER), parse_message(new Message(Message::PARSE)), @@ -62,7 +63,8 @@ SrvTestBase::createDataFromFile(const char* const datafile, delete endpoint; endpoint = IOEndpoint::create(protocol, - IOAddress(DEFAULT_REMOTE_ADDRESS), 53210); + IOAddress(DEFAULT_REMOTE_ADDRESS), + DEFAULT_REMOTE_PORT); UnitTestUtil::readWireData(datafile, data); io_sock = (protocol == IPPROTO_UDP) ? &IOSocket::getDummyUDPSocket() : &IOSocket::getDummyTCPSocket(); @@ -71,7 +73,9 @@ SrvTestBase::createDataFromFile(const char* const datafile, void SrvTestBase::createRequestPacket(Message& message, - const int protocol, TSIGContext* context) + const int protocol, TSIGContext* context, + const char* const remote_address, + uint16_t remote_port) { if (context == NULL) { message.toWire(request_renderer); @@ -81,8 +85,8 @@ SrvTestBase::createRequestPacket(Message& message, delete io_message; - endpoint = IOEndpoint::create(protocol, - IOAddress(DEFAULT_REMOTE_ADDRESS), 53210); + endpoint = IOEndpoint::create(protocol, IOAddress(remote_address), + remote_port); io_sock = (protocol == IPPROTO_UDP) ? 
&IOSocket::getDummyUDPSocket() : &IOSocket::getDummyTCPSocket(); @@ -96,9 +100,10 @@ void SrvTestBase::unsupportedRequest() { for (unsigned int i = 0; i < 16; ++i) { // set Opcode to 'i', which iterators over all possible codes except - // the standard query and notify + // the standard opcodes we support. if (i == isc::dns::Opcode::QUERY().getCode() || - i == isc::dns::Opcode::NOTIFY().getCode()) { + i == isc::dns::Opcode::NOTIFY().getCode() || + i == isc::dns::Opcode::UPDATE().getCode()) { continue; } createDataFromFile("simplequery_fromWire.wire"); diff --git a/src/lib/testutils/srv_test.h b/src/lib/testutils/srv_test.h index b5f64b5d6a..a5c516e896 100644 --- a/src/lib/testutils/srv_test.h +++ b/src/lib/testutils/srv_test.h @@ -35,6 +35,7 @@ class IOEndpoint; namespace isc { namespace testutils { extern const char* const DEFAULT_REMOTE_ADDRESS; +extern const uint16_t DEFAULT_REMOTE_PORT; // These are flags to indicate whether the corresponding flag bit of the // DNS header is to be set in the test cases. (The flag values @@ -88,7 +89,9 @@ protected: /// The existing content of \c io_message, if any, will be deleted. 
void createRequestPacket(isc::dns::Message& message, const int protocol = IPPROTO_UDP, - isc::dns::TSIGContext* context = NULL); + isc::dns::TSIGContext* context = NULL, + const char* const address = DEFAULT_REMOTE_ADDRESS, + uint16_t port = DEFAULT_REMOTE_PORT); MockSession notify_session; MockServer dnsserv; diff --git a/src/lib/testutils/testdata/rwtest.sqlite3 b/src/lib/testutils/testdata/rwtest.sqlite3 index 5eeb2c333e..558bc3f52e 100644 Binary files a/src/lib/testutils/testdata/rwtest.sqlite3 and b/src/lib/testutils/testdata/rwtest.sqlite3 differ diff --git a/src/lib/util/Makefile.am b/src/lib/util/Makefile.am index c2b3020b68..fad24651d1 100644 --- a/src/lib/util/Makefile.am +++ b/src/lib/util/Makefile.am @@ -4,6 +4,7 @@ AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib AM_CPPFLAGS += -I$(top_srcdir)/src/lib/util -I$(top_builddir)/src/lib/util AM_CPPFLAGS += -I$(top_srcdir)/src/lib/exceptions -I$(top_builddir)/src/lib/exceptions AM_CPPFLAGS += $(BOOST_INCLUDES) +AM_CPPFLAGS += -DLOCKFILE_DIR=\"${localstatedir}/${PACKAGE_NAME}\" AM_CXXFLAGS = $(B10_CXXFLAGS) lib_LTLIBRARIES = libutil.la @@ -12,6 +13,9 @@ libutil_la_SOURCES += locks.h lru_list.h libutil_la_SOURCES += strutil.h strutil.cc libutil_la_SOURCES += buffer.h io_utilities.h libutil_la_SOURCES += time_utilities.h time_utilities.cc +libutil_la_SOURCES += interprocess_sync.h +libutil_la_SOURCES += interprocess_sync_file.h interprocess_sync_file.cc +libutil_la_SOURCES += interprocess_sync_null.h interprocess_sync_null.cc libutil_la_SOURCES += range_utilities.h libutil_la_SOURCES += hash/sha1.h hash/sha1.cc libutil_la_SOURCES += encode/base16_from_binary.h diff --git a/src/lib/util/buffer.h b/src/lib/util/buffer.h index 5cd216a6d3..7e881089cd 100644 --- a/src/lib/util/buffer.h +++ b/src/lib/util/buffer.h @@ -18,8 +18,6 @@ #include #include #include -#include -#include #include @@ -101,17 +99,6 @@ public: /// \param len The length of the data in bytes. 
InputBuffer(const void* data, size_t len) : position_(0), data_(static_cast(data)), len_(len) {} - - /// @brief Constructor from vector - /// - /// It is caller's responsibility to ensure that the data is valid as long - /// as the buffer exists. - /// - /// @param begin iterator to beginning of the vector - /// @param end iterator to end of the vector - InputBuffer(std::vector::const_iterator begin, - std::vector::const_iterator end) : - position_(0), data_(&(*begin)), len_(std::distance(begin, end)) {} //@} /// @@ -209,7 +196,7 @@ public: throwError("read beyond end of buffer"); } - memcpy(data, &data_[position_], len); + std::memcpy(data, &data_[position_], len); position_ += len; } //@} @@ -219,8 +206,8 @@ public: /// If specified buffer is too short, it will be expanded /// using vector::resize() method. /// - /// @param Reference to a buffer (data will be stored there). - /// @param Size specified number of bytes to read in a vector. + /// @param data Reference to a buffer (data will be stored there). + /// @param len Size specified number of bytes to read in a vector. 
/// void readVector(std::vector& data, size_t len) { if (position_ + len > len_) { @@ -344,7 +331,7 @@ public: if (buffer_ == NULL && allocated_ != 0) { throw std::bad_alloc(); } - memcpy(buffer_, other.buffer_, size_); + std::memcpy(buffer_, other.buffer_, size_); } /// \brief Destructor @@ -363,7 +350,7 @@ public: buffer_ = newbuff; size_ = other.size_; allocated_ = other.allocated_; - memcpy(buffer_, other.buffer_, size_); + std::memcpy(buffer_, other.buffer_, size_); return (*this); } @@ -504,7 +491,7 @@ public: void writeData(const void *data, size_t len) { ensureAllocated(size_ + len); - memcpy(buffer_ + size_, data, len); + std::memcpy(buffer_ + size_, data, len); size_ += len; } //@} diff --git a/src/lib/util/interprocess_sync.h b/src/lib/util/interprocess_sync.h new file mode 100644 index 0000000000..e4fa7afcfe --- /dev/null +++ b/src/lib/util/interprocess_sync.h @@ -0,0 +1,149 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. 
+ +#ifndef __INTERPROCESS_SYNC_H__ +#define __INTERPROCESS_SYNC_H__ + +#include + +namespace isc { +namespace util { + +class InterprocessSyncLocker; // forward declaration + +/// \brief Interprocess Sync Class +/// +/// This class specifies an interface for mutual exclusion among +/// co-operating processes. This is an abstract class and a real +/// implementation such as InterprocessSyncFile should be used +/// in code. Usage is as follows: +/// +/// 1. Client instantiates a sync object of an implementation (such as +/// InterprocessSyncFile). +/// 2. Client then creates an automatic (stack) object of +/// InterprocessSyncLocker around the sync object. Such an object +/// destroys itself and releases any acquired lock when it goes out of extent. +/// 3. Client calls lock() method on the InterprocessSyncLocker. +/// 4. Client performs task that needs mutual exclusion. +/// 5. Client frees lock with unlock(), or simply returns from the basic +/// block which forms the scope for the InterprocessSyncLocker. +/// +/// NOTE: All implementations of InterprocessSync should keep the +/// is_locked_ member variable updated whenever their +/// lock()/tryLock()/unlock() implementations are called. +class InterprocessSync { + // InterprocessSyncLocker is the only code outside this class that + // should be allowed to call the lock(), tryLock() and unlock() + // methods. + friend class InterprocessSyncLocker; + +public: + /// \brief Constructor + /// + /// Creates an interprocess synchronization object + /// + /// \param task_name Name of the synchronization task. This has to be + /// identical among the various processes that need to be + /// synchronized for the same task. 
+ InterprocessSync(const std::string& task_name) : + task_name_(task_name), is_locked_(false) + {} + + /// \brief Destructor + virtual ~InterprocessSync() {} + +protected: + /// \brief Acquire the lock (blocks if something else has acquired a + /// lock on the same task name) + /// + /// \return Returns true if the lock was acquired, false otherwise. + virtual bool lock() = 0; + + /// \brief Try to acquire a lock (doesn't block) + /// + /// \return Returns true if the lock was acquired, false otherwise. + virtual bool tryLock() = 0; + + /// \brief Release the lock + /// + /// \return Returns true if the lock was released, false otherwise. + virtual bool unlock() = 0; + + const std::string task_name_; ///< The task name + bool is_locked_; ///< Is the lock taken? +}; + +/// \brief Interprocess Sync Locker Class +/// +/// This class is used for making automatic stack objects to manage +/// locks that are released automatically when the block is exited +/// (RAII). It is meant to be used along with InterprocessSync objects. See +/// the description of InterprocessSync. +class InterprocessSyncLocker { +public: + /// \brief Constructor + /// + /// Creates a lock manager around a interprocess synchronization object + /// + /// \param sync The sync object which has to be locked/unlocked by + /// this locker object. + InterprocessSyncLocker(InterprocessSync& sync) : + sync_(sync) + {} + + /// \brief Destructor + ~InterprocessSyncLocker() { + if (isLocked()) + unlock(); + } + + /// \brief Acquire the lock (blocks if something else has acquired a + /// lock on the same task name) + /// + /// \return Returns true if the lock was acquired, false otherwise. + bool lock() { + return (sync_.lock()); + } + + /// \brief Try to acquire a lock (doesn't block) + /// + /// \return Returns true if a new lock could be acquired, false + /// otherwise. 
+ bool tryLock() { + return (sync_.tryLock()); + } + + /// \brief Check if the lock is taken + /// + /// \return Returns true if a lock is currently acquired, false + /// otherwise. + bool isLocked() const { + return (sync_.is_locked_); + } + + /// \brief Release the lock + /// + /// \return Returns true if the lock was released, false otherwise. + bool unlock() { + return (sync_.unlock()); + } + +protected: + InterprocessSync& sync_; ///< Ref to underlying sync object +}; + +} // namespace util +} // namespace isc + +#endif // __INTERPROCESS_SYNC_H__ diff --git a/src/lib/util/interprocess_sync_file.cc b/src/lib/util/interprocess_sync_file.cc new file mode 100644 index 0000000000..d045449025 --- /dev/null +++ b/src/lib/util/interprocess_sync_file.cc @@ -0,0 +1,130 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#include "interprocess_sync_file.h" + +#include + +#include +#include +#include +#include +#include +#include + +namespace isc { +namespace util { + +InterprocessSyncFile::~InterprocessSyncFile() { + if (fd_ != -1) { + // This will also release any applied locks. + close(fd_); + // The lockfile will continue to exist, and we must not delete + // it. 
+ } +} + +bool +InterprocessSyncFile::do_lock(int cmd, short l_type) { + // Open lock file only when necessary (i.e., here). This is so that + // if a default InterprocessSync object is replaced with another + // implementation, it doesn't attempt any opens. + if (fd_ == -1) { + std::string lockfile_path = LOCKFILE_DIR; + + const char* const env = getenv("B10_FROM_BUILD"); + if (env != NULL) { + lockfile_path = env; + } + + const char* const env2 = getenv("B10_FROM_BUILD_LOCALSTATEDIR"); + if (env2 != NULL) { + lockfile_path = env2; + } + + const char* const env3 = getenv("B10_LOCKFILE_DIR_FROM_BUILD"); + if (env3 != NULL) { + lockfile_path = env3; + } + + lockfile_path += "/" + task_name_ + "_lockfile"; + + // Open the lockfile in the constructor so it doesn't do the access + // checks every time a message is logged. + const mode_t mode = umask(0111); + fd_ = open(lockfile_path.c_str(), O_CREAT | O_RDWR, 0660); + umask(mode); + + if (fd_ == -1) { + isc_throw(InterprocessSyncFileError, + "Unable to use interprocess sync lockfile: " + + lockfile_path); + } + } + + struct flock lock; + + memset(&lock, 0, sizeof (lock)); + lock.l_type = l_type; + lock.l_whence = SEEK_SET; + lock.l_start = 0; + lock.l_len = 1; + + return (fcntl(fd_, cmd, &lock) == 0); +} + +bool +InterprocessSyncFile::lock() { + if (is_locked_) { + return (true); + } + + if (do_lock(F_SETLKW, F_WRLCK)) { + is_locked_ = true; + return (true); + } + + return (false); +} + +bool +InterprocessSyncFile::tryLock() { + if (is_locked_) { + return (true); + } + + if (do_lock(F_SETLK, F_WRLCK)) { + is_locked_ = true; + return (true); + } + + return (false); +} + +bool +InterprocessSyncFile::unlock() { + if (!is_locked_) { + return (true); + } + + if (do_lock(F_SETLKW, F_UNLCK)) { + is_locked_ = false; + return (true); + } + + return (false); +} + +} // namespace util +} // namespace isc diff --git a/src/lib/util/interprocess_sync_file.h b/src/lib/util/interprocess_sync_file.h new file mode 100644 index 
0000000000..fd8da1b438 --- /dev/null +++ b/src/lib/util/interprocess_sync_file.h @@ -0,0 +1,91 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#ifndef __INTERPROCESS_SYNC_FILE_H__ +#define __INTERPROCESS_SYNC_FILE_H__ + +#include +#include + +namespace isc { +namespace util { + +/// \brief InterprocessSyncFileError +/// +/// Exception that is thrown if it's not possible to open the +/// lock file. +class InterprocessSyncFileError : public Exception { +public: + InterprocessSyncFileError(const char* file, size_t line, + const char* what) : + isc::Exception(file, line, what) {} +}; + +/// \brief File-based Interprocess Sync Class +/// +/// This class specifies a concrete implementation for a file-based +/// interprocess synchronization mechanism. Please see the +/// InterprocessSync class documentation for usage. +/// +/// An InterprocessSyncFileError exception may be thrown if there is an +/// issue opening the lock file. +/// +/// Lock files are created typically in the local state directory +/// (var). They are typically named like "_lockfile". +/// This implementation opens lock files lazily (only when +/// necessary). It also leaves the lock files lying around as multiple +/// processes may have locks on them. 
+class InterprocessSyncFile : public InterprocessSync { +public: + /// \brief Constructor + /// + /// Creates a file-based interprocess synchronization object + /// + /// \param name Name of the synchronization task. This has to be + /// identical among the various processes that need to be + /// synchronized for the same task. + InterprocessSyncFile(const std::string& task_name) : + InterprocessSync(task_name), fd_(-1) + {} + + /// \brief Destructor + virtual ~InterprocessSyncFile(); + +protected: + /// \brief Acquire the lock (blocks if something else has acquired a + /// lock on the same task name) + /// + /// \return Returns true if the lock was acquired, false otherwise. + bool lock(); + + /// \brief Try to acquire a lock (doesn't block) + /// + /// \return Returns true if the lock was acquired, false otherwise. + bool tryLock(); + + /// \brief Release the lock + /// + /// \return Returns true if the lock was released, false otherwise. + bool unlock(); + +private: + bool do_lock(int cmd, short l_type); + + int fd_; ///< The descriptor for the open file +}; + +} // namespace util +} // namespace isc + +#endif // __INTERPROCESS_SYNC_FILE_H__ diff --git a/src/lib/util/interprocess_sync_null.cc b/src/lib/util/interprocess_sync_null.cc new file mode 100644 index 0000000000..5355d5727e --- /dev/null +++ b/src/lib/util/interprocess_sync_null.cc @@ -0,0 +1,42 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. 
IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#include "interprocess_sync_null.h" + +namespace isc { +namespace util { + +InterprocessSyncNull::~InterprocessSyncNull() { +} + +bool +InterprocessSyncNull::lock() { + is_locked_ = true; + return (true); +} + +bool +InterprocessSyncNull::tryLock() { + is_locked_ = true; + return (true); +} + +bool +InterprocessSyncNull::unlock() { + is_locked_ = false; + return (true); +} + +} // namespace util +} // namespace isc diff --git a/src/lib/util/interprocess_sync_null.h b/src/lib/util/interprocess_sync_null.h new file mode 100644 index 0000000000..6ac032245e --- /dev/null +++ b/src/lib/util/interprocess_sync_null.h @@ -0,0 +1,64 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. 
+ +#ifndef __INTERPROCESS_SYNC_NULL_H__ +#define __INTERPROCESS_SYNC_NULL_H__ + +#include + +namespace isc { +namespace util { + +/// \brief Null Interprocess Sync Class +/// +/// This class specifies a concrete implementation for a null (no effect) +/// interprocess synchronization mechanism. Please see the +/// InterprocessSync class documentation for usage. +class InterprocessSyncNull : public InterprocessSync { +public: + /// \brief Constructor + /// + /// Creates a null interprocess synchronization object + /// + /// \param name Name of the synchronization task. This has to be + /// identical among the various processes that need to be + /// synchronized for the same task. + InterprocessSyncNull(const std::string& task_name) : + InterprocessSync(task_name) + {} + + /// \brief Destructor + virtual ~InterprocessSyncNull(); + +protected: + /// \brief Acquire the lock (never blocks) + /// + /// \return Always returns true + bool lock(); + + /// \brief Try to acquire a lock (doesn't block) + /// + /// \return Always returns true + bool tryLock(); + + /// \brief Release the lock + /// + /// \return Always returns true + bool unlock(); +}; + +} // namespace util +} // namespace isc + +#endif // __INTERPROCESS_SYNC_NULL_H__ diff --git a/src/lib/util/io/sockaddr_util.h b/src/lib/util/io/sockaddr_util.h index 9b4a0cbc12..0cd7c7b10d 100644 --- a/src/lib/util/io/sockaddr_util.h +++ b/src/lib/util/io/sockaddr_util.h @@ -15,6 +15,7 @@ #ifndef __SOCKADDR_UTIL_H_ #define __SOCKADDR_UTIL_H_ 1 +#include #include #include diff --git a/src/lib/util/io/socketsession.h b/src/lib/util/io/socketsession.h index 77f18a3036..48b7f19fe0 100644 --- a/src/lib/util/io/socketsession.h +++ b/src/lib/util/io/socketsession.h @@ -15,12 +15,14 @@ #ifndef __SOCKETSESSION_H_ #define __SOCKETSESSION_H_ 1 -#include - #include #include +#include + +#include + namespace isc { namespace util { namespace io { @@ -156,6 +158,35 @@ public: isc::Exception(file, line, what) {} }; +/// The "base" class of \c 
SocketSessionForwarder +/// +/// This class defines abstract interfaces of the \c SocketSessionForwarder +/// class. Although \c SocketSessionForwarder is not intended to be used in +/// a polymorphic way, it's not easy to use in tests because it will require +/// various low level network operations. So it would be useful if we +/// provide a framework for defining a fake or mock version of it. +/// An application that needs to use \c SocketSessionForwarder would actually +/// refer to this base class, and tests for the application would define +/// and use a fake version of the forwarder class. +/// +/// Normal applications are not expected to define and use their own derived +/// version of this base class, while it's not prohibited at the API level. +/// +/// See description of \c SocketSessionForwarder for the expected interface. +class BaseSocketSessionForwarder { +protected: + BaseSocketSessionForwarder() {} + +public: + virtual ~BaseSocketSessionForwarder() {} + virtual void connectToReceiver() = 0; + virtual void close() = 0; + virtual void push(int sock, int family, int type, int protocol, + const struct sockaddr& local_end, + const struct sockaddr& remote_end, + const void* data, size_t data_len) = 0; +}; + /// The forwarder of socket sessions /// /// An object of this class maintains a UNIX domain socket (normally expected @@ -164,7 +195,9 @@ public: /// /// See the description of \ref SocketSessionUtility for other details of how /// the session forwarding works. -class SocketSessionForwarder : boost::noncopyable { +class SocketSessionForwarder : boost::noncopyable, + public BaseSocketSessionForwarder +{ public: /// The constructor. /// @@ -212,7 +245,7 @@ public: /// /// If a connection has been established, it's automatically closed in /// the destructor. - ~SocketSessionForwarder(); + virtual ~SocketSessionForwarder(); /// Establish a connection to the receiver. 
/// @@ -224,7 +257,7 @@ public: /// \exception BadValue The method is called while an already /// established connection is still active. /// \exception SocketSessionError A system error in socket operation. - void connectToReceiver(); + virtual void connectToReceiver(); /// Close the connection to the receiver. /// @@ -232,7 +265,7 @@ public: /// As long as it's met this method is exception free. /// /// \exception BadValue The connection hasn't been established. - void close(); + virtual void close(); /// Forward a socket session to the receiver. /// @@ -276,10 +309,10 @@ public: /// \param data A pointer to the beginning of the memory region for the /// session data /// \param data_len The size of the session data in bytes. - void push(int sock, int family, int type, int protocol, - const struct sockaddr& local_end, - const struct sockaddr& remote_end, - const void* data, size_t data_len); + virtual void push(int sock, int family, int type, int protocol, + const struct sockaddr& local_end, + const struct sockaddr& remote_end, + const void* data, size_t data_len); private: struct ForwarderImpl; diff --git a/src/lib/util/locks.h b/src/lib/util/locks.h index daaf216b15..da9e9cd89c 100644 --- a/src/lib/util/locks.h +++ b/src/lib/util/locks.h @@ -16,7 +16,7 @@ /// It also contains code to use boost/threads locks: /// /// -/// All locks are dummy classes that don't actually do anything. At this moment, +/// All locks are dummy classes that don't actually do anything. At this moment, /// only the very minimal set of methods that we actually use is defined. 
/// /// Note that we need to include in our .cc files for that diff --git a/src/lib/util/tests/Makefile.am b/src/lib/util/tests/Makefile.am index 4aea9512a4..a7811d5d43 100644 --- a/src/lib/util/tests/Makefile.am +++ b/src/lib/util/tests/Makefile.am @@ -15,6 +15,9 @@ endif CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = if HAVE_GTEST TESTS += run_unittests @@ -28,6 +31,8 @@ run_unittests_SOURCES += filename_unittest.cc run_unittests_SOURCES += hex_unittest.cc run_unittests_SOURCES += io_utilities_unittest.cc run_unittests_SOURCES += lru_list_unittest.cc +run_unittests_SOURCES += interprocess_sync_file_unittest.cc +run_unittests_SOURCES += interprocess_sync_null_unittest.cc run_unittests_SOURCES += qid_gen_unittest.cc run_unittests_SOURCES += random_number_generator_unittest.cc run_unittests_SOURCES += sha1_unittest.cc @@ -39,12 +44,12 @@ run_unittests_SOURCES += range_utilities_unittest.cc run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES) run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS) -run_unittests_LDADD = $(GTEST_LDADD) -run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la +run_unittests_LDADD = $(top_builddir)/src/lib/util/libutil.la run_unittests_LDADD += $(top_builddir)/src/lib/util/io/libutil_io.la run_unittests_LDADD += \ $(top_builddir)/src/lib/util/unittests/libutil_unittests.la run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la +run_unittests_LDADD += $(GTEST_LDADD) endif noinst_PROGRAMS = $(TESTS) diff --git a/src/lib/util/tests/buffer_unittest.cc b/src/lib/util/tests/buffer_unittest.cc index 9d924b3a5d..9af3d57c6e 100644 --- a/src/lib/util/tests/buffer_unittest.cc +++ b/src/lib/util/tests/buffer_unittest.cc @@ -287,19 +287,4 @@ TEST_F(BufferTest, inputBufferReadVectorChunks) { EXPECT_EQ(0, memcmp(&vec[0], testdata+3, 2)); } -TEST_F(BufferTest, inputBufferConstructorVector) { - std::vector vec(17); - for (int i = 0; i < vec.size(); i++) { - vec[i] = 
i; - } - - InputBuffer buf(vec.begin(), vec.end()); - - EXPECT_EQ(buf.getLength(), 17); - - std::vector vec2; - EXPECT_NO_THROW(buf.readVector(vec2, 17)); - EXPECT_TRUE(vec == vec2); -} - } diff --git a/src/lib/util/tests/interprocess_sync_file_unittest.cc b/src/lib/util/tests/interprocess_sync_file_unittest.cc new file mode 100644 index 0000000000..9a1b02511a --- /dev/null +++ b/src/lib/util/tests/interprocess_sync_file_unittest.cc @@ -0,0 +1,174 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#include "util/interprocess_sync_file.h" +#include +#include + +using namespace std; + +namespace isc { +namespace util { + +namespace { +unsigned char +parentReadLockedState (int fd) { + unsigned char locked = 0xff; + + fd_set rfds; + FD_ZERO(&rfds); + FD_SET(fd, &rfds); + + // We use select() here to wait for new data on the input end of + // the pipe. We wait for 5 seconds (an arbitrary value) for input + // data, and continue if no data is available. This is done so + // that read() is not blocked due to some issue in the child + // process (and the tests continue running). 
+ + struct timeval tv; + tv.tv_sec = 5; + tv.tv_usec = 0; + + const int nfds = select(fd + 1, &rfds, NULL, NULL, &tv); + EXPECT_EQ(1, nfds); + + if (nfds == 1) { + // Read status + ssize_t bytes_read = read(fd, &locked, sizeof(locked)); + EXPECT_EQ(sizeof(locked), bytes_read); + } + + return (locked); +} + +TEST(InterprocessSyncFileTest, TestLock) { + InterprocessSyncFile sync("test"); + InterprocessSyncLocker locker(sync); + + EXPECT_FALSE(locker.isLocked()); + EXPECT_TRUE(locker.lock()); + EXPECT_TRUE(locker.isLocked()); + + int fds[2]; + + // Here, we check that a lock has been taken by forking and + // checking from the child that a lock exists. This has to be + // done from a separate process as we test by trying to lock the + // range again on the lock file. The lock attempt would pass if + // done from the same process for the granted range. The lock + // attempt must fail to pass our check. + + EXPECT_EQ(0, pipe(fds)); + + if (fork() == 0) { + unsigned char locked = 0; + // Child writes to pipe + close(fds[0]); + + InterprocessSyncFile sync2("test"); + InterprocessSyncLocker locker2(sync2); + + if (!locker2.tryLock()) { + EXPECT_FALSE(locker2.isLocked()); + locked = 1; + } else { + EXPECT_TRUE(locker2.isLocked()); + } + + ssize_t bytes_written = write(fds[1], &locked, sizeof(locked)); + EXPECT_EQ(sizeof(locked), bytes_written); + + close(fds[1]); + exit(0); + } else { + // Parent reads from pipe + close(fds[1]); + + const unsigned char locked = parentReadLockedState(fds[0]); + + close(fds[0]); + + EXPECT_EQ(1, locked); + } + + EXPECT_TRUE(locker.unlock()); + EXPECT_FALSE(locker.isLocked()); + + EXPECT_EQ (0, unlink(TEST_DATA_TOPBUILDDIR "/test_lockfile")); +} + +TEST(InterprocessSyncFileTest, TestMultipleFilesDirect) { + InterprocessSyncFile sync("test1"); + InterprocessSyncLocker locker(sync); + + EXPECT_TRUE(locker.lock()); + + InterprocessSyncFile sync2("test2"); + InterprocessSyncLocker locker2(sync2); + EXPECT_TRUE(locker2.lock()); + 
EXPECT_TRUE(locker2.unlock()); + + EXPECT_TRUE(locker.unlock()); + + EXPECT_EQ (0, unlink(TEST_DATA_TOPBUILDDIR "/test1_lockfile")); + EXPECT_EQ (0, unlink(TEST_DATA_TOPBUILDDIR "/test2_lockfile")); +} + +TEST(InterprocessSyncFileTest, TestMultipleFilesForked) { + InterprocessSyncFile sync("test1"); + InterprocessSyncLocker locker(sync); + + EXPECT_TRUE(locker.lock()); + + int fds[2]; + + EXPECT_EQ(0, pipe(fds)); + + if (fork() == 0) { + unsigned char locked = 0xff; + // Child writes to pipe + close(fds[0]); + + InterprocessSyncFile sync2("test2"); + InterprocessSyncLocker locker2(sync2); + + if (locker2.tryLock()) { + locked = 0; + } + + ssize_t bytes_written = write(fds[1], &locked, sizeof(locked)); + EXPECT_EQ(sizeof(locked), bytes_written); + + close(fds[1]); + exit(0); + } else { + // Parent reads from pipe + close(fds[1]); + + const unsigned char locked = parentReadLockedState(fds[0]); + + close(fds[0]); + + EXPECT_EQ(0, locked); + } + + EXPECT_TRUE(locker.unlock()); + + EXPECT_EQ (0, unlink(TEST_DATA_TOPBUILDDIR "/test1_lockfile")); + EXPECT_EQ (0, unlink(TEST_DATA_TOPBUILDDIR "/test2_lockfile")); +} +} + +} // namespace util +} // namespace isc diff --git a/src/lib/util/tests/interprocess_sync_null_unittest.cc b/src/lib/util/tests/interprocess_sync_null_unittest.cc new file mode 100644 index 0000000000..70e2b07f0e --- /dev/null +++ b/src/lib/util/tests/interprocess_sync_null_unittest.cc @@ -0,0 +1,76 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. 
IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#include "util/interprocess_sync_null.h" +#include + +using namespace std; + +namespace isc { +namespace util { + +TEST(InterprocessSyncNullTest, TestNull) { + InterprocessSyncNull sync("test1"); + InterprocessSyncLocker locker(sync); + + // Check if the is_locked_ flag is set correctly during lock(). + EXPECT_FALSE(locker.isLocked()); + EXPECT_TRUE(locker.lock()); + EXPECT_TRUE(locker.isLocked()); + + // lock() must always return true (this is called 4 times, just an + // arbitrary number) + EXPECT_TRUE(locker.lock()); + EXPECT_TRUE(locker.lock()); + EXPECT_TRUE(locker.lock()); + EXPECT_TRUE(locker.lock()); + + // Check if the is_locked_ flag is set correctly during unlock(). + EXPECT_TRUE(locker.isLocked()); + EXPECT_TRUE(locker.unlock()); + EXPECT_FALSE(locker.isLocked()); + + // unlock() must always return true (this is called 4 times, just an + // arbitrary number) + EXPECT_TRUE(locker.unlock()); + EXPECT_TRUE(locker.unlock()); + EXPECT_TRUE(locker.unlock()); + EXPECT_TRUE(locker.unlock()); + + // Check if the is_locked_ flag is set correctly during tryLock(). 
+ EXPECT_FALSE(locker.isLocked()); + EXPECT_TRUE(locker.tryLock()); + EXPECT_TRUE(locker.isLocked()); + + // tryLock() must always return true (this is called 4 times, just an + // arbitrary number) + EXPECT_TRUE(locker.tryLock()); + EXPECT_TRUE(locker.tryLock()); + EXPECT_TRUE(locker.tryLock()); + EXPECT_TRUE(locker.tryLock()); + + // Random order (should all return true) + EXPECT_TRUE(locker.unlock()); + EXPECT_TRUE(locker.lock()); + EXPECT_TRUE(locker.tryLock()); + EXPECT_TRUE(locker.lock()); + EXPECT_TRUE(locker.unlock()); + EXPECT_TRUE(locker.lock()); + EXPECT_TRUE(locker.tryLock()); + EXPECT_TRUE(locker.unlock()); + EXPECT_TRUE(locker.unlock()); +} + +} // namespace util +} // namespace isc diff --git a/src/lib/util/tests/run_unittests.cc b/src/lib/util/tests/run_unittests.cc index a2181cf47d..8789a9c116 100644 --- a/src/lib/util/tests/run_unittests.cc +++ b/src/lib/util/tests/run_unittests.cc @@ -14,10 +14,12 @@ #include #include +#include int main(int argc, char* argv[]) { ::testing::InitGoogleTest(&argc, argv); + setenv("B10_LOCKFILE_DIR_FROM_BUILD", TEST_DATA_TOPBUILDDIR, 1); return (isc::util::unittests::run_all()); } diff --git a/src/lib/util/tests/socketsession_unittest.cc b/src/lib/util/tests/socketsession_unittest.cc index b9f2667692..e83c140cdc 100644 --- a/src/lib/util/tests/socketsession_unittest.cc +++ b/src/lib/util/tests/socketsession_unittest.cc @@ -53,6 +53,7 @@ namespace { const char* const TEST_UNIX_FILE = TEST_DATA_TOPBUILDDIR "/test.unix"; const char* const TEST_PORT = "53535"; +const char* const TEST_PORT2 = "53536"; // use this in case we need 2 ports const char TEST_DATA[] = "BIND10 test"; // A simple helper structure to automatically close test sockets on return @@ -540,8 +541,12 @@ ForwardTest::checkPushAndPop(int family, int type, int protocol, } TEST_F(ForwardTest, pushAndPop) { - // Pass a UDP/IPv6 session. + // Pass a UDP/IPv6 session. 
We use different ports for different UDP + // tests because Solaris 11 seems to prohibit reusing the same port for + // some short period once the socket FD is forwarded, even if the sockets + // are closed. See Trac #2028. const SockAddrInfo sai_local6(getSockAddr("::1", TEST_PORT)); + const SockAddrInfo sai_local6_alt(getSockAddr("::1", TEST_PORT2)); const SockAddrInfo sai_remote6(getSockAddr("2001:db8::1", "5300")); { SCOPED_TRACE("Passing UDP/IPv6 session"); @@ -559,6 +564,7 @@ TEST_F(ForwardTest, pushAndPop) { // receiver, which should be usable for multiple attempts of passing, // regardless of family of the passed session const SockAddrInfo sai_local4(getSockAddr("127.0.0.1", TEST_PORT)); + const SockAddrInfo sai_local4_alt(getSockAddr("127.0.0.1", TEST_PORT2)); const SockAddrInfo sai_remote4(getSockAddr("192.0.2.2", "5300")); { SCOPED_TRACE("Passing UDP/IPv4 session"); @@ -575,7 +581,7 @@ TEST_F(ForwardTest, pushAndPop) { // Also try large data { SCOPED_TRACE("Passing UDP/IPv6 session with large data"); - checkPushAndPop(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, sai_local6, + checkPushAndPop(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, sai_local6_alt, sai_remote6, large_text_.c_str(), large_text_.length(), false); } @@ -587,7 +593,7 @@ TEST_F(ForwardTest, pushAndPop) { } { SCOPED_TRACE("Passing UDP/IPv4 session with large data"); - checkPushAndPop(AF_INET, SOCK_DGRAM, IPPROTO_UDP, sai_local4, + checkPushAndPop(AF_INET, SOCK_DGRAM, IPPROTO_UDP, sai_local4_alt, sai_remote4, large_text_.c_str(), large_text_.length(), false); } diff --git a/src/lib/util/unittests/Makefile.am b/src/lib/util/unittests/Makefile.am index 28274712a6..3007a839d8 100644 --- a/src/lib/util/unittests/Makefile.am +++ b/src/lib/util/unittests/Makefile.am @@ -11,6 +11,9 @@ libutil_unittests_la_SOURCES += run_all.h run_all.cc libutil_unittests_la_SOURCES += textdata.h endif +# For now, this isn't needed for libutil_unittests +EXTRA_DIST = mock_socketsession.h + libutil_unittests_la_CPPFLAGS = $(AM_CPPFLAGS) 
if HAVE_GTEST libutil_unittests_la_CPPFLAGS += $(GTEST_INCLUDES) diff --git a/src/lib/util/unittests/mock_socketsession.h b/src/lib/util/unittests/mock_socketsession.h new file mode 100644 index 0000000000..8078265deb --- /dev/null +++ b/src/lib/util/unittests/mock_socketsession.h @@ -0,0 +1,154 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#ifndef __UTIL_UNITTESTS_MOCKSOCKETSESSION_H +#define __UTIL_UNITTESTS_MOCKSOCKETSESSION_H 1 + +#include + +#include +#include + +#include +#include +#include + +#include +#include + +namespace isc { +namespace util { +namespace unittests { + +/// \brief Mock socket session forwarder. +/// +/// It emulates the behavior of SocketSessionForwarder without involving +/// network communication, and allowing the tester to customize the behavior +/// and to examine forwarded data afterwards. 
+class MockSocketSessionForwarder : + public isc::util::io::BaseSocketSessionForwarder +{ +public: + MockSocketSessionForwarder() : + is_connected_(false), connect_ok_(true), push_ok_(true), + close_ok_(true) + {} + + virtual void connectToReceiver() { + if (!connect_ok_) { + isc_throw(isc::util::io::SocketSessionError, "socket session " + "forwarding connection disabled for test"); + } + if (is_connected_) { + isc_throw(isc::util::io::SocketSessionError, "duplicate connect"); + } + is_connected_ = true; + } + virtual void close() { + if (!is_connected_) { + isc_throw(isc::util::io::SocketSessionError, "duplicate close"); + } + is_connected_ = false; + } + + // Pushing a socket session. It copies the given session data + // so that the test code can check the values later via the getter + // methods. Complete deep copy will be created, so the caller doesn't + // have to keep the parameters valid after the call to this method. + virtual void push(int sock, int family, int type, int protocol, + const struct sockaddr& local_end, + const struct sockaddr& remote_end, + const void* data, size_t data_len) + { + if (!push_ok_) { + isc_throw(isc::util::io::SocketSessionError, + "socket session forwarding is disabled for test"); + } + if (!is_connected_) { + isc_throw(isc::util::io::SocketSessionError, + "socket session is being pushed before connected"); + } + + // Copy parameters for later checks + pushed_sock_ = sock; + pushed_family_ = family; + pushed_type_ = type; + pushed_protocol_ = protocol; + assert(io::internal::getSALength(local_end) <= + sizeof(pushed_local_end_ss_)); + std::memcpy(&pushed_local_end_ss_, &local_end, + io::internal::getSALength(local_end)); + assert(io::internal::getSALength(remote_end) <= + sizeof(pushed_remote_end_ss_)); + std::memcpy(&pushed_remote_end_ss_, &remote_end, + io::internal::getSALength(remote_end)); + pushed_data_.resize(data_len); + std::memcpy(&pushed_data_[0], data, data_len); + } + + // Allow the test code to check if the 
connection is established. + bool isConnected() const { return (is_connected_); } + + // Allow the test code to customize the forwarder behavior wrt whether + // a specific operation should succeed or fail. + void disableConnect() { connect_ok_ = false; } + void enableConnect() { connect_ok_ = true; } + void disableClose() { close_ok_ = false; } + void disablePush() { push_ok_ = false; } + void enablePush() { push_ok_ = true; } + + // Read-only accessors to recorded parameters to the previous successful + // call to push(). Return values are undefined if there has been no + // successful call to push(). + // Note that we use convertSockAddr() to convert sockaddr_storage to + // sockaddr. It should be safe since we use the storage in its literal + // sense; it was originally filled with the binary image of another + // sockaddr structure, and we are going to return the image opaquely + // as a sockaddr structure without touching the data. + int getPushedSock() const { return (pushed_sock_); } + int getPushedFamily() const { return (pushed_family_); } + int getPushedType() const { return (pushed_type_); } + int getPushedProtocol() const { return (pushed_protocol_); } + const struct sockaddr& getPushedLocalend() const { + return (*io::internal::convertSockAddr(&pushed_local_end_ss_)); + } + const struct sockaddr& getPushedRemoteend() const { + return (*io::internal::convertSockAddr(&pushed_remote_end_ss_)); + } + const std::vector& getPushedData() const { + return (pushed_data_); + } + +private: + bool is_connected_; + bool connect_ok_; + bool push_ok_; + bool close_ok_; + int pushed_sock_; + int pushed_family_; + int pushed_type_; + int pushed_protocol_; + struct sockaddr_storage pushed_local_end_ss_; + struct sockaddr_storage pushed_remote_end_ss_; + std::vector pushed_data_; +}; + +} // end of unittests +} // end of util +} // end of isc +#endif // __UTIL_UNITTESTS_MOCKSOCKETSESSION_H + +// Local Variables: +// mode: c++ +// End: diff --git 
a/src/lib/xfr/tests/Makefile.am b/src/lib/xfr/tests/Makefile.am index 4abb456c8e..d8f39330e8 100644 --- a/src/lib/xfr/tests/Makefile.am +++ b/src/lib/xfr/tests/Makefile.am @@ -8,6 +8,9 @@ endif CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = if HAVE_GTEST TESTS += run_unittests diff --git a/src/lib/xfr/xfrout_client.cc b/src/lib/xfr/xfrout_client.cc index 227ffc4d96..af0c1b51c3 100644 --- a/src/lib/xfr/xfrout_client.cc +++ b/src/lib/xfr/xfrout_client.cc @@ -82,8 +82,14 @@ XfroutClient::sendXfroutRequestInfo(const int tcp_sock, // TODO: this shouldn't be blocking send, even though it's unlikely to // block. - // converting the 16-bit word to network byte order. - const uint8_t lenbuf[2] = { msg_len >> 8, msg_len & 0xff }; + // Converting the 16-bit word to network byte order. + + // Splitting msg_len below performs something called a 'narrowing + // conversion' (conversion of uint16_t to uint8_t). C++0x (and GCC + // 4.7) requires explicit casting when a narrowing conversion is + // performed. For reference, see 8.5.4/6 of n3225. + const uint8_t lenbuf[2] = { static_cast(msg_len >> 8), + static_cast(msg_len & 0xff) }; if (send(impl_->socket_.native(), lenbuf, sizeof(lenbuf), 0) != sizeof(lenbuf)) { isc_throw(XfroutError, diff --git a/src/valgrind-suppressions b/src/valgrind-suppressions new file mode 100644 index 0000000000..06257e51be --- /dev/null +++ b/src/valgrind-suppressions @@ -0,0 +1,11 @@ +# Valgrind suppressions file. Place permanent suppressions that we never +# want to reconsider again into this file. For temporary suppressions +# that we want to revisit in the future, use +# valgrind-suppressions.revisit. +# +# Don't add any "obj:" lines in suppressions as these are likely +# site-specific. Use "..." instead to match these. Look at the other +# suppressions as examples. 
+# +# In case you want to make sense of the following symbols, demangle them +# with a command like: c++filt < valgrind-suppressions diff --git a/src/valgrind-suppressions.revisit b/src/valgrind-suppressions.revisit new file mode 100644 index 0000000000..8b4a8c734a --- /dev/null +++ b/src/valgrind-suppressions.revisit @@ -0,0 +1,17 @@ +# Place temporary suppressions that we want to revisit in the future +# into this file. For permanent suppressions that we don't want to look +# at again, use valgrind-suppressions. +# +# Don't add any "obj:" lines in suppressions as these are likely +# site-specific. Use "..." instead to match these. Look at the other +# suppressions as examples. +# +# In case you want to make sense of the following symbols, demangle them +# with a command like: c++filt < valgrind-suppressions.revisit + +############################################################################ +#### beginning of suppressions for existing issues that we want to fix. #### + + +####### end of suppressions for existing issues that we want to fix. ####### +############################################################################ diff --git a/tests/lettuce/configurations/NOTES b/tests/lettuce/configurations/NOTES new file mode 100644 index 0000000000..a9b7976894 --- /dev/null +++ b/tests/lettuce/configurations/NOTES @@ -0,0 +1,4 @@ +- In some configuration we intentionally use an IPv6 address (::1) with + port 47807. DO NOT CHANGE THAT; at least do not change it to + 127.0.0.1:47807. See git e3f4b290d17a68db728166cdffcbe93517966e8b + for why. 
diff --git a/tests/lettuce/configurations/bindctl/.gitignore b/tests/lettuce/configurations/bindctl/.gitignore new file mode 100644 index 0000000000..e14ae76759 --- /dev/null +++ b/tests/lettuce/configurations/bindctl/.gitignore @@ -0,0 +1 @@ +/bindctl.config diff --git a/tests/lettuce/configurations/ddns/.gitignore b/tests/lettuce/configurations/ddns/.gitignore new file mode 100644 index 0000000000..f4f3945b61 --- /dev/null +++ b/tests/lettuce/configurations/ddns/.gitignore @@ -0,0 +1,2 @@ +/ddns.config +/noddns.config diff --git a/tests/lettuce/configurations/ddns/ddns.config.orig b/tests/lettuce/configurations/ddns/ddns.config.orig new file mode 100644 index 0000000000..80b92f79df --- /dev/null +++ b/tests/lettuce/configurations/ddns/ddns.config.orig @@ -0,0 +1,78 @@ +{ + "version": 2, + "Logging": { + "loggers": [ + { + "debuglevel": 99, + "severity": "DEBUG", + "name": "*" + } + ] + }, + "Zonemgr": { + "secondary_zones": [ + { + "class": "IN", + "name": "secondary.org" + } + ] + }, + "Auth": { + "database_file": "data/ddns/example.org.sqlite3", + "listen_on": [ + { + "port": 47806, + "address": + "127.0.0.1" + } + ] + }, + "Boss": { + "components": { + "b10-xfrout": { + "kind": "dispensable", + "address": "Xfrout" + }, + "b10-zonemgr": { + "kind": "dispensable", + "address": "ZoneMgr" + }, + "b10-ddns": { + "kind": "dispensable", + "address": "DDNS" + }, + "b10-auth": { + "kind": "needed", + "special": "auth" + }, + "b10-cmdctl": { + "kind": "needed", + "special": "cmdctl" + } + } + }, + "DDNS": { + "zones": [ + { + "origin": "example.org.", + "update_acl": [ + { + "action": "ACCEPT", + "from": "127.0.0.1" + } + ], + "class": "IN" + }, + { + "origin": "secondary.org.", + "update_acl": [ + { + "action": "ACCEPT", + "from": "127.0.0.1" + } + ], + "class": "IN" + } + ] + } +} diff --git a/tests/lettuce/configurations/ddns/noddns.config.orig b/tests/lettuce/configurations/ddns/noddns.config.orig new file mode 100644 index 0000000000..bf89537b4b --- /dev/null +++ 
b/tests/lettuce/configurations/ddns/noddns.config.orig @@ -0,0 +1,43 @@ +{ + "version": 2, + "Logging": { + "loggers": [ + { + "severity": "DEBUG", + "name": "*", + "debuglevel": 99 + } + ] + }, + "DDNS": {"zones": []}, + "Auth": { + "database_file": "data/ddns/example.org.sqlite3", + "listen_on": [ + { + "port": 47806, + "address": "127.0.0.1" + } + ], + "datasources": [ + { + "type": "memory", + "class": "IN", + "zones": [ + { + "origin": "example.org", + "filetype": "sqlite3", + "file": "data/ddns/example.org.sqlite3" + } + ] + } + ] + }, + "Boss": { + "components": { + "b10-xfrout": {"kind": "dispensable"}, + "b10-auth": {"kind": "needed", "special": "auth"}, + "b10-zonemgr": {"kind": "dispensable", "address": "ZoneMgr" }, + "b10-cmdctl": {"kind": "needed", "special": "cmdctl"} + } + } +} diff --git a/tests/lettuce/configurations/inmemory_over_sqlite3/secondary.conf b/tests/lettuce/configurations/inmemory_over_sqlite3/secondary.conf index a104726e52..8571015c92 100644 --- a/tests/lettuce/configurations/inmemory_over_sqlite3/secondary.conf +++ b/tests/lettuce/configurations/inmemory_over_sqlite3/secondary.conf @@ -4,7 +4,7 @@ "loggers": [ { "debuglevel": 99, "severity": "DEBUG", - "name": "auth" + "name": "*" } ] }, "Auth": { diff --git a/tests/lettuce/configurations/xfrin/inmem_slave.conf b/tests/lettuce/configurations/xfrin/inmem_slave.conf new file mode 100644 index 0000000000..a6d88ee013 --- /dev/null +++ b/tests/lettuce/configurations/xfrin/inmem_slave.conf @@ -0,0 +1,34 @@ +{ + "version": 2, + "Logging": { + "loggers": [ { + "debuglevel": 99, + "severity": "DEBUG", + "name": "*" + } ] + }, + "Auth": { + "database_file": "data/inmem-xfrin.sqlite3", + "datasources": [ { + "type": "memory", + "class": "IN", + "zones": [ { + "origin": "example.org", + "file": "data/inmem-xfrin.sqlite3", + "filetype": "sqlite3" + } ] + } ], + "listen_on": [ { + "port": 47806, + "address": "127.0.0.1" + } ] + }, + "Boss": { + "components": { + "b10-auth": { "kind": "needed", 
"special": "auth" }, + "b10-xfrin": { "address": "Xfrin", "kind": "dispensable" }, + "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" }, + "b10-cmdctl": { "special": "cmdctl", "kind": "needed" } + } + } +} diff --git a/tests/lettuce/data/ddns/.gitignore b/tests/lettuce/data/ddns/.gitignore new file mode 100644 index 0000000000..60c9224777 --- /dev/null +++ b/tests/lettuce/data/ddns/.gitignore @@ -0,0 +1 @@ +/example.org.sqlite3 diff --git a/tests/lettuce/data/ddns/example.org.sqlite3.orig b/tests/lettuce/data/ddns/example.org.sqlite3.orig new file mode 100644 index 0000000000..427fa24d57 Binary files /dev/null and b/tests/lettuce/data/ddns/example.org.sqlite3.orig differ diff --git a/tests/lettuce/data/example.org.sqlite3 b/tests/lettuce/data/example.org.sqlite3 index 427fa24d57..f79a4e27f1 100644 Binary files a/tests/lettuce/data/example.org.sqlite3 and b/tests/lettuce/data/example.org.sqlite3 differ diff --git a/tests/lettuce/data/inmem-xfrin b/tests/lettuce/data/inmem-xfrin new file mode 100644 index 0000000000..9e025913d7 --- /dev/null +++ b/tests/lettuce/data/inmem-xfrin @@ -0,0 +1,7 @@ +example.org. 3600 IN SOA ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200 +example.org. 3600 IN NS ns1.example.org. +example.org. 3600 IN NS ns2.example.org. +example.org. 3600 IN MX 10 mail.example.org. +www.example.org. 3600 IN A 192.0.2.63 +ns1.example.org. 3600 IN A 192.0.2.3 +ns2.example.org. 
3600 IN A 192.0.2.4 diff --git a/tests/lettuce/data/inmem-xfrin.sqlite3.orig b/tests/lettuce/data/inmem-xfrin.sqlite3.orig new file mode 100644 index 0000000000..287d980f05 Binary files /dev/null and b/tests/lettuce/data/inmem-xfrin.sqlite3.orig differ diff --git a/tests/lettuce/features/bindctl_commands.feature b/tests/lettuce/features/bindctl_commands.feature index 1ab506d90d..20a28fc990 100644 --- a/tests/lettuce/features/bindctl_commands.feature +++ b/tests/lettuce/features/bindctl_commands.feature @@ -109,7 +109,7 @@ Feature: control with bindctl # nested_command contains another execute script When I send bind10 the command execute file data/commands/nested last bindctl output should contain shouldshow - last bindctl output should not contain Error + last bindctl output should not contain Error # show commands from a file When I send bind10 the command execute file data/commands/bad_command show diff --git a/tests/lettuce/features/ddns_system.feature b/tests/lettuce/features/ddns_system.feature new file mode 100644 index 0000000000..327ef96d9b --- /dev/null +++ b/tests/lettuce/features/ddns_system.feature @@ -0,0 +1,144 @@ +Feature: DDNS System + A number of BIND10-specific DDNS tests, that do not fall under the + 'compliance' category; specific ACL checks, module checks, etc. 
+ + Scenario: Module tests + # The given config has b10-ddns disabled + Given I have bind10 running with configuration ddns/noddns.config + And wait for bind10 stderr message BIND10_STARTED_CC + And wait for bind10 stderr message AUTH_SERVER_STARTED + + # Sanity check + bind10 module DDNS should not be running + + # Test 1 + When I use DDNS to set the SOA serial to 1235 + # Note: test spec says refused here, system returns SERVFAIL + #The DDNS response should be REFUSED + The DDNS response should be SERVFAIL + And the SOA serial for example.org should be 1234 + + # Test 2 + When I configure bind10 to run DDNS + And wait for new bind10 stderr message DDNS_STARTED + bind10 module DDNS should be running + + # Test 3 + When I use DDNS to set the SOA serial to 1236 + The DDNS response should be REFUSED + And the SOA serial for example.org should be 1234 + + # Test 4 + When I send bind10 the following commands + """ + config add DDNS/zones + config set DDNS/zones[0]/origin example.org + config add DDNS/zones[0]/update_acl {"action": "ACCEPT", "from": "127.0.0.1"} + config commit + """ + + # Test 5 + When I use DDNS to set the SOA serial to 1237 + # also check if Auth server reloaded + And wait for new bind10 stderr message AUTH_LOAD_ZONE + The DDNS response should be SUCCESS + And the SOA serial for example.org should be 1237 + + # Test 6 + When I send bind10 the command DDNS shutdown + And wait for new bind10 stderr message DDNS_STOPPED + + # Test 7 + # BoB should restart it + And wait for new bind10 stderr message DDNS_STARTED + + # Test 8 + # Known issue: after shutdown, first new attempt results in SERVFAIL + When I use DDNS to set the SOA serial to 1238 + The DDNS response should be SERVFAIL + And the SOA serial for example.org should be 1237 + + When I use DDNS to set the SOA serial to 1238 + And wait for new bind10 stderr message AUTH_LOAD_ZONE + The DDNS response should be SUCCESS + And the SOA serial for example.org should be 1238 + + # Test 9 + When I send 
bind10 the command Auth shutdown + And wait for new bind10 stderr message AUTH_SHUTDOWN + # BoB should restart it automatically + And wait for new bind10 stderr message AUTH_SERVER_STARTED + + # Test 10 + When I use DDNS to set the SOA serial to 1239 + And wait for new bind10 stderr message AUTH_LOAD_ZONE + The DDNS response should be SUCCESS + And the SOA serial for example.org should be 1239 + + # Test 11 + When I configure BIND10 to stop running DDNS + And wait for new bind10 stderr message DDNS_STOPPED + + bind10 module DDNS should not be running + + # Test 12 + When I use DDNS to set the SOA serial to 1240 + # should this be REFUSED again? + The DDNS response should be SERVFAIL + And the SOA serial for example.org should be 1239 + + Scenario: ACL + Given I have bind10 running with configuration ddns/ddns.config + And wait for bind10 stderr message BIND10_STARTED_CC + And wait for bind10 stderr message AUTH_SERVER_STARTED + And wait for bind10 stderr message DDNS_STARTED + + # Sanity check + A query for new1.example.org should have rcode NXDOMAIN + A query for new2.example.org should have rcode NXDOMAIN + A query for new3.example.org should have rcode NXDOMAIN + The SOA serial for example.org should be 1234 + + # Test 1 + When I use DDNS to add a record new1.example.org. 3600 IN A 192.0.2.1 + The DDNS response should be SUCCESS + A query for new1.example.org should have rcode NOERROR + The SOA serial for example.org should be 1235 + + # Test 2 + When I set DDNS ACL 0 for 127.0.0.1 to REJECT + Then use DDNS to add a record new2.example.org. 3600 IN A 192.0.2.2 + The DDNS response should be REFUSED + A query for new2.example.org should have rcode NXDOMAIN + The SOA serial for example.org should be 1235 + + # Test 3 + When I set DDNS ACL 0 for 127.0.0.1 to ACCEPT + Then use DDNS to add a record new3.example.org. 
3600 IN A 192.0.2.3 + The DDNS response should be SUCCESS + A query for new3.example.org should have rcode NOERROR + The SOA serial for example.org should be 1236 + + #Scenario: DDNS and Xfrout + ## Unfortunately, Xfrout can only notify to inzone slaves, and hence only + ## to port 53, which we do not want to use for Lettuce tests (for various + ## reasons). So for now this test is only an outline, the configs + ## themselves have not been set up yet + # When I start bind10 with configuration ddns/primary.config as primary + # And wait for primary stderr message AUTH_SERVER_STARTED + # And wait for primary stderr message XFROUT_STARTED + # And wait for primary stderr message DDNS_STARTED + + # And I start bind10 with configuration example2.org.config with cmdctl port 47804 as secondary + # And wait for secondary stderr message AUTH_SERVER_STARTED + # And wait for secondary stderr message XFRIN_STARTED + + # # Sanity check + # The SOA serial for example.org should be 1234 + # The SOA serial for example.org at 127.0.0.1:47807 should be 1234 + + # When I use DDNS to set the SOA serial to 1235 + # The DDNS response should be SUCCESS + + # The SOA serial for example.org should be 1235 + # The SOA serial for example.org at 127.0.0.1:47807 should be 1235 diff --git a/tests/lettuce/features/example.feature b/tests/lettuce/features/example.feature index ca5ffbf388..685cf8b907 100644 --- a/tests/lettuce/features/example.feature +++ b/tests/lettuce/features/example.feature @@ -5,7 +5,7 @@ Feature: Example feature is intentionally uncommented. 
The later scenarios have comments to show what the test steps do and support - + Scenario: A simple example Given I have bind10 running with configuration example.org.config And wait for bind10 stderr message BIND10_STARTED_CC @@ -29,9 +29,9 @@ Feature: Example feature # Underwater, we take advantage of our intialization routines so # that we are sure this file does not exist, see # features/terrain/terrain.py - - # Standard check to test (non-)existence of a file - # This file is actually automatically + + # Standard check to test (non-)existence of a file. + # This file is actually automatically created. The file data/test_nonexistent_db.sqlite3 should not exist # In the first scenario, we used 'given I have bind10 running', which @@ -43,6 +43,9 @@ Feature: Example feature And wait for bind10 stderr message CMDCTL_STARTED And wait for bind10 stderr message AUTH_SERVER_STARTED + # Now we use the first step again to see if the file has been created + The file data/test_nonexistent_db.sqlite3 should exist + bind10 module Auth should be running And bind10 module Resolver should not be running And bind10 module Xfrout should not be running @@ -56,16 +59,13 @@ Feature: Example feature # use in the start step (for bind 10, that is 'I start bind10 with') # See scenario 'Multiple instances' for more. Then stop process bind10 - - # Now we use the first step again to see if the file has been created - The file data/test_nonexistent_db.sqlite3 should exist Scenario: example.org queries # This scenario performs a number of queries and inspects the results # Simple queries have already been show, but after we have sent a query, # we can also do more extensive checks on the result. # See querying.py for more information on these steps. 
- + # note: lettuce can group similar checks by using tables, but we # intentionally do not make use of that here @@ -182,6 +182,9 @@ Feature: Example feature A query for www.example.org to 127.0.0.1:47806 should have rcode NOERROR A query for www.example.org to [::1]:47807 should have rcode NOERROR + The SOA serial for example.org should be 1234 + The SOA serial for example.org at 127.0.0.1:47806 should be 1234 + The SOA serial for example.org at ::1:47807 should be 1234 Then set bind10 configuration Auth/database_file to data/empty_db.sqlite3 And wait for bind10_one stderr message DATASRC_SQLITE_OPEN diff --git a/tests/lettuce/features/inmemory_over_sqlite3.feature b/tests/lettuce/features/inmemory_over_sqlite3.feature index 85737e9916..2e48689564 100644 --- a/tests/lettuce/features/inmemory_over_sqlite3.feature +++ b/tests/lettuce/features/inmemory_over_sqlite3.feature @@ -3,8 +3,41 @@ Feature: In-memory zone using SQLite3 backend data source that uses the SQLite3 data source as the backend, and tests scenarios that update the zone via incoming zone transfers. - Scenario: Load and response + Scenario: 1. Load and response Given I have bind10 running with configuration inmemory_over_sqlite3/secondary.conf + And wait for bind10 stderr message BIND10_STARTED_CC + And wait for bind10 stderr message CMDCTL_STARTED And wait for bind10 stderr message AUTH_SERVER_STARTED A query for www.example.org should have rcode NOERROR The SOA serial for example.org should be 1234 + + Scenario: 2. 
In-memory datasource backed by sqlite3 + Given I have bind10 running with configuration xfrin/retransfer_master.conf with cmdctl port 47804 as master + And wait for master stderr message BIND10_STARTED_CC + And wait for master stderr message CMDCTL_STARTED + And wait for master stderr message AUTH_SERVER_STARTED + And wait for master stderr message XFROUT_STARTED + And wait for master stderr message ZONEMGR_STARTED + + And I have bind10 running with configuration xfrin/inmem_slave.conf + And wait for bind10 stderr message BIND10_STARTED_CC + And wait for bind10 stderr message CMDCTL_STARTED + And wait for bind10 stderr message AUTH_SERVER_STARTED + And wait for bind10 stderr message XFRIN_STARTED + And wait for bind10 stderr message ZONEMGR_STARTED + + A query for www.example.org should have rcode NOERROR + """ + www.example.org. 3600 IN A 192.0.2.63 + """ + A query for mail.example.org should have rcode NXDOMAIN + When I send bind10 the command Xfrin retransfer example.org IN ::1 47807 + Then wait for new bind10 stderr message XFRIN_TRANSFER_SUCCESS not XFRIN_XFR_PROCESS_FAILURE + Then wait for new bind10 stderr message AUTH_LOAD_ZONE + + A query for www.example.org should have rcode NOERROR + The answer section of the last query response should be + """ + www.example.org. 3600 IN A 192.0.2.1 + """ + A query for mail.example.org should have rcode NOERROR diff --git a/tests/lettuce/features/terrain/bind10_control.py b/tests/lettuce/features/terrain/bind10_control.py index c56afb7c1d..a08a887ca4 100644 --- a/tests/lettuce/features/terrain/bind10_control.py +++ b/tests/lettuce/features/terrain/bind10_control.py @@ -52,7 +52,7 @@ def start_bind10(step, config_file, cmdctl_port, msgq_sockfile, process_name): It will also fail if there is a running process with the given process_name already. 
""" - args = [ 'bind10', '-v' ] + args = [ 'bind10', '-n', '-v' ] if config_file is not None: args.append('-p') args.append("configurations/") @@ -334,3 +334,32 @@ def module_is_running(step, name, not_str): not_str = "" step.given('send bind10 the command help') step.given('last bindctl output should' + not_str + ' contain ' + name + ' exactly') + +@step('Configure BIND10 to run DDNS') +def configure_ddns_on(step): + """ + Convenience compound step to enable the b10-ddns module. + """ + step.behave_as(""" + When I send bind10 the following commands + \"\"\" + config add Boss/components b10-ddns + config set Boss/components/b10-ddns/kind dispensable + config set Boss/components/b10-ddns/address DDNS + config commit + \"\"\" + """) + +@step('Configure BIND10 to stop running DDNS') +def configure_ddns_off(step): + """ + Convenience compound step to disable the b10-ddns module. + """ + step.behave_as(""" + When I send bind10 the following commands + \"\"\" + config remove Boss/components b10-ddns + config commit + \"\"\" + """) + diff --git a/tests/lettuce/features/terrain/nsupdate.py b/tests/lettuce/features/terrain/nsupdate.py new file mode 100644 index 0000000000..946439d1bc --- /dev/null +++ b/tests/lettuce/features/terrain/nsupdate.py @@ -0,0 +1,168 @@ +# Copyright (C) 2012 Internet Systems Consortium. +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM +# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL +# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING +# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from lettuce import * +import subprocess +import re + +def run_nsupdate(commands): + """Run nsupdate. + Parameters: + commands: a sequence of strings which will be sent. + update_address: adress to send the update to + update_port: port to send the update to + zone: zone to update + + Appends 'send' and 'quit' as final commands. + + nsupdate's stdout and stderr streams are stored (as one multiline string + in world.last_nsupdate_stdout/stderr. + + The return code is stored in world.last_nsupdate_returncode + (it is not checked here, since a number of tests intentionally + result in a non-zero return code). + """ + commands.append('send') + commands.append('quit') + args = ['nsupdate' ] + nsupdate = subprocess.Popen(args, 1, None, subprocess.PIPE, + subprocess.PIPE, subprocess.PIPE) + for line in commands: + nsupdate.stdin.write(line + "\n") + (stdout, stderr) = nsupdate.communicate() + world.last_nsupdate_returncode = nsupdate.returncode + world.last_nsupdate_stdout = stdout + world.last_nsupdate_stderr = stderr + +@step('send a DDNS update for (\S+) with the following commands:') +def send_multiple_commands(step, zone): + """ + Run nsupdate, and send it the given multiline set of commands. + A send and quit command is always appended. + + This is the most 'raw' wrapper around the nsupdate call; every + command except the final send needs to be specified. Intended + for those tests that have unique properties. 
+ """ + commands = step.multiline.split("\n") + run_nsupdate(commands, zone) + +@step('DDNS response should be ([A-Z]+)') +def check_ddns_response(step, response): + """ + Checks the result of the last call to nsupdate. + + If the given response argument is SUCCESS, it simply checks whether + the return code from nsupdate is 0 (there is no output in that case). + If not, it checks whether it is not 0, and if the given response string + matches a line 'update failed: ' in the stderr output of + nsupdate. + + Prints exit code, stdout and stderr streams of nsupdate if it fails. + """ + # For success, nsupdate is silent, only check result code 0 + if response == "SUCCESS": + assert 0 == world.last_nsupdate_returncode,\ + "nsupdate exit code: " + str(world.last_nsupdate_returncode) +\ + "\nstdout:\n" + str(world.last_nsupdate_stdout) +\ + "stderr:\n" + str(world.last_nsupdate_stderr) + else: + found = False + for line in world.last_nsupdate_stderr.split('\n'): + if line == "update failed: " + response: + found = True + assert found and (0 != world.last_nsupdate_returncode),\ + "Response " + response + " not found in nsupdate output\n" +\ + "nsupdate exit code: " + str(world.last_nsupdate_returncode) +\ + "\nstdout:\n" + str(world.last_nsupdate_stdout) +\ + "stderr:\n" + str(world.last_nsupdate_stderr) + + +# Individual steps to create a DDNS update packet through nsupdate +@step('Prepare a DDNS update(?: for (\S+))?(?: to (\S+)(?: port ([0-9]+)))?') +def prepare_update(step, zone, server, port): + """ + Prepares an nsupdate command that sets up an update to a server + for a zone. The update is not sent yet, but the commands + are stored in world.nsupdate_commands. 
+ """ + commands = [] + if server is not None: + commands.append("server " + server) + else: + commands.append("server 127.0.0.1") + if port is not None: + commands[0] = commands[0] + " " + port + else: + commands[0] = commands[0] + " 47806" + if zone is not None: + commands.append("zone " + zone) + world.nsupdate_commands = commands + +@step('Add to the DDNS update: (.*)') +def add_line_to_ddns_update(step, line): + """ + Adds a single line to the prepared nsupdate. It is not sent yet. + The line must conform to nsupdate syntax. + """ + world.nsupdate_commands.append(line) + +@step('Add the following lines to the DDNS update:') +def add_lines_to_ddns_update(step, line): + """ + Adds multiple lines to the prepared nsupdate. It is not sent yet. + The lines must conform to nsupdate syntax. + """ + world.nsupdate_commands.extend(step.multiline.split('\n')) + +@step('Send the DDNS update') +def run_ddns_update(step): + """ + Runs the prepared nsupdate, see run_nsupdate() for more information. + """ + run_nsupdate(world.nsupdate_commands) + +@step('use DDNS to set the SOA SERIAL to ([0-9]+)') +def set_serial_to(step, new_serial): + """ + Convenience compound step; prepare an update for example.org, + which sets the SERIAL to the given value, and send it. + It makes no other changes, and has hardcoded values for the other + SOA rdata fields. + """ + step.given('Prepare a DDNS update') + step.given('add to the DDNS update: update add example.org 3600 IN SOA ns1.example.org. admin.example.org. ' + new_serial + ' 3600 1800 2419200 7200') + step.given('Send the DDNS update') + +@step('use DDNS to add a record (.*)') +def add_record(step, new_record): + """ + Convenience compound step; prepare an update for example.org, + which adds one record, then send it. + Apart from the update addition, the update will not contain anything else. 
+ """ + step.given('Prepare a DDNS update') + step.given('add to the DDNS update: update add ' + new_record) + step.given('Send the DDNS update') + +@step('set DDNS ACL ([0-9]+) for ([0-9.]+) to ([A-Z]+)') +def set_ddns_acl_to(step, nr, address, action): + """ + Convenience step to update a single ACL for DDNS. + Replaces the ACL at the given index for the given + address, to the given action + """ + step.given('set bind10 configuration DDNS/zones[' + nr + ']/update_acl to [{"action": "' + action + '", "from": "' + address + '"}]') + step.given('last bindctl output should not contain Error') diff --git a/tests/lettuce/features/terrain/querying.py b/tests/lettuce/features/terrain/querying.py index a5470147aa..abd7c18059 100644 --- a/tests/lettuce/features/terrain/querying.py +++ b/tests/lettuce/features/terrain/querying.py @@ -240,8 +240,8 @@ def query(step, dnssec, query_name, qtype, qclass, addr, port, rcode): "Expected: " + rcode + ", got " + query_result.rcode world.last_query_result = query_result -@step('The SOA serial for ([\w.]+) should be ([0-9]+)') -def query_soa(step, query_name, serial): +@step('The SOA serial for ([\S.]+) (?:at (\S+)(?::([0-9]+)) )?should be ([0-9]+)') +def query_soa(step, query_name, address, port, serial=None): """ Convenience function to check the SOA SERIAL value of the given zone at the nameserver at the default address (127.0.0.1:47806). @@ -251,7 +251,11 @@ def query_soa(step, query_name, serial): If the rcode is not NOERROR, or the answer section does not contain the SOA record, this step fails. 
""" - query_result = QueryResult(query_name, "SOA", "IN", "127.0.0.1", "47806") + if address is None: + address = "127.0.0.1" + if port is None: + port = "47806" + query_result = QueryResult(query_name, "SOA", "IN", address, port) assert "NOERROR" == query_result.rcode,\ "Got " + query_result.rcode + ", expected NOERROR" assert len(query_result.answer_section) == 1,\ diff --git a/tests/lettuce/features/terrain/terrain.py b/tests/lettuce/features/terrain/terrain.py index 9a04bf5e5c..a35d0de2b2 100644 --- a/tests/lettuce/features/terrain/terrain.py +++ b/tests/lettuce/features/terrain/terrain.py @@ -52,7 +52,15 @@ copylist = [ ["configurations/resolver/resolver_basic.config.orig", "configurations/resolver/resolver_basic.config"], ["configurations/multi_instance/multi_auth.config.orig", - "configurations/multi_instance/multi_auth.config"] + "configurations/multi_instance/multi_auth.config"], + ["configurations/ddns/ddns.config.orig", + "configurations/ddns/ddns.config"], + ["configurations/ddns/noddns.config.orig", + "configurations/ddns/noddns.config"], + ["data/inmem-xfrin.sqlite3.orig", + "data/inmem-xfrin.sqlite3"], + ["data/ddns/example.org.sqlite3.orig", + "data/ddns/example.org.sqlite3"] ] # This is a list of files that, if present, will be removed before a scenario @@ -256,7 +264,7 @@ class RunningProcesses: Initialize with no running processes. """ self.processes = {} - + def add_process(self, step, process_name, args): """ Start a process with the given arguments, and store it under the given @@ -295,14 +303,14 @@ class RunningProcesses: "Process " + name + " unknown" self.processes[process_name].stop_process() del self.processes[process_name] - + def stop_all_processes(self): """ Stop all running processes. 
""" for process in self.processes.values(): process.stop_process() - + def keep_files(self): """ Keep the redirection files for stdout/stderr output of all processes diff --git a/tests/lettuce/features/xfrin_bind10.feature b/tests/lettuce/features/xfrin_bind10.feature index 2a563560aa..27dfb83f5b 100644 --- a/tests/lettuce/features/xfrin_bind10.feature +++ b/tests/lettuce/features/xfrin_bind10.feature @@ -2,6 +2,10 @@ Feature: Xfrin Tests for Xfrin, specific for BIND 10 behaviour. Scenario: Retransfer command + # Standard check to test (non-)existence of a file. + # This file is actually automatically created. + The file data/test_nonexistent_db.sqlite3 should not exist + Given I have bind10 running with configuration xfrin/retransfer_master.conf with cmdctl port 47804 as master And wait for master stderr message BIND10_STARTED_CC And wait for master stderr message CMDCTL_STARTED @@ -16,10 +20,13 @@ Feature: Xfrin And wait for bind10 stderr message XFRIN_STARTED And wait for bind10 stderr message ZONEMGR_STARTED + # Now we use the first step again to see if the file has been created + The file data/test_nonexistent_db.sqlite3 should exist + A query for www.example.org should have rcode REFUSED - Wait for bind10 stderr message CMDCTL_STARTED When I send bind10 the command Xfrin retransfer example.org IN ::1 47807 Then wait for new bind10 stderr message XFRIN_TRANSFER_SUCCESS not XFRIN_XFR_PROCESS_FAILURE + Then wait for new bind10 stderr message ZONEMGR_RECEIVE_XFRIN_SUCCESS A query for www.example.org should have rcode NOERROR # The transferred zone should have 11 non-NSEC3 RRs and 1 NSEC3 RR. @@ -27,5 +34,6 @@ Feature: Xfrin # should be 13, counting the duplicated SOA. # At this point we can confirm both in and out of AXFR for a zone # containing an NSEC3 RR. - When I do an AXFR transfer of example.org from ::1 47807 + # We don't have to specify the address/port here; the defaults will work. 
+ When I do an AXFR transfer of example.org Then transfer result should have 13 rrs diff --git a/tests/tools/badpacket/tests/Makefile.am b/tests/tools/badpacket/tests/Makefile.am index 2daa6646f4..bf70669f45 100644 --- a/tests/tools/badpacket/tests/Makefile.am +++ b/tests/tools/badpacket/tests/Makefile.am @@ -10,6 +10,9 @@ endif CLEANFILES = *.gcno *.gcda +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + TESTS = if HAVE_GTEST TESTS += run_unittests diff --git a/tests/tools/perfdhcp/Makefile.am b/tests/tools/perfdhcp/Makefile.am index bbad595f31..6ebc00f92a 100644 --- a/tests/tools/perfdhcp/Makefile.am +++ b/tests/tools/perfdhcp/Makefile.am @@ -1,12 +1,40 @@ -SUBDIRS = . +SUBDIRS = . tests + +AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib +AM_CPPFLAGS += -I$(top_srcdir)/src/lib/log -I$(top_builddir)/src/lib/log +AM_CPPFLAGS += $(BOOST_INCLUDES) AM_CXXFLAGS = $(B10_CXXFLAGS) +# Some versions of GCC warn about some versions of Boost regarding +# missing initializer for members in its posix_time. +# https://svn.boost.org/trac/boost/ticket/3477 +# But older GCC compilers don't have the flag. +AM_CXXFLAGS += $(WARNING_NO_MISSING_FIELD_INITIALIZERS_CFLAG) + AM_LDFLAGS = $(CLOCK_GETTIME_LDFLAGS) AM_LDFLAGS += -lm if USE_STATIC_LINK AM_LDFLAGS += -static endif +lib_LTLIBRARIES = libperfdhcp++.la +libperfdhcp___la_SOURCES = command_options.cc command_options.h +libperfdhcp___la_SOURCES += localized_option.h +libperfdhcp___la_SOURCES += perf_pkt6.cc perf_pkt6.h +libperfdhcp___la_SOURCES += perf_pkt4.cc perf_pkt4.h +libperfdhcp___la_SOURCES += pkt_transform.cc pkt_transform.h + +libperfdhcp___la_CXXFLAGS = $(AM_CXXFLAGS) +if USE_CLANGPP +# Disable unused parameter warning caused by some of the +# Boost headers when compiling with clang. 
+libperfdhcp___la_CXXFLAGS += -Wno-unused-parameter +endif + +libperfdhcp___la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la +libperfdhcp___la_LIBADD += $(top_builddir)/src/lib/dhcp/libdhcp++.la +libperfdhcp___la_LIBADD += $(top_builddir)/src/lib/asiolink/libasiolink.la + pkglibexec_PROGRAMS = perfdhcp perfdhcp_SOURCES = perfdhcp.c diff --git a/tests/tools/perfdhcp/command_options.cc b/tests/tools/perfdhcp/command_options.cc new file mode 100644 index 0000000000..5b4f4249fb --- /dev/null +++ b/tests/tools/perfdhcp/command_options.cc @@ -0,0 +1,681 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#include +#include +#include +#include + +#include +#include +#include + +#include "exceptions/exceptions.h" + +#include "command_options.h" + +using namespace std; +using namespace isc; + +namespace isc { +namespace perfdhcp { + +CommandOptions& +CommandOptions::instance() { + static CommandOptions options; + return (options); +} + +void +CommandOptions::reset() { + // Default mac address used in DHCP messages + // if -b mac= was not specified + uint8_t mac[6] = { 0x0, 0xC, 0x1, 0x2, 0x3, 0x4 }; + + // Default packet drop time if -D parameter + // was not specified + double dt[2] = { 1., 1. 
}; + + // We don't use constructor initialization list because we + // will need to reset all members many times to perform unit tests + ipversion_ = 0; + exchange_mode_ = DORA_SARR; + rate_ = 0; + report_delay_ = 0; + clients_num_ = 0; + mac_prefix_.assign(mac, mac + 6); + base_.resize(0); + num_request_.resize(0); + period_ = 0; + drop_time_set_ = 0; + drop_time_.assign(dt, dt + 2); + max_drop_.clear(); + max_pdrop_.clear(); + localname_.clear(); + is_interface_ = false; + preload_ = 0; + aggressivity_ = 1; + local_port_ = 0; + seeded_ = false; + seed_ = 0; + broadcast_ = false; + rapid_commit_ = false; + use_first_ = false; + template_file_.clear(); + rnd_offset_.clear(); + xid_offset_.clear(); + elp_offset_ = -1; + sid_offset_ = -1; + rip_offset_ = -1; + diags_.clear(); + wrapped_.clear(); + server_name_.clear(); +} + +void +CommandOptions::parse(int argc, char** const argv) { + // Reset internal variables used by getopt + // to eliminate undefined behavior when + // parsing different command lines multiple times + optind = 1; + opterr = 0; + + // Reset values of class members + reset(); + + initialize(argc, argv); + validate(); +} + +void +CommandOptions::initialize(int argc, char** argv) { + char opt = 0; // Subsequent options returned by getopt() + std::string drop_arg; // Value of -Dargument + size_t percent_loc = 0; // Location of % sign in -D + double drop_percent = 0; // % value (1..100) in -D + int num_drops = 0; // Max number of drops specified in -D + int num_req = 0; // Max number of dropped requests in -n + int offset_arg = 0; // Temporary variable holding offset arguments + std::string sarg; // Temporary variable for string args + + // In this section we collect argument values from command line + // they will be tuned and validated elsewhere + while((opt = getopt(argc, argv, "hv46r:t:R:b:n:p:d:D:l:P:a:L:s:iBc1T:X:O:E:S:I:x:w:")) != -1) { + switch (opt) { + case 'v': + version(); + return; + + case '1': + use_first_ = true; + break; + + case '4': + 
check(ipversion_ == 6, "IP version already set to 6"); + ipversion_ = 4; + break; + + case '6': + check(ipversion_ == 4, "IP version already set to 4"); + ipversion_ = 6; + break; + + case 'a': + aggressivity_ = positiveInteger("value of aggressivity: -a must be a positive integer"); + break; + + case 'b': + check(base_.size() > 3, "-b already specified, unexpected occurence of 5th -b"); + base_.push_back(optarg); + decodeBase(base_.back()); + break; + + case 'B': + broadcast_ = true; + break; + + case 'c': + rapid_commit_ = true; + break; + + case 'd': + check(drop_time_set_ > 1, "maximum number of drops already specified, " + "unexpected 3rd occurence of -d"); + try { + drop_time_[drop_time_set_] = boost::lexical_cast(optarg); + } catch (boost::bad_lexical_cast&) { + isc_throw(isc::InvalidParameter, + "value of drop time: -d must be positive number"); + } + check(drop_time_[drop_time_set_] <= 0., "drop-time must be a positive number"); + drop_time_set_ = true; + break; + + case 'D': + drop_arg = std::string(optarg); + percent_loc = drop_arg.find('%'); + check(max_pdrop_.size() > 1 || max_drop_.size() > 1, "values of maximum drops: -D already " + "specified, unexpected 3rd occurence of -D,value>"); + if ((percent_loc) != std::string::npos) { + try { + drop_percent = boost::lexical_cast(drop_arg.substr(0, percent_loc)); + } catch (boost::bad_lexical_cast&) { + isc_throw(isc::InvalidParameter, + "value of drop percentage: -D must be 0..100"); + } + check((drop_percent <= 0) || (drop_percent >= 100), + "value of drop percentage: -D must be 0..100"); + max_pdrop_.push_back(drop_percent); + } else { + num_drops = positiveInteger("value of max drops number: -d must be a positive integer"); + max_drop_.push_back(num_drops); + } + break; + + case 'E': + elp_offset_ = nonNegativeInteger("value of time-offset: -E must not be a negative integer"); + break; + + case 'h': + usage(); + return; + + case 'i': + exchange_mode_ = DO_SA; + break; + + case 'I': + rip_offset_ = 
positiveInteger("value of ip address offset: -I must be a positive integer"); + break; + + case 'l': + localname_ = std::string(optarg); + break; + + case 'L': + local_port_ = nonNegativeInteger("value of local port: -L must not be a negative integer"); + check(local_port_ > static_cast(std::numeric_limits::max()), + "local-port must be lower than " + + boost::lexical_cast(std::numeric_limits::max())); + break; + + case 'n': + num_req = positiveInteger("value of num-request: -n must be a positive integer"); + if (num_request_.size() >= 2) { + isc_throw(isc::InvalidParameter,"value of maximum number of requests: -n " + "already specified, unexpected 3rd occurence of -n"); + } + num_request_.push_back(num_req); + break; + + case 'O': + if (rnd_offset_.size() < 2) { + offset_arg = positiveInteger("value of random offset: -O must be greater than 3"); + } else { + isc_throw(isc::InvalidParameter, + "random offsets already specified, unexpected 3rd occurence of -O"); + } + check(offset_arg < 3, "value of random random-offset: -O must be greater than 3 "); + rnd_offset_.push_back(offset_arg); + break; + + case 'p': + period_ = positiveInteger("value of test period: -p must be a positive integer"); + break; + + case 'P': + preload_ = nonNegativeInteger("number of preload packets: -P must not be " + "a negative integer"); + break; + + case 'r': + rate_ = positiveInteger("value of rate: -r must be a positive integer"); + break; + + case 'R': + initClientsNum(); + break; + + case 's': + seed_ = static_cast + (nonNegativeInteger("value of seed: -s must be non-negative integer")); + seeded_ = seed_ > 0 ? 
true : false; + break; + + case 'S': + sid_offset_ = positiveInteger("value of server id offset: -S must be a positive integer"); + break; + + case 't': + report_delay_ = positiveInteger("value of report delay: -t must be a positive integer"); + break; + + case 'T': + if (template_file_.size() < 2) { + sarg = nonEmptyString("template file name not specified, expected -T"); + template_file_.push_back(sarg); + } else { + isc_throw(isc::InvalidParameter, + "template files are already specified, unexpected 3rd -T occurence"); + } + break; + + case 'w': + wrapped_ = nonEmptyString("command for wrapped mode: -w must be specified"); + break; + + case 'x': + diags_ = nonEmptyString("value of diagnostics selectors: -x must be specified"); + break; + + case 'X': + if (xid_offset_.size() < 2) { + offset_arg = positiveInteger("value of transaction id: -X must be a positive integer"); + } else { + isc_throw(isc::InvalidParameter, + "transaction ids already specified, unexpected 3rd -X occurence"); + } + xid_offset_.push_back(offset_arg); + break; + + default: + isc_throw(isc::InvalidParameter, "unknown command line option"); + } + } + + // If the IP version was not specified in the + // command line, assume IPv4. + if (ipversion_ == 0) { + ipversion_ = 4; + } + + // If template packet files specified for both DISCOVER/SOLICIT + // and REQUEST/REPLY exchanges make sure we have transaction id + // and random duid offsets for both exchanges. We will duplicate + // value specified as -X and -R for second + // exchange if user did not specified otherwise. 
+ if (template_file_.size() > 1) { + if (xid_offset_.size() == 1) { + xid_offset_.push_back(xid_offset_[0]); + } + if (rnd_offset_.size() == 1) { + rnd_offset_.push_back(rnd_offset_[0]); + } + } + + // Get server argument + // NoteFF02::1:2 and FF02::1:3 are defined in RFC3315 as + // All_DHCP_Relay_Agents_and_Servers and All_DHCP_Servers + // addresses + check(optind < argc -1, "extra arguments?"); + if (optind == argc - 1) { + server_name_ = argv[optind]; + // Decode special cases + if ((ipversion_ == 4) && (server_name_.compare("all") == 0)) { + broadcast_ = 1; + // 255.255.255.255 is IPv4 broadcast address + server_name_ = "255.255.255.255"; + } else if ((ipversion_ == 6) && (server_name_.compare("all") == 0)) { + server_name_ = "FF02::1:2"; + } else if ((ipversion_ == 6) && (server_name_.compare("servers") == 0)) { + server_name_ = "FF05::1:3"; + } + } + + // TODO handle -l option with IfaceManager when it is created +} + +void +CommandOptions::initClientsNum() { + const std::string errmsg = "value of -R must be non-negative integer"; + + // Declare clients_num as as 64-bit signed value to + // be able to detect negative values provided + // by user. We would not detect negative values + // if we casted directly to unsigned value. 
+ long long clients_num = 0; + try { + clients_num = boost::lexical_cast(optarg); + } catch (boost::bad_lexical_cast&) { + isc_throw(isc::InvalidParameter, errmsg.c_str()); + } + check(clients_num < 0, errmsg); + try { + clients_num_ = boost::lexical_cast(optarg); + } catch (boost::bad_lexical_cast&) { + isc_throw(isc::InvalidParameter, errmsg); + } +} + +void +CommandOptions::decodeBase(const std::string& base) { + std::string b(base); + boost::algorithm::to_lower(b); + + // Currently we only support mac and duid + if ((b.substr(0, 4) == "mac=") || (b.substr(0, 6) == "ether=")) { + decodeMac(b); + } else if (b.substr(0, 5) == "duid=") { + decodeDuid(b); + } else { + isc_throw(isc::InvalidParameter, + "base value not provided as -b, expected -b mac= or -b duid="); + } +} + +void +CommandOptions::decodeMac(const std::string& base) { + // Strip string from mac= + size_t found = base.find('='); + static const char* errmsg = "expected -b format for mac address is -b mac=00::0C::01::02::03::04"; + check(found == std::string::npos, errmsg); + + // Decode mac address to vector of uint8_t + std::istringstream s1(base.substr(found + 1)); + std::string token; + mac_prefix_.clear(); + // Get pieces of MAC address separated with : (or even ::) + while (std::getline(s1, token, ':')) { + unsigned int ui = 0; + // Convert token to byte value using std::istringstream + if (token.length() > 0) { + try { + // Do actual conversion + ui = convertHexString(token); + } catch (isc::InvalidParameter&) { + isc_throw(isc::InvalidParameter, + "invalid characters in MAC provided"); + + } + // If conversion succeeded store byte value + mac_prefix_.push_back(ui); + } + } + // MAC address must consist of 6 octets, otherwise it is invalid + check(mac_prefix_.size() != 6, errmsg); +} + +void +CommandOptions::decodeDuid(const std::string& base) { + // Strip argument from duid= + size_t found = base.find('='); + check(found == std::string::npos, "expected -b format for duid is -b duid="); + 
std::string b = base.substr(found + 1); + + // DUID must have even number of digits and must not be longer than 64 bytes + check(b.length() & 1, "odd number of hexadecimal digits in duid"); + check(b.length() > 128, "duid too large"); + check(b.length() == 0, "no duid specified"); + + // Turn pairs of hexadecimal digits into vector of octets + for (int i = 0; i < b.length(); i += 2) { + unsigned int ui = 0; + try { + // Do actual conversion + ui = convertHexString(b.substr(i, 2)); + } catch (isc::InvalidParameter&) { + isc_throw(isc::InvalidParameter, + "invalid characters in DUID provided, exepected hex digits"); + } + duid_prefix_.push_back(static_cast(ui)); + } +} + +uint8_t +CommandOptions::convertHexString(const std::string& text) const { + unsigned int ui = 0; + // First, check if we are dealing with hexadecimal digits only + for (int i = 0; i < text.length(); ++i) { + if (!std::isxdigit(text[i])) { + isc_throw(isc::InvalidParameter, + "The following digit: " << text[i] << " in " + << text << "is not hexadecimal"); + } + } + // If we are here, we have valid string to convert to octet + std::istringstream text_stream(text); + text_stream >> std::hex >> ui >> std::dec; + // Check if for some reason we have overflow - this should never happen! 
+ if (ui > 0xFF) { + isc_throw(isc::InvalidParameter, "Can't convert more than two hex digits to byte"); + } + return ui; +} + +void +CommandOptions::validate() const { + check((getIpVersion() != 4) && (isBroadcast() != 0), + "-B is not compatible with IPv6 (-6)"); + check((getIpVersion() != 6) && (isRapidCommit() != 0), + "-6 (IPv6) must be set to use -c"); + check((getExchangeMode() == DO_SA) && (getNumRequests().size() > 1), + "second -n is not compatible with -i"); + check((getExchangeMode() == DO_SA) && (getDropTime()[1] != 1.), + "second -d is not compatible with -i"); + check((getExchangeMode() == DO_SA) && + ((getMaxDrop().size() > 1) || (getMaxDropPercentage().size() > 1)), + "second -D is not compatible with -i\n"); + check((getExchangeMode() == DO_SA) && (isUseFirst()), + "-1 is not compatible with -i\n"); + check((getExchangeMode() == DO_SA) && (getTemplateFiles().size() > 1), + "second -T is not compatible with -i\n"); + check((getExchangeMode() == DO_SA) && (getTransactionIdOffset().size() > 1), + "second -X is not compatible with -i\n"); + check((getExchangeMode() == DO_SA) && (getRandomOffset().size() > 1), + "second -O= 0), + "-E is not compatible with -i\n"); + check((getExchangeMode() == DO_SA) && (getServerIdOffset() >= 0), + "-S is not compatible with -i\n"); + check((getExchangeMode() == DO_SA) && (getRequestedIpOffset() >= 0), + "-I is not compatible with -i\n"); + check((getExchangeMode() != DO_SA) && (isRapidCommit() != 0), + "-i must be set to use -c\n"); + check((getRate() == 0) && (getReportDelay() != 0), + "-r must be set to use -t\n"); + check((getRate() == 0) && (getNumRequests().size() > 0), + "-r must be set to use -n\n"); + check((getRate() == 0) && (getPeriod() != 0), + "-r must be set to use -p\n"); + check((getRate() == 0) && + ((getMaxDrop().size() > 0) || getMaxDropPercentage().size() > 0), + "-r must be set to use -D\n"); + check((getTemplateFiles().size() < getTransactionIdOffset().size()), + "-T must be set to use -X\n"); + 
check((getTemplateFiles().size() < getRandomOffset().size()), + "-T must be set to use -O\n"); + check((getTemplateFiles().size() < 2) && (getElapsedTimeOffset() >= 0), + "second/request -T must be set to use -E\n"); + check((getTemplateFiles().size() < 2) && (getServerIdOffset() >= 0), + "second/request -T must be set to " + "use -S\n"); + check((getTemplateFiles().size() < 2) && (getRequestedIpOffset() >= 0), + "second/request -T must be set to " + "use -I\n"); + +} + +void +CommandOptions::check(bool condition, const std::string& errmsg) const { + // The same could have been done with macro or just if statement but + // we prefer functions to macros here + if (condition) { + isc_throw(isc::InvalidParameter, errmsg); + } +} + +int +CommandOptions::positiveInteger(const std::string& errmsg) const { + try { + int value = boost::lexical_cast(optarg); + check(value <= 0, errmsg); + return (value); + } catch (boost::bad_lexical_cast&) { + isc_throw(InvalidParameter, errmsg); + } +} + +int +CommandOptions::nonNegativeInteger(const std::string& errmsg) const { + try { + int value = boost::lexical_cast(optarg); + check(value < 0, errmsg); + return (value); + } catch (boost::bad_lexical_cast&) { + isc_throw(InvalidParameter, errmsg); + } +} + +std::string +CommandOptions::nonEmptyString(const std::string& errmsg) const { + std::string sarg = optarg; + if (sarg.length() == 0) { + isc_throw(isc::InvalidParameter, errmsg); + } + return sarg; +} + +void +CommandOptions::usage() const { + fprintf(stdout, "%s", +"perfdhcp [-hv] [-4|-6] [-r] [-t] [-R] [-b]\n" +" [-n] [-p] [-d] [-D]\n" +" [-l] [-P] [-a]\n" +" [-L] [-s] [-i] [-B] [-c] [-1]\n" +" [-T] [-X] [-O] [-S] [-I]\n" +" [-x] [-w] [server]\n" +"\n" +"The [server] argument is the name/address of the DHCP server to\n" +"contact. 
For DHCPv4 operation, exchanges are initiated by\n" +"transmitting a DHCP DISCOVER to this address.\n" +"\n" +"For DHCPv6 operation, exchanges are initiated by transmitting a DHCP\n" +"SOLICIT to this address. In the DHCPv6 case, the special name 'all'\n" +"can be used to refer to All_DHCP_Relay_Agents_and_Servers (the\n" +"multicast address FF02::1:2), or the special name 'servers' to refer\n" +"to All_DHCP_Servers (the multicast address FF05::1:3). The [server]\n" +"argument is optional only in the case that -l is used to specify an\n" +"interface, in which case [server] defaults to 'all'.\n" +"\n" +"The default is to perform a single 4-way exchange, effectively pinging\n" +"the server.\n" +"The -r option is used to set up a performance test, without\n" +"it exchanges are initiated as fast as possible.\n" +"\n" +"Options:\n" +"-1: Take the server-ID option from the first received message.\n" +"-4: DHCPv4 operation (default). This is incompatible with the -6 option.\n" +"-6: DHCPv6 operation. This is incompatible with the -4 option.\n" +"-a: When the target sending rate is not yet reached,\n" +" control how many exchanges are initiated before the next pause.\n" +"-b: The base mac, duid, IP, etc, used to simulate different\n" +" clients. This can be specified multiple times, each instance is\n" +" in the = form, for instance:\n" +" (and default) mac=00:0c:01:02:03:04.\n" +"-d: Specify the time after which a request is treated as\n" +" having been lost. The value is given in seconds and may contain a\n" +" fractional component. 
The default is 1 second.\n" +"-E: Offset of the (DHCPv4) secs field / (DHCPv6)\n" +" elapsed-time option in the (second/request) template.\n" +" The value 0 disables it.\n" +"-h: Print this help.\n" +"-i: Do only the initial part of an exchange: DO or SA, depending on\n" +" whether -6 is given.\n" +"-I: Offset of the (DHCPv4) IP address in the requested-IP\n" +" option / (DHCPv6) IA_NA option in the (second/request) template.\n" +"-l: For DHCPv4 operation, specify the local\n" +" hostname/address to use when communicating with the server. By\n" +" default, the interface address through which traffic would\n" +" normally be routed to the server is used.\n" +" For DHCPv6 operation, specify the name of the network interface\n" +" via which exchanges are initiated.\n" +"-L: Specify the local port to use\n" +" (the value 0 means to use the default).\n" +"-O: Offset of the last octet to randomize in the template.\n" +"-P: Initiate first exchanges back to back at startup.\n" +"-r: Initiate DORA/SARR (or if -i is given, DO/SA)\n" +" exchanges per second. A periodic report is generated showing the\n" +" number of exchanges which were not completed, as well as the\n" +" average response latency. The program continues until\n" +" interrupted, at which point a final report is generated.\n" +"-R: Specify how many different clients are used. With 1\n" +" (the default), all requests seem to come from the same client.\n" +"-s: Specify the seed for randomization, making it repeatable.\n" +"-S: Offset of the server-ID option in the\n" +" (second/request) template.\n" +"-T: The name of a file containing the template to use\n" +" as a stream of hexadecimal digits.\n" +"-v: Report the version number of this program.\n" +"-w: Command to call with start/stop at the beginning/end of\n" +" the program.\n" +"-x: Include extended diagnostics in the output.\n" +" is a string of single-keywords specifying\n" +" the operations for which verbose output is desired. 
The selector\n" +" keyletters are:\n" +" * 'a': print the decoded command line arguments\n" +" * 'e': print the exit reason\n" +" * 'i': print rate processing details\n" +" * 'r': print randomization details\n" +" * 's': print first server-id\n" +" * 't': when finished, print timers of all successful exchanges\n" +" * 'T': when finished, print templates\n" +"-X: Transaction ID (aka. xid) offset in the template.\n" +"\n" +"DHCPv4 only options:\n" +"-B: Force broadcast handling.\n" +"\n" +"DHCPv6 only options:\n" +"-c: Add a rapid commit option (exchanges will be SA).\n" +"\n" +"The remaining options are used only in conjunction with -r:\n" +"\n" +"-D: Abort the test if more than requests have\n" +" been dropped. Use -D0 to abort if even a single request has been\n" +" dropped. If includes the suffix '%', it specifies a\n" +" maximum percentage of requests that may be dropped before abort.\n" +" In this case, testing of the threshold begins after 10 requests\n" +" have been expected to be received.\n" +"-n: Initiate transactions. No report is\n" +" generated until all transactions have been initiated/waited-for,\n" +" after which a report is generated and the program terminates.\n" +"-p: Send requests for the given test period, which is\n" +" specified in the same manner as -d. 
This can be used as an\n" +" alternative to -n, or both options can be given, in which case the\n" +" testing is completed when either limit is reached.\n" +"-t: Delay in seconds between two periodic reports.\n" +"\n" +"Errors:\n" +"- tooshort: received a too short message\n" +"- orphans: received a message which doesn't match an exchange\n" +" (duplicate, late or not related)\n" +"- locallimit: reached to local system limits when sending a message.\n" +"\n" +"Exit status:\n" +"The exit status is:\n" +"0 on complete success.\n" +"1 for a general error.\n" +"2 if an error is found in the command line arguments.\n" +"3 if there are no general failures in operation, but one or more\n" +" exchanges are not successfully completed.\n"); +} + +void +CommandOptions::version() const { + fprintf(stdout, "version 0.01\n"); +} + + +} // namespace perfdhcp +} // namespace isc diff --git a/tests/tools/perfdhcp/command_options.h b/tests/tools/perfdhcp/command_options.h new file mode 100644 index 0000000000..9196857d7c --- /dev/null +++ b/tests/tools/perfdhcp/command_options.h @@ -0,0 +1,412 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. 
+ +#ifndef __COMMAND_OPTIONS_H +#define __COMMAND_OPTIONS_H + +#include +#include + +#include + +namespace isc { +namespace perfdhcp { + +/// \brief Command Options +/// +/// This class is responsible for parsing the command-line and storing the +/// specified options. +/// +class CommandOptions : public boost::noncopyable { +public: + /// 2-way (cmd line param -i) or 4-way exchanges + enum ExchangeMode { + DO_SA, + DORA_SARR + }; + + /// CommandOptions is a singleton class. This method returns reference + /// to its sole instance. + /// + /// \return the only existing instance of command options + static CommandOptions& instance(); + + /// \brief Reset to defaults + /// + /// Reset data members to default values. This is specifically + /// useful when unit tests are performed using different + /// command line options. + void reset(); + + /// \brief Parse command line + /// + /// Parses the command line and stores the selected options + /// in class data members. + /// + /// \param argc Argument count passed to main(). + /// \param argv Argument value array passed to main(). 
+ /// \throws isc::InvalidParameter if parse fails + void parse(int argc, char** const argv); + + /// \brief Returns IP version + /// + /// \return IP version to be used + uint8_t getIpVersion() const { return ipversion_; } + + /// \brief Returns packet exchange mode + /// + /// \return packet exchange mode + ExchangeMode getExchangeMode() const { return exchange_mode_; } + + /// \brief Returns echange rate + /// + /// \return exchange rate per second + int getRate() const { return rate_; } + + /// \brief Returns delay between two performance reports + /// + /// \return delay between two consecutive performance reports + int getReportDelay() const { return report_delay_; } + + /// \brief Returns number of simulated clients + /// + /// \return number of simulated clients + uint32_t getClientsNum() const { return clients_num_; } + + /// \brief Returns MAC address prefix + /// + /// \ return MAC address prefix to simulate different clients + std::vector getMacPrefix() const { return mac_prefix_; } + + /// \brief Returns DUID prefix + /// + /// \return DUID prefix to simulate different clients + std::vector getDuidPrefix() const { return duid_prefix_; } + + /// \brief Returns base values + /// + /// \return all base values specified + std::vector getBase() const { return base_; } + + /// \brief Returns maximum number of exchanges + /// + /// \return number of exchange requests before test is aborted + std::vector getNumRequests() const { return num_request_; } + + /// \brief Returns test period + /// + /// \return test period before it is aborted + int getPeriod() const { return period_; } + + /// \brief Returns drop time + /// + /// The method returns maximum time elapsed from + /// sending the packet before it is assumed dropped. 
+ ///
+ /// \return return time before request is assumed dropped
+ std::vector getDropTime() const { return drop_time_; }
+
+ /// \brief Returns maximum drops number
+ ///
+ /// Returns maximum number of packet drops before
+ /// aborting a test.
+ ///
+ /// \return maximum number of dropped requests
+ std::vector getMaxDrop() const { return max_drop_; }
+
+ /// \brief Returns maximal percentage of drops
+ ///
+ /// Returns maximal percentage of packet drops
+ /// before aborting a test.
+ ///
+ /// \return maximum percentage of lost requests
+ std::vector getMaxDropPercentage() const { return max_pdrop_; }
+
+ /// \brief Returns local address or interface name
+ ///
+ /// \return local address or interface name
+ std::string getLocalName() const { return localname_; }
+
+ /// \brief Checks if interface name was used
+ ///
+ /// The method checks if interface name was used
+ /// rather than address.
+ ///
+ /// \return true if interface name was used
+ bool isInterface() const { return is_interface_; }
+
+ /// \brief Returns number of preload exchanges
+ ///
+ /// \return number of preload exchanges
+ int getPreload() const { return preload_; }
+
+ /// \brief Returns aggressivity value
+ ///
+ /// \return aggressivity value
+ int getAggressivity() const { return aggressivity_; }
+
+ /// \brief Returns local port number
+ ///
+ /// \return local port number
+ int getLocalPort() const { return local_port_; }
+
+ /// \brief Checks if seed provided
+ ///
+ /// \return true if seed was provided
+ bool isSeeded() const { return seeded_; }
+
+ /// \brief Returns random seed
+ ///
+ /// \return random seed
+ uint32_t getSeed() const { return seed_; }
+
+ /// \brief Checks if broadcast address is to be used
+ ///
+ /// \return true if broadcast address is to be used
+ bool isBroadcast() const { return broadcast_; }
+
+ /// \brief Check if rapid commit option used
+ ///
+ /// \return true if rapid commit option is used
+ bool isRapidCommit() const { return rapid_commit_; }
+ 
+ /// \brief Check if server-ID to be taken from first package + /// + /// \return true if server-iD to be taken from first package + bool isUseFirst() const { return use_first_; } + + /// \brief Returns template file names + /// + /// \return template file names + std::vector getTemplateFiles() const { return template_file_; } + + /// brief Returns template offsets for xid + /// + /// \return template offsets for xid + std::vector getTransactionIdOffset() const { return xid_offset_; } + + /// \brief Returns template offsets for rnd + /// + /// \return template offsets for rnd + std::vector getRandomOffset() const { return rnd_offset_; } + + /// \brief Returns template offset for elapsed time + /// + /// \return template offset for elapsed time + int getElapsedTimeOffset() const { return elp_offset_; } + + /// \brief Returns template offset for server-ID + /// + /// \return template offset for server-ID + int getServerIdOffset() const { return sid_offset_; } + + /// \brief Returns template offset for requested IP + /// + /// \return template offset for requested IP + int getRequestedIpOffset() const { return rip_offset_; } + + /// \brief Returns diagnostic selectors + /// + /// \return diagnostics selector + std::string getDiags() const { return diags_; } + + /// \brief Returns wrapped command + /// + /// \return wrapped command (start/stop) + std::string getWrapped() const { return wrapped_; } + + /// \brief Returns server name + /// + /// \return server name + std::string getServerName() const { return server_name_; } + + /// \brief Print usage + /// + /// Prints perfdhcp usage + void usage() const; + + /// \brief Print program version + /// + /// Prints perfdhcp version + void version() const; + +private: + + /// \brief Default Constructor + /// + /// Private constructor as this is a singleton class. + /// Use CommandOptions::instance() to get instance of it. 
+ CommandOptions() { + reset(); + } + + /// \brief Initializes class members based command line + /// + /// Reads each command line parameter and sets class member values + /// + /// \param argc Argument count passed to main(). + /// \param argv Argument value array passed to main(). + /// \throws isc::InvalidParameter if command line options initialization fails + void initialize(int argc, char** argv); + + /// \brief Validates initialized options + /// + /// \throws isc::InvalidParameter if command line validation fails + void validate() const; + + /// \brief Throws !InvalidParameter exception if condition is true + /// + /// Convenience function that throws an InvalidParameter exception if + /// the condition argument is true + /// + /// \param condition Condition to be checked + /// \param errmsg Error message in exception + /// \throws isc::InvalidParameter if condition argument true + inline void check(bool condition, const std::string& errmsg) const; + + /// \brief Casts command line argument to positive integer + /// + /// \param errmsg Error message if lexical cast fails + /// \throw InvalidParameter if lexical cast fails + int positiveInteger(const std::string& errmsg) const; + + /// \brief Casts command line argument to non-negative integer + /// + /// \param errmsg Error message if lexical cast fails + /// \throw InvalidParameter if lexical cast fails + int nonNegativeInteger(const std::string& errmsg) const; + + /// \brief Returns command line string if it is not empty + /// + /// \param errmsg Error message if string is empty + /// \throw InvalidParameter if string is empty + std::string nonEmptyString(const std::string& errmsg) const; + + /// \brief Set number of clients + /// + /// Interprets the getopt() "opt" global variable as the number of clients + /// (a non-negative number). This value is specified by the "-R" switch. 
+ /// + /// \throw InvalidParameter if -R is wrong + void initClientsNum(); + + /// \brief Decodes base provided with -b + /// + /// Function decodes argument of -b switch, which + /// specifies a base value used to generate unique + /// mac or duid values in packets sent to system + /// under test. + /// The following forms of switch arguments are supported: + /// - -b mac=00:01:02:03:04:05 + /// - -b duid=0F1234 (duid can be up to 128 hex digits) + // Function will decode 00:01:02:03:04:05 and/or + /// 0F1234 respectively and initialize mac_prefix_ + /// and/or duid_prefix_ members + /// + /// \param base Base in string format + /// \throws isc::InvalidParameter if base is invalid + void decodeBase(const std::string& base); + + /// \brief Decodes base MAC address provided with -b + /// + /// Function decodes parameter given as -b mac=00:01:02:03:04:05 + /// The function will decode 00:01:02:03:04:05 initialize mac_prefix_ + /// class member. + /// Provided MAC address is for example only + /// + /// \param base Base string given as -b mac=00:01:02:03:04:05 + /// \throws isc::InvalidParameter if mac address is invalid + void decodeMac(const std::string& base); + + /// \brief Decodes base DUID provided with -b + /// + /// Function decodes parameter given as -b duid=0F1234 + /// The function will decode 0F1234 and initialize duid_prefix_ + /// class member. + /// Provided DUID is for example only. + /// + /// \param base Base string given as -b duid=0F1234 + /// \throws isc::InvalidParameter if DUID is invalid + void decodeDuid(const std::string& base); + + /// \brief Converts two-digit hexadecimal string to a byte + /// + /// \param hex_text Hexadecimal string e.g. 
AF
+ /// \throw isc::InvalidParameter if string does not represent hex byte
+ uint8_t convertHexString(const std::string& hex_text) const;
+
+ uint8_t ipversion_; ///< IP protocol version to be used, expected values are:
+ ///< 4 for IPv4 and 6 for IPv6, default value 0 means "not set"
+ ExchangeMode exchange_mode_; ///< Packet exchange mode (e.g. DORA/SARR)
+ int rate_; ///< Rate in exchange per second
+ int report_delay_; ///< Delay between generation of two consecutive
+ ///< performance reports
+ uint32_t clients_num_; ///< Number of simulated clients (aka randomization range).
+ std::vector mac_prefix_; ///< MAC address prefix used to generate unique DUIDs
+ ///< for simulated clients.
+ std::vector duid_prefix_; ///< DUID prefix used to generate unique DUIDs for
+ ///< simulated clients
+ std::vector base_; ///< Collection of base values specified with -b
+ ///< options. Supported "bases" are mac= and duid=
+ std::vector num_request_; ///< Number of 2 or 4-way exchanges to perform
+ int period_; ///< Test period in seconds
+ uint8_t drop_time_set_; ///< Indicates number of -d parameters specified by user.
+ ///< If this value goes above 2, command line parsing fails.
+ std::vector drop_time_; ///< Time to elapse before request is lost. The first value of
+ ///< two-element vector refers to DO/SA exchanges,
+ ///< second value refers to RA/RR. Default values are { 1, 1 }
+ std::vector max_drop_; ///< Maximum number of drops request before aborting test.
+ ///< First value of two-element vector specifies maximum
+ ///< number of drops for DO/SA exchange, second value
+ ///< specifies maximum number of drops for RA/RR.
+ std::vector max_pdrop_; ///< Maximal percentage of lost requests before aborting test.
+ ///< First value of two-element vector specifies percentage for
+ ///< DO/SA exchanges, second value for RA/RR.
+ std::string localname_; ///< Local address or interface specified with -l option. 
+ bool is_interface_; ///< Indicates that specified value with -l is
+ ///< rather interface (not address)
+ int preload_; ///< Number of preload packets. Preload packets are used to
+ ///< initiate communication with server before doing performance
+ ///< measurements.
+ int aggressivity_; ///< Number of exchanges sent before next pause.
+ int local_port_; ///< Local port number (host endian)
+ bool seeded_; ///< Indicates that randomization seed was provided.
+ uint32_t seed_; ///< Randomization seed.
+ bool broadcast_; ///< Indicates that we use broadcast address.
+ bool rapid_commit_; ///< Indicates that we do rapid commit option.
+ bool use_first_; ///< Indicates that we take server id from first received packet.
+ std::vector template_file_; ///< Packet template file names. These files store template packets
+ ///< that are used for initiating exchanges. Template packets
+ ///< read from files are later tuned with variable data.
+ std::vector xid_offset_; ///< Offset of transaction id in template files. First vector
+ ///< element points to offset for DISCOVER/SOLICIT messages,
+ ///< second element points to transaction id offset for
+ ///< REQUEST messages
+ std::vector rnd_offset_; ///< Random value offset in templates. Random value offset
+ ///< points to last octet of DUID. Up to 4 last octets of
+ ///< DUID are randomized to simulate different clients.
+ int elp_offset_; ///< Offset of elapsed time option in template packet.
+ int sid_offset_; ///< Offset of server id option in template packet.
+ int rip_offset_; ///< Offset of requested ip data in template packet.
+ std::string diags_; ///< String representing diagnostic selectors specified
+ ///< by user with -x.
+ std::string wrapped_; ///< Wrapped command specified as -w. Expected
+ ///< values are start and stop.
+ std::string server_name_; ///< Server name specified as last argument of command line. 
+}; + +} // namespace perfdhcp +} // namespace isc + +#endif // __COMMAND_OPTIONS_H diff --git a/tests/tools/perfdhcp/localized_option.h b/tests/tools/perfdhcp/localized_option.h new file mode 100644 index 0000000000..5374684f89 --- /dev/null +++ b/tests/tools/perfdhcp/localized_option.h @@ -0,0 +1,123 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#ifndef __LOCALIZED_OPTION_H +#define __LOCALIZED_OPTION_H + +#include + +namespace isc { +namespace perfdhcp { + +/// \brief DHCP option at specific offset +/// +/// This class represents DHCP option with data placed at specified +/// offset in DHCP message. +/// Objects of this type are intended to be used when DHCP packets +/// are created from templates (e.g. read from template file). +/// Such packets have number of options with contents that have to be +/// replaced before sending: e.g. DUID can be randomized. +/// If option of this type is added to \ref PerfPkt6 options collection, +/// \ref perfdhcp::PerfPkt6 will call \ref getOffset on this object +/// to retrieve user-defined option position and replace contents of +/// the output buffer at this offset before packet is sent to the server. +/// (\see perfdhcp::PerfPkt6::rawPack). 
+/// In order to read on-wire data from incoming packet client class +/// has to specify options of \ref perfdhcp::LocalizedOption type +/// with expected offsets of these options in a packet. The +/// \ref perfdhcp::PerfPkt6 will use offsets to read fragments +/// of packet and store them in options' buffers. +/// (\see perfdhcp::PerfPkt6::rawUnpack). +/// +class LocalizedOption : public dhcp::Option { +public: + /// \brief Constructor, sets default (0) option offset + /// + /// \param u specifies universe (V4 or V6) + /// \param type option type (0-255 for V4 and 0-65535 for V6) + /// \param data content of the option + LocalizedOption(dhcp::Option::Universe u, + uint16_t type, + const dhcp::OptionBuffer& data) : + dhcp::Option(u, type, data), + offset_(0) { + } + + + /// \brief Constructor, used to create localized option from buffer + /// + /// \param u specifies universe (V4 or V6) + /// \param type option type (0-255 for V4 and 0-65535 for V6) + /// \param data content of the option + /// \param offset location of option in a packet (zero is default) + LocalizedOption(dhcp::Option::Universe u, + uint16_t type, + const dhcp::OptionBuffer& data, + const size_t offset) : + dhcp::Option(u, type, data), + offset_(offset) { + } + + /// \brief Constructor, sets default (0) option offset + /// + /// This contructor is similar to the previous one, but it does not take + /// the whole vector, but rather subset of it. + /// + /// \param u specifies universe (V4 or V6) + /// \param type option type (0-255 for V4 and 0-65535 for V6) + /// \param first iterator to the first element that should be copied + /// \param last iterator to the next element after the last one + /// to be copied. 
+ LocalizedOption(dhcp::Option::Universe u, + uint16_t type, + dhcp::OptionBufferConstIter first, + dhcp::OptionBufferConstIter last) : + dhcp::Option(u, type, first, last), + offset_(0) { + } + + + /// \brief Constructor, used to create option from buffer iterators + /// + /// This contructor is similar to the previous one, but it does not take + /// the whole vector, but rather subset of it. + /// + /// \param u specifies universe (V4 or V6) + /// \param type option type (0-255 for V4 and 0-65535 for V6) + /// \param first iterator to the first element that should be copied + /// \param last iterator to the next element after the last one + /// to be copied. + /// \param offset offset of option in a packet (zero is default) + LocalizedOption(dhcp::Option::Universe u, + uint16_t type, + dhcp::OptionBufferConstIter first, + dhcp::OptionBufferConstIter last, const size_t offset) : + dhcp::Option(u, type, first, last), + offset_(offset) { + } + + /// \brief Returns offset of an option in a DHCP packet. + /// + /// \return option offset in a packet + size_t getOffset() const { return offset_; }; + +private: + size_t offset_; ///< Offset of DHCP option in a packet +}; + + +} // namespace perfdhcp +} // namespace isc + +#endif // __LOCALIZED_OPTION_H diff --git a/tests/tools/perfdhcp/perf_pkt4.cc b/tests/tools/perfdhcp/perf_pkt4.cc new file mode 100644 index 0000000000..3f733afacc --- /dev/null +++ b/tests/tools/perfdhcp/perf_pkt4.cc @@ -0,0 +1,62 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. 
IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#include +#include + +#include "perf_pkt4.h" +#include "pkt_transform.h" + +using namespace std; +using namespace isc; +using namespace dhcp; + +namespace isc { +namespace perfdhcp { + +PerfPkt4::PerfPkt4(const uint8_t* buf, + size_t len, + size_t transid_offset, + uint32_t transid) : + Pkt4(buf, len), + transid_offset_(transid_offset) { + setTransid(transid); +} + +bool +PerfPkt4::rawPack() { + return (PktTransform::pack(dhcp::Option::V4, + data_, + options_, + getTransidOffset(), + getTransid(), + bufferOut_)); +} + +bool +PerfPkt4::rawUnpack() { + uint32_t transid = getTransid(); + bool res = PktTransform::unpack(dhcp::Option::V4, + data_, + options_, + getTransidOffset(), + transid); + if (res) { + setTransid(transid); + } + return (res); +} + +} // namespace perfdhcp +} // namespace isc diff --git a/tests/tools/perfdhcp/perf_pkt4.h b/tests/tools/perfdhcp/perf_pkt4.h new file mode 100644 index 0000000000..f4cc440773 --- /dev/null +++ b/tests/tools/perfdhcp/perf_pkt4.h @@ -0,0 +1,113 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. 
IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#ifndef __PERF_PKT4_H +#define __PERF_PKT4_H + +#include +#include +#include + +#include "localized_option.h" + +namespace isc { +namespace perfdhcp { + +/// \brief PerfPkt4 (DHCPv4 packet) +/// +/// This class extends the functionality of \ref isc::dhcp::Pkt4 by adding the +/// ability to specify an options offset in the DHCP message and to override +/// options' contents. This is particularly useful when we create a packet +/// object using a template file (i.e. do not build it dynamically). The client +/// class should read data from the template file and pass it to this class in +/// a buffer. +/// +/// The contents of such a packet can be later partially replaced, notably the +/// selected options and the transaction ID. (The transaction ID and its +/// offset in the template file are passed via the constructor.) +/// +/// In order to replace contents of the options, the client class has to +/// create a collection of \ref LocalizedOption, adding them using +/// \ref dhcp::Pkt4::addOption. +/// +/// \note If you don't use template files simply use constructors +/// inherited from parent class and isc::dhcp::Option type instead + +class PerfPkt4 : public dhcp::Pkt4 { +public: + + /// Localized option pointer type. + typedef boost::shared_ptr LocalizedOptionPtr; + + /// \brief Constructor, used to create messages from packet + /// template files. + /// + /// Creates a new DHCPv4 message using the provided buffer. + /// The transaction ID and its offset are specified via this + /// constructor. The transaction ID is stored in outgoing message + /// when client class calls \ref PerfPkt4::rawPack. 
Transaction id
+    /// offset value is used for incoming and outgoing messages to
+    /// identify transaction ID field's position in incoming and outgoing
+    /// messages.
+    ///
+    /// \param buf buffer holding contents of the message (this can
+    /// be directly read from template file).
+    /// \param len length of the data in the buffer.
+    /// \param transid_offset transaction id offset in a message.
+    /// \param transid transaction id to be stored in outgoing message.
+    PerfPkt4(const uint8_t* buf,
+             size_t len,
+             size_t transid_offset = 1,
+             uint32_t transid = 0);
+
+    /// \brief Returns transaction id offset in packet buffer
+    ///
+    /// \return Transaction ID offset in packet buffer
+    size_t getTransidOffset() const { return transid_offset_; };
+
+    /// \brief Prepares on-wire format from raw buffer.
+    ///
+    /// The method copies the buffer provided in the constructor to the
+    /// output buffer and replaces the transaction ID and selected
+    /// options with new data.
+    ///
+    /// \note Use this method to prepare an on-wire DHCPv4 message
+    /// when you use template packets that require replacement
+    /// of selected options' contents before sending.
+    ///
+    /// \return false if pack operation failed.
+    bool rawPack();
+
+    /// \brief Handles limited binary packet parsing for packets with
+    /// custom offsets of options and transaction ID
+    ///
+    /// This method handles the parsing of packets that have custom offsets
+    /// of options or transaction ID. Use
+    /// \ref isc::dhcp::Pkt4::addOption to specify which options to parse.
+    /// Options should be of the \ref isc::perfdhcp::LocalizedOption
+    /// type with offset values provided. Each added option will
+    /// be updated with actual data read from the binary packet buffer.
+    ///
+    /// \return false if unpack operation failed. 
+ bool rawUnpack(); + +private: + size_t transid_offset_; ///< transaction id offset + +}; + +} // namespace perfdhcp +} // namespace isc + +#endif // __PERF_PKT4_H diff --git a/tests/tools/perfdhcp/perf_pkt6.cc b/tests/tools/perfdhcp/perf_pkt6.cc new file mode 100644 index 0000000000..24cfb931a9 --- /dev/null +++ b/tests/tools/perfdhcp/perf_pkt6.cc @@ -0,0 +1,64 @@ +// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. 
+ +#include +#include +#include +#include + +#include "perf_pkt6.h" +#include "pkt_transform.h" + +using namespace std; +using namespace isc; +using namespace dhcp; + +namespace isc { +namespace perfdhcp { + +PerfPkt6::PerfPkt6(const uint8_t* buf, + size_t len, + size_t transid_offset, + uint32_t transid) : + Pkt6(buf, len, Pkt6::UDP), + transid_offset_(transid_offset) { + setTransid(transid); +} + +bool +PerfPkt6::rawPack() { + return (PktTransform::pack(dhcp::Option::V6, + data_, + options_, + getTransidOffset(), + getTransid(), + bufferOut_)); +} + +bool +PerfPkt6::rawUnpack() { + uint32_t transid = getTransid(); + bool res = PktTransform::unpack(dhcp::Option::V6, + data_, + options_, + getTransidOffset(), + transid); + if (res) { + setTransid(transid); + } + return (res); +} + +} // namespace perfdhcp +} // namespace isc diff --git a/tests/tools/perfdhcp/perf_pkt6.h b/tests/tools/perfdhcp/perf_pkt6.h new file mode 100644 index 0000000000..94fe47bada --- /dev/null +++ b/tests/tools/perfdhcp/perf_pkt6.h @@ -0,0 +1,113 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. 
+ +#ifndef __PERF_PKT6_H +#define __PERF_PKT6_H + +#include +#include +#include + +#include "localized_option.h" + +namespace isc { +namespace perfdhcp { + +/// \brief PerfPkt6 (DHCPv6 packet) +/// +/// This class extends the functionality of \ref isc::dhcp::Pkt6 by +/// adding the ability to specify an options offset in the DHCP message +/// and so override the options' contents. This is particularly useful when we +/// create a packet object using a template file (i.e. do not build it +/// dynamically). The client class should read the data from the template file +/// and pass it to this class as a buffer. +/// +/// The contents of such packet can be later partially replaced: in particular, +/// selected options and the transaction ID can be altered. (The transaction +/// ID and its offset in the template file is passed via the constructor.) +/// +/// In order to replace the contents of options, the client class has to +/// create a collection of \ref LocalizedOption by adding them using +/// \ref dhcp::Pkt6::addOption. +/// +/// \note If you don't use template files, simply use constructors +/// inherited from parent class and the \ref isc::dhcp::Option type instead. + +class PerfPkt6 : public dhcp::Pkt6 { +public: + + /// Localized option pointer type. + typedef boost::shared_ptr LocalizedOptionPtr; + + /// \brief Constructor, used to create messages from packet + /// template files. + /// + /// Creates a new DHCPv6 message using the provided buffer. + /// The transaction ID and its offset are specified via this + /// constructor. The transaction ID is stored in outgoing message + /// when client class calls \ref PerfPkt6::rawPack. Transaction id + /// offset value is used for incoming and outgoing messages to + /// identify transaction ID field's position in incoming and outgoing + /// messages. + /// + /// \param buf buffer holding contents of the message (this can + /// be directly read from template file). + /// \param len length of the data in the buffer. 
+    /// \param transid_offset transaction id offset in a message.
+    /// \param transid transaction id to be stored in outgoing message.
+    PerfPkt6(const uint8_t* buf,
+             size_t len,
+             size_t transid_offset = 1,
+             uint32_t transid = 0);
+
+    /// \brief Returns transaction id offset in packet buffer
+    ///
+    /// \return Transaction ID offset in the packet buffer.
+    size_t getTransidOffset() const { return transid_offset_; };
+
+    /// \brief Prepares on-wire format from raw buffer
+    ///
+    /// The method copies the buffer provided in constructor to the
+    /// output buffer and replaces the transaction ID and selected
+    /// options with new data.
+    ///
+    /// \note Use this method to prepare an on-wire DHCPv6 message
+    /// when you use template packets that require replacement
+    /// of selected options' contents before sending.
+    ///
+    /// \return false if pack operation failed.
+    bool rawPack();
+
+    /// \brief Handles limited binary packet parsing for packets with
+    /// custom offsets of options and transaction id
+    ///
+    /// This method handles the parsing of packets that have custom offsets
+    /// of options or transaction ID. Use
+    /// \ref isc::dhcp::Pkt6::addOption to specify which options to parse.
+    /// Options should be of the \ref isc::perfdhcp::LocalizedOption
+    /// type with offset values provided. Each added option will
+    /// be updated with actual data read from the binary packet buffer.
+    ///
+    /// \return false if unpack operation failed.
+    bool rawUnpack();
+
+private:
+    size_t transid_offset_; ///< transaction id offset
+
+};
+
+} // namespace perfdhcp
+} // namespace isc
+
+#endif // __PERF_PKT6_H
diff --git a/tests/tools/perfdhcp/pkt_transform.cc b/tests/tools/perfdhcp/pkt_transform.cc
new file mode 100644
index 0000000000..5ed39bfc5f
--- /dev/null
+++ b/tests/tools/perfdhcp/pkt_transform.cc
@@ -0,0 +1,222 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. 
("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#include + +#include +#include +#include +#include + +#include "pkt_transform.h" +#include "localized_option.h" + +using namespace std; +using namespace isc; +using namespace dhcp; + +namespace isc { +namespace perfdhcp { + +bool +PktTransform::pack(const Option::Universe universe, + const OptionBuffer& in_buffer, + const Option::OptionCollection& options, + const size_t transid_offset, + const uint32_t transid, + util::OutputBuffer& out_buffer) { + + // Always override the packet if function is called. + out_buffer.clear(); + // Write whole buffer to output buffer. + out_buffer.writeData(&in_buffer[0], in_buffer.size()); + + uint8_t transid_len = (universe == Option::V6) ? 3 : 4; + + if ((transid_offset + transid_len >= in_buffer.size()) || + (transid_offset == 0)) { + cout << "Failed to build packet: provided transaction id offset: " + << transid_offset << " is out of bounds (expected 1.." + << in_buffer.size()-1 << ")." 
<< endl; + return (false); + } + + try { + size_t offset_ptr = transid_offset; + if (universe == Option::V4) { + out_buffer.writeUint8At(transid >> 24 & 0xFF, offset_ptr++); + } + out_buffer.writeUint8At(transid >> 16 & 0xFF, offset_ptr++); + out_buffer.writeUint8At(transid >> 8 & 0xFF, offset_ptr++); + out_buffer.writeUint8At(transid & 0xFF, offset_ptr++); + + // We already have packet template stored in output buffer + // but still some options have to be updated if client + // specified them along with their offsets in the buffer. + PktTransform::packOptions(in_buffer, options, out_buffer); + } catch (const isc::BadValue& e) { + cout << "Building packet failed: " << e.what() << endl; + return (false); + } + return (true); +} + +bool +PktTransform::unpack(const Option::Universe universe, + const OptionBuffer& in_buffer, + const Option::OptionCollection& options, + const size_t transid_offset, + uint32_t& transid) { + + uint8_t transid_len = (universe == Option::V6) ? 3 : 4; + + // Validate transaction id offset. + if ((transid_offset + transid_len + 1 > in_buffer.size()) || + (transid_offset == 0)) { + cout << "Failed to parse packet: provided transaction id offset: " + << transid_offset << " is out of bounds (expected 1.." + << in_buffer.size()-1 << ")." << endl; + return (false); + } + + // Read transaction id from the buffer. + // For DHCPv6 we transaction id is 3 bytes long so the high byte + // of transid will be zero. + OptionBufferConstIter it = in_buffer.begin() + transid_offset; + transid = 0; + for (int i = 0; i < transid_len; ++i, ++it) { + // Read next byte and shift it left to its position in + // transid (shift by the number of bytes read so far. 
+ transid += *it << (transid_len - i - 1) * 8; + } + + try { + PktTransform::unpackOptions(in_buffer, options); + } catch (const isc::BadValue& e) { + cout << "Packet parsing failed: " << e.what() << endl; + return (false); + } + + return (true); +} + +void +PktTransform::packOptions(const OptionBuffer& in_buffer, + const Option::OptionCollection& options, + util::OutputBuffer& out_buffer) { + try { + // If there are any options on the list, we will use provided + // options offsets to override them in the output buffer + // with new contents. + for (Option::OptionCollection::const_iterator it = options.begin(); + it != options.end(); ++it) { + // Get options with their position (offset). + boost::shared_ptr option = + boost::dynamic_pointer_cast(it->second); + if (option == NULL) { + isc_throw(isc::BadValue, "option is null"); + } + uint32_t offset = option->getOffset(); + if ((offset == 0) || + (offset + option->len() > in_buffer.size())) { + isc_throw(isc::BadValue, + "option offset for option: " << option->getType() + << " is out of bounds (expected 1.." + << in_buffer.size() - option->len() << ")"); + } + + // Create temporary buffer to store option contents. + util::OutputBuffer buf(option->len()); + // Pack option contents into temporary buffer. + option->pack(buf); + // OutputBuffer class has nice functions that write + // data at the specified position so we can use it to + // inject contents of temporary buffer to output buffer. 
+ const uint8_t *buf_data = + static_cast(buf.getData()); + for (int i = 0; i < buf.getLength(); ++i) { + out_buffer.writeUint8At(buf_data[i], offset + i); + } + } + } + catch (const Exception&) { + isc_throw(isc::BadValue, "failed to pack options into buffer."); + } +} + +void +PktTransform::unpackOptions(const OptionBuffer& in_buffer, + const Option::OptionCollection& options) { + for (Option::OptionCollection::const_iterator it = options.begin(); + it != options.end(); ++it) { + + boost::shared_ptr option = + boost::dynamic_pointer_cast(it->second); + if (option == NULL) { + isc_throw(isc::BadValue, "option is null"); + } + size_t opt_pos = option->getOffset(); + if (opt_pos == 0) { + isc_throw(isc::BadValue, "failed to unpack packet from raw buffer " + "(Option position not specified)"); + } else if (opt_pos + option->getHeaderLen() > in_buffer.size()) { + isc_throw(isc::BadValue, + "failed to unpack options from from raw buffer " + "(Option position out of bounds)"); + } + + size_t offset = opt_pos; + size_t offset_step = 1; + uint16_t opt_type = 0; + if (option->getUniverse() == Option::V6) { + offset_step = 2; + // For DHCPv6 option type is in first two octets. + opt_type = in_buffer[offset] * 256 + in_buffer[offset + 1]; + } else { + // For DHCPv4 option type is in first octet. + opt_type = in_buffer[offset]; + } + // Check if we got expected option type. + if (opt_type != option->getType()) { + isc_throw(isc::BadValue, + "failed to unpack option from raw buffer " + "(option type mismatch)"); + } + + // Get option length which is supposed to be after option type. + offset += offset_step; + uint16_t opt_len = in_buffer[offset] * 256 + in_buffer[offset + 1]; + if (option->getUniverse() == Option::V6) { + opt_len = in_buffer[offset] * 256 + in_buffer[offset + 1]; + } else { + opt_len = in_buffer[offset]; + } + + // Check if packet is not truncated. 
+ if (offset + option->getHeaderLen() + opt_len > in_buffer.size()) { + isc_throw(isc::BadValue, + "failed to unpack option from raw buffer " + "(option truncated)"); + } + + // Seek to actual option data and replace it. + offset += offset_step; + option->setData(in_buffer.begin() + offset, + in_buffer.begin() + offset + opt_len); + } +} + + +} // namespace perfdhcp +} // namespace isc diff --git a/tests/tools/perfdhcp/pkt_transform.h b/tests/tools/perfdhcp/pkt_transform.h new file mode 100644 index 0000000000..7fb19f48c4 --- /dev/null +++ b/tests/tools/perfdhcp/pkt_transform.h @@ -0,0 +1,139 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#ifndef __PKT_TRANSFORM_H +#define __PKT_TRANSFORM_H + +#include + +#include "localized_option.h" + +namespace isc { +namespace perfdhcp { + +/// \brief Read and write raw data to DHCP packets. +/// +/// This class provides static functions to read/write raw data from/to the +/// packet buffer. When reading data with the unpack() method, the +/// corresponding options objects are updated. When writing to the packet +/// buffer with pack(), options objects carry input data to be written. 
+/// +/// This class is used both by \ref PerfPkt4 and +/// \ref PerfPkt6 classes in case DHCP packets are created +/// from template files. In this case, some of the template +/// packet's options are replaced before sending it to the +/// server. Offset of specific options are provided from the +/// command line by the perfdhcp tool user, and passed in an +/// options collection. +class PktTransform { +public: + + /// \brief Prepares on-wire format from raw buffer. + /// + /// The method copies the input buffer and options contents + /// to the output buffer. The input buffer must contain whole + /// initial packet data. Parts of this data will be + /// overriden by options data specified in an options + /// collection. Such options must have their offsets within + /// a packet specified (see \ref LocalizedOption to find out + /// how to specify options offset). + /// + /// \note The specified options must fit into the size of the + /// initial packet data. A call to this method will fail + /// if the option's offset + its size is beyond the packet's size. + /// + /// \param universe Universe used, V4 or V6 + /// \param in_buffer Input buffer holding intial packet + /// data, this can be directly read from template file + /// \param options Options collection with offsets + /// \param transid_offset offset of transaction id in a packet, + /// transaction ID will be written to output buffer at this + /// offset + /// \param transid Transaction ID value + /// \param out_buffer Output buffer holding "packed" data + /// + /// \return false, if pack operation failed. + static bool pack(const dhcp::Option::Universe universe, + const dhcp::OptionBuffer& in_buffer, + const dhcp::Option::OptionCollection& options, + const size_t transid_offset, + const uint32_t transid, + util::OutputBuffer& out_buffer); + + /// \brief Handles selective binary packet parsing. 
+ /// + /// This method handles the parsing of packets that have non-default + /// options or transaction ID offsets. The client class has to use + /// \ref isc::dhcp::Pkt6::addOption to specify which options to parse. + /// Each option should be of the \ref isc::perfdhcp::LocalizedOption + /// type with the offset value specified. + /// + /// \param universe universe used, V4 or V6 + /// \param in_buffer input buffer to be parsed + /// \param options options collection with options offsets + /// \param transid_offset offset of transaction id in input buffer + /// \param transid transaction id value read from input buffer + /// + /// \return false, if unpack operation failed. + static bool unpack(const dhcp::Option::Universe universe, + const dhcp::OptionBuffer& in_buffer, + const dhcp::Option::OptionCollection& options, + const size_t transid_offset, + uint32_t& transid); + +private: + /// \brief Replaces contents of options in a buffer. + /// + /// The method uses a localized options collection to + /// replace parts of packet data (e.g. data read + /// from template file). + /// This private method is called from \ref PktTransform::pack + /// + /// \param in_buffer input buffer holding initial packet data. + /// \param out_buffer output buffer with "packed" options. + /// \param options options collection with actual data and offsets. + /// + /// \throw isc::Unexpected if options update failed. + static void packOptions(const dhcp::OptionBuffer& in_buffer, + const dhcp::Option::OptionCollection& options, + util::OutputBuffer& out_buffer); + + /// \brief Reads contents of specified options from buffer. + /// + /// The method reads options data from the input buffer + /// and stores it in options objects. Offsets of the options + /// must be specified (see \ref LocalizedOption to find out how to specify + /// the option offset). + /// This private method is called by \ref PktTransform::unpack. 
+ /// + /// \note This method iterates through all options in an + /// options collection, checks the offset of the option + /// in input buffer and reads data from the buffer to + /// update the option's buffer. If the provided options collection + /// is empty, a call to this method will have no effect. + /// + /// \param universe universe used, V4 or V6 + /// \param in_buffer input buffer to be parsed. + /// \param options oprions collection with their offsets + /// in input buffer specified. + /// + /// \throw isc::Unexpected if options unpack failed. + static void unpackOptions(const dhcp::OptionBuffer& in_buffer, + const dhcp::Option::OptionCollection& options); +}; + +} // namespace perfdhcp +} // namespace isc + +#endif // __PKT_TRANSFORM_H diff --git a/tests/tools/perfdhcp/tests/.gitignore b/tests/tools/perfdhcp/tests/.gitignore new file mode 100644 index 0000000000..d6d1ec87a1 --- /dev/null +++ b/tests/tools/perfdhcp/tests/.gitignore @@ -0,0 +1 @@ +/run_unittests diff --git a/tests/tools/perfdhcp/tests/Makefile.am b/tests/tools/perfdhcp/tests/Makefile.am new file mode 100644 index 0000000000..21e5b46c34 --- /dev/null +++ b/tests/tools/perfdhcp/tests/Makefile.am @@ -0,0 +1,46 @@ +SUBDIRS = . 
+ +AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib +AM_CPPFLAGS += $(BOOST_INCLUDES) +AM_CXXFLAGS = $(B10_CXXFLAGS) + +if USE_STATIC_LINK +AM_LDFLAGS = -static +endif + +CLEANFILES = *.gcno *.gcda + +TESTS_ENVIRONMENT = \ + libtool --mode=execute $(VALGRIND_COMMAND) + +TESTS = +if HAVE_GTEST +TESTS += run_unittests +run_unittests_SOURCES = run_unittests.cc +run_unittests_SOURCES += command_options_unittest.cc +run_unittests_SOURCES += perf_pkt6_unittest.cc +run_unittests_SOURCES += perf_pkt4_unittest.cc +run_unittests_SOURCES += localized_option_unittest.cc +run_unittests_SOURCES += $(top_builddir)/tests/tools/perfdhcp/command_options.cc +run_unittests_SOURCES += $(top_builddir)/tests/tools/perfdhcp/pkt_transform.cc +run_unittests_SOURCES += $(top_builddir)/tests/tools/perfdhcp/perf_pkt6.cc +run_unittests_SOURCES += $(top_builddir)/tests/tools/perfdhcp/perf_pkt4.cc + +run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES) +run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS) + +if USE_CLANGPP +# Disable unused parameter warning caused by some of the +# Boost headers when compiling with clang. +run_unittests_CXXFLAGS = -Wno-unused-parameter +endif + +run_unittests_LDADD = $(GTEST_LDADD) +run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la +run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la +run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la +run_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libdhcp++.la +run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la +endif + +noinst_PROGRAMS = $(TESTS) diff --git a/tests/tools/perfdhcp/tests/command_options_unittest.cc b/tests/tools/perfdhcp/tests/command_options_unittest.cc new file mode 100644 index 0000000000..8e1053dc30 --- /dev/null +++ b/tests/tools/perfdhcp/tests/command_options_unittest.cc @@ -0,0 +1,454 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. 
("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#include +#include +#include +#include + +#include "../command_options.h" + +#include "exceptions/exceptions.h" + +using namespace std; +using namespace isc; +using namespace isc::perfdhcp; + +/// \brief Test Fixture Class +/// +/// This test fixture class is used to perform +/// unit tests on perfdhcp CommandOptions class. +class CommandOptionsTest : public virtual ::testing::Test +{ +public: + /// \brief Default Constructor + CommandOptionsTest() { } + +protected: + /// \brief Parse command line and cleanup + /// + /// The method tokenizes command line to array of C-strings, + /// parses arguments using CommandOptions class to set + /// its data members and de-allocates array of C-strings. 
+ /// + /// \param cmdline Command line to parse + /// \throws std::bad allocation if tokenization failed + void process(const std::string& cmdline) { + CommandOptions& opt = CommandOptions::instance(); + int argc = 0; + char** argv = tokenizeString(cmdline, &argc); + opt.reset(); + opt.parse(argc, argv); + for(int i = 0; i < argc; ++i) { + free(argv[i]); + argv[i] = NULL; + } + free(argv); + } + + /// \brief Check default initialized values + /// + /// Check if initialized values are correct + void checkDefaults() { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp"); + EXPECT_EQ(4, opt.getIpVersion()); + EXPECT_EQ(CommandOptions::DORA_SARR, opt.getExchangeMode()); + EXPECT_EQ(0, opt.getRate()); + EXPECT_EQ(0, opt.getReportDelay()); + EXPECT_EQ(0, opt.getClientsNum()); + + // default mac + uint8_t mac[6] = { 0x00, 0x0C, 0x01, 0x02, 0x03, 0x04 }; + std::vector v1 = opt.getMacPrefix(); + ASSERT_EQ(6, v1.size()); + EXPECT_TRUE(std::equal(v1.begin(), v1.end(), mac)); + + EXPECT_EQ(0, opt.getBase().size()); + EXPECT_EQ(0, opt.getNumRequests().size()); + EXPECT_EQ(0, opt.getPeriod()); + for (int i = 0; i < opt.getDropTime().size(); ++i) { + EXPECT_DOUBLE_EQ(1, opt.getDropTime()[i]); + } + ASSERT_EQ(opt.getMaxDrop().size(), opt.getMaxDropPercentage().size()); + for (int i = 0; i < opt.getMaxDrop().size(); ++i) { + EXPECT_EQ(0, opt.getMaxDrop()[i]); + EXPECT_EQ(0, opt.getMaxDropPercentage()[i]); + } + EXPECT_EQ("", opt.getLocalName()); + EXPECT_FALSE(opt.isInterface()); + EXPECT_EQ(0, opt.getPreload()); + EXPECT_EQ(1, opt.getAggressivity()); + EXPECT_EQ(0, opt.getLocalPort()); + EXPECT_FALSE(opt.isSeeded()); + EXPECT_EQ(0, opt.getSeed()); + EXPECT_FALSE(opt.isBroadcast()); + EXPECT_FALSE(opt.isRapidCommit()); + EXPECT_FALSE(opt.isUseFirst()); + EXPECT_EQ(0, opt.getTemplateFiles().size()); + EXPECT_EQ(0, opt.getTransactionIdOffset().size()); + EXPECT_EQ(0, opt.getRandomOffset().size()); + EXPECT_GT(0, opt.getElapsedTimeOffset()); + EXPECT_GT(0, 
opt.getServerIdOffset()); + EXPECT_GT(0, opt.getRequestedIpOffset()); + EXPECT_EQ("", opt.getDiags()); + EXPECT_EQ("", opt.getWrapped()); + EXPECT_EQ("", opt.getServerName()); + } + + /// \brief Split string to array of C-strings + /// + /// \param s String to split (tokenize) + /// \param num Number of tokens returned + /// \return array of C-strings (tokens) + char** tokenizeString(const std::string& text_to_split, int* num) const { + char** results = NULL; + // Tokenization with std streams + std::stringstream text_stream(text_to_split); + // Iterators to be used for tokenization + std::istream_iterator text_iterator(text_stream); + std::istream_iterator text_end; + // Tokenize string (space is a separator) using begin and end iteratos + std::vector tokens(text_iterator, text_end); + + if (tokens.size() > 0) { + // Allocate array of C-strings where we will store tokens + results = static_cast(malloc(tokens.size() * sizeof(char*))); + if (results == NULL) { + throw std::bad_alloc(); + } + // Store tokens in C-strings array + for (int i = 0; i < tokens.size(); ++i) { + char* cs = static_cast(malloc(tokens[i].length() + 1)); + strcpy(cs, tokens[i].c_str()); + results[i] = cs; + } + // Return number of tokens to calling function + if (num != NULL) { + *num = tokens.size(); + } + } + return results; + } + +}; + +TEST_F(CommandOptionsTest, Defaults) { + process("perfdhcp"); + checkDefaults(); +} + +TEST_F(CommandOptionsTest, UseFirst) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -1 -B -l ethx"); + EXPECT_TRUE(opt.isUseFirst()); +} +TEST_F(CommandOptionsTest, IpVersion) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -6 -l ethx -c -i"); + EXPECT_EQ(6, opt.getIpVersion()); + EXPECT_EQ("ethx", opt.getLocalName()); + EXPECT_TRUE(opt.isRapidCommit()); + EXPECT_FALSE(opt.isBroadcast()); + process("perfdhcp -4 -B -l ethx"); + EXPECT_EQ(4, opt.getIpVersion()); + EXPECT_TRUE(opt.isBroadcast()); + 
EXPECT_FALSE(opt.isRapidCommit()); + + // Negative test cases + // -4 and -6 must not coexist + EXPECT_THROW(process("perfdhcp -4 -6 -l ethx"), isc::InvalidParameter); + // -6 and -B must not coexist + EXPECT_THROW(process("perfdhcp -6 -B -l ethx"), isc::InvalidParameter); + // -c and -4 (default) must not coexist + EXPECT_THROW(process("perfdhcp -c -l ethx"), isc::InvalidParameter); +} + +TEST_F(CommandOptionsTest, Rate) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -4 -r 10 -l ethx"); + EXPECT_EQ(10, opt.getRate()); + + // Negative test cases + // Rate must not be 0 + EXPECT_THROW(process("perfdhcp -4 -r 0 -l ethx"), isc::InvalidParameter); + // -r must be specified to use -n, -p and -D + EXPECT_THROW(process("perfdhcp -6 -t 5 -l ethx"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -4 -n 150 -l ethx"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -6 -p 120 -l ethx"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -4 -D 1400 -l ethx"), isc::InvalidParameter); +} + +TEST_F(CommandOptionsTest, ReportDelay) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -r 100 -t 17 -l ethx"); + EXPECT_EQ(17, opt.getReportDelay()); + + // Negative test cases + // -t must be positive integer + EXPECT_THROW(process("perfdhcp -r 10 -t -8 -l ethx"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -r 10 -t 0 -l ethx"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -r 10 -t s -l ethx"), isc::InvalidParameter); +} + +TEST_F(CommandOptionsTest, ClientsNum) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -R 200 -l ethx"); + EXPECT_EQ(200, opt.getClientsNum()); + process("perfdhcp -R 0 -l ethx"); + EXPECT_EQ(0, opt.getClientsNum()); + + // Negative test cases + // Number of clients must be non-negative integer + EXPECT_THROW(process("perfdhcp -R -5 -l ethx"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -R gs -l ethx"), isc::InvalidParameter); +} + 
+TEST_F(CommandOptionsTest, Base) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -6 -b MAC=10::20::30::40::50::60 -l ethx -b duiD=1AB7F5670901FF"); + uint8_t mac[6] = {0x10, 0x20, 0x30, 0x40, 0x50, 0x60 }; + uint8_t duid[7] = { 0x1A, 0xB7, 0xF5, 0x67, 0x09, 0x01, 0xFF }; + + // Test Mac + std::vector v1 = opt.getMacPrefix(); + ASSERT_EQ(6, v1.size()); + EXPECT_TRUE(std::equal(v1.begin(), v1.end(), mac)); + // "3x" is invalid value in MAC address + EXPECT_THROW(process("perfdhcp -b mac=10::2::3x::4::5::6 -l ethx"), isc::InvalidParameter); + + // Test DUID + std::vector v2 = opt.getDuidPrefix(); + ASSERT_EQ(sizeof(duid) / sizeof(uint8_t), v2.size()); + EXPECT_TRUE(std::equal(v2.begin(), v2.end(), duid)); + // "t" is invalid digit in DUID + EXPECT_THROW(process("perfdhcp -6 -l ethx -b duiD=1AB7Ft670901FF"), isc::InvalidParameter); + + // Some more negative test cases + // Base is not specified + EXPECT_THROW(process("perfdhcp -b -l ethx"), isc::InvalidParameter); + // Typo: should be mac= instead of mc= + EXPECT_THROW(process("perfdhcp -l ethx -b mc=00:01:02:03::04:05"), isc::InvalidParameter); +} + +TEST_F(CommandOptionsTest, DropTime) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -l ethx -d 12"); + ASSERT_EQ(2, opt.getDropTime().size()); + EXPECT_DOUBLE_EQ(12, opt.getDropTime()[0]); + EXPECT_DOUBLE_EQ(1, opt.getDropTime()[1]); + + process("perfdhcp -l ethx -d 2 -d 4.7"); + ASSERT_EQ(2, opt.getDropTime().size()); + EXPECT_DOUBLE_EQ(2, opt.getDropTime()[0]); + EXPECT_DOUBLE_EQ(4.7, opt.getDropTime()[1]); + + // Negative test cases + // Drop time must not be negative + EXPECT_THROW(process("perfdhcp -l ethx -d -2 -d 4.7"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -l ethx -d -9.1 -d 0"), isc::InvalidParameter); +} + +TEST_F(CommandOptionsTest, TimeOffset) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -l ethx -T file1.x -T file2.x -E 4"); + EXPECT_EQ(4, 
opt.getElapsedTimeOffset()); + + // Negative test cases + // Argument -E must be used with -T + EXPECT_THROW(process("perfdhcp -l ethx -E 3 -i"), isc::InvalidParameter); + // Value in -E not specified + EXPECT_THROW(process("perfdhcp -l ethx -T file.x -E -i"), isc::InvalidParameter); + // Value for -E must not be negative + EXPECT_THROW(process("perfdhcp -l ethx -E -3 -T file.x"), isc::InvalidParameter); +} + +TEST_F(CommandOptionsTest, ExchangeMode) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -l ethx -i"); + EXPECT_EQ(CommandOptions::DO_SA, opt.getExchangeMode()); + + // Negative test cases + // No template file specified + EXPECT_THROW(process("perfdhcp -i -l ethx -X 3"), isc::InvalidParameter); + // Offsets can't be used in simple exchanges (-i) + EXPECT_THROW(process("perfdhcp -i -l ethx -O 2 -T file.x"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -i -l ethx -E 3 -T file.x"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -i -l ethx -S 1 -T file.x"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -i -l ethx -I 2 -T file.x"), isc::InvalidParameter); +} + +TEST_F(CommandOptionsTest, Offsets) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -E5 -4 -I 2 -S3 -O 30 -X7 -l ethx -X3 -T file1.x -T file2.x"); + EXPECT_EQ(2, opt.getRequestedIpOffset()); + EXPECT_EQ(5, opt.getElapsedTimeOffset()); + EXPECT_EQ(3, opt.getServerIdOffset()); + ASSERT_EQ(2, opt.getRandomOffset().size()); + EXPECT_EQ(30, opt.getRandomOffset()[0]); + EXPECT_EQ(30, opt.getRandomOffset()[1]); + ASSERT_EQ(2, opt.getTransactionIdOffset().size()); + EXPECT_EQ(7, opt.getTransactionIdOffset()[0]); + EXPECT_EQ(3, opt.getTransactionIdOffset()[1]); + + // Negative test cases + // IP offset/IA_NA offset must be positive + EXPECT_THROW(process("perfdhcp -6 -I 0 -l ethx"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -6 -I -4 -l ethx"), isc::InvalidParameter); + + // TODO - other negative cases +} + 
+TEST_F(CommandOptionsTest, LocalPort) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -l ethx -L 2000"); + EXPECT_EQ(2000, opt.getLocalPort()); + + // Negative test cases + // Local port must be between 0..65535 + EXPECT_THROW(process("perfdhcp -l ethx -L -2"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -l ethx -L"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -l ethx -L 65540"), isc::InvalidParameter); +} + +TEST_F(CommandOptionsTest, Preload) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -1 -P 3 -l ethx"); + EXPECT_EQ(3, opt.getPreload()); + + // Negative test cases + // Number of preload packages must not be negative integer + EXPECT_THROW(process("perfdhcp -P -1 -l ethx"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -P -3 -l ethx"), isc::InvalidParameter); +} + +TEST_F(CommandOptionsTest, Seed) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -6 -P 2 -s 23 -l ethx"); + EXPECT_EQ(23, opt.getSeed()); + EXPECT_TRUE(opt.isSeeded()); + + process("perfdhcp -6 -P 2 -s 0 -l ethx"); + EXPECT_EQ(0, opt.getSeed()); + EXPECT_FALSE(opt.isSeeded()); + + // Negtaive test cases + // Seed must be non-negative integer + EXPECT_THROW(process("perfdhcp -6 -P 2 -s -5 -l ethx"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -6 -P 2 -s -l ethx"), isc::InvalidParameter); +} + +TEST_F(CommandOptionsTest, TemplateFiles) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -T file1.x -l ethx"); + ASSERT_EQ(1, opt.getTemplateFiles().size()); + EXPECT_EQ("file1.x", opt.getTemplateFiles()[0]); + + process("perfdhcp -T file1.x -s 12 -w start -T file2.x -4 -l ethx"); + ASSERT_EQ(2, opt.getTemplateFiles().size()); + EXPECT_EQ("file1.x", opt.getTemplateFiles()[0]); + EXPECT_EQ("file2.x", opt.getTemplateFiles()[1]); + + // Negative test cases + // No template file specified + EXPECT_THROW(process("perfdhcp -s 12 -l ethx -T"), 
isc::InvalidParameter); + // Too many template files specified + EXPECT_THROW(process("perfdhcp -s 12 -l ethx -T file.x -T file.x -T file.x"), isc::InvalidParameter); +} + +TEST_F(CommandOptionsTest, Wrapped) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -B -w start -i -l ethx"); + EXPECT_EQ("start", opt.getWrapped()); + + // Negative test cases + // Missing command after -w, expected start/stop + EXPECT_THROW(process("perfdhcp -B -i -l ethx -w"), isc::InvalidParameter); +} + +TEST_F(CommandOptionsTest, Diagnostics) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -l ethx -i -x asTe"); + EXPECT_EQ("asTe", opt.getDiags()); + + // Negative test cases + // No diagnostics string specified + EXPECT_THROW(process("perfdhcp -l ethx -i -x"), isc::InvalidParameter); +} + +TEST_F(CommandOptionsTest, Aggressivity) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -a 10 -l 192.168.0.1"); + EXPECT_EQ(10, opt.getAggressivity()); + + // Negative test cases + // Aggressivity must be non negative integer + EXPECT_THROW(process("perfdhcp -l ethx -a 0"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -l ethx -a"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -a -2 -l ethx -a 3"), isc::InvalidParameter); +} + +TEST_F(CommandOptionsTest, MaxDrop) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -D 25 -l ethx -r 10"); + EXPECT_EQ(25, opt.getMaxDrop()[0]); + process("perfdhcp -D 25 -l ethx -D 15 -r 10"); + EXPECT_EQ(25, opt.getMaxDrop()[0]); + EXPECT_EQ(15, opt.getMaxDrop()[1]); + + process("perfdhcp -D 15% -l ethx -r 10"); + EXPECT_EQ(15, opt.getMaxDropPercentage()[0]); + process("perfdhcp -D 15% -D25% -l ethx -r 10"); + EXPECT_EQ(15, opt.getMaxDropPercentage()[0]); + EXPECT_EQ(25, opt.getMaxDropPercentage()[1]); + process("perfdhcp -D 1% -D 99% -l ethx -r 10"); + EXPECT_EQ(1, opt.getMaxDropPercentage()[0]); + EXPECT_EQ(99, opt.getMaxDropPercentage()[1]); + + // 
Negative test cases + // Too many -D options + EXPECT_THROW(process("perfdhcp -D 0% -D 1 -l ethx -r20 -D 3"), isc::InvalidParameter); + // Too many -D options + EXPECT_THROW(process("perfdhcp -D 99% -D 13% -l ethx -r20 -D 10%"), isc::InvalidParameter); + // Percentage is out of bounds + EXPECT_THROW(process("perfdhcp -D101% -D 13% -l ethx -r20"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -D0% -D 13% -l ethx -r20"), isc::InvalidParameter); +} + +TEST_F(CommandOptionsTest, NumRequest) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -n 1000 -r 10 -l ethx"); + EXPECT_EQ(1000, opt.getNumRequests()[0]); + process("perfdhcp -n 5 -r 10 -n 500 -l ethx"); + EXPECT_EQ(5, opt.getNumRequests()[0]); + EXPECT_EQ(500, opt.getNumRequests()[1]); + + // Negative test cases + // Too many -n parameters, expected maximum 2 + EXPECT_THROW(process("perfdhcp -n 1 -n 2 -l ethx -n3 -r 20"), isc::InvalidParameter); + // Num request must be positive integer + EXPECT_THROW(process("perfdhcp -n 1 -n -22 -l ethx -r 10"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -n 0 -l ethx -r 10"), isc::InvalidParameter); +} + +TEST_F(CommandOptionsTest, Period) { + CommandOptions& opt = CommandOptions::instance(); + process("perfdhcp -p 120 -l ethx -r 100"); + EXPECT_EQ(120, opt.getPeriod()); + + // Negative test cases + // Test period must be positive integer + EXPECT_THROW(process("perfdhcp -p 0 -l ethx -r 50"), isc::InvalidParameter); + EXPECT_THROW(process("perfdhcp -p -3 -l ethx -r 50"), isc::InvalidParameter); +} diff --git a/tests/tools/perfdhcp/tests/localized_option_unittest.cc b/tests/tools/perfdhcp/tests/localized_option_unittest.cc new file mode 100644 index 0000000000..e51560eee3 --- /dev/null +++ b/tests/tools/perfdhcp/tests/localized_option_unittest.cc @@ -0,0 +1,48 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. 
("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#include +#include + +#include +#include + +#include "../localized_option.h" + +using namespace std; +using namespace isc; +using namespace isc::dhcp; +using namespace isc::perfdhcp; + +namespace { + +TEST(LocalizedOptionTest, Constructor) { + OptionBuffer opt_buf; + // Create option with default offset. + boost::scoped_ptr opt1(new LocalizedOption(Option::V6, + D6O_CLIENTID, + opt_buf)); + EXPECT_EQ(Option::V6, opt1->getUniverse()); + EXPECT_EQ(D6O_CLIENTID, opt1->getType()); + EXPECT_EQ(0, opt1->getOffset()); + + // Create option with non-default offset. + boost::scoped_ptr opt2(new LocalizedOption(Option::V6, + D6O_CLIENTID, + opt_buf, + 40)); + EXPECT_EQ(40, opt2->getOffset()); +} + +} diff --git a/tests/tools/perfdhcp/tests/perf_pkt4_unittest.cc b/tests/tools/perfdhcp/tests/perf_pkt4_unittest.cc new file mode 100644 index 0000000000..3863faa111 --- /dev/null +++ b/tests/tools/perfdhcp/tests/perf_pkt4_unittest.cc @@ -0,0 +1,384 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. 
+// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "../localized_option.h" +#include "../perf_pkt4.h" + +using namespace std; +using namespace isc; +using namespace isc::asiolink; +using namespace isc::dhcp; +using namespace isc::perfdhcp; + +typedef PerfPkt4::LocalizedOptionPtr LocalizedOptionPtr; + +namespace { + +// A dummy MAC address, padded with 0s +const uint8_t dummyChaddr[16] = {0, 1, 2, 3, 4, 5, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 }; + +// Let's use some creative test content here (128 chars + \0) +const uint8_t dummyFile[] = "Lorem ipsum dolor sit amet, consectetur " + "adipiscing elit. Proin mollis placerat metus, at " + "lacinia orci ornare vitae. Mauris amet."; + +// Yet another type of test content (64 chars + \0) +const uint8_t dummySname[] = "Lorem ipsum dolor sit amet, consectetur " + "adipiscing elit posuere."; + +class PerfPkt4Test : public ::testing::Test { +public: + PerfPkt4Test() { + } + + /// \brief Returns buffer with sample DHCPDISCOVER message. + /// + /// This method creates buffer containing on-wire data of + /// DHCPDICOSVER message. This buffer is used by tests below + /// to create DHCPv4 test packets. + /// + /// \return vector containing on-wire data + std::vector& capture() { + + // That is only part of the header. It contains all "short" fields, + // larger fields are constructed separately. 
+ uint8_t hdr[] = { + 1, 6, 6, 13, // op, htype, hlen, hops, + 0x12, 0x34, 0x56, 0x78, // transaction-id + 0, 42, 0x80, 0x00, // 42 secs, BROADCAST flags + 192, 0, 2, 1, // ciaddr + 1, 2, 3, 4, // yiaddr + 192, 0, 2, 255, // siaddr + 255, 255, 255, 255, // giaddr + }; + + uint8_t v4Opts[] = { + DHO_HOST_NAME, 3, 0, 1, 2, // Host name option. + DHO_BOOT_SIZE, 3, 10, 11, 12, // Boot file size option + DHO_MERIT_DUMP, 3, 20, 21, 22, // Merit dump file + DHO_DHCP_MESSAGE_TYPE, 1, 1, // DHCP message type. + 128, 3, 30, 31, 32, + 254, 3, 40, 41, 42, + }; + + // Initialize the vector with the header fields defined above. + static std::vector buf(hdr, hdr + sizeof(hdr)); + + // If this is a first call to this function. Initialize + // remaining data. + if (buf.size() == sizeof(hdr)) { + + // Append the large header fields. + std::copy(dummyChaddr, dummyChaddr + Pkt4::MAX_CHADDR_LEN, + back_inserter(buf)); + std::copy(dummySname, dummySname + Pkt4::MAX_SNAME_LEN, + back_inserter(buf)); + std::copy(dummyFile, dummyFile + Pkt4::MAX_FILE_LEN, + back_inserter(buf)); + + // Append magic cookie. + buf.push_back(0x63); + buf.push_back(0x82); + buf.push_back(0x53); + buf.push_back(0x63); + + // Append options. + std::copy(v4Opts, v4Opts + sizeof(v4Opts), back_inserter(buf)); + } + return buf; + } +}; + +TEST_F(PerfPkt4Test, Constructor) { + // Initialize some dummy payload. + uint8_t data[250]; + for (int i = 0; i < 250; ++i) { + data[i] = i; + } + + // Test constructor to be used for incoming messages. + // Use default (1) offset value and don't specify transaction id. + const size_t offset_transid[] = { 1, 10 }; + boost::scoped_ptr pkt1(new PerfPkt4(data, + sizeof(data), + offset_transid[0])); + EXPECT_EQ(1, pkt1->getTransidOffset()); + + // Test constructor to be used for outgoing messages. + // Use non-zero offset and specify transaction id. 
+ const uint32_t transid = 0x010203; + boost::scoped_ptr pkt2(new PerfPkt4(data, sizeof(data), + offset_transid[1], + transid)); + EXPECT_EQ(transid, pkt2->getTransid()); + EXPECT_EQ(offset_transid[1], pkt2->getTransidOffset()); + + // Test default constructor. Transaction id offset is expected to be 1. + boost::scoped_ptr pkt3(new PerfPkt4(data, sizeof(data))); + EXPECT_EQ(1, pkt3->getTransidOffset()); +} + +TEST_F(PerfPkt4Test, RawPack) { + // Create new packet. + std::vector buf = capture(); + boost::scoped_ptr pkt(new PerfPkt4(&buf[0], buf.size())); + + // Initialize options data. + uint8_t buf_hostname[] = { DHO_HOST_NAME, 3, 4, 5, 6 }; + uint8_t buf_boot_filesize[] = { DHO_BOOT_SIZE, 3, 1, 2, 3 }; + OptionBuffer vec_hostname(buf_hostname + 2, + buf_hostname + sizeof(buf_hostname)); + OptionBuffer vec_boot_filesize(buf_boot_filesize + 2, + buf_boot_filesize + sizeof(buf_hostname)); + + // Create options objects. + const size_t offset_hostname = 240; + LocalizedOptionPtr pkt_hostname(new LocalizedOption(Option::V4, + DHO_HOST_NAME, + vec_hostname, + offset_hostname)); + const size_t offset_boot_filesize = 245; + LocalizedOptionPtr pkt_boot_filesize(new LocalizedOption(Option::V4, + DHO_BOOT_SIZE, + vec_boot_filesize, + offset_boot_filesize)); + + // Try to add options to packet. + ASSERT_NO_THROW(pkt->addOption(pkt_boot_filesize)); + ASSERT_NO_THROW(pkt->addOption(pkt_hostname)); + + // We have valid options addedwith valid offsets so + // pack operation should succeed. + ASSERT_TRUE(pkt->rawPack()); + + // Buffer should now contain new values of DHO_HOST_NAME and + // DHO_BOOT_SIZE options. + util::OutputBuffer pkt_output = pkt->getBuffer(); + ASSERT_EQ(buf.size(), pkt_output.getLength()); + const uint8_t* out_buf_data = + static_cast(pkt_output.getData()); + + // Check if options we read from buffer is valid. 
+ EXPECT_EQ(0, memcmp(buf_hostname, + out_buf_data + offset_hostname, + sizeof(buf_hostname))); + EXPECT_EQ(0, memcmp(buf_boot_filesize, + out_buf_data + offset_boot_filesize, + sizeof(buf_boot_filesize))); +} + +TEST_F(PerfPkt4Test, RawUnpack) { + // Create new packet. + std::vector buf = capture(); + boost::scoped_ptr pkt(new PerfPkt4(&buf[0], buf.size())); + + // Create options (existing in the packet) and specify their offsets. + const size_t offset_merit = 250; + LocalizedOptionPtr opt_merit(new LocalizedOption(Option::V4, + DHO_MERIT_DUMP, + OptionBuffer(), + offset_merit)); + + const size_t offset_msg_type = 255; + LocalizedOptionPtr opt_msg_type(new LocalizedOption(Option::V4, + DHO_DHCP_MESSAGE_TYPE, + OptionBuffer(), + offset_msg_type)); + // Addition should be successful + ASSERT_NO_THROW(pkt->addOption(opt_merit)); + ASSERT_NO_THROW(pkt->addOption(opt_msg_type)); + + // Option fit to packet boundaries and offsets are valid, + // so this should unpack successfully. + ASSERT_TRUE(pkt->rawUnpack()); + + // At this point we should have updated options data (read from buffer). + // Let's try to retrieve them. + opt_merit = boost::dynamic_pointer_cast + (pkt->getOption(DHO_MERIT_DUMP)); + opt_msg_type = boost::dynamic_pointer_cast + (pkt->getOption(DHO_DHCP_MESSAGE_TYPE)); + ASSERT_TRUE(opt_merit); + ASSERT_TRUE(opt_msg_type); + + // Get first option payload. + OptionBuffer opt_merit_data = opt_merit->getData(); + + // Define reference data. + uint8_t buf_merit[] = { 20, 21, 22 }; + + // Validate first option data. + ASSERT_EQ(sizeof(buf_merit), opt_merit_data.size()); + EXPECT_TRUE(std::equal(opt_merit_data.begin(), + opt_merit_data.end(), + buf_merit)); + + // Get second option payload. + OptionBuffer opt_msg_type_data = opt_msg_type->getData(); + + // Expect one byte of message type payload. + ASSERT_EQ(1, opt_msg_type_data.size()); + EXPECT_EQ(1, opt_msg_type_data[0]); +} + +TEST_F(PerfPkt4Test, InvalidOptions) { + // Create new packet. 
+ std::vector buf = capture(); + boost::scoped_ptr pkt1(new PerfPkt4(&buf[0], buf.size())); + + // Create option with invalid offset. + // This option is at offset 250 (not 251). + const size_t offset_merit = 251; + LocalizedOptionPtr opt_merit(new LocalizedOption(Option::V4, + DHO_MERIT_DUMP, + OptionBuffer(), + offset_merit)); + ASSERT_NO_THROW(pkt1->addOption(opt_merit)); + + cout << "Testing unpack of invalid options. " + << "This may produce spurious errors." << endl; + + // Unpack is expected to fail because it is supposed to read + // option type from buffer and match it with DHO_MERIT_DUMP. + // It will not match because option is shifted by on byte. + ASSERT_FALSE(pkt1->rawUnpack()); + + // Create another packet. + boost::scoped_ptr pkt2(new PerfPkt4(&buf[0], buf.size())); + + // Create DHO_DHCP_MESSAGE_TYPE option that has the wrong offset. + // With this offset, option goes beyond packet size (268). + const size_t offset_msg_type = 266; + LocalizedOptionPtr opt_msg_type(new LocalizedOption(Option::V4, + DHO_DHCP_MESSAGE_TYPE, + OptionBuffer(1, 2), + offset_msg_type)); + // Adding option is expected to be successful because no + // offset validation takes place at this point. + ASSERT_NO_THROW(pkt2->addOption(opt_msg_type)); + + // This is expected to fail because option is out of bounds. + ASSERT_FALSE(pkt2->rawPack()); +} + +TEST_F(PerfPkt4Test, TruncatedPacket) { + // Get the whole packet and truncate it to 249 bytes. + std::vector buf = capture(); + buf.resize(249); + boost::scoped_ptr pkt(new PerfPkt4(&buf[0], buf.size())); + + // Option DHO_BOOT_SIZE is now truncated because whole packet + // is truncated. This option ends at 249 while last index of + // truncated packet is now 248. 
+ const size_t offset_boot_filesize = 245; + LocalizedOptionPtr opt_boot_filesize(new LocalizedOption(Option::V4, + DHO_BOOT_SIZE, + OptionBuffer(3, 1), + offset_boot_filesize)); + ASSERT_NO_THROW(pkt->addOption(opt_boot_filesize)); + + cout << "Testing pack and unpack of options in truncated " + << "packet. This may produce spurious errors." << endl; + + // Both pack and unpack are expected to fail because + // added option is out of bounds. + EXPECT_FALSE(pkt->rawUnpack()); + EXPECT_FALSE(pkt->rawPack()); +} + +TEST_F(PerfPkt4Test, PackTransactionId) { + // Create dummy packet that consists of zeros. + std::vector buf(268, 0); + + const size_t offset_transid[] = { 10, 265 }; + const uint32_t transid = 0x0102; + // Initialize transaction id 0x00000102 at offset 10. + boost::scoped_ptr pkt1(new PerfPkt4(&buf[0], buf.size(), + offset_transid[0], + transid)); + + // Pack will inject transaction id at offset 10 into the + // packet buffer. + ASSERT_TRUE(pkt1->rawPack()); + + // Get packet's output buffer and make sure it has valid size. + util::OutputBuffer out_buf = pkt1->getBuffer(); + ASSERT_EQ(buf.size(), out_buf.getLength()); + const uint8_t *out_buf_data = + static_cast(out_buf.getData()); + + // Initialize reference data for transaction id. + const uint8_t ref_data[] = { 0, 0, 1, 2 }; + + // Expect that reference transaction id matches what we have + // read from buffer. + EXPECT_EQ(0, memcmp(ref_data, out_buf_data + offset_transid[0], 4)); + + cout << "Testing pack with invalid transaction id offset. " + << "This may produce spurious errors" << endl; + + // Create packet with invalid transaction id offset. + // Packet length is 268, transaction id is 4 bytes long so last byte of + // transaction id is out of bounds. + boost::scoped_ptr pkt2(new PerfPkt4(&buf[0], buf.size(), + offset_transid[1], + transid)); + EXPECT_FALSE(pkt2->rawPack()); +} + +TEST_F(PerfPkt4Test, UnpackTransactionId) { + // Initialize packet data, lebgth 268, zeros only. 
+ std::vector in_data(268, 0); + + // Assume that transaction id is at offset 100. + // Fill 4 bytes at offset 100 with dummy transaction id. + for (int i = 100; i < 104; ++i) { + in_data[i] = i - 99; + } + + // Create packet from initialized buffer. + const size_t offset_transid[] = { 100, 270 }; + boost::scoped_ptr pkt1(new PerfPkt4(&in_data[0], + in_data.size(), + offset_transid[0])); + ASSERT_TRUE(pkt1->rawUnpack()); + + // Get unpacked transaction id and compare with reference. + EXPECT_EQ(0x01020304, pkt1->getTransid()); + + // Create packet with transaction id at invalid offset. + boost::scoped_ptr pkt2(new PerfPkt4(&in_data[0], + in_data.size(), + offset_transid[1])); + + cout << "Testing unpack of transaction id at invalid offset. " + << "This may produce spurious errors." << endl; + + // Unpack is supposed to fail because transaction id is at + // out of bounds offset. + EXPECT_FALSE(pkt2->rawUnpack()); +} + +} diff --git a/tests/tools/perfdhcp/tests/perf_pkt6_unittest.cc b/tests/tools/perfdhcp/tests/perf_pkt6_unittest.cc new file mode 100644 index 0000000000..de134ccbbb --- /dev/null +++ b/tests/tools/perfdhcp/tests/perf_pkt6_unittest.cc @@ -0,0 +1,327 @@ +// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THIS SOFTWARE. 
+ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "../localized_option.h" +#include "../perf_pkt6.h" + +using namespace std; +using namespace isc; +using namespace isc::dhcp; +using namespace isc::perfdhcp; + +typedef PerfPkt6::LocalizedOptionPtr LocalizedOptionPtr; + +namespace { + +class PerfPkt6Test : public ::testing::Test { +public: + PerfPkt6Test() { + } + + /// \brief Returns captured SOLICIT packet. + /// + /// Captured SOLICIT packet with transid=0x3d79fb and options: client-id, + /// in_na, dns-server, elapsed-time, option-request + /// This code was autogenerated + /// (see src/bin/dhcp6/tests/iface_mgr_unittest.c), + /// but we spent some time to make is less ugly than it used to be. + /// + /// \return pointer to Pkt6 that represents received SOLICIT + PerfPkt6* capture() { + uint8_t data[98]; + data[0] = 1; + data[1] = 1; data[2] = 2; data[3] = 3; data[4] = 0; + data[5] = 1; data[6] = 0; data[7] = 14; data[8] = 0; + data[9] = 1; data[10] = 0; data[11] = 1; data[12] = 21; + data[13] = 158; data[14] = 60; data[15] = 22; data[16] = 0; + data[17] = 30; data[18] = 140; data[19] = 155; data[20] = 115; + data[21] = 73; data[22] = 0; data[23] = 3; data[24] = 0; + data[25] = 40; data[26] = 0; data[27] = 0; data[28] = 0; + data[29] = 1; data[30] = 255; data[31] = 255; data[32] = 255; + data[33] = 255; data[34] = 255; data[35] = 255; data[36] = 255; + data[37] = 255; data[38] = 0; data[39] = 5; data[40] = 0; + data[41] = 24; data[42] = 32; data[43] = 1; data[44] = 13; + data[45] = 184; data[46] = 0; data[47] = 1; data[48] = 0; + data[49] = 0; data[50] = 0; data[51] = 0; data[52] = 0; + data[53] = 0; data[54] = 0; data[55] = 0; data[56] = 18; + data[57] = 52; data[58] = 255; data[59] = 255; data[60] = 255; + data[61] = 255; data[62] = 255; data[63] = 255; data[64] = 255; + data[65] = 255; data[66] = 0; data[67] = 23; data[68] = 0; + data[69] = 16; data[70] = 32; data[71] = 1; data[72] = 13; + data[73] = 184; data[74] 
= 0; data[75] = 1; data[76] = 0; + data[77] = 0; data[78] = 0; data[79] = 0; data[80] = 0; + data[81] = 0; data[82] = 0; data[83] = 0; data[84] = 221; + data[85] = 221; data[86] = 0; data[87] = 8; data[88] = 0; + data[89] = 2; data[90] = 0; data[91] = 100; data[92] = 0; + data[93] = 6; data[94] = 0; data[95] = 2; data[96] = 0; + data[97] = 23; + + PerfPkt6* pkt = new PerfPkt6(data, sizeof(data)); + + return (pkt); + } + + /// \brief Returns truncated SOLICIT packet. + /// + /// Returns truncated SOLICIT packet which will be used for + /// negative tests: e.g. pack options out of packet. + /// + /// \return pointer to Pkt6 that represents truncated SOLICIT + PerfPkt6* captureTruncated() { + uint8_t data[17]; + data[0] = 1; + data[1] = 1; data[2] = 2; data[3] = 3; data[4] = 0; + data[5] = 1; data[6] = 0; data[7] = 14; data[8] = 0; + data[9] = 1; data[10] = 0; data[11] = 1; data[12] = 21; + data[13] = 158; data[14] = 60; data[15] = 22; data[16] = 0; + + PerfPkt6* pkt = new PerfPkt6(data, sizeof(data)); + + return (pkt); + } + + +}; + +TEST_F(PerfPkt6Test, Constructor) { + // Data to be used to create packet. + uint8_t data[] = { 0, 1, 2, 3, 4, 5 }; + + // Test constructor to be used for incoming messages. + // Use default (1) offset value and don't specify transaction id. + boost::scoped_ptr pkt1(new PerfPkt6(data, sizeof(data))); + EXPECT_EQ(sizeof(data), pkt1->getData().size()); + EXPECT_EQ(0, memcmp(&pkt1->getData()[0], data, sizeof(data))); + EXPECT_EQ(1, pkt1->getTransidOffset()); + + // Test constructor to be used for outgoing messages. + // Use non-zero offset and specify transaction id. 
+ const size_t offset_transid = 10; + const uint32_t transid = 0x010203; + boost::scoped_ptr pkt2(new PerfPkt6(data, sizeof(data), + offset_transid, transid)); + EXPECT_EQ(sizeof(data), pkt2->getData().size()); + EXPECT_EQ(0, memcmp(&pkt2->getData()[0], data, sizeof(data))); + EXPECT_EQ(0x010203, pkt2->getTransid()); + EXPECT_EQ(10, pkt2->getTransidOffset()); +} + +TEST_F(PerfPkt6Test, RawPackUnpack) { + // Create first packet. + boost::scoped_ptr pkt1(capture()); + + // Create some input buffers to initialize options. + uint8_t buf_elapsed_time[] = { 1, 1 }; + uint8_t buf_duid[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }; + + // Create options. + const size_t offset_elapsed_time = 86; + OptionBuffer vec_elapsed_time(buf_elapsed_time, + buf_elapsed_time + sizeof(buf_elapsed_time)); + LocalizedOptionPtr pkt1_elapsed_time(new LocalizedOption(Option::V6, + D6O_ELAPSED_TIME, + vec_elapsed_time, + offset_elapsed_time)); + const size_t offset_duid = 4; + OptionBuffer vec_duid(buf_duid, buf_duid + sizeof(buf_duid)); + LocalizedOptionPtr pkt1_duid(new LocalizedOption(Option::V6, + D6O_CLIENTID, + vec_duid, + offset_duid)); + + // Add option to packet and create on-wire format from added options. + // Contents of options will override contents of packet buffer. + ASSERT_NO_THROW(pkt1->addOption(pkt1_elapsed_time)); + ASSERT_NO_THROW(pkt1->addOption(pkt1_duid)); + ASSERT_TRUE(pkt1->rawPack()); + + // Reset so as we can reuse them for another packet. + vec_elapsed_time.clear(); + vec_duid.clear(); + + // Get output buffer from packet 1 to create new packet + // that will be later validated. + util::OutputBuffer pkt1_output = pkt1->getBuffer(); + ASSERT_EQ(pkt1_output.getLength(), pkt1->getData().size()); + const uint8_t* pkt1_output_data = static_cast + (pkt1_output.getData()); + boost::scoped_ptr pkt2(new PerfPkt6(pkt1_output_data, + pkt1_output.getLength())); + + // Create objects specifying options offset in a packet. 
+ // Offsets will inform pkt2 object where to read data from. + LocalizedOptionPtr pkt2_elapsed_time(new LocalizedOption(Option::V6, + D6O_ELAPSED_TIME, + vec_elapsed_time, + offset_elapsed_time)); + LocalizedOptionPtr pkt2_duid(new LocalizedOption(Option::V6, + D6O_CLIENTID, + vec_duid, + offset_duid)); + // Add options to packet to pass their offsets. + pkt2->addOption(pkt2_elapsed_time); + pkt2->addOption(pkt2_duid); + + // Unpack: get relevant parts of buffer data into option objects. + ASSERT_TRUE(pkt2->rawUnpack()); + + // Once option data is stored in options objects we pull it out. + pkt2_elapsed_time = boost::dynamic_pointer_cast + (pkt2->getOption(D6O_ELAPSED_TIME)); + pkt2_duid = boost::dynamic_pointer_cast + (pkt2->getOption(D6O_CLIENTID)); + + // Check if options are present. They have to be there since + // we have added them ourselfs. + ASSERT_TRUE(pkt2_elapsed_time); + ASSERT_TRUE(pkt2_duid); + + // Expecting option contents be the same as original. + OptionBuffer pkt2_elapsed_time_data = pkt2_elapsed_time->getData(); + OptionBuffer pkt2_duid_data = pkt2_duid->getData(); + EXPECT_EQ(0x0101, pkt2_elapsed_time->getUint16()); + EXPECT_TRUE(std::equal(pkt2_duid_data.begin(), + pkt2_duid_data.end(), + buf_duid)); +} + +TEST_F(PerfPkt6Test, InvalidOptions) { + // Create packet. + boost::scoped_ptr pkt1(capture()); + OptionBuffer vec_server_id; + vec_server_id.resize(10); + // Testing invalid offset of the option (greater than packet size) + const size_t offset_serverid[] = { 150, 85 }; + LocalizedOptionPtr pkt1_serverid(new LocalizedOption(Option::V6, + D6O_SERVERID, + vec_server_id, + offset_serverid[0])); + pkt1->addOption(pkt1_serverid); + // Pack has to fail due to invalid offset. + EXPECT_FALSE(pkt1->rawPack()); + + // Create packet. + boost::scoped_ptr pkt2(capture()); + // Testing offset of the option (lower than pakcet size but + // tail of the option out of bounds). 
+    LocalizedOptionPtr pkt2_serverid(new LocalizedOption(Option::V6,
+                                                         D6O_SERVERID,
+                                                         vec_server_id,
+                                                         offset_serverid[1]));
+    pkt2->addOption(pkt2_serverid);
+    // Pack must fail due to invalid offset.
+    EXPECT_FALSE(pkt2->rawPack());
+}
+
+
+TEST_F(PerfPkt6Test, TruncatedPacket) {
+    cout << "Testing parsing options from truncated packet. "
+         << "This may produce spurious errors" << endl;
+
+    // Create truncated packet (cut in the middle of the DUID option).
+    boost::scoped_ptr<PerfPkt6> pkt1(captureTruncated());
+    OptionBuffer vec_duid;
+    vec_duid.resize(30);
+    const size_t offset_duid = 4;
+    LocalizedOptionPtr pkt1_duid(new LocalizedOption(Option::V6,
+                                                     D6O_CLIENTID,
+                                                     vec_duid,
+                                                     offset_duid));
+    pkt1->addOption(pkt1_duid);
+    // Pack/unpack must fail because length of the option read from buffer
+    // will extend over the actual packet length.
+    EXPECT_FALSE(pkt1->rawUnpack());
+    EXPECT_FALSE(pkt1->rawPack());
+}
+
+TEST_F(PerfPkt6Test, PackTransactionId) {
+    uint8_t data[100];
+    memset(&data, 0, sizeof(data));
+
+    const size_t offset_transid[] = { 50, 100 };
+    const uint32_t transid = 0x010203;
+
+    // Create dummy packet that is simply filled with zeros.
+    boost::scoped_ptr<PerfPkt6> pkt1(new PerfPkt6(data,
+                                                  sizeof(data),
+                                                  offset_transid[0],
+                                                  transid));
+
+    // Reference data are non zero so we can detect them in dummy packet.
+    uint8_t ref_data[3] = { 1, 2, 3 };
+
+    // This will store given transaction id in the packet data at
+    // offset of 50.
+    ASSERT_TRUE(pkt1->rawPack());
+
+    // Get the output buffer so we can validate it.
+    util::OutputBuffer out_buf = pkt1->getBuffer();
+    ASSERT_EQ(sizeof(data), out_buf.getLength());
+    const uint8_t *out_buf_data = static_cast<const uint8_t*>
+        (out_buf.getData());
+
+    // Validate transaction id.
+    EXPECT_EQ(0, memcmp(out_buf_data + offset_transid[0], ref_data, 3));
+
+
+    // Out of bounds transaction id offset.
+    boost::scoped_ptr<PerfPkt6> pkt2(new PerfPkt6(data,
+                                                  sizeof(data),
+                                                  offset_transid[1],
+                                                  transid));
+    cout << "Testing out of bounds offset. 
" + "This may produce spurious errors ..." << endl; + EXPECT_FALSE(pkt2->rawPack()); +} + +TEST_F(PerfPkt6Test, UnpackTransactionId) { + // Initialize data for dummy packet (zeros only). + uint8_t data[100] = { 0 }; + + // Generate transaction id = 0x010203 and inject at offset = 50. + for (int i = 50; i < 53; ++i) { + data[i] = i - 49; + } + // Create packet and point out that transaction id is at offset 50. + const size_t offset_transid[] = { 50, 300 }; + boost::scoped_ptr pkt1(new PerfPkt6(data, + sizeof(data), + offset_transid[0])); + + // Get transaction id out of buffer and store in class member. + ASSERT_TRUE(pkt1->rawUnpack()); + // Test value of transaction id. + EXPECT_EQ(0x010203, pkt1->getTransid()); + + // Out of bounds transaction id offset. + boost::scoped_ptr pkt2(new PerfPkt6(data, + sizeof(data), + offset_transid[1])); + cout << "Testing out of bounds offset. " + "This may produce spurious errors ..." << endl; + EXPECT_FALSE(pkt2->rawUnpack()); + +} + +} diff --git a/tests/tools/perfdhcp/tests/run_unittests.cc b/tests/tools/perfdhcp/tests/run_unittests.cc new file mode 100644 index 0000000000..6eeca758ad --- /dev/null +++ b/tests/tools/perfdhcp/tests/run_unittests.cc @@ -0,0 +1,25 @@ +// Copyright (C) 2009 Internet Systems Consortium, Inc. ("ISC") +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS. 
IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
+
+int
+main(int argc, char* argv[]) {
+    ::testing::InitGoogleTest(&argc, argv);
+
+    return (isc::util::unittests::run_all());
+}