mirror of
https://gitlab.isc.org/isc-projects/kea
synced 2025-08-30 05:27:55 +00:00
[trac710] Merge branch 'master' into trac710
Conflicts: src/bin/cfgmgr/plugins/Makefile.am src/lib/python/isc/log/Makefile.am
This commit is contained in:
commit
dae1d2e24f
@ -887,12 +887,6 @@ AC_OUTPUT([doc/version.ent
|
||||
src/bin/zonemgr/run_b10-zonemgr.sh
|
||||
src/bin/stats/stats.py
|
||||
src/bin/stats/stats_httpd.py
|
||||
src/bin/stats/stats.spec
|
||||
src/bin/stats/stats-schema.spec
|
||||
src/bin/stats/stats-httpd.spec
|
||||
src/bin/stats/stats-httpd-xml.tpl
|
||||
src/bin/stats/stats-httpd-xsd.tpl
|
||||
src/bin/stats/stats-httpd-xsl.tpl
|
||||
src/bin/bind10/bind10.py
|
||||
src/bin/bind10/run_bind10.sh
|
||||
src/bin/bind10/tests/bind10_test.py
|
||||
@ -914,8 +908,8 @@ AC_OUTPUT([doc/version.ent
|
||||
src/lib/config/tests/data_def_unittests_config.h
|
||||
src/lib/python/isc/config/tests/config_test
|
||||
src/lib/python/isc/cc/tests/cc_test
|
||||
src/lib/python/isc/log/tests/log_test
|
||||
src/lib/python/isc/notify/tests/notify_out_test
|
||||
src/lib/python/isc/log/tests/log_console.py
|
||||
src/lib/dns/gen-rdatacode.py
|
||||
src/lib/python/bind10_config.py
|
||||
src/lib/dns/tests/testdata/gen-wiredata.py
|
||||
@ -957,6 +951,7 @@ AC_OUTPUT([doc/version.ent
|
||||
chmod +x src/lib/log/tests/destination_test.sh
|
||||
chmod +x src/lib/log/tests/severity_test.sh
|
||||
chmod +x src/lib/util/python/mkpywrapper.py
|
||||
chmod +x src/lib/python/isc/log/tests/log_console.py
|
||||
chmod +x tests/system/conf.sh
|
||||
])
|
||||
AC_OUTPUT
|
||||
|
@ -4,6 +4,7 @@ EXTRA_DIST += logging.spec b10logging.py
|
||||
|
||||
config_plugindir = @prefix@/share/@PACKAGE@/config_plugins
|
||||
config_plugin_DATA = tsig_keys.py tsig_keys.spec
|
||||
config_plugin_DATA += b10logging.py logging.spec
|
||||
|
||||
CLEANDIRS = __pycache__
|
||||
|
||||
|
@ -118,7 +118,7 @@ of the body of the message failed due to some non-protocol related reason
|
||||
(although the parsing of the header succeeded). The message parameters give
|
||||
a textual description of the problem and the RCODE returned.
|
||||
|
||||
% PRINTMSG print message command, aeguments are: %1
|
||||
% PRINTMSG print message command, arguments are: %1
|
||||
This message is logged when a "print_message" command is received over the
|
||||
command channel.
|
||||
|
||||
@ -169,7 +169,7 @@ resolver gives up trying to resolve a query. Retry count: the number of times
|
||||
the resolver will retry a query to an upstream server if it gets a timeout.
|
||||
|
||||
The client and lookup timeouts require a bit more explanation. The
|
||||
resolution of the clent query might require a large number of queries to
|
||||
resolution of the client query might require a large number of queries to
|
||||
upstream nameservers. Even if none of these queries timeout, the total time
|
||||
taken to perform all the queries may exceed the client timeout. When this
|
||||
happens, a SERVFAIL is returned to the client, but the resolver continues
|
||||
|
@ -20,5 +20,5 @@ endif
|
||||
echo Running test: $$pytest ; \
|
||||
env PYTHONPATH=$(abs_top_builddir)/src/bin/xfrout:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
|
||||
$(LIBRARY_PATH_PLACEHOLDER) \
|
||||
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
|
||||
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
|
||||
done
|
||||
|
@ -116,8 +116,8 @@ class TestXfroutSession(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.sock = MySocket(socket.AF_INET,socket.SOCK_STREAM)
|
||||
self.log = isc.log.NSLogger('xfrout', '', severity = 'critical', log_to_console = False )
|
||||
self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(), self.log, TSIGKeyRing())
|
||||
#self.log = isc.log.NSLogger('xfrout', '', severity = 'critical', log_to_console = False )
|
||||
self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(), TSIGKeyRing())
|
||||
self.mdata = bytes(b'\xd6=\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\xfc\x00\x01')
|
||||
self.soa_record = (4, 3, 'example.com.', 'com.example.', 3600, 'SOA', None, 'master.example.com. admin.example.com. 1234 3600 1800 2419200 7200')
|
||||
|
||||
@ -520,7 +520,7 @@ class MyUnixSockServer(UnixSockServer):
|
||||
self._shutdown_event = threading.Event()
|
||||
self._max_transfers_out = 10
|
||||
self._cc = MyCCSession()
|
||||
self._log = isc.log.NSLogger('xfrout', '', severity = 'critical', log_to_console = False )
|
||||
#self._log = isc.log.NSLogger('xfrout', '', severity = 'critical', log_to_console = False )
|
||||
|
||||
class TestUnixSockServer(unittest.TestCase):
|
||||
def setUp(self):
|
||||
|
@ -26,7 +26,7 @@ from isc.datasrc import sqlite3_ds
|
||||
from socketserver import *
|
||||
import os
|
||||
from isc.config.ccsession import *
|
||||
from isc.log.log import *
|
||||
#from isc.log.log import *
|
||||
from isc.cc import SessionError, SessionTimeout
|
||||
from isc.notify import notify_out
|
||||
import isc.util.process
|
||||
@ -88,13 +88,13 @@ def get_rrset_len(rrset):
|
||||
|
||||
|
||||
class XfroutSession():
|
||||
def __init__(self, sock_fd, request_data, server, log, tsig_key_ring):
|
||||
def __init__(self, sock_fd, request_data, server, tsig_key_ring):
|
||||
# The initializer for the superclass may call functions
|
||||
# that need _log to be set, so we set it first
|
||||
self._sock_fd = sock_fd
|
||||
self._request_data = request_data
|
||||
self._server = server
|
||||
self._log = log
|
||||
#self._log = log
|
||||
self._tsig_key_ring = tsig_key_ring
|
||||
self._tsig_ctx = None
|
||||
self._tsig_len = 0
|
||||
@ -110,7 +110,8 @@ class XfroutSession():
|
||||
self.dns_xfrout_start(self._sock_fd, self._request_data)
|
||||
#TODO, avoid catching all exceptions
|
||||
except Exception as e:
|
||||
self._log.log_message("error", str(e))
|
||||
#self._log.log_message("error", str(e))
|
||||
pass
|
||||
|
||||
os.close(self._sock_fd)
|
||||
|
||||
@ -137,7 +138,7 @@ class XfroutSession():
|
||||
rcode = self._check_request_tsig(msg, mdata)
|
||||
|
||||
except Exception as err:
|
||||
self._log.log_message("error", str(err))
|
||||
#self._log.log_message("error", str(err))
|
||||
return Rcode.FORMERR(), None
|
||||
|
||||
return rcode, msg
|
||||
@ -244,16 +245,17 @@ class XfroutSession():
|
||||
zone_name = self._get_query_zone_name(msg)
|
||||
rcode_ = self._check_xfrout_available(zone_name)
|
||||
if rcode_ != Rcode.NOERROR():
|
||||
self._log.log_message("info", "transfer of '%s/IN' failed: %s",
|
||||
zone_name, rcode_.to_text())
|
||||
#self._log.log_message("info", "transfer of '%s/IN' failed: %s",
|
||||
# zone_name, rcode_.to_text())
|
||||
return self. _reply_query_with_error_rcode(msg, sock_fd, rcode_)
|
||||
|
||||
try:
|
||||
self._log.log_message("info", "transfer of '%s/IN': AXFR started" % zone_name)
|
||||
#self._log.log_message("info", "transfer of '%s/IN': AXFR started" % zone_name)
|
||||
self._reply_xfrout_query(msg, sock_fd, zone_name)
|
||||
self._log.log_message("info", "transfer of '%s/IN': AXFR end" % zone_name)
|
||||
#self._log.log_message("info", "transfer of '%s/IN': AXFR end" % zone_name)
|
||||
except Exception as err:
|
||||
self._log.log_message("error", str(err))
|
||||
#self._log.log_message("error", str(err))
|
||||
pass
|
||||
|
||||
self._server.decrease_transfers_counter()
|
||||
return
|
||||
@ -317,7 +319,7 @@ class XfroutSession():
|
||||
|
||||
for rr_data in sqlite3_ds.get_zone_datas(zone_name, self._server.get_db_file()):
|
||||
if self._server._shutdown_event.is_set(): # Check if xfrout is shutdown
|
||||
self._log.log_message("info", "xfrout process is being shutdown")
|
||||
#self._log.log_message("info", "xfrout process is being shutdown")
|
||||
return
|
||||
# TODO: RRType.SOA() ?
|
||||
if RRType(rr_data[5]) == RRType("SOA"): #ignore soa record
|
||||
@ -357,7 +359,7 @@ class XfroutSession():
|
||||
class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
|
||||
'''The unix domain socket server which accept xfr query sent from auth server.'''
|
||||
|
||||
def __init__(self, sock_file, handle_class, shutdown_event, config_data, cc, log):
|
||||
def __init__(self, sock_file, handle_class, shutdown_event, config_data, cc):
|
||||
self._remove_unused_sock_file(sock_file)
|
||||
self._sock_file = sock_file
|
||||
socketserver_mixin.NoPollMixIn.__init__(self)
|
||||
@ -366,7 +368,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
|
||||
self._transfers_counter = 0
|
||||
self._shutdown_event = shutdown_event
|
||||
self._write_sock, self._read_sock = socket.socketpair()
|
||||
self._log = log
|
||||
#self._log = log
|
||||
self.update_config_data(config_data)
|
||||
self._cc = cc
|
||||
|
||||
@ -394,7 +396,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
|
||||
try:
|
||||
request, client_address = self.get_request()
|
||||
except socket.error:
|
||||
self._log.log_message("error", "Failed to fetch request")
|
||||
#self._log.log_message("error", "Failed to fetch request")
|
||||
return
|
||||
|
||||
# Check self._shutdown_event to ensure the real shutdown comes.
|
||||
@ -408,7 +410,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
|
||||
(rlist, wlist, xlist) = ([], [], [])
|
||||
continue
|
||||
else:
|
||||
self._log.log_message("error", "Error with select(): %s" %e)
|
||||
#self._log.log_message("error", "Error with select(): %s" %e)
|
||||
break
|
||||
|
||||
# self.server._shutdown_event will be set by now, if it is not a false
|
||||
@ -419,8 +421,8 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
|
||||
try:
|
||||
self.process_request(request)
|
||||
except:
|
||||
self._log.log_message("error", "Exception happened during processing of %s"
|
||||
% str(client_address))
|
||||
#self._log.log_message("error", "Exception happened during processing of %s"
|
||||
# % str(client_address))
|
||||
break
|
||||
|
||||
def _handle_request_noblock(self):
|
||||
@ -438,8 +440,8 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
|
||||
# This may happen when one xfrout process try to connect to
|
||||
# xfrout unix socket server, to check whether there is another
|
||||
# xfrout running.
|
||||
if sock_fd == FD_COMM_ERROR:
|
||||
self._log.log_message("error", "Failed to receive the file descriptor for XFR connection")
|
||||
#if sock_fd == FD_COMM_ERROR:
|
||||
#self._log.log_message("error", "Failed to receive the file descriptor for XFR connection")
|
||||
return
|
||||
|
||||
# receive request msg
|
||||
@ -456,7 +458,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
|
||||
|
||||
def finish_request(self, sock_fd, request_data):
|
||||
'''Finish one request by instantiating RequestHandlerClass.'''
|
||||
self.RequestHandlerClass(sock_fd, request_data, self, self._log, self.tsig_key_ring)
|
||||
self.RequestHandlerClass(sock_fd, request_data, self, self.tsig_key_ring)
|
||||
|
||||
def _remove_unused_sock_file(self, sock_file):
|
||||
'''Try to remove the socket file. If the file is being used
|
||||
@ -464,8 +466,8 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
|
||||
If it's not a socket file or nobody is listening
|
||||
, it will be removed. If it can't be removed, exit from python. '''
|
||||
if self._sock_file_in_use(sock_file):
|
||||
self._log.log_message("error", "Fail to start xfrout process, unix socket file '%s'"
|
||||
" is being used by another xfrout process\n" % sock_file)
|
||||
#self._log.log_message("error", "Fail to start xfrout process, unix socket file '%s'"
|
||||
# " is being used by another xfrout process\n" % sock_file)
|
||||
sys.exit(0)
|
||||
else:
|
||||
if not os.path.exists(sock_file):
|
||||
@ -474,7 +476,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
|
||||
try:
|
||||
os.unlink(sock_file)
|
||||
except OSError as err:
|
||||
self._log.log_message("error", "[b10-xfrout] Fail to remove file %s: %s\n" % (sock_file, err))
|
||||
#self._log.log_message("error", "[b10-xfrout] Fail to remove file %s: %s\n" % (sock_file, err))
|
||||
sys.exit(0)
|
||||
|
||||
def _sock_file_in_use(self, sock_file):
|
||||
@ -495,17 +497,18 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
|
||||
try:
|
||||
os.unlink(self._sock_file)
|
||||
except Exception as e:
|
||||
self._log.log_message('error', str(e))
|
||||
#self._log.log_message('error', str(e))
|
||||
pass
|
||||
|
||||
def update_config_data(self, new_config):
|
||||
'''Apply the new config setting of xfrout module. '''
|
||||
self._log.log_message('info', 'update config data start.')
|
||||
#self._log.log_message('info', 'update config data start.')
|
||||
self._lock.acquire()
|
||||
self._max_transfers_out = new_config.get('transfers_out')
|
||||
self.set_tsig_key_ring(new_config.get('tsig_key_ring'))
|
||||
self._log.log_message('info', 'max transfer out : %d', self._max_transfers_out)
|
||||
#self._log.log_message('info', 'max transfer out : %d', self._max_transfers_out)
|
||||
self._lock.release()
|
||||
self._log.log_message('info', 'update config data complete.')
|
||||
#self._log.log_message('info', 'update config data complete.')
|
||||
|
||||
def set_tsig_key_ring(self, key_list):
|
||||
"""Set the tsig_key_ring , given a TSIG key string list representation. """
|
||||
@ -521,7 +524,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
|
||||
self.tsig_key_ring.add(TSIGKey(key_item))
|
||||
except InvalidParameter as ipe:
|
||||
errmsg = "bad TSIG key string: " + str(key_item)
|
||||
self._log.log_message('error', '%s' % errmsg)
|
||||
#self._log.log_message('error', '%s' % errmsg)
|
||||
|
||||
def get_db_file(self):
|
||||
file, is_default = self._cc.get_remote_config_value("Auth", "database_file")
|
||||
@ -553,16 +556,16 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
|
||||
class XfroutServer:
|
||||
def __init__(self):
|
||||
self._unix_socket_server = None
|
||||
self._log = None
|
||||
#self._log = None
|
||||
self._listen_sock_file = UNIX_SOCKET_FILE
|
||||
self._shutdown_event = threading.Event()
|
||||
self._cc = isc.config.ModuleCCSession(SPECFILE_LOCATION, self.config_handler, self.command_handler)
|
||||
self._config_data = self._cc.get_full_config()
|
||||
self._cc.start()
|
||||
self._cc.add_remote_config(AUTH_SPECFILE_LOCATION);
|
||||
self._log = isc.log.NSLogger(self._config_data.get('log_name'), self._config_data.get('log_file'),
|
||||
self._config_data.get('log_severity'), self._config_data.get('log_versions'),
|
||||
self._config_data.get('log_max_bytes'), True)
|
||||
#self._log = isc.log.NSLogger(self._config_data.get('log_name'), self._config_data.get('log_file'),
|
||||
# self._config_data.get('log_severity'), self._config_data.get('log_versions'),
|
||||
# self._config_data.get('log_max_bytes'), True)
|
||||
self._start_xfr_query_listener()
|
||||
self._start_notifier()
|
||||
|
||||
@ -570,13 +573,13 @@ class XfroutServer:
|
||||
'''Start a new thread to accept xfr query. '''
|
||||
self._unix_socket_server = UnixSockServer(self._listen_sock_file, XfroutSession,
|
||||
self._shutdown_event, self._config_data,
|
||||
self._cc, self._log);
|
||||
self._cc)
|
||||
listener = threading.Thread(target=self._unix_socket_server.serve_forever)
|
||||
listener.start()
|
||||
|
||||
def _start_notifier(self):
|
||||
datasrc = self._unix_socket_server.get_db_file()
|
||||
self._notifier = notify_out.NotifyOut(datasrc, self._log)
|
||||
self._notifier = notify_out.NotifyOut(datasrc)
|
||||
self._notifier.dispatcher()
|
||||
|
||||
def send_notify(self, zone_name, zone_class):
|
||||
@ -591,8 +594,8 @@ class XfroutServer:
|
||||
continue
|
||||
self._config_data[key] = new_config[key]
|
||||
|
||||
if self._log:
|
||||
self._log.update_config(new_config)
|
||||
#if self._log:
|
||||
# self._log.update_config(new_config)
|
||||
|
||||
if self._unix_socket_server:
|
||||
self._unix_socket_server.update_config_data(self._config_data)
|
||||
@ -621,7 +624,7 @@ class XfroutServer:
|
||||
|
||||
def command_handler(self, cmd, args):
|
||||
if cmd == "shutdown":
|
||||
self._log.log_message("info", "Received shutdown command.")
|
||||
#self._log.log_message("info", "Received shutdown command.")
|
||||
self.shutdown()
|
||||
answer = create_answer(0)
|
||||
|
||||
@ -629,8 +632,8 @@ class XfroutServer:
|
||||
zone_name = args.get('zone_name')
|
||||
zone_class = args.get('zone_class')
|
||||
if zone_name and zone_class:
|
||||
self._log.log_message("info", "zone '%s/%s': receive notify others command" \
|
||||
% (zone_name, zone_class))
|
||||
#self._log.log_message("info", "zone '%s/%s': receive notify others command" \
|
||||
# % (zone_name, zone_class))
|
||||
self.send_notify(zone_name, zone_class)
|
||||
answer = create_answer(0)
|
||||
else:
|
||||
|
@ -1,4 +1,5 @@
|
||||
AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
|
||||
AM_CPPFLAGS += $(BOOST_INCLUDES)
|
||||
|
||||
TESTS =
|
||||
if HAVE_GTEST
|
||||
|
@ -266,12 +266,12 @@ writePythonFile(const string& file, MessageDictionary& dictionary) {
|
||||
"# File created from " << message_file.fullName() << " on " <<
|
||||
currentTime() << "\n" <<
|
||||
"\n" <<
|
||||
"import isc.log.message\n" <<
|
||||
"import isc.log\n" <<
|
||||
"\n";
|
||||
|
||||
vector<string> idents(sortedIdentifiers(dictionary));
|
||||
BOOST_FOREACH(const string& ident, idents) {
|
||||
pyfile << ident << " = isc.log.message.create(\"" <<
|
||||
pyfile << ident << " = isc.log.create_message(\"" <<
|
||||
ident << "\", \"" << quoteString(dictionary.getText(ident)) <<
|
||||
"\")\n";
|
||||
}
|
||||
|
@ -2,4 +2,3 @@ import isc.datasrc
|
||||
import isc.cc
|
||||
import isc.config
|
||||
#import isc.dns
|
||||
import isc.log
|
||||
|
@ -1,8 +1,24 @@
|
||||
SUBDIRS = . tests
|
||||
|
||||
python_PYTHON = __init__.py log.py
|
||||
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
|
||||
AM_CPPFLAGS += $(BOOST_INCLUDES)
|
||||
AM_CXXFLAGS = $(B10_CXXFLAGS)
|
||||
|
||||
pythondir = $(pyexecdir)/isc/log
|
||||
pythondir = $(pyexecdir)/isc
|
||||
python_LTLIBRARIES = log.la
|
||||
log_la_SOURCES = log.cc
|
||||
|
||||
log_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
|
||||
# Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
|
||||
# placed after -Wextra defined in AM_CXXFLAGS
|
||||
log_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
|
||||
log_la_LDFLAGS = $(PYTHON_LDFLAGS)
|
||||
log_la_LDFLAGS += -module
|
||||
log_la_LIBADD = $(top_builddir)/src/lib/log/liblog.la
|
||||
log_la_LIBADD += $(PYTHON_LIB)
|
||||
|
||||
# This is not installed, it helps locate the module during tests
|
||||
EXTRA_DIST = __init__.py
|
||||
|
||||
pytest:
|
||||
$(SHELL) tests/log_test
|
||||
|
@ -1 +1,29 @@
|
||||
from isc.log.log import *
|
||||
# Copyright (C) 2011 Internet Systems Consortium.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software for any
|
||||
# purpose with or without fee is hereby granted, provided that the above
|
||||
# copyright notice and this permission notice appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
|
||||
# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
|
||||
# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
|
||||
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
|
||||
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
|
||||
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
# This file is not installed. The log.so is installed into the right place.
|
||||
# It is only to find it in the .libs directory when we run as a test or
|
||||
# from the build directory.
|
||||
# But as nobody gives us the builddir explicitly (and we can't use generation
|
||||
# from .in file, as it would put us into the builddir and we wouldn't be found)
|
||||
# we guess from current directory. Any idea for something better? This should
|
||||
# be enough for the tests, but would it work for B10_FROM_SOURCE as well?
|
||||
# Should we look there? Or define something in bind10_config?
|
||||
|
||||
import os
|
||||
cwd = os.getcwd()
|
||||
pos = cwd.rfind('/src/')
|
||||
import sys; sys.path.insert(0, cwd[:pos] + '/src/lib/python/isc/log/.libs')
|
||||
from log import *
|
||||
|
629
src/lib/python/isc/log/log.cc
Normal file
629
src/lib/python/isc/log/log.cc
Normal file
@ -0,0 +1,629 @@
|
||||
// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
|
||||
//
|
||||
// Permission to use, copy, modify, and/or distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
|
||||
// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
||||
// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
|
||||
// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
|
||||
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
// PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
#define PY_SSIZE_T_CLEAN
|
||||
#include <Python.h>
|
||||
#include <structmember.h>
|
||||
|
||||
#include <config.h>
|
||||
|
||||
#include <log/message_dictionary.h>
|
||||
#include <log/logger_manager.h>
|
||||
#include <log/logger.h>
|
||||
|
||||
#include <string>
|
||||
#include <boost/bind.hpp>
|
||||
|
||||
using namespace isc::log;
|
||||
using std::string;
|
||||
using boost::bind;
|
||||
|
||||
namespace {
|
||||
|
||||
// This is for testing only. The real module will have it always set as
|
||||
// NULL and will use the global dictionary.
|
||||
MessageDictionary* testDictionary = NULL;
|
||||
|
||||
PyObject*
|
||||
setTestDictionary(PyObject*, PyObject* args) {
|
||||
PyObject* enableO;
|
||||
// The API doesn't seem to provide conversion to bool,
|
||||
// so we do it little bit manually
|
||||
if (!PyArg_ParseTuple(args, "O", &enableO)) {
|
||||
return (NULL);
|
||||
}
|
||||
int enableI(PyObject_IsTrue(enableO));
|
||||
if (enableI == -1) {
|
||||
return (NULL);
|
||||
}
|
||||
bool enable(enableI != 0);
|
||||
|
||||
try {
|
||||
delete testDictionary;
|
||||
testDictionary = NULL;
|
||||
if (enable) {
|
||||
testDictionary = new MessageDictionary;
|
||||
}
|
||||
}
|
||||
catch (const std::exception& e) {
|
||||
PyErr_SetString(PyExc_RuntimeError, e.what());
|
||||
return (NULL);
|
||||
}
|
||||
catch (...) {
|
||||
PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
|
||||
return (NULL);
|
||||
}
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
PyObject*
|
||||
createMessage(PyObject*, PyObject* args) {
|
||||
const char* mid;
|
||||
const char* text;
|
||||
// We parse the strings
|
||||
if (!PyArg_ParseTuple(args, "ss", &mid, &text)) {
|
||||
return (NULL);
|
||||
}
|
||||
PyObject* origMid;
|
||||
// And extract the original representation of the message
|
||||
// ID, so we can return it instead of creating another instance.
|
||||
// This call shouldn't fail if the previous suceeded.
|
||||
if (!PyArg_ParseTuple(args, "Os", &origMid, &text)) {
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
try {
|
||||
MessageDictionary* dict = testDictionary ? testDictionary :
|
||||
&MessageDictionary::globalDictionary();
|
||||
|
||||
// We ignore the result, they will be in some kind of dupe list
|
||||
// if there's a problem
|
||||
dict->add(mid, text);
|
||||
}
|
||||
catch (const std::exception& e) {
|
||||
PyErr_SetString(PyExc_RuntimeError, e.what());
|
||||
return (NULL);
|
||||
}
|
||||
catch (...) {
|
||||
PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
// Return the ID
|
||||
Py_INCREF(origMid);
|
||||
return (origMid);
|
||||
}
|
||||
|
||||
PyObject*
|
||||
getMessage(PyObject*, PyObject* args) {
|
||||
const char* mid;
|
||||
if (!PyArg_ParseTuple(args, "s", &mid)) {
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
try {
|
||||
MessageDictionary* dict = testDictionary ? testDictionary :
|
||||
&MessageDictionary::globalDictionary();
|
||||
|
||||
const std::string& result(dict->getText(mid));
|
||||
if (result.empty()) {
|
||||
Py_RETURN_NONE;
|
||||
} else {
|
||||
return (Py_BuildValue("s", result.c_str()));
|
||||
}
|
||||
}
|
||||
catch (const std::exception& e) {
|
||||
PyErr_SetString(PyExc_RuntimeError, e.what());
|
||||
return (NULL);
|
||||
}
|
||||
catch (...) {
|
||||
PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
|
||||
return (NULL);
|
||||
}
|
||||
}
|
||||
|
||||
PyObject*
|
||||
reset(PyObject*, PyObject*) {
|
||||
LoggerManager::reset();
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
PyObject*
|
||||
init(PyObject*, PyObject* args) {
|
||||
const char* root;
|
||||
const char* file(NULL);
|
||||
const char* severity("INFO");
|
||||
int dbglevel(0);
|
||||
if (!PyArg_ParseTuple(args, "s|siz", &root, &severity, &dbglevel, &file)) {
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
try {
|
||||
LoggerManager::init(root, getSeverity(severity), dbglevel, file);
|
||||
}
|
||||
catch (const std::exception& e) {
|
||||
PyErr_SetString(PyExc_RuntimeError, e.what());
|
||||
return (NULL);
|
||||
}
|
||||
catch (...) {
|
||||
PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
|
||||
return (NULL);
|
||||
}
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
PyMethodDef methods[] = {
|
||||
{"set_test_dictionary", setTestDictionary, METH_VARARGS,
|
||||
"Set or unset testing mode for message dictionary. In testing, "
|
||||
"the create_message and get_message functions work on different "
|
||||
"than the logger-global dictionary, not polluting it."},
|
||||
{"create_message", createMessage, METH_VARARGS,
|
||||
"Creates a new message in the dictionary. You shouldn't need to "
|
||||
"call this directly, it should be called by the generated message "
|
||||
"file. Returns the identifier to be used in logging. The text "
|
||||
"shouldn't be empty."},
|
||||
{"get_message", getMessage, METH_VARARGS,
|
||||
"Get a message. This function is for testing purposes and you don't "
|
||||
"need to call it. It returns None if the message does not exist."},
|
||||
{"reset", reset, METH_NOARGS,
|
||||
"Reset all logging. For testing purposes only, do not use."},
|
||||
{"init", init, METH_VARARGS,
|
||||
"Run-time initialization. You need to call this before you do any "
|
||||
"logging, to configure the root logger name. You may also provide "
|
||||
"logging severity (one of 'DEBUG', 'INFO', 'WARN', 'ERROR' or "
|
||||
"'FATAL'), a debug level (integer in the range 0-99) and a file name "
|
||||
"of a dictionary with message text translations."},
|
||||
{NULL, NULL, 0, NULL}
|
||||
};
|
||||
|
||||
class LoggerWrapper : public PyObject {
|
||||
// Everything is public here, as it is accessible only inside this .cc file.
|
||||
public:
|
||||
Logger *logger_;
|
||||
};
|
||||
|
||||
extern PyTypeObject logger_type;
|
||||
|
||||
int
|
||||
Logger_init(LoggerWrapper* self, PyObject* args) {
|
||||
const char* name;
|
||||
if (!PyArg_ParseTuple(args, "s", &name)) {
|
||||
return (-1);
|
||||
}
|
||||
try {
|
||||
self->logger_ = new Logger(name);
|
||||
return (0);
|
||||
}
|
||||
catch (const std::exception& e) {
|
||||
PyErr_SetString(PyExc_RuntimeError, e.what());
|
||||
return (-1);
|
||||
}
|
||||
catch (...) {
|
||||
PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
|
||||
return (-1);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
Logger_destroy(LoggerWrapper* const self) {
|
||||
delete self->logger_;
|
||||
self->logger_ = NULL;
|
||||
Py_TYPE(self)->tp_free(self);
|
||||
}
|
||||
|
||||
// The isc::log doesn't contain function to convert this way
|
||||
const char*
|
||||
severityToText(const Severity& severity) {
|
||||
switch (severity) {
|
||||
case DEFAULT:
|
||||
return ("DEFAULT");
|
||||
case DEBUG:
|
||||
return ("DEBUG");
|
||||
case INFO:
|
||||
return ("INFO");
|
||||
case WARN:
|
||||
return ("WARN");
|
||||
case ERROR:
|
||||
return ("ERROR");
|
||||
case FATAL:
|
||||
return ("FATAL");
|
||||
default:
|
||||
return (NULL);
|
||||
}
|
||||
}
|
||||
|
||||
PyObject*
|
||||
Logger_getEffectiveSeverity(LoggerWrapper* self, PyObject*) {
|
||||
try {
|
||||
return (Py_BuildValue("s",
|
||||
severityToText(
|
||||
self->logger_->getEffectiveSeverity())));
|
||||
}
|
||||
catch (const std::exception& e) {
|
||||
PyErr_SetString(PyExc_RuntimeError, e.what());
|
||||
return (NULL);
|
||||
}
|
||||
catch (...) {
|
||||
PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
|
||||
return (NULL);
|
||||
}
|
||||
}
|
||||
|
||||
PyObject*
|
||||
Logger_getEffectiveDebugLevel(LoggerWrapper* self, PyObject*) {
|
||||
try {
|
||||
return (Py_BuildValue("i", self->logger_->getEffectiveDebugLevel()));
|
||||
}
|
||||
catch (const std::exception& e) {
|
||||
PyErr_SetString(PyExc_RuntimeError, e.what());
|
||||
return (NULL);
|
||||
}
|
||||
catch (...) {
|
||||
PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
|
||||
return (NULL);
|
||||
}
|
||||
}
|
||||
|
||||
PyObject*
|
||||
Logger_setSeverity(LoggerWrapper* self, PyObject* args) {
|
||||
const char* severity;
|
||||
int dbgLevel = 0;
|
||||
if (!PyArg_ParseTuple(args, "z|i", &severity, &dbgLevel)) {
|
||||
return (NULL);
|
||||
}
|
||||
try {
|
||||
self->logger_->setSeverity((severity == NULL) ? DEFAULT :
|
||||
getSeverity(severity), dbgLevel);
|
||||
}
|
||||
catch (const std::exception& e) {
|
||||
PyErr_SetString(PyExc_RuntimeError, e.what());
|
||||
return (NULL);
|
||||
}
|
||||
catch (...) {
|
||||
PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
|
||||
return (NULL);
|
||||
}
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
template<class FPtr> // Who should remember the pointer-to-method syntax
|
||||
PyObject*
|
||||
Logger_isLevelEnabled(LoggerWrapper* self, FPtr function) {
|
||||
try {
|
||||
if ((self->logger_->*function)()) {
|
||||
Py_RETURN_TRUE;
|
||||
} else {
|
||||
Py_RETURN_FALSE;
|
||||
}
|
||||
}
|
||||
catch (const std::exception& e) {
|
||||
PyErr_SetString(PyExc_RuntimeError, e.what());
|
||||
return (NULL);
|
||||
}
|
||||
catch (...) {
|
||||
PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
|
||||
return (NULL);
|
||||
}
|
||||
}
|
||||
|
||||
PyObject*
|
||||
Logger_isInfoEnabled(LoggerWrapper* self, PyObject*) {
|
||||
return (Logger_isLevelEnabled(self, &Logger::isInfoEnabled));
|
||||
}
|
||||
|
||||
PyObject*
|
||||
Logger_isWarnEnabled(LoggerWrapper* self, PyObject*) {
|
||||
return (Logger_isLevelEnabled(self, &Logger::isWarnEnabled));
|
||||
}
|
||||
|
||||
PyObject*
|
||||
Logger_isErrorEnabled(LoggerWrapper* self, PyObject*) {
|
||||
return (Logger_isLevelEnabled(self, &Logger::isErrorEnabled));
|
||||
}
|
||||
|
||||
PyObject*
|
||||
Logger_isFatalEnabled(LoggerWrapper* self, PyObject*) {
|
||||
return (Logger_isLevelEnabled(self, &Logger::isFatalEnabled));
|
||||
}
|
||||
|
||||
PyObject*
|
||||
Logger_isDebugEnabled(LoggerWrapper* self, PyObject* args) {
|
||||
int level = MIN_DEBUG_LEVEL;
|
||||
if (!PyArg_ParseTuple(args, "|i", &level)) {
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
try {
|
||||
if (self->logger_->isDebugEnabled(level)) {
|
||||
Py_RETURN_TRUE;
|
||||
} else {
|
||||
Py_RETURN_FALSE;
|
||||
}
|
||||
}
|
||||
catch (const std::exception& e) {
|
||||
PyErr_SetString(PyExc_RuntimeError, e.what());
|
||||
return (NULL);
|
||||
}
|
||||
catch (...) {
|
||||
PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
|
||||
return (NULL);
|
||||
}
|
||||
}
|
||||
|
||||
// To propagate python exceptions trough our code
|
||||
class InternalError {};
|
||||
|
||||
string
|
||||
objectToStr(PyObject* object, bool convert) {
|
||||
PyObject* cleanup(NULL);
|
||||
if (convert) {
|
||||
object = cleanup = PyObject_Str(object);
|
||||
if (object == NULL) {
|
||||
throw InternalError();
|
||||
}
|
||||
}
|
||||
const char* value;
|
||||
PyObject* tuple(Py_BuildValue("(O)", object));
|
||||
if (tuple == NULL) {
|
||||
if (cleanup != NULL) {
|
||||
Py_DECREF(cleanup);
|
||||
}
|
||||
throw InternalError();
|
||||
}
|
||||
|
||||
if (!PyArg_ParseTuple(tuple, "s", &value)) {
|
||||
Py_DECREF(tuple);
|
||||
if (cleanup != NULL) {
|
||||
Py_DECREF(cleanup);
|
||||
}
|
||||
throw InternalError();
|
||||
}
|
||||
string result(value);
|
||||
Py_DECREF(tuple);
|
||||
if (cleanup != NULL) {
|
||||
Py_DECREF(cleanup);
|
||||
}
|
||||
return (result);
|
||||
}
|
||||
|
||||
// Generic function to output the logging message. Called by the real functions.
|
||||
template<class Function>
|
||||
PyObject*
|
||||
Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
|
||||
try {
|
||||
Py_ssize_t number(PyObject_Length(args));
|
||||
if (number < 0) {
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
// Which argument is the first to format?
|
||||
size_t start(1);
|
||||
if (dbgLevel) {
|
||||
start ++;
|
||||
}
|
||||
|
||||
if (number < start) {
|
||||
return (PyErr_Format(PyExc_TypeError, "Too few arguments to "
|
||||
"logging call, at least %zu needed and %zd "
|
||||
"given", start, number));
|
||||
}
|
||||
|
||||
// Extract the fixed arguments
|
||||
PyObject *midO(PySequence_GetItem(args, start - 1));
|
||||
if (midO == NULL) {
|
||||
return (NULL);
|
||||
}
|
||||
string mid(objectToStr(midO, false));
|
||||
long dbg(0);
|
||||
if (dbgLevel) {
|
||||
PyObject *dbgO(PySequence_GetItem(args, 0));
|
||||
if (dbgO == NULL) {
|
||||
return (NULL);
|
||||
}
|
||||
dbg = PyLong_AsLong(dbgO);
|
||||
if (PyErr_Occurred()) {
|
||||
return (NULL);
|
||||
}
|
||||
}
|
||||
|
||||
// We create the logging message right now. If we fail to convert a
|
||||
// parameter to string, at least the part that we already did will
|
||||
// be output
|
||||
Logger::Formatter formatter(function(dbg, mid.c_str()));
|
||||
|
||||
// Now process the rest of parameters, convert each to string and put
|
||||
// into the formatter. It will print itself in the end.
|
||||
for (size_t i(start); i < number; ++ i) {
|
||||
PyObject* param(PySequence_GetItem(args, i));
|
||||
if (param == NULL) {
|
||||
return (NULL);
|
||||
}
|
||||
formatter = formatter.arg(objectToStr(param, true));
|
||||
}
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
catch (const InternalError&) {
|
||||
return (NULL);
|
||||
}
|
||||
catch (const std::exception& e) {
|
||||
PyErr_SetString(PyExc_RuntimeError, e.what());
|
||||
return (NULL);
|
||||
}
|
||||
catch (...) {
|
||||
PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
|
||||
return (NULL);
|
||||
}
|
||||
}
|
||||
|
||||
// Now map the functions into the performOutput. I wish C++ could do
|
||||
// functional programming.
|
||||
PyObject*
|
||||
Logger_debug(LoggerWrapper* self, PyObject* args) {
|
||||
return (Logger_performOutput(bind(&Logger::debug, self->logger_, _1, _2),
|
||||
args, true));
|
||||
}
|
||||
|
||||
PyObject*
|
||||
Logger_info(LoggerWrapper* self, PyObject* args) {
|
||||
return (Logger_performOutput(bind(&Logger::info, self->logger_, _2),
|
||||
args, false));
|
||||
}
|
||||
|
||||
PyObject*
|
||||
Logger_warn(LoggerWrapper* self, PyObject* args) {
|
||||
return (Logger_performOutput(bind(&Logger::warn, self->logger_, _2),
|
||||
args, false));
|
||||
}
|
||||
|
||||
PyObject*
|
||||
Logger_error(LoggerWrapper* self, PyObject* args) {
|
||||
return (Logger_performOutput(bind(&Logger::error, self->logger_, _2),
|
||||
args, false));
|
||||
}
|
||||
|
||||
PyObject*
|
||||
Logger_fatal(LoggerWrapper* self, PyObject* args) {
|
||||
return (Logger_performOutput(bind(&Logger::fatal, self->logger_, _2),
|
||||
args, false));
|
||||
}
|
||||
|
||||
PyMethodDef loggerMethods[] = {
|
||||
{ "get_effective_severity",
|
||||
reinterpret_cast<PyCFunction>(Logger_getEffectiveSeverity),
|
||||
METH_NOARGS, "Returns the effective logging severity as string" },
|
||||
{ "get_effective_debug_level",
|
||||
reinterpret_cast<PyCFunction>(Logger_getEffectiveDebugLevel),
|
||||
METH_NOARGS, "Returns the current debug level." },
|
||||
{ "set_severity",
|
||||
reinterpret_cast<PyCFunction>(Logger_setSeverity), METH_VARARGS,
|
||||
"Sets the severity of a logger. The parameters are severity as a "
|
||||
"string and, optionally, a debug level (integer in range 0-99). "
|
||||
"The severity may be NULL, in which case an inherited value is taken."
|
||||
},
|
||||
{ "is_debug_enabled", reinterpret_cast<PyCFunction>(Logger_isDebugEnabled),
|
||||
METH_VARARGS, "Returns if the logger would log debug message now. "
|
||||
"You can provide a desired debug level." },
|
||||
{ "is_info_enabled", reinterpret_cast<PyCFunction>(Logger_isInfoEnabled),
|
||||
METH_NOARGS, "Returns if the logger would log info message now." },
|
||||
{ "is_warn_enabled", reinterpret_cast<PyCFunction>(Logger_isWarnEnabled),
|
||||
METH_NOARGS, "Returns if the logger would log warn message now." },
|
||||
{ "is_error_enabled", reinterpret_cast<PyCFunction>(Logger_isErrorEnabled),
|
||||
METH_NOARGS, "Returns if the logger would log error message now." },
|
||||
{ "is_fatal_enabled", reinterpret_cast<PyCFunction>(Logger_isFatalEnabled),
|
||||
METH_NOARGS, "Returns if the logger would log fatal message now." },
|
||||
{ "debug", reinterpret_cast<PyCFunction>(Logger_debug), METH_VARARGS,
|
||||
"Logs a debug-severity message. It takes the debug level, message ID "
|
||||
"and any number of stringifiable arguments to the message." },
|
||||
{ "info", reinterpret_cast<PyCFunction>(Logger_info), METH_VARARGS,
|
||||
"Logs a info-severity message. It taskes the message ID and any "
|
||||
"number of stringifiable arguments to the message." },
|
||||
{ "warn", reinterpret_cast<PyCFunction>(Logger_warn), METH_VARARGS,
|
||||
"Logs a warn-severity message. It taskes the message ID and any "
|
||||
"number of stringifiable arguments to the message." },
|
||||
{ "error", reinterpret_cast<PyCFunction>(Logger_error), METH_VARARGS,
|
||||
"Logs a error-severity message. It taskes the message ID and any "
|
||||
"number of stringifiable arguments to the message." },
|
||||
{ "fatal", reinterpret_cast<PyCFunction>(Logger_fatal), METH_VARARGS,
|
||||
"Logs a fatal-severity message. It taskes the message ID and any "
|
||||
"number of stringifiable arguments to the message." },
|
||||
{ NULL, NULL, 0, NULL }
|
||||
};
|
||||
|
||||
PyTypeObject logger_type = {
|
||||
PyVarObject_HEAD_INIT(NULL, 0)
|
||||
"isc.log.Logger",
|
||||
sizeof(LoggerWrapper), // tp_basicsize
|
||||
0, // tp_itemsize
|
||||
reinterpret_cast<destructor>(Logger_destroy), // tp_dealloc
|
||||
NULL, // tp_print
|
||||
NULL, // tp_getattr
|
||||
NULL, // tp_setattr
|
||||
NULL, // tp_reserved
|
||||
NULL, // tp_repr
|
||||
NULL, // tp_as_number
|
||||
NULL, // tp_as_sequence
|
||||
NULL, // tp_as_mapping
|
||||
NULL, // tp_hash
|
||||
NULL, // tp_call
|
||||
NULL, // tp_str
|
||||
NULL, // tp_getattro
|
||||
NULL, // tp_setattro
|
||||
NULL, // tp_as_buffer
|
||||
Py_TPFLAGS_DEFAULT, // tp_flags
|
||||
"Wrapper around the C++ isc::log::Logger class."
|
||||
"It is not complete, but everything important should be here.",
|
||||
NULL, // tp_traverse
|
||||
NULL, // tp_clear
|
||||
NULL, // tp_richcompare
|
||||
0, // tp_weaklistoffset
|
||||
NULL, // tp_iter
|
||||
NULL, // tp_iternext
|
||||
loggerMethods, // tp_methods
|
||||
NULL, // tp_members
|
||||
NULL, // tp_getset
|
||||
NULL, // tp_base
|
||||
NULL, // tp_dict
|
||||
NULL, // tp_descr_get
|
||||
NULL, // tp_descr_set
|
||||
0, // tp_dictoffset
|
||||
reinterpret_cast<initproc>(Logger_init), // tp_init
|
||||
NULL, // tp_alloc
|
||||
PyType_GenericNew, // tp_new
|
||||
NULL, // tp_free
|
||||
NULL, // tp_is_gc
|
||||
NULL, // tp_bases
|
||||
NULL, // tp_mro
|
||||
NULL, // tp_cache
|
||||
NULL, // tp_subclasses
|
||||
NULL, // tp_weaklist
|
||||
NULL, // tp_del
|
||||
0 // tp_version_tag
|
||||
};
|
||||
|
||||
PyModuleDef iscLog = {
|
||||
{ PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
|
||||
"log",
|
||||
"Python bindings for the classes in the isc::log namespace.\n\n"
|
||||
"These bindings are close match to the C++ API, but they are not complete "
|
||||
"(some parts are not needed) and some are done in more python-like ways.",
|
||||
-1,
|
||||
methods,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL
|
||||
};
|
||||
|
||||
} // end anonymous namespace
|
||||
|
||||
PyMODINIT_FUNC
|
||||
PyInit_log(void) {
|
||||
PyObject* mod = PyModule_Create(&iscLog);
|
||||
if (mod == NULL) {
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
if (PyType_Ready(&logger_type) < 0) {
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
if (PyModule_AddObject(mod, "Logger",
|
||||
static_cast<PyObject*>(static_cast<void*>(
|
||||
&logger_type))) < 0) {
|
||||
return (NULL);
|
||||
}
|
||||
Py_INCREF(&logger_type);
|
||||
|
||||
return (mod);
|
||||
}
|
@ -1,280 +0,0 @@
|
||||
# Copyright (C) 2010 Internet Systems Consortium.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software for any
|
||||
# purpose with or without fee is hereby granted, provided that the above
|
||||
# copyright notice and this permission notice appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
|
||||
# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
|
||||
# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
|
||||
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
|
||||
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
|
||||
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""This module is to convert python logging module over
|
||||
to log4python.
|
||||
Copyright (C) 2010 Internet Systems Consortium.
|
||||
To use, simply 'import isc.log.log' and log away!
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
import syslog
|
||||
import logging
|
||||
import logging.handlers
|
||||
|
||||
"""LEVELS: logging levels mapping
|
||||
"""
|
||||
LEVELS = {'debug' : logging.DEBUG,
|
||||
'info' : logging.INFO,
|
||||
'warning' : logging.WARNING,
|
||||
'error' : logging.ERROR,
|
||||
'critical' : logging.CRITICAL}
|
||||
|
||||
FORMATTER = logging.Formatter("%(name)s: %(levelname)s: %(message)s")
|
||||
TIME_FORMATTER = logging.Formatter("%(asctime)s.%(msecs)03d %(name)s: %(levelname)s: %(message)s",
|
||||
"%d-%b-%Y %H:%M:%S")
|
||||
|
||||
def log_err(err_type, err_msg):
|
||||
sys.stderr.write(err_type + ": " + "%s.\n" % str(err_msg)[str(err_msg).find(']')+1:])
|
||||
|
||||
|
||||
class NSFileLogHandler(logging.handlers.RotatingFileHandler):
|
||||
"""RotatingFileHandler: replace RotatingFileHandler with a custom handler"""
|
||||
|
||||
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
|
||||
abs_file_name = self._get_abs_file_path(filename)
|
||||
"""Create log directory beforehand, because the underlying logging framework won't
|
||||
create non-exsiting log directory on writing logs.
|
||||
"""
|
||||
if not (os.path.exists(os.path.dirname(abs_file_name))):
|
||||
os.makedirs(os.path.dirname(abs_file_name))
|
||||
super(NSFileLogHandler, self).__init__(abs_file_name, mode, maxBytes,
|
||||
backupCount, encoding, delay)
|
||||
|
||||
def handleError(self, record):
|
||||
"""Overwrite handleError to provide more user-friendly error messages"""
|
||||
if logging.raiseExceptions:
|
||||
ei = sys.exc_info()
|
||||
if (ei[1]):
|
||||
sys.stderr.write("[b10-logging] : " + str(ei[1]))
|
||||
|
||||
def _get_abs_file_path(self, file_name):
|
||||
""" Get absolute file path"""
|
||||
# For a bare filename, log_dir will be set the current directory.
|
||||
if not os.path.dirname(file_name):
|
||||
abs_file_dir = os.getcwd()
|
||||
else:
|
||||
abs_file_dir = os.path.abspath(os.path.dirname(file_name))
|
||||
abs_file_name = os.path.join(abs_file_dir, os.path.basename(file_name))
|
||||
return abs_file_name
|
||||
|
||||
def shouldRollover(self, record):
|
||||
"""Rewrite RotatingFileHandler.shouldRollover.
|
||||
|
||||
If the log file is deleted at runtime, a new file will be created.
|
||||
"""
|
||||
dfn = self.baseFilename
|
||||
if (self.stream) and (not os.path.exists(dfn)): #Does log file exist?
|
||||
self.stream = None
|
||||
""" Log directory may be deleted while bind10 running or updated with a
|
||||
non-existing directory. Need to create log directory beforehand, because
|
||||
the underlying logging framework won't create non-exsiting log directory
|
||||
on writing logs.
|
||||
"""
|
||||
if not (os.path.exists(os.path.dirname(dfn))): #Does log subdirectory exist?
|
||||
os.makedirs(os.path.dirname(dfn))
|
||||
self.stream = self._open()
|
||||
return super(NSFileLogHandler, self).shouldRollover(record)
|
||||
|
||||
def update_config(self, file_name, backup_count, max_bytes):
|
||||
"""Update RotatingFileHandler configuration.
|
||||
Changes will be picked up in the next call to shouldRollover().
|
||||
|
||||
input:
|
||||
log file name
|
||||
max backup count
|
||||
predetermined log file size
|
||||
"""
|
||||
abs_file_name = self._get_abs_file_path(file_name)
|
||||
self.baseFilename = abs_file_name
|
||||
self.maxBytes = max_bytes
|
||||
self.backupCount = backup_count
|
||||
|
||||
|
||||
class NSSysLogHandler(logging.Handler):
|
||||
"""Replace SysLogHandler with a custom handler
|
||||
|
||||
A handler class which sends formatted logging records to a syslog
|
||||
server.
|
||||
"""
|
||||
def __init__(self, ident, logopt=0, facility=syslog.LOG_USER):
|
||||
"""Initialize a handler.
|
||||
|
||||
If facility is not specified, LOG_USER is used.
|
||||
"""
|
||||
super(NSSysLogHandler, self).__init__()
|
||||
self._ident = ident
|
||||
self._logopt = logopt
|
||||
self._facility = facility
|
||||
self._mappings = {
|
||||
logging.DEBUG: syslog.LOG_DEBUG,
|
||||
logging.INFO: syslog.LOG_INFO,
|
||||
logging.WARNING: syslog.LOG_WARNING,
|
||||
logging.ERROR: syslog.LOG_ERR,
|
||||
logging.CRITICAL: syslog.LOG_CRIT,
|
||||
}
|
||||
|
||||
def _encodeLevel(self, level):
|
||||
"""Encoding the priority."""
|
||||
return self._mappings.get(level, syslog.LOG_INFO)
|
||||
|
||||
def emit(self, record):
|
||||
"""Emit a record.
|
||||
|
||||
The record is formatted, and then sent to the syslog server. If
|
||||
exception information is present, it is NOT sent to the server.
|
||||
"""
|
||||
syslog.openlog(self._ident, self._logopt, self._facility)
|
||||
msg = self.format(record)
|
||||
prio = self._encodeLevel(record.levelno)
|
||||
syslog.syslog(prio, msg)
|
||||
syslog.closelog()
|
||||
|
||||
|
||||
class NSLogger(logging.getLoggerClass()):
|
||||
"""Override logging.logger behaviour."""
|
||||
def __init__(self, log_name, log_file, severity='debug', versions=0,
|
||||
max_bytes=0, log_to_console=True):
|
||||
"""Initializes the logger with some specific parameters
|
||||
|
||||
If log_to_console is True, stream handler will be used;
|
||||
else syslog handler will be used.
|
||||
|
||||
To disable file handler, set log_file = ''.
|
||||
"""
|
||||
self._log_name = log_name
|
||||
self._log_file = log_file
|
||||
self._severity = severity
|
||||
self._versions = versions
|
||||
self._max_bytes = max_bytes
|
||||
|
||||
super(NSLogger, self).__init__(self._log_name)
|
||||
|
||||
# Set up a specific logger with our desired output level
|
||||
logLevel = LEVELS.get(self._severity, logging.NOTSET)
|
||||
self.setLevel(logLevel)
|
||||
|
||||
self._file_handler = None
|
||||
self._stream_handler = None
|
||||
self._syslog_handler = None
|
||||
|
||||
self._add_rotate_handler(self._log_file, self._versions, self._max_bytes)
|
||||
if log_to_console:
|
||||
self._add_stream_handler()
|
||||
else:
|
||||
self._add_syslog_handler()
|
||||
|
||||
def _add_rotate_handler(self, log_file, backup_count, max_bytes):
|
||||
"""Add a rotate file handler.
|
||||
|
||||
input:
|
||||
log_file : the location of log file. Handler will not be created
|
||||
if log_file=''
|
||||
max_bytes : limit log growth
|
||||
backup_count : max backup count
|
||||
"""
|
||||
if (log_file != 0 and log_file != ''):
|
||||
try:
|
||||
self._file_handler = NSFileLogHandler(filename = log_file,
|
||||
maxBytes = max_bytes, backupCount = backup_count)
|
||||
except (IOError, OSError) as e:
|
||||
self._file_handler = None
|
||||
log_err("[b10-logging] Add file handler fail", str(e))
|
||||
return
|
||||
self._file_handler.setFormatter(TIME_FORMATTER)
|
||||
self.addHandler(self._file_handler)
|
||||
|
||||
def _add_stream_handler(self):
|
||||
"""Add a stream handler.
|
||||
|
||||
sys.stderr will be used for logging output.
|
||||
"""
|
||||
self._stream_handler = logging.StreamHandler()
|
||||
self._stream_handler.setFormatter(TIME_FORMATTER)
|
||||
self.addHandler(self._stream_handler)
|
||||
|
||||
def _add_syslog_handler(self, nsfacility=syslog.LOG_USER):
|
||||
"""Add a syslog handler.
|
||||
|
||||
If facility is not specified, LOG_USER is used.
|
||||
The default severity level is INFO.
|
||||
"""
|
||||
self._syslog_handler = NSSysLogHandler('BIND10', facility = nsfacility)
|
||||
self._syslog_handler.setFormatter(FORMATTER)
|
||||
#set syslog handler severity level INFO
|
||||
self._syslog_handler.setLevel(logging.INFO)
|
||||
self.addHandler(self._syslog_handler)
|
||||
|
||||
def _update_rotate_handler(self, log_file, backup_count, max_bytes):
|
||||
"""If the rotate file handler has been added to the logger, update its
|
||||
configuration, or add it to the logger.
|
||||
"""
|
||||
if (self._file_handler in self.handlers):
|
||||
if (log_file != 0 and log_file != ''):
|
||||
self._file_handler.update_config(log_file, backup_count, max_bytes)
|
||||
else:
|
||||
"""If log file is empty, the handler will be removed."""
|
||||
self._file_handler.flush()
|
||||
self._file_handler.close()
|
||||
self.removeHandler(self._file_handler)
|
||||
else:
|
||||
self._add_rotate_handler(log_file, backup_count, max_bytes)
|
||||
|
||||
def _get_config(self, config_data):
|
||||
"""Get config data from module configuration"""
|
||||
|
||||
log_file_str = config_data.get('log_file')
|
||||
if (log_file_str):
|
||||
self._log_file = log_file_str
|
||||
|
||||
severity_str = config_data.get('log_severity')
|
||||
if (severity_str):
|
||||
self._severity = severity_str
|
||||
|
||||
versions_str = config_data.get('log_versions')
|
||||
if (versions_str):
|
||||
self._versions = int(versions_str)
|
||||
|
||||
max_bytes_str = config_data.get('log_max_bytes')
|
||||
if (max_bytes_str):
|
||||
self._max_bytes = int(max_bytes_str)
|
||||
|
||||
def update_config(self, config_data):
|
||||
"""Update logger's configuration.
|
||||
|
||||
We can update logger's log level and its rotate file handler's configuration.
|
||||
"""
|
||||
self._get_config(config_data)
|
||||
|
||||
logLevel = LEVELS.get(self._severity, logging.NOTSET)
|
||||
if (logLevel != self.getEffectiveLevel()):
|
||||
self.setLevel(logLevel)
|
||||
self._update_rotate_handler(self._log_file, self._versions, self._max_bytes)
|
||||
|
||||
def log_message(self, level, msg, *args, **kwargs):
|
||||
"""Log 'msg % args' with the integer severity 'level'.
|
||||
|
||||
To pass exception information, use the keyword argument exc_info with
|
||||
a true value, e.g.
|
||||
|
||||
logger.log_message('info', "We have a %s", "mysterious problem").
|
||||
"""
|
||||
logLevel = LEVELS.get(level, logging.NOTSET)
|
||||
try:
|
||||
self.log(logLevel, msg, *args, **kwargs)
|
||||
except (TypeError, KeyError) as e:
|
||||
sys.stderr.write("[b10-logging] Log message fail %s\n" % (str(e)))
|
||||
|
||||
|
@ -1,16 +1,27 @@
|
||||
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
|
||||
PYTESTS = log_test.py
|
||||
EXTRA_DIST = $(PYTESTS)
|
||||
EXTRA_DIST = $(PYTESTS) log_console.py.in console.out check_output.sh
|
||||
|
||||
# If necessary (rare cases), explicitly specify paths to dynamic libraries
|
||||
# required by loadable python modules.
|
||||
LIBRARY_PATH_PLACEHOLDER =
|
||||
if SET_ENV_LIBRARY_PATH
|
||||
LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
|
||||
endif
|
||||
|
||||
# test using command-line arguments, so use check-local target instead of TESTS
|
||||
check-local:
|
||||
$(LIBRARY_PATH_PLACEHOLDER) \
|
||||
env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log \
|
||||
$(abs_srcdir)/check_output.sh $(abs_builddir)/log_console.py $(abs_srcdir)/console.out
|
||||
if ENABLE_PYTHON_COVERAGE
|
||||
touch $(abs_top_srcdir)/.coverage
|
||||
touch $(abs_top_srcdir)/.coverage
|
||||
rm -f .coverage
|
||||
${LN_S} $(abs_top_srcdir)/.coverage .coverage
|
||||
endif
|
||||
for pytest in $(PYTESTS) ; do \
|
||||
echo Running test: $$pytest ; \
|
||||
env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log \
|
||||
$(LIBRARY_PATH_PLACEHOLDER) \
|
||||
env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
|
||||
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
|
||||
done
|
||||
|
3
src/lib/python/isc/log/tests/check_output.sh
Executable file
3
src/lib/python/isc/log/tests/check_output.sh
Executable file
@ -0,0 +1,3 @@
|
||||
#!/bin/sh
|
||||
|
||||
"$1" 2>&1 | cut -d\ -f3- | diff - "$2" 1>&2
|
4
src/lib/python/isc/log/tests/console.out
Normal file
4
src/lib/python/isc/log/tests/console.out
Normal file
@ -0,0 +1,4 @@
|
||||
INFO [test.output] MSG_ID, Message with list [1, 2, 3, 4]
|
||||
WARN [test.output] DIFFERENT, Different message
|
||||
FATAL [test.output] MSG_ID, Message with 2 1
|
||||
DEBUG [test.output] MSG_ID, Message with 3 2
|
15
src/lib/python/isc/log/tests/log_console.py.in
Executable file
15
src/lib/python/isc/log/tests/log_console.py.in
Executable file
@ -0,0 +1,15 @@
|
||||
#!@PYTHON@
|
||||
|
||||
import isc.log
|
||||
# This would come from a dictionary in real life
|
||||
MSG_ID = isc.log.create_message("MSG_ID", "Message with %2 %1")
|
||||
DIFFERENT = isc.log.create_message("DIFFERENT", "Different message")
|
||||
isc.log.init("test")
|
||||
logger = isc.log.Logger("output")
|
||||
|
||||
logger.debug(20, MSG_ID, "test", "no output")
|
||||
logger.info(MSG_ID, [1, 2, 3, 4], "list")
|
||||
logger.warn(DIFFERENT)
|
||||
logger.fatal(MSG_ID, 1, 2)
|
||||
logger.set_severity("DEBUG", 99)
|
||||
logger.debug(1, MSG_ID, 2, 3)
|
@ -1,26 +0,0 @@
|
||||
#! /bin/sh
|
||||
|
||||
# Copyright (C) 2010 Internet Systems Consortium.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software for any
|
||||
# purpose with or without fee is hereby granted, provided that the above
|
||||
# copyright notice and this permission notice appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
|
||||
# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
|
||||
# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
|
||||
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
|
||||
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
|
||||
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
|
||||
export PYTHON_EXEC
|
||||
|
||||
TEST_PATH=@abs_top_srcdir@/src/lib/python/isc/log/tests
|
||||
PYTHONPATH=@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
|
||||
export PYTHONPATH
|
||||
|
||||
cd ${TEST_PATH}
|
||||
exec ${PYTHON_EXEC} -O log_test.py $*
|
@ -1,4 +1,4 @@
|
||||
# Copyright (C) 2010 Internet Systems Consortium.
|
||||
# Copyright (C) 2011 Internet Systems Consortium.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software for any
|
||||
# purpose with or without fee is hereby granted, provided that the above
|
||||
@ -13,225 +13,121 @@
|
||||
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
|
||||
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
#
|
||||
# Tests for the python logging module
|
||||
#
|
||||
|
||||
from isc.log.log import *
|
||||
# This tests it can be loaded, nothing more yet
|
||||
import isc.log
|
||||
import unittest
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
|
||||
class TestRotateFileHandler(unittest.TestCase):
|
||||
|
||||
class LogDict(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.FILE_LOG1 = tempfile.NamedTemporaryFile(mode='w',
|
||||
prefix="b10",
|
||||
delete=True)
|
||||
self.FILE_LOG2 = tempfile.NamedTemporaryFile(mode='w',
|
||||
prefix="b10",
|
||||
delete=True)
|
||||
self.FILE_LOG3 = tempfile.NamedTemporaryFile(mode='w',
|
||||
prefix="b10",
|
||||
delete=True)
|
||||
self.handler = NSFileLogHandler(filename = self.FILE_LOG1.name,
|
||||
maxBytes = 1024,
|
||||
backupCount = 5)
|
||||
|
||||
def test_shouldRollover(self):
|
||||
if(os.path.exists(self.FILE_LOG1.name)):
|
||||
os.remove(self.FILE_LOG1.name)
|
||||
record = logging.LogRecord(None, None, "", 0, "rotate file handler", (), None, None)
|
||||
self.handler.shouldRollover(record)
|
||||
self.assertTrue(os.path.exists(self.FILE_LOG1.name))
|
||||
|
||||
def test_get_absolute_file_path(self):
|
||||
abs_file_name = self.handler._get_abs_file_path(self.FILE_LOG1.name)
|
||||
self.assertEqual(abs_file_name, self.FILE_LOG1.name)
|
||||
# test bare filename
|
||||
file_name1 = "bind10.py"
|
||||
abs_file_name = self.handler._get_abs_file_path(file_name1)
|
||||
self.assertEqual(abs_file_name, os.path.join(os.getcwd(), file_name1))
|
||||
# test relative path
|
||||
file_name2 = "./bind10.py"
|
||||
abs_file_name = self.handler._get_abs_file_path(file_name2)
|
||||
self.assertEqual(abs_file_name, os.path.join(os.getcwd(), os.path.basename(file_name2)))
|
||||
|
||||
def test_update_config(self):
|
||||
self.handler.update_config(self.FILE_LOG2.name, 3, 512)
|
||||
self.assertEqual(self.handler.baseFilename, self.FILE_LOG2.name)
|
||||
self.assertEqual(self.handler.maxBytes, 512)
|
||||
self.assertEqual(self.handler.backupCount, 3)
|
||||
|
||||
# check the existence of new log file.
|
||||
# emit() will call shouldRollover() to update the log file
|
||||
if(os.path.exists(self.FILE_LOG2.name)):
|
||||
os.remove(self.FILE_LOG2.name)
|
||||
record = logging.LogRecord(None, None, "", 0, "rotate file handler", (), None, None)
|
||||
self.handler.emit(record)
|
||||
self.assertTrue(os.path.exists(self.FILE_LOG2.name))
|
||||
|
||||
def test_handle_Error(self):
|
||||
if(os.path.exists(self.FILE_LOG3.name)):
|
||||
os.remove(self.FILE_LOG3.name)
|
||||
# redirect error message to file
|
||||
savederr = sys.stderr
|
||||
errfd = open(self.FILE_LOG3.name, 'w+')
|
||||
sys.stderr = errfd
|
||||
record = logging.LogRecord(None, None, "", 0, "record message", (), None, None)
|
||||
try:
|
||||
raise ValueError("ValueError")
|
||||
except ValueError:
|
||||
self.handler.handleError(record)
|
||||
|
||||
self.assertEqual("[b10-logging] : ValueError", errfd.read())
|
||||
sys.stderr = savederr
|
||||
errfd.close()
|
||||
|
||||
# We work on a test dictionary now.
|
||||
isc.log.set_test_dictionary(True)
|
||||
def tearDown(self):
|
||||
self.handler.flush()
|
||||
self.handler.close()
|
||||
self.FILE_LOG1.close()
|
||||
self.FILE_LOG2.close()
|
||||
self.FILE_LOG3.close()
|
||||
# Return to the global dictionary
|
||||
isc.log.set_test_dictionary(False)
|
||||
|
||||
class TestSysLogHandler(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.handler = NSSysLogHandler("BIND10")
|
||||
def test_load_msgs(self):
|
||||
# Try loading a message and see it's there, but nothing more
|
||||
self.assertEqual(isc.log.create_message("ID", "Text"), "ID")
|
||||
self.assertEqual(isc.log.get_message("ID"), "Text")
|
||||
self.assertEqual(isc.log.get_message("no-ID"), None)
|
||||
|
||||
def test_encodeLevel(self):
|
||||
sysLevel = self.handler._encodeLevel(logging.ERROR)
|
||||
self.assertEqual(sysLevel, syslog.LOG_ERR)
|
||||
|
||||
def test_emit(self):
|
||||
syslog_message = "bind10 syslog testing"
|
||||
record = logging.LogRecord(None, None, "", 0, syslog_message, (), None, None)
|
||||
self.handler.emit(record)
|
||||
|
||||
class TestLogging(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.FILE_STREAM_LOG1 = tempfile.NamedTemporaryFile(mode='w',
|
||||
prefix="b10",
|
||||
delete=True)
|
||||
self.FILE_STREAM_LOG2 = tempfile.NamedTemporaryFile(mode='w',
|
||||
prefix="b10",
|
||||
delete=True)
|
||||
self.FILE_STREAM_LOG3 = tempfile.NamedTemporaryFile(mode='w',
|
||||
prefix="b10",
|
||||
delete=True)
|
||||
self.file_stream_logger = NSLogger('File_Stream_Logger',
|
||||
self.FILE_STREAM_LOG1.name,
|
||||
'debug', 5, 1024, True)
|
||||
self.syslog_logger = NSLogger('SysLogger', '', 'info', 5, 1024, False)
|
||||
self.stderr_bak = sys.stderr
|
||||
sys.stderr = open(os.devnull, 'w')
|
||||
|
||||
def test_logging_init(self):
|
||||
self.assertNotEqual(self.file_stream_logger._file_handler, None)
|
||||
self.assertNotEqual(self.file_stream_logger._stream_handler, None)
|
||||
self.assertEqual(self.file_stream_logger._syslog_handler, None)
|
||||
|
||||
self.assertIn(self.file_stream_logger._file_handler, self.file_stream_logger.handlers)
|
||||
self.assertIn(self.file_stream_logger._stream_handler, self.file_stream_logger.handlers)
|
||||
self.assertNotIn(self.file_stream_logger._syslog_handler, self.file_stream_logger.handlers)
|
||||
logLevel = LEVELS.get('debug', logging.NOTSET)
|
||||
self.assertEqual(self.file_stream_logger.getEffectiveLevel(), logLevel)
|
||||
|
||||
self.assertEqual(self.syslog_logger._file_handler, None)
|
||||
self.assertEqual(self.syslog_logger._stream_handler, None)
|
||||
self.assertNotEqual(self.syslog_logger._syslog_handler, None)
|
||||
self.assertNotIn(self.syslog_logger._file_handler, self.syslog_logger.handlers)
|
||||
self.assertNotIn(self.syslog_logger._stream_handler, self.syslog_logger.handlers)
|
||||
self.assertIn(self.syslog_logger._syslog_handler, self.syslog_logger.handlers)
|
||||
|
||||
logLevel = LEVELS.get('info', logging.NOTSET)
|
||||
self.assertEqual(self.syslog_logger.getEffectiveLevel(), logLevel)
|
||||
|
||||
def test_add_rotate_handler(self):
|
||||
if(self.syslog_logger._file_handler in self.syslog_logger.handlers):
|
||||
self.syslog_logger.removeHandler(self.syslog_logger._file_handler)
|
||||
|
||||
self.syslog_logger._add_rotate_handler('', 5, 1024)
|
||||
self.assertNotIn(self.syslog_logger._file_handler, self.syslog_logger.handlers)
|
||||
|
||||
self.syslog_logger._add_rotate_handler(self.FILE_STREAM_LOG1.name, 5, 1024)
|
||||
self.assertIn(self.syslog_logger._file_handler, self.syslog_logger.handlers)
|
||||
|
||||
# test IOError exception
|
||||
self.syslog_logger.removeHandler(self.syslog_logger._file_handler)
|
||||
log_file = self.FILE_STREAM_LOG1.name + '/logfile'
|
||||
self.syslog_logger._add_rotate_handler(log_file, 5, 1024)
|
||||
self.assertNotIn(self.syslog_logger._file_handler, self.syslog_logger.handlers)
|
||||
|
||||
def test_add_stream_handler(self):
|
||||
if(self.file_stream_logger._stream_handler in self.file_stream_logger.handlers):
|
||||
self.file_stream_logger.removeHandler(self.file_stream_logger._stream_handler)
|
||||
|
||||
self.file_stream_logger._add_stream_handler()
|
||||
self.assertIn(self.file_stream_logger._stream_handler, self.file_stream_logger.handlers)
|
||||
|
||||
def test_add_syslog_handler(self):
|
||||
if(self.syslog_logger._syslog_handler in self.syslog_logger.handlers):
|
||||
self.syslog_logger.removeHandler(self.syslog_logger._syslog_handler)
|
||||
|
||||
self.syslog_logger._add_syslog_handler()
|
||||
self.assertIn(self.syslog_logger._syslog_handler, self.syslog_logger.handlers)
|
||||
|
||||
def test_update_rotate_handler(self):
|
||||
self.file_stream_logger._update_rotate_handler(self.FILE_STREAM_LOG2.name, 4, 1024)
|
||||
self.assertIn(self.file_stream_logger._file_handler, self.file_stream_logger.handlers)
|
||||
|
||||
self.file_stream_logger._update_rotate_handler('', 5, 1024)
|
||||
self.assertNotIn(self.file_stream_logger._file_handler, self.file_stream_logger.handlers)
|
||||
|
||||
self.file_stream_logger._update_rotate_handler(self.FILE_STREAM_LOG1.name, 4, 1024)
|
||||
self.assertIn(self.file_stream_logger._file_handler, self.file_stream_logger.handlers)
|
||||
|
||||
def test_get_config(self):
|
||||
config_data = {'log_file' : self.FILE_STREAM_LOG1.name,
|
||||
'log_severity' : 'critical',
|
||||
'log_versions' : 4,
|
||||
'log_max_bytes' : 1024}
|
||||
self.file_stream_logger._get_config(config_data)
|
||||
self.assertEqual(self.file_stream_logger._log_file, self.FILE_STREAM_LOG1.name)
|
||||
self.assertEqual(self.file_stream_logger._severity, 'critical')
|
||||
self.assertEqual(self.file_stream_logger._versions, 4)
|
||||
self.assertEqual(self.file_stream_logger._max_bytes, 1024)
|
||||
|
||||
|
||||
def test_update_config(self):
|
||||
update_config = {'log_file' : self.FILE_STREAM_LOG1.name,
|
||||
'log_severity' : 'error',
|
||||
'log_versions' : 4,
|
||||
'log_max_bytes' : 1024}
|
||||
self.file_stream_logger.update_config(update_config)
|
||||
logLevel = LEVELS.get('error', logging.NOTSET)
|
||||
self.assertEqual(self.file_stream_logger.getEffectiveLevel(), logLevel)
|
||||
|
||||
def test_log_message(self):
|
||||
update_config = {'log_file' : self.FILE_STREAM_LOG3.name,
|
||||
'log_severity' : 'critical',
|
||||
'log_versions' : 4,
|
||||
'log_max_bytes' : 1024}
|
||||
self.file_stream_logger.update_config(update_config)
|
||||
self.file_stream_logger.log_message('debug', 'debug message')
|
||||
self.file_stream_logger.log_message('warning', 'warning message')
|
||||
self.file_stream_logger.log_message('error', 'error message')
|
||||
#test non-exist log level
|
||||
self.assertRaises(None, self.file_stream_logger.log_message('not-exist', 'not exist message'))
|
||||
#test log_message KeyError exception
|
||||
self.assertRaises(None, self.file_stream_logger.log_message('critical', 'critical message', extra=['message', 'asctime']))
|
||||
self.assertTrue(os.path.exists(self.FILE_STREAM_LOG3.name))
|
||||
|
||||
class Manager(unittest.TestCase):
    """Smoke tests for isc.log.init(): each test only checks that the call
    itself succeeds, since no inspection API is available yet."""

    def tearDown(self):
        # NOTE(review): the original tearDown unconditionally closed
        # FILE_STREAM_LOG1..3 and restored sys.stderr from stderr_bak, but
        # this class has no setUp() that creates those attributes (they
        # belong to the handler tests above), so every test would have
        # errored with AttributeError during teardown.  Clean them up only
        # when they actually exist.
        for name in ('FILE_STREAM_LOG1', 'FILE_STREAM_LOG2',
                     'FILE_STREAM_LOG3'):
            stream = getattr(self, name, None)
            if stream is not None:
                stream.close()
        sys.stderr.flush()
        if hasattr(self, 'stderr_bak'):
            sys.stderr = self.stderr_bak
        isc.log.reset()

    def test_init_debug(self):
        # We try calling it now only, as we don't have any other functions
        # to check the outcome by it. Once we add the logger class, we may
        # check more.
        isc.log.init("root", "DEBUG", 50, None)

    def test_init_defaults(self):
        # We try calling it now only, as we don't have any other functions
        # to check the outcome by it. Once we add the logger class, we may
        # check more.
        isc.log.init("root")

    def test_init_notfound(self):
        # This should not throw, because the C++ one doesn't. Should we really
        # ignore errors like missing file?
        isc.log.init("root", "INFO", 0, "/no/such/file")
||||
class Logger(unittest.TestCase):
    """Tests for the isc.log.Logger wrapper: severity handling, enabled
    checks, and argument validation of the logging calls."""

    def setUp(self):
        isc.log.init("root", "DEBUG", 50)
        self.sevs = ['INFO', 'WARN', 'ERROR', 'FATAL']

    def tearDown(self):
        isc.log.reset()

    def defaults(self, logger):
        """Check that the logger reports the root's defaults (DEBUG/50)."""
        self.assertEqual(logger.get_effective_severity(), "DEBUG")
        self.assertEqual(logger.get_effective_debug_level(), 50)

    def test_default_severity(self):
        """A freshly created child logger inherits the root defaults."""
        self.defaults(isc.log.Logger("child"))

    def test_severity(self):
        """Changing the severity is reflected by the effective severity and
        effective debug level; None restores the inherited defaults."""
        logger = isc.log.Logger("child")
        logger.set_severity('DEBUG', 25)
        self.assertEqual(logger.get_effective_severity(), "DEBUG")
        self.assertEqual(logger.get_effective_debug_level(), 25)
        for severity in self.sevs:
            logger.set_severity(severity)
            self.assertEqual(logger.get_effective_severity(), severity)
            self.assertEqual(logger.get_effective_debug_level(), 0)
        # Passing None returns the logger to its inherited defaults.
        logger.set_severity(None)
        self.defaults(logger)

    def test_enabled(self):
        """is_*_enabled() reports True exactly for severities at or above
        the configured threshold."""
        logger = isc.log.Logger("child")
        self.sevs.insert(0, 'DEBUG')
        checks = {
            'DEBUG': logger.is_debug_enabled,
            'INFO': logger.is_info_enabled,
            'WARN': logger.is_warn_enabled,
            'ERROR': logger.is_error_enabled,
            'FATAL': logger.is_fatal_enabled
        }
        for threshold in self.sevs:
            logger.set_severity(threshold)
            reached = False
            for candidate in self.sevs:
                if candidate == threshold:
                    reached = True
                self.assertEqual(checks[candidate](), reached)
        # Debug enabling additionally depends on the debug level.
        logger.set_severity('DEBUG', 50)
        self.assertTrue(logger.is_debug_enabled())
        self.assertTrue(logger.is_debug_enabled(0))
        self.assertTrue(logger.is_debug_enabled(50))
        self.assertFalse(logger.is_debug_enabled(99))

    def test_invalid_params(self):
        """
        Tests invalid arguments for logging functions. The output is tested
        in check_output.sh.
        """
        logger = isc.log.Logger("child")
        for log_call in (logger.info, logger.warn, logger.error,
                         logger.fatal):
            # Not enough arguments
            self.assertRaises(TypeError, log_call)
            # Bad type
            self.assertRaises(TypeError, log_call, 1)
        # debug() needs a level before the message id.
        self.assertRaises(TypeError, logger.debug, 42)
        self.assertRaises(TypeError, logger.debug)
        # Bad type
        self.assertRaises(TypeError, logger.debug, "42", "hello")
||||
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
Loading…
x
Reference in New Issue
Block a user