# Copyright (c) 2010, 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import errno
import os
import signal
import sys
import time

import ovs.dirs
import ovs.fatal_signal
import ovs.process
import ovs.socket_util
import ovs.timeval
import ovs.util
import ovs.vlog

if sys.platform != 'win32':
    import fcntl
    import resource
else:
    import ovs.winutils as winutils
    import ovs.fcntl_win as fcntl
    import pywintypes
    import subprocess
    import win32process

vlog = ovs.vlog.Vlog("daemon")

# --detach: Should we run in the background?
_detach = False

# Running as the child process - Windows only.
_detached = False

# --pidfile: Name of pidfile (null if none).
_pidfile = None

# Our pidfile's inode and device, if we have created one.
_pidfile_dev = None
_pidfile_ino = None

# --overwrite-pidfile: Create pidfile even if one already exists and is locked?
_overwrite_pidfile = False

# --no-chdir: Should we chdir to "/"?
_chdir = True

# --monitor: Should a supervisory process monitor the daemon and restart it if
# it dies due to an error signal?
_monitor = False

# File descriptor used by daemonize_start() and daemonize_complete().
_daemonize_fd = None

RESTART_EXIT_CODE = 5
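
# Illustrative example (not part of the module API): under --monitor, a
# daemon may ask the monitor process to restart it by exiting with this
# code, e.g.:
#
#     sys.exit(ovs.daemon.RESTART_EXIT_CODE)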


def make_pidfile_name(name):
    """Returns the file name that would be used for a pidfile if 'name' were
    provided to set_pidfile()."""
    if name is None or name == "":
        return "%s/%s.pid" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME)
    else:
        return ovs.util.abs_file_name(ovs.dirs.RUNDIR, name)


def set_pidfile(name):
    """Sets up a following call to daemonize() to create a pidfile named
    'name'.  If 'name' begins with '/', then it is treated as an absolute path.
    Otherwise, it is taken relative to ovs.dirs.RUNDIR, which is
    $(prefix)/var/run by default.

    If 'name' is null, then ovs.util.PROGRAM_NAME followed by ".pid" is
    used."""
    global _pidfile
    _pidfile = make_pidfile_name(name)


def set_no_chdir():
    """Sets that we do not chdir to "/"."""
    global _chdir
    _chdir = False


def ignore_existing_pidfile():
    """Normally, daemonize() or daemonize_start() will terminate the program
    with a message if a locked pidfile already exists.  If this function is
    called, an existing pidfile will be replaced, with a warning."""
    global _overwrite_pidfile
    _overwrite_pidfile = True


def set_detach():
    """Sets up a following call to daemonize() to detach from the foreground
    session, running this process in the background."""
    global _detach
    _detach = True


def set_detached(wp):
    """Marks this process as a detached child process and records 'wp', the
    write end of the startup notification pipe inherited from its parent.
    Used on Windows only."""
    global _detached
    global _daemonize_fd
    _detached = True
    _daemonize_fd = int(wp)


def get_detach():
    """Will daemonize() really detach?"""
    return _detach


def set_monitor():
    """Sets up a following call to daemonize() to fork a supervisory process to
    monitor the daemon and restart it if it dies due to an error signal."""
    global _monitor
    _monitor = True


def _fatal(msg):
    vlog.err(msg)
    sys.stderr.write("%s\n" % msg)
    sys.exit(1)


def _make_pidfile():
    """If a pidfile has been configured, creates it and stores the running
    process's pid in it.  Ensures that the pidfile will be deleted when the
    process exits."""
    pid = os.getpid()

    # Create a temporary pidfile.
    if sys.platform != 'win32':
        tmpfile = "%s.tmp%d" % (_pidfile, pid)
        ovs.fatal_signal.add_file_to_unlink(tmpfile)
    else:
        tmpfile = "%s" % _pidfile

    try:
        # This is global to keep Python from garbage-collecting and
        # therefore closing our file after this function exits.  That would
        # unlock the lock for us, and we don't want that.
        global file_handle

        file_handle = open(tmpfile, "w")
    except IOError as e:
        _fatal("%s: create failed (%s)" % (tmpfile, e.strerror))

    try:
        s = os.fstat(file_handle.fileno())
    except IOError as e:
        _fatal("%s: fstat failed (%s)" % (tmpfile, e.strerror))

    try:
        file_handle.write("%s\n" % pid)
        file_handle.flush()
    except OSError as e:
        _fatal("%s: write failed: %s" % (tmpfile, e.strerror))

    try:
        if sys.platform != 'win32':
            fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
        else:
            fcntl.lockf(file_handle, fcntl.LOCK_SH | fcntl.LOCK_NB)
    except IOError as e:
        _fatal("%s: fcntl failed: %s" % (tmpfile, e.strerror))

    if sys.platform == 'win32':
        # Ensure that the pidfile gets closed and deleted on exit.
        ovs.fatal_signal.add_file_to_close_and_unlink(_pidfile, file_handle)
    else:
        # Rename or link it to the correct name.
        if _overwrite_pidfile:
            try:
                os.rename(tmpfile, _pidfile)
            except OSError as e:
                _fatal("failed to rename \"%s\" to \"%s\" (%s)"
                       % (tmpfile, _pidfile, e.strerror))
        else:
            while True:
                try:
                    os.link(tmpfile, _pidfile)
                    error = 0
                except OSError as e:
                    error = e.errno
                if error == errno.EEXIST:
                    _check_already_running()
                elif error != errno.EINTR:
                    break
            if error:
                _fatal("failed to link \"%s\" as \"%s\" (%s)"
                       % (tmpfile, _pidfile, os.strerror(error)))

        # Ensure that the pidfile will get deleted on exit.
        ovs.fatal_signal.add_file_to_unlink(_pidfile)

        # Delete the temporary pidfile if it still exists.
        if not _overwrite_pidfile:
            error = ovs.fatal_signal.unlink_file_now(tmpfile)
            if error:
                _fatal("%s: unlink failed (%s)" % (
                    tmpfile, os.strerror(error)))

    global _pidfile_dev
    global _pidfile_ino
    _pidfile_dev = s.st_dev
    _pidfile_ino = s.st_ino


def daemonize():
    """If configured with set_pidfile() or set_detach(), creates the pid file
    and detaches from the foreground session."""
    daemonize_start()
    daemonize_complete()
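
# Illustrative usage sketch (not part of the module API): a daemon program
# typically configures this module and then calls daemonize(), for example:
#
#     ovs.daemon.set_pidfile(None)   # default pidfile under ovs.dirs.RUNDIR
#     ovs.daemon.set_detach()
#     ovs.daemon.set_monitor()
#     ovs.daemon.daemonize()         # parent exits; the daemon returns here
#     main_loop()                    # hypothetical application main loop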


def _waitpid(pid, options):
    while True:
        try:
            return os.waitpid(pid, options)
        except OSError as e:
            if e.errno == errno.EINTR:
                pass
            return -e.errno, 0


def _fork_and_wait_for_startup():
    if sys.platform == 'win32':
        return _fork_and_wait_for_startup_windows()

    try:
        rfd, wfd = os.pipe()
    except OSError as e:
        sys.stderr.write("pipe failed: %s\n" % os.strerror(e.errno))
        sys.exit(1)

    try:
        pid = os.fork()
    except OSError as e:
        sys.stderr.write("could not fork: %s\n" % os.strerror(e.errno))
        sys.exit(1)

    if pid > 0:
        # Running in parent process.
        os.close(wfd)
        ovs.fatal_signal.fork()
        while True:
            try:
                s = os.read(rfd, 1)
                error = 0
            except OSError as e:
                s = ""
                error = e.errno
            if error != errno.EINTR:
                break
        if len(s) != 1:
            retval, status = _waitpid(pid, 0)
            if retval == pid:
                if os.WIFEXITED(status) and os.WEXITSTATUS(status):
                    # Child exited with an error.  Convey the same error to
                    # our parent process as a courtesy.
                    sys.exit(os.WEXITSTATUS(status))
                else:
                    sys.stderr.write("fork child failed to signal "
                                     "startup (%s)\n"
                                     % ovs.process.status_msg(status))
            else:
                assert retval < 0
                sys.stderr.write("waitpid failed (%s)\n"
                                 % os.strerror(-retval))
            sys.exit(1)

        os.close(rfd)
    else:
        # Running in child process.
        os.close(rfd)
        ovs.timeval.postfork()

        global _daemonize_fd
        _daemonize_fd = wfd
    return pid


def _fork_and_wait_for_startup_windows():
    global _detached
    if _detached:
        # Running in child process
        ovs.timeval.postfork()
        return 0

    # Close the log file: on Windows it cannot be moved while the parent
    # still holds a reference to it.
    vlog.close_log_file()

    try:
        (rfd, wfd) = winutils.windows_create_pipe()
    except pywintypes.error as e:
        sys.stderr.write("pipe failed to create: %s\n" % e.strerror)
        sys.exit(1)

    try:
        creationFlags = win32process.DETACHED_PROCESS
        args = ("%s %s --pipe-handle=%ld" % (
            sys.executable, " ".join(sys.argv), int(wfd)))
        proc = subprocess.Popen(
            args=args,
            close_fds=False,
            shell=False,
            creationflags=creationFlags,
            stdout=sys.stdout,
            stderr=sys.stderr)
        pid = proc.pid
    except OSError as e:
        sys.stderr.write("CreateProcess failed (%s)\n" % os.strerror(e.errno))
        sys.exit(1)

    # Running in parent process.
    winutils.win32file.CloseHandle(wfd)
    ovs.fatal_signal.fork()

    error, s = winutils.windows_read_pipe(rfd, 1)
    if error:
        s = ""

    if len(s) != 1:
        retval = proc.wait()
        if retval == 0:
            sys.stderr.write("fork child failed to signal startup\n")
        else:
            # Child exited with an error.  Convey the same error to
            # our parent process as a courtesy.
            sys.exit(retval)
    winutils.win32file.CloseHandle(rfd)

    return pid


def _fork_notify_startup(fd):
    if sys.platform == 'win32':
        _fork_notify_startup_windows(fd)
        return

    if fd is not None:
        error, bytes_written = ovs.socket_util.write_fully(fd, "0")
        if error:
            sys.stderr.write("could not write to pipe\n")
            sys.exit(1)
        os.close(fd)


def _fork_notify_startup_windows(fd):
    if fd is not None:
        try:
            # Python 2 requires a string as second parameter, while
            # Python 3 requires a bytes-like object. b"0" fits for both
            # python versions.
            winutils.win32file.WriteFile(fd, b"0", None)
        except winutils.pywintypes.error as e:
            sys.stderr.write("could not write to pipe: %s\n" %
                             os.strerror(e.winerror))
            sys.exit(1)


def _should_restart(status):
    global RESTART_EXIT_CODE

    if sys.platform == 'win32':
        # The exit status is encoded in the high byte of the
        # 16-bit number 'status'.
        exit_status = status >> 8

        if exit_status == RESTART_EXIT_CODE:
            return True
        return False

    if os.WIFEXITED(status) and os.WEXITSTATUS(status) == RESTART_EXIT_CODE:
        return True

    if os.WIFSIGNALED(status):
        for signame in ("SIGABRT", "SIGALRM", "SIGBUS", "SIGFPE", "SIGILL",
                        "SIGPIPE", "SIGSEGV", "SIGXCPU", "SIGXFSZ"):
            if os.WTERMSIG(status) == getattr(signal, signame, None):
                return True
    return False


def _monitor_daemon(daemon_pid):
    # XXX should log daemon's stderr output at startup time
    # XXX should use setproctitle module if available
    last_restart = None
    while True:
        retval, status = _waitpid(daemon_pid, 0)
        if retval < 0:
            sys.stderr.write("waitpid failed\n")
            sys.exit(1)
        elif retval == daemon_pid:
            status_msg = ("pid %d died, %s"
                          % (daemon_pid, ovs.process.status_msg(status)))

            if _should_restart(status):
                if sys.platform != 'win32' and os.WCOREDUMP(status):
                    # Disable further core dumps to save disk space.
                    try:
                        resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
                    except resource.error:
                        vlog.warn("failed to disable core dumps")

                # Throttle restarts to no more than once every 10 seconds.
                if (last_restart is not None and
                        ovs.timeval.msec() < last_restart + 10000):
                    vlog.warn("%s, waiting until 10 seconds since last "
                              "restart" % status_msg)
                    while True:
                        now = ovs.timeval.msec()
                        wakeup = last_restart + 10000
                        if now > wakeup:
                            break
                        sys.stdout.write("sleep %f\n" % (
                            (wakeup - now) / 1000.0))
                        time.sleep((wakeup - now) / 1000.0)
                last_restart = ovs.timeval.msec()

                vlog.err("%s, restarting" % status_msg)
                daemon_pid = _fork_and_wait_for_startup()
                if not daemon_pid:
                    break
            else:
                vlog.info("%s, exiting" % status_msg)
                sys.exit(0)

    # Running in new daemon process.


def _close_standard_fds():
    """Close stdin, stdout, stderr.  If we're started from e.g. an SSH session,
    then this keeps us from holding that session open artificially."""
    null_fd = ovs.socket_util.get_null_fd()
    if null_fd >= 0:
        os.dup2(null_fd, 0)
        os.dup2(null_fd, 1)
        os.dup2(null_fd, 2)


def daemonize_start():
    """If daemonization is configured, then starts daemonization, by forking
    and returning in the child process.  The parent process hangs around until
    the child lets it know either that it completed startup successfully (by
    calling daemonize_complete()) or that it failed to start up (by exiting
    with a nonzero exit code)."""

    if _detach:
        if _fork_and_wait_for_startup() > 0:
            # Running in parent process.
            sys.exit(0)

        if sys.platform != 'win32':
            # Running in daemon or monitor process.
            os.setsid()

    if _monitor:
        saved_daemonize_fd = _daemonize_fd
        daemon_pid = _fork_and_wait_for_startup()
        if daemon_pid > 0:
            # Running in monitor process.
            _fork_notify_startup(saved_daemonize_fd)
            if sys.platform != 'win32':
                _close_standard_fds()
            _monitor_daemon(daemon_pid)
        # Running in daemon process

    if _pidfile:
        _make_pidfile()


def daemonize_complete():
    """If daemonization is configured, then this function notifies the parent
    process that the child process has completed startup successfully."""
    _fork_notify_startup(_daemonize_fd)

    if _detach:
        if _chdir:
            os.chdir("/")
        _close_standard_fds()


def usage():
    sys.stdout.write("""
Daemon options:
   --detach                run in background as daemon
   --no-chdir              do not chdir to '/'
   --pidfile[=FILE]        create pidfile (default: %s/%s.pid)
   --overwrite-pidfile     with --pidfile, start even if already running
""" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME))


def __read_pidfile(pidfile, delete_if_stale):
    if _pidfile_dev is not None:
        try:
            s = os.stat(pidfile)
            if s.st_ino == _pidfile_ino and s.st_dev == _pidfile_dev:
                # It's our own pidfile.  We can't afford to open it,
                # because closing *any* fd for a file that a process
                # has locked also releases all the locks on that file.
                #
                # Fortunately, we know the associated pid anyhow.
                return os.getpid()
        except OSError:
            pass

    try:
        file_handle = open(pidfile, "r+")
    except IOError as e:
        if e.errno == errno.ENOENT and delete_if_stale:
            return 0
        vlog.warn("%s: open: %s" % (pidfile, e.strerror))
        return -e.errno

    # Python fcntl doesn't directly support F_GETLK so we have to just try
    # to lock it.
    try:
        fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)

        # pidfile exists but wasn't locked by anyone.  Now we have the lock.
        if not delete_if_stale:
            file_handle.close()
            vlog.warn("%s: pid file is stale" % pidfile)
            return -errno.ESRCH

        # Is the file we have locked still named 'pidfile'?
        try:
            raced = False
            s = os.stat(pidfile)
            s2 = os.fstat(file_handle.fileno())
            if s.st_ino != s2.st_ino or s.st_dev != s2.st_dev:
                raced = True
        except IOError:
            raced = True
        if raced:
            vlog.warn("%s: lost race to delete pidfile" % pidfile)
            return -errno.EALREADY

        # We won the right to delete the stale pidfile.
        try:
            os.unlink(pidfile)
        except IOError as e:
            vlog.warn("%s: failed to delete stale pidfile (%s)"
                      % (pidfile, e.strerror))
            return -e.errno
        else:
            vlog.dbg("%s: deleted stale pidfile" % pidfile)
            file_handle.close()
            return 0
    except IOError as e:
        if e.errno not in [errno.EACCES, errno.EAGAIN]:
            vlog.warn("%s: fcntl: %s" % (pidfile, e.strerror))
            return -e.errno

    # Someone else has the pidfile locked.
    try:
        try:
            error = int(file_handle.readline())
        except IOError as e:
            vlog.warn("%s: read: %s" % (pidfile, e.strerror))
            error = -e.errno
        except ValueError:
            vlog.warn("%s does not contain a pid" % pidfile)
            error = -errno.EINVAL

        return error
    finally:
        try:
            file_handle.close()
        except IOError:
            pass


def read_pidfile(pidfile):
    """Opens and reads a PID from 'pidfile'.  Returns the positive PID if
    successful, otherwise a negative errno value."""
    return __read_pidfile(pidfile, False)
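
# Illustrative sketch ("ovs-foo" is a made-up name): checking from another
# process whether a daemon is running, based on its pidfile:
#
#     pid = read_pidfile(make_pidfile_name("ovs-foo"))
#     if pid > 0:
#         print("running as pid %d" % pid)
#     else:
#         print("not running (%s)" % os.strerror(-pid))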


def _check_already_running():
    pid = __read_pidfile(_pidfile, True)
    if pid > 0:
        _fatal("%s: already running as pid %d, aborting" % (_pidfile, pid))
    elif pid < 0:
        _fatal("%s: pidfile check failed (%s), aborting"
               % (_pidfile, os.strerror(-pid)))


def add_args(parser):
    """Populates 'parser', an ArgumentParser allocated using the argparse
    module, with the command line arguments required by the daemon module."""

    pidfile = make_pidfile_name(None)

    group = parser.add_argument_group(title="Daemon Options")
    group.add_argument("--detach", action="store_true",
                       help="Run in background as a daemon.")
    group.add_argument("--no-chdir", action="store_true",
                       help="Do not chdir to '/'.")
    group.add_argument("--monitor", action="store_true",
                       help="Monitor %s process." % ovs.util.PROGRAM_NAME)
    group.add_argument("--pidfile", nargs="?", const=pidfile,
                       help="Create pidfile (default %s)." % pidfile)
    group.add_argument("--overwrite-pidfile", action="store_true",
                       help="With --pidfile, start even if already running.")
    if sys.platform == 'win32':
        group.add_argument("--pipe-handle",
                           help=("Write handle of the pipe used to signal "
                                 "the parent process when startup completes "
                                 "(used internally on Windows)."))


def handle_args(args):
    """Handles daemon module settings in 'args'.  'args' is an object
    containing values parsed by the parse_args() method of ArgumentParser.  The
    parent ArgumentParser should have been prepared by add_args() before
    calling parse_args()."""

    if sys.platform == 'win32':
        if args.pipe_handle:
            set_detached(args.pipe_handle)

    if args.detach:
        set_detach()

    if args.no_chdir:
        set_no_chdir()

    if args.pidfile:
        set_pidfile(args.pidfile)

    if args.overwrite_pidfile:
        ignore_existing_pidfile()

    if args.monitor:
        set_monitor()
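

# Illustrative sketch (not part of the module; main_loop() stands in for a
# hypothetical application function): wiring this module into an
# argparse-based program:
#
#     import argparse
#     import ovs.daemon
#
#     parser = argparse.ArgumentParser()
#     ovs.daemon.add_args(parser)
#     args = parser.parse_args()
#     ovs.daemon.handle_args(args)
#
#     ovs.daemon.daemonize_start()
#     # ... open sockets, read configuration, and so on ...
#     ovs.daemon.daemonize_complete()
#     main_loop()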