2
0
mirror of https://github.com/vdukhovni/postfix synced 2025-08-30 13:48:06 +00:00

postfix-2.2-20040801

This commit is contained in:
Wietse Venema 2004-08-01 00:00:00 -05:00 committed by Viktor Dukhovni
parent 121bb9ee56
commit 5cd36a369b
9 changed files with 120 additions and 17 deletions

View File

@ -9617,6 +9617,21 @@ Apologies for any names omitted.
queue manager is unable to schedule back-to-back deliveries. queue manager is unable to schedule back-to-back deliveries.
File: *qmgr/qmgr_entry.c. File: *qmgr/qmgr_entry.c.
20040730
Hysteresis: turn on "opportunistic session caching" when
back-to-back deliveries happen, but don't turn it off
until both concurrent and back-to-back delivery ends.
20040801
Workaround: disable session caching for Linux < 2.2 (does
not work) or Glibc < 2 (does not compile). Files:
util/sys_defs.h, util/unix_{recv,send}_fd.c.
Portability: h_errno is not an lvalue in the UnixWare 7.1
multi-threaded environment. Olivier PRENANT.
Open problems: Open problems:
Low: update events.c so that 1-second timer requests do Low: update events.c so that 1-second timer requests do

View File

@ -782,7 +782,9 @@ extern int var_smtp_reuse_limit;
extern char *var_smtp_cache_dest; extern char *var_smtp_cache_dest;
#define VAR_SMTP_CACHE_DEMAND "smtp_connection_cache_on_demand" #define VAR_SMTP_CACHE_DEMAND "smtp_connection_cache_on_demand"
#ifndef DEF_SMTP_CACHE_DEMAND
#define DEF_SMTP_CACHE_DEMAND 1 #define DEF_SMTP_CACHE_DEMAND 1
#endif
extern bool var_smtp_cache_demand; extern bool var_smtp_cache_demand;
#define VAR_SMTP_CONN_TMOUT "smtp_connect_timeout" #define VAR_SMTP_CONN_TMOUT "smtp_connect_timeout"

View File

@ -20,7 +20,7 @@
* Patches change the patchlevel and the release date. Snapshots change the * Patches change the patchlevel and the release date. Snapshots change the
* release date only. * release date only.
*/ */
#define MAIL_RELEASE_DATE "20040730" #define MAIL_RELEASE_DATE "20040801"
#define MAIL_VERSION_NUMBER "2.2" #define MAIL_VERSION_NUMBER "2.2"
#define VAR_MAIL_VERSION "mail_version" #define VAR_MAIL_VERSION "mail_version"

View File

@ -108,21 +108,47 @@ QMGR_ENTRY *qmgr_entry_select(QMGR_QUEUE *queue)
* only 1) save a session upon completion, but also 2) reuse a cached * only 1) save a session upon completion, but also 2) reuse a cached
* session upon the next delivery request. In order to not miss out * session upon the next delivery request. In order to not miss out
* on 2), we have to make caching sticky or else we get silly * on 2), we have to make caching sticky or else we get silly
* behavior when the in-memory queue drains. New connections must not * behavior when the in-memory queue drains. Specifically, new
* be made while cached connections aren't being reused. * connections must not be made as long as cached connections exist.
* *
* Safety: don't enable opportunistic session caching until the queue * Safety: don't enable opportunistic session caching unless the queue
* manager is able to schedule back-to-back deliveries. * manager is able to schedule concurrent or back-to-back deliveries
* (we need to recognize back-to-back deliveries for transports with
* concurrency 1).
*
* XXX It would be nice if we could say "try to reuse a cached
* connection, but don't bother saving it when you're done". As long
* as we can't, we must not turn off session caching too early.
*/
#define CONCURRENT_OR_BACK_TO_BACK_DELIVERY() \
(queue->busy_refcount > 1 || BACK_TO_BACK_DELIVERY())
#define BACK_TO_BACK_DELIVERY() \
(queue->last_done + 1 >= event_time())
/*
* Turn on session caching after we get up to speed. Don't enable
* session caching just because we have concurrent deliveries. This
* prevents unnecessary session caching when we have a burst of mail
* <= the initial concurrency limit.
*/ */
if ((queue->dflags & DEL_REQ_FLAG_SCACHE) == 0) { if ((queue->dflags & DEL_REQ_FLAG_SCACHE) == 0) {
if (queue->last_done + 1 >= event_time()) { if (BACK_TO_BACK_DELIVERY()) {
if (msg_verbose) if (msg_verbose)
msg_info("%s: allowing on-demand session caching for %s", msg_info("%s: allowing on-demand session caching for %s",
myname, queue->name); myname, queue->name);
queue->dflags |= DEL_REQ_FLAG_SCACHE; queue->dflags |= DEL_REQ_FLAG_SCACHE;
} }
} else { }
if (queue->last_done + 1 < event_time()) {
/*
* Turn off session caching when concurrency drops and we're running
* out of steam. This is what prevents from turning off session
* caching too early, and from making new connections while old ones
* are still cached.
*/
else {
if (!CONCURRENT_OR_BACK_TO_BACK_DELIVERY()) {
if (msg_verbose) if (msg_verbose)
msg_info("%s: disallowing on-demand session caching for %s", msg_info("%s: disallowing on-demand session caching for %s",
myname, queue->name); myname, queue->name);

View File

@ -120,21 +120,47 @@ QMGR_ENTRY *qmgr_entry_select(QMGR_PEER *peer)
* only 1) save a session upon completion, but also 2) reuse a cached * only 1) save a session upon completion, but also 2) reuse a cached
* session upon the next delivery request. In order to not miss out * session upon the next delivery request. In order to not miss out
* on 2), we have to make caching sticky or else we get silly * on 2), we have to make caching sticky or else we get silly
* behavior when the in-memory queue drains. New connections must not * behavior when the in-memory queue drains. Specifically, new
* be made while cached connections aren't being reused. * connections must not be made as long as cached connections exist.
* *
* Safety: don't enable opportunistic session caching until the queue * Safety: don't enable opportunistic session caching unless the queue
* manager is able to schedule back-to-back deliveries. * manager is able to schedule concurrent or back-to-back deliveries
* (we need to recognize back-to-back deliveries for transports with
* concurrency 1).
*
* XXX It would be nice if we could say "try to reuse a cached
* connection, but don't bother saving it when you're done". As long
* as we can't, we must not turn off session caching too early.
*/
#define CONCURRENT_OR_BACK_TO_BACK_DELIVERY() \
(queue->busy_refcount > 1 || BACK_TO_BACK_DELIVERY())
#define BACK_TO_BACK_DELIVERY() \
(queue->last_done + 1 >= event_time())
/*
* Turn on session caching after we get up to speed. Don't enable
* session caching just because we have concurrent deliveries. This
* prevents unnecessary session caching when we have a burst of mail
* <= the initial concurrency limit.
*/ */
if ((queue->dflags & DEL_REQ_FLAG_SCACHE) == 0) { if ((queue->dflags & DEL_REQ_FLAG_SCACHE) == 0) {
if (queue->last_done + 1 >= event_time()) { if (BACK_TO_BACK_DELIVERY()) {
if (msg_verbose) if (msg_verbose)
msg_info("%s: allowing on-demand session caching for %s", msg_info("%s: allowing on-demand session caching for %s",
myname, queue->name); myname, queue->name);
queue->dflags |= DEL_REQ_FLAG_SCACHE; queue->dflags |= DEL_REQ_FLAG_SCACHE;
} }
} else { }
if (queue->last_done + 1 < event_time()) {
/*
* Turn off session caching when concurrency drops and we're running
* out of steam. This is what prevents from turning off session
* caching too early, and from making new connections while old ones
* are still cached.
*/
else {
if (!CONCURRENT_OR_BACK_TO_BACK_DELIVERY()) {
if (msg_verbose) if (msg_verbose)
msg_info("%s: disallowing on-demand session caching for %s", msg_info("%s: disallowing on-demand session caching for %s",
myname, queue->name); myname, queue->name);

View File

@ -2183,7 +2183,7 @@ static int check_server_access(SMTPD_STATE *state, const char *table,
FULL, &found, reply_name, reply_class, FULL, &found, reply_name, reply_class,
def_acl)) != 0 || found) def_acl)) != 0 || found)
CHECK_SERVER_RETURN(status); CHECK_SERVER_RETURN(status);
h_errno = 0; /* XXX */ SET_H_ERRNO(0);
if ((hp = gethostbyname((char *) server->data)) == 0) { if ((hp = gethostbyname((char *) server->data)) == 0) {
msg_warn("Unable to look up %s host %s for %s %s: %s", msg_warn("Unable to look up %s host %s for %s %s: %s",
dns_strtype(type), (char *) server->data, dns_strtype(type), (char *) server->data,

View File

@ -341,6 +341,7 @@ extern int opterr;
#define STATVFS_IN_SYS_STATVFS_H #define STATVFS_IN_SYS_STATVFS_H
#define UNIX_DOMAIN_CONNECT_BLOCKS_FOR_ACCEPT #define UNIX_DOMAIN_CONNECT_BLOCKS_FOR_ACCEPT
#define STRCASECMP_IN_STRINGS_H #define STRCASECMP_IN_STRINGS_H
#define SET_H_ERRNO(err) (set_h_errno(err))
#endif #endif
#ifdef UW21 /* UnixWare 2.1.x */ #ifdef UW21 /* UnixWare 2.1.x */
@ -560,7 +561,14 @@ extern int initgroups(const char *, int);
#define SOCKADDR_SIZE socklen_t #define SOCKADDR_SIZE socklen_t
#define SOCKOPT_SIZE socklen_t #define SOCKOPT_SIZE socklen_t
#endif #endif
#define CANT_WRITE_BEFORE_SENDING_FD #include <linux/version.h>
#if !defined(KERNEL_VERSION) || (LINUX_VERSION_CODE < KERNEL_VERSION(2,2,0)) \
|| (__GLIBC__ < 2)
# define CANT_USE_SEND_RECV_MSG
# define DEF_SMTP_CACHE_DEMAND 0
#else
# define CANT_WRITE_BEFORE_SENDING_FD
#endif
#endif #endif
#ifdef LINUX1 #ifdef LINUX1
@ -589,6 +597,8 @@ extern int initgroups(const char *, int);
#define NATIVE_NEWALIAS_PATH "/usr/bin/newaliases" #define NATIVE_NEWALIAS_PATH "/usr/bin/newaliases"
#define NATIVE_COMMAND_DIR "/usr/sbin" #define NATIVE_COMMAND_DIR "/usr/sbin"
#define NATIVE_DAEMON_DIR "/usr/libexec/postfix" #define NATIVE_DAEMON_DIR "/usr/libexec/postfix"
#define CANT_USE_SEND_RECV_MSG
#define DEF_SMTP_CACHE_DEMAND 0
#endif #endif
/* /*

View File

@ -44,6 +44,16 @@
int unix_recv_fd(int fd) int unix_recv_fd(int fd)
{ {
char *myname = "unix_recv_fd"; char *myname = "unix_recv_fd";
/*
* This code does not work with version <2.2 Linux kernels, and it does
* not compile with version <2 Linux libraries.
*/
#ifdef CANT_USE_SEND_RECV_MSG
msg_warn("%s: your system has no support for file descriptor passing",
myname);
return (-1);
#else
struct msghdr msg; struct msghdr msg;
int newfd; int newfd;
struct iovec iov[1]; struct iovec iov[1];
@ -103,6 +113,7 @@ int unix_recv_fd(int fd)
else else
return (-1); return (-1);
#endif #endif
#endif
} }
#ifdef TEST #ifdef TEST

View File

@ -46,6 +46,18 @@
int unix_send_fd(int fd, int sendfd) int unix_send_fd(int fd, int sendfd)
{ {
/*
* This code does not work with version <2.2 Linux kernels, and it does
* not compile with version <2 Linux libraries.
*/
#ifdef CANT_USE_SEND_RECV_MSG
char *myname = "unix_send_fd";
msg_warn("%s: your system has no support for file descriptor passing",
myname);
return (-1);
#else
struct msghdr msg; struct msghdr msg;
struct iovec iov[1]; struct iovec iov[1];
@ -87,6 +99,7 @@ int unix_send_fd(int fd, int sendfd)
msg.msg_iovlen = 1; msg.msg_iovlen = 1;
return (sendmsg(fd, &msg, 0)); return (sendmsg(fd, &msg, 0));
#endif
} }
#ifdef TEST #ifdef TEST