Mirror of https://github.com/vdukhovni/postfix, synced 2025-08-29 21:27:57 +00:00

postfix-2.2-20040730

This commit is contained in:
Wietse Venema 2004-07-30 00:00:00 -05:00 committed by Viktor Dukhovni
parent 539ba25d8b
commit 121bb9ee56
8 changed files with 78 additions and 36 deletions

View File

@ -9602,7 +9602,7 @@ Apologies for any names omitted.
20040729
Feature: enable SMTP session caching temporarily while a
site has a high volume of mail in the active queue.
postfix is able to schedule back-to-back deliveries.
Parameter: smtp_connection_cache_on_demand (default:
yes). Files: smtp/smtp_connect.c, *qmgr/qmgr_entry.c,
*qmgr/qmgr_queue.c, *qmgr/qmgr_deliver.c.
@ -9611,6 +9611,12 @@ Apologies for any names omitted.
addresses for (trivial-rewrite) stress testing. Victor
Duchovni, Morgan Stanley. File: smtpstone/smtp-source.c.
20040730
Safety: disallow "opportunistic session caching" when the
queue manager is unable to schedule back-to-back deliveries.
File: *qmgr/qmgr_entry.c.
Open problems:
Low: update events.c so that 1-second timer requests do

View File

@ -20,7 +20,7 @@
* Patches change the patchlevel and the release date. Snapshots change the
* release date only.
*/
#define MAIL_RELEASE_DATE "20040729"
#define MAIL_RELEASE_DATE "20040730"
#define MAIL_VERSION_NUMBER "2.2"
#define VAR_MAIL_VERSION "mail_version"

View File

@ -141,6 +141,7 @@ struct QMGR_ENTRY_LIST {
struct QMGR_QUEUE {
int dflags; /* delivery request options */
time_t last_done; /* last delivery completion */
char *name; /* domain name or address */
char *nexthop; /* domain name */
int todo_refcount; /* queue entries (todo list) */

View File

@ -94,6 +94,7 @@
QMGR_ENTRY *qmgr_entry_select(QMGR_QUEUE *queue)
{
char *myname = "qmgr_entry_select";
QMGR_ENTRY *entry;
if ((entry = queue->todo.prev) != 0) {
@ -101,6 +102,33 @@ QMGR_ENTRY *qmgr_entry_select(QMGR_QUEUE *queue)
queue->todo_refcount--;
QMGR_LIST_APPEND(queue->busy, entry);
queue->busy_refcount++;
/*
* With opportunistic session caching, the delivery agent must not
* only 1) save a session upon completion, but also 2) reuse a cached
* session upon the next delivery request. In order to not miss out
* on 2), we have to make caching sticky or else we get silly
* behavior when the in-memory queue drains. New connections must not
* be made while cached connections aren't being reused.
*
* Safety: don't enable opportunistic session caching until the queue
* manager is able to schedule back-to-back deliveries.
*/
if ((queue->dflags & DEL_REQ_FLAG_SCACHE) == 0) {
if (queue->last_done + 1 >= event_time()) {
if (msg_verbose)
msg_info("%s: allowing on-demand session caching for %s",
myname, queue->name);
queue->dflags |= DEL_REQ_FLAG_SCACHE;
}
} else {
if (queue->last_done + 1 < event_time()) {
if (msg_verbose)
msg_info("%s: disallowing on-demand session caching for %s",
myname, queue->name);
queue->dflags &= ~DEL_REQ_FLAG_SCACHE;
}
}
}
return (entry);
}
@ -146,6 +174,11 @@ void qmgr_entry_done(QMGR_ENTRY *entry, int which)
myfree((char *) entry);
/*
* Maintain back-to-back delivery status.
*/
queue->last_done = event_time();
/*
* When the in-core queue for this site is empty and when this site is
* not dead, discard the in-core queue. When this site is dead, but the
@ -188,7 +221,6 @@ void qmgr_entry_done(QMGR_ENTRY *entry, int which)
QMGR_ENTRY *qmgr_entry_create(QMGR_QUEUE *queue, QMGR_MESSAGE *message)
{
char *myname = "qmgr_entry_create";
QMGR_ENTRY *entry;
/*
@ -209,22 +241,6 @@ QMGR_ENTRY *qmgr_entry_create(QMGR_QUEUE *queue, QMGR_MESSAGE *message)
QMGR_LIST_APPEND(queue->todo, entry);
queue->todo_refcount++;
/*
* With opportunistic session caching, the delivery agent must not only
* 1) save a session upon completion, but also 2) reuse a cached session
* upon the next delivery request. In order to not miss out on 2), we
* have to make caching sticky or else we get silly behavior when the
* in-memory queue drains. New connections must not be made while cached
* connections aren't being reused.
*/
if ((queue->dflags & DEL_REQ_FLAG_SCACHE) == 0
&& queue->window < queue->todo_refcount + queue->busy_refcount) {
if (msg_verbose)
msg_info("%s: passing on-demand session caching threshold for %s",
myname, queue->name);
queue->dflags |= DEL_REQ_FLAG_SCACHE;
}
/*
* Warn if a destination is falling behind while the active queue
* contains a non-trivial amount of single-recipient email. When a

View File

@ -256,6 +256,7 @@ QMGR_QUEUE *qmgr_queue_create(QMGR_TRANSPORT *transport, const char *name,
queue = (QMGR_QUEUE *) mymalloc(sizeof(QMGR_QUEUE));
qmgr_queue_count++;
queue->dflags = 0;
queue->last_done = 0;
queue->name = mystrdup(name);
queue->nexthop = mystrdup(nexthop);
queue->todo_refcount = 0;

View File

@ -177,6 +177,7 @@ struct QMGR_ENTRY_LIST {
struct QMGR_QUEUE {
int dflags; /* delivery request options */
time_t last_done; /* last delivery completion */
char *name; /* domain name or address */
char *nexthop; /* domain name */
int todo_refcount; /* queue entries (todo list) */

View File

@ -102,6 +102,7 @@
QMGR_ENTRY *qmgr_entry_select(QMGR_PEER *peer)
{
char *myname = "qmgr_entry_select";
QMGR_ENTRY *entry;
QMGR_QUEUE *queue;
@ -113,6 +114,33 @@ QMGR_ENTRY *qmgr_entry_select(QMGR_PEER *peer)
queue->busy_refcount++;
QMGR_LIST_UNLINK(peer->entry_list, QMGR_ENTRY *, entry, peer_peers);
peer->job->selected_entries++;
/*
* With opportunistic session caching, the delivery agent must not
* only 1) save a session upon completion, but also 2) reuse a cached
* session upon the next delivery request. In order to not miss out
* on 2), we have to make caching sticky or else we get silly
* behavior when the in-memory queue drains. New connections must not
* be made while cached connections aren't being reused.
*
* Safety: don't enable opportunistic session caching until the queue
* manager is able to schedule back-to-back deliveries.
*/
if ((queue->dflags & DEL_REQ_FLAG_SCACHE) == 0) {
if (queue->last_done + 1 >= event_time()) {
if (msg_verbose)
msg_info("%s: allowing on-demand session caching for %s",
myname, queue->name);
queue->dflags |= DEL_REQ_FLAG_SCACHE;
}
} else {
if (queue->last_done + 1 < event_time()) {
if (msg_verbose)
msg_info("%s: disallowing on-demand session caching for %s",
myname, queue->name);
queue->dflags &= ~DEL_REQ_FLAG_SCACHE;
}
}
}
return (entry);
}
@ -219,6 +247,11 @@ void qmgr_entry_done(QMGR_ENTRY *entry, int which)
if (peer->refcount == 0)
qmgr_peer_free(peer);
/*
* Maintain back-to-back delivery status.
*/
queue->last_done = event_time();
/*
* When the in-core queue for this site is empty and when this site is
* not dead, discard the in-core queue. When this site is dead, but the
@ -245,7 +278,6 @@ void qmgr_entry_done(QMGR_ENTRY *entry, int which)
QMGR_ENTRY *qmgr_entry_create(QMGR_PEER *peer, QMGR_MESSAGE *message)
{
char *myname = "qmgr_entry_create";
QMGR_ENTRY *entry;
QMGR_QUEUE *queue = peer->queue;
@ -270,22 +302,6 @@ QMGR_ENTRY *qmgr_entry_create(QMGR_PEER *peer, QMGR_MESSAGE *message)
QMGR_LIST_APPEND(queue->todo, entry, queue_peers);
queue->todo_refcount++;
/*
* With opportunistic session caching, the delivery agent must not only
* 1) save a session upon completion, but also 2) reuse a cached session
* upon the next delivery request. In order to not miss out on 2), we
* have to make caching sticky or else we get silly behavior when the
* in-memory queue drains. New connections must not be made while cached
* connections aren't being reused.
*/
if ((queue->dflags & DEL_REQ_FLAG_SCACHE) == 0
&& queue->window < queue->todo_refcount + queue->busy_refcount) {
if (msg_verbose)
msg_info("%s: passing on-demand session caching threshold for %s",
myname, queue->name);
queue->dflags |= DEL_REQ_FLAG_SCACHE;
}
/*
* Warn if a destination is falling behind while the active queue
* contains a non-trivial amount of single-recipient email. When a

View File

@ -233,6 +233,7 @@ QMGR_QUEUE *qmgr_queue_create(QMGR_TRANSPORT *transport, const char *name,
queue = (QMGR_QUEUE *) mymalloc(sizeof(QMGR_QUEUE));
qmgr_queue_count++;
queue->dflags = 0;
queue->last_done = 0;
queue->name = mystrdup(name);
queue->nexthop = mystrdup(nexthop);
queue->todo_refcount = 0;