2
0
mirror of https://gitlab.isc.org/isc-projects/bind9 synced 2025-08-31 22:45:39 +00:00

Fix memory accounting bug in TLSDNS

After a partial write the tls.senddata buffer would be rearranged to
contain only the data that wasn't sent and the len part would be made
shorter, which would lead to an attempt to free only part of a socket's
tls.senddata buffer.
This commit is contained in:
Ondřej Surý
2021-03-18 18:14:38 +01:00
parent 15f676f111
commit 1d64d4cde8
2 changed files with 19 additions and 18 deletions

View File

@@ -806,7 +806,7 @@ struct isc_nmsocket {
TLS_STATE_ERROR, TLS_STATE_ERROR,
TLS_STATE_CLOSING TLS_STATE_CLOSING
} state; } state;
uv_buf_t senddata; isc_region_t senddata;
bool cycle; bool cycle;
isc_result_t pending_error; isc_result_t pending_error;
/* List of active send requests. */ /* List of active send requests. */

View File

@@ -1063,12 +1063,12 @@ static void
free_senddata(isc_nmsocket_t *sock) { free_senddata(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->tls.senddata.base != NULL); REQUIRE(sock->tls.senddata.base != NULL);
REQUIRE(sock->tls.senddata.len > 0); REQUIRE(sock->tls.senddata.length > 0);
isc_mem_put(sock->mgr->mctx, sock->tls.senddata.base, isc_mem_put(sock->mgr->mctx, sock->tls.senddata.base,
sock->tls.senddata.len); sock->tls.senddata.length);
sock->tls.senddata.base = NULL; sock->tls.senddata.base = NULL;
sock->tls.senddata.len = 0; sock->tls.senddata.length = 0;
} }
static void static void
@@ -1105,7 +1105,7 @@ tls_cycle_output(isc_nmsocket_t *sock) {
int err; int err;
if (sock->tls.senddata.base != NULL || if (sock->tls.senddata.base != NULL ||
sock->tls.senddata.len > 0) { sock->tls.senddata.length > 0) {
break; break;
} }
@@ -1114,42 +1114,43 @@ tls_cycle_output(isc_nmsocket_t *sock) {
} }
sock->tls.senddata.base = isc_mem_get(sock->mgr->mctx, pending); sock->tls.senddata.base = isc_mem_get(sock->mgr->mctx, pending);
sock->tls.senddata.len = pending; sock->tls.senddata.length = pending;
rv = BIO_read_ex(sock->tls.app_rbio, sock->tls.senddata.base, req = isc__nm_uvreq_get(sock->mgr, sock);
pending, &bytes); req->uvbuf.base = (char *)sock->tls.senddata.base;
req->uvbuf.len = sock->tls.senddata.length;
rv = BIO_read_ex(sock->tls.app_rbio, req->uvbuf.base,
req->uvbuf.len, &bytes);
RUNTIME_CHECK(rv == 1); RUNTIME_CHECK(rv == 1);
INSIST((size_t)pending == bytes); INSIST((size_t)pending == bytes);
err = uv_try_write(&sock->uv_handle.stream, &sock->tls.senddata, err = uv_try_write(&sock->uv_handle.stream, &req->uvbuf, 1);
1);
if (err == pending) { if (err == pending) {
/* Wrote everything, restart */ /* Wrote everything, restart */
isc__nm_uvreq_put(&req, sock);
free_senddata(sock); free_senddata(sock);
continue; continue;
} }
if (err > 0) { if (err > 0) {
/* Partial write, send rest asynchronously */ /* Partial write, send rest asynchronously */
memmove(sock->tls.senddata.base, memmove(req->uvbuf.base, req->uvbuf.base + err,
sock->tls.senddata.base + err, pending - err); req->uvbuf.len - err);
sock->tls.senddata.len = pending - err; req->uvbuf.len = req->uvbuf.len - err;
} else if (err == UV_ENOSYS || err == UV_EAGAIN) { } else if (err == UV_ENOSYS || err == UV_EAGAIN) {
/* uv_try_write is not supported, send asynchronously */ /* uv_try_write is not supported, send asynchronously */
} else { } else {
result = isc__nm_uverr2result(err); result = isc__nm_uverr2result(err);
isc__nm_uvreq_put(&req, sock);
free_senddata(sock); free_senddata(sock);
break; break;
} }
req = isc__nm_uvreq_get(sock->mgr, sock);
req->uvbuf.base = (char *)sock->tls.senddata.base;
req->uvbuf.len = sock->tls.senddata.len;
err = uv_write(&req->uv_req.write, &sock->uv_handle.stream, err = uv_write(&req->uv_req.write, &sock->uv_handle.stream,
&sock->tls.senddata, 1, tls_write_cb); &req->uvbuf, 1, tls_write_cb);
INSIST(err == 0); INSIST(err == 0);