mirror of https://gitlab.isc.org/isc-projects/kea

[#1732] Minor clean up

modified:
    src/lib/http/client.h
    src/lib/http/tests/mt_client_unittests.cc
Author: Thomas Markwalder
Date:   2021-03-29 10:01:30 -04:00
Parent: 2825e316a0
Commit: 5601d69a23
2 changed files with 41 additions and 33 deletions

src/lib/http/client.h

@@ -59,13 +59,13 @@ class HttpClientImpl;
/// the next request in the queue for the particular URL will be initiated.
///
/// Furthermore, the class supports two modes of operation: single-threaded
/// and multi-threaded mode. In single-threaded mode, all IO is driven by
/// an external IOService passed into the class constructor, and ultimately
/// only a single connection per URL can be open at any given time.
///
-/// In multi-threaded mode, an internal thread pool, driven by a private
-/// IOService instance, is used to support multiple concurrent connections
-/// per URL. Currently the number of connections per URL is equal to the
+/// In multi-threaded mode an internal thread pool driven by a private
+/// IOService instance is used to support multiple concurrent connections
+/// per URL. Currently, the number of connections per URL is set to the
/// number of threads in the thread pool.
///
/// The client tests the persistent connection for usability before sending

@@ -136,8 +136,9 @@ public:
///
/// @param io_service IO service to be used by the HTTP client.
/// @param thread_pool_size maximum number of threads in the thread pool.
-/// Currently this also sets the maximum number of concurrent connections
-/// per URL.
+/// A value greater than zero enables multi-threaded mode and sets the
+/// maximum number of concurrent connections per URL. A value of zero
+/// (default) enables single-threaded mode with one connection per URL.
explicit HttpClient(asiolink::IOService& io_service, size_t thread_pool_size = 0);

/// @brief Destructor.

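The new @param text above is the behavioral contract worth remembering: the same constructor argument selects the threading mode. Below is a minimal usage sketch, not part of this commit; it assumes the Kea-style include paths and namespaces (isc::asiolink, isc::http) and omits request traffic and error handling.

    #include <asiolink/io_service.h>
    #include <http/client.h>

    using namespace isc::asiolink;
    using namespace isc::http;

    int main() {
        IOService io_service;

        // thread_pool_size omitted (0): single-threaded mode. All IO is
        // driven by the external io_service and each URL has at most one
        // open connection.
        HttpClient st_client(io_service);

        // thread_pool_size = 4: multi-threaded mode. The client runs an
        // internal thread pool and private IOService, allowing up to four
        // concurrent connections per URL.
        HttpClient mt_client(io_service, 4);

        // ... queue requests with asyncSendRequest(), run io_service ...

        mt_client.stop();
        st_client.stop();
        return 0;
    }
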
@@ -145,26 +146,26 @@ public:
/// @brief Queues new asynchronous HTTP request for a given URL.
///
/// The client maintains an internal connection pool which manages lists
/// of connections per URL. In single-threaded mode, each URL is limited
/// to a single connection. In multi-threaded mode, each URL may have
/// more than one open connection per URL, enabling the client to carry
/// on multiple concurrent requests per URL.
///
/// The client will search the pool for an open, idle connection for the
/// given URL. If there are no idle connections, the client will open
/// a new connection up to the maximum number of connections allowed by the
/// thread mode. If all possible connections are busy, the request is
/// pushed on to the back of a URL-specific FIFO queue of pending requests.
///
/// If, however, there is an idle connection available then a new transaction
/// for the request will be initiated immediately upon that connection.
///
/// Note that when a connection completes a transaction, and its URL
/// queue is not empty, it will pop a pending request from the front of
/// the queue and begin a new transaction for that request. The net effect
/// is that requests are always pulled from the front of the queue unless
/// the queue is empty.
///
/// The existing connection is tested before it is used for the new
/// transaction by attempting to read (with message peeking) from

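To make the queueing rules above concrete, here is a rough caller-side sketch (not from this commit; includes omitted). The asyncSendRequest() argument list and handler signature follow the unit-test code later in this diff; makeJsonRequest() is a hypothetical helper standing in for the request construction the test performs in createRequest().

    // Assumes a single-threaded HttpClient (thread_pool_size == 0) driven by
    // the external io_service; with a thread pool of N, up to N of these
    // transactions would run concurrently instead of being serialized.
    void queueRequests(isc::http::HttpClient& client,
                       isc::asiolink::IOService& io_service) {
        using namespace isc::asiolink;
        using namespace isc::http;
        Url url("http://127.0.0.1:18123");
        size_t completed = 0;
        for (size_t i = 0; i < 8; ++i) {
            // makeJsonRequest() is hypothetical: build and finalize a
            // PostHttpRequestJson carrying a "sequence" element.
            PostHttpRequestJsonPtr request = makeJsonRequest(i);
            HttpResponseJsonPtr response = boost::make_shared<HttpResponseJson>();
            // Only one transaction is on the wire per URL at a time here; the
            // other requests wait in the URL-specific FIFO queue.
            client.asyncSendRequest(url, TlsContextPtr(), request, response,
                [&completed, &io_service](const boost::system::error_code& ec,
                                          const HttpResponsePtr&,
                                          const std::string&) {
                    if (!ec && ++completed == 8) {
                        io_service.stop();    // all responses received
                    }
                });
        }
        io_service.run();    // drives connects, writes, reads and callbacks
    }
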
@@ -241,7 +242,11 @@ public:
const CloseHandler& close_callback =
CloseHandler());

-/// @brief Closes all connections.
+/// @brief Halts client-side IO activity.
+///
+/// Closes all connections, discards any queued requests, and in
+/// multi-threaded mode discards the thread-pool and the internal
+/// IOService.
void stop();

/// @brief Closes a connection if it has an out-of-band socket event

src/lib/http/tests/mt_client_unittests.cc

@@ -50,12 +50,15 @@ const long TEST_TIMEOUT = 10000;
/// @brief Container request/response pair handled by a given thread.
struct ClientRR {
+/// @brief Thread id of the client thread handling the request as a string.
std::string thread_id_;
+/// @brief HTTP request submitted by the client thread.
PostHttpRequestJsonPtr request_;
+/// @brief HTTP response received by the client thread.
HttpResponseJsonPtr response_;
};

-/// @brief Pointer to a ClientRR.
+/// @brief Pointer to a ClientRR instance.
typedef boost::shared_ptr<ClientRR> ClientRRPtr;

/// @brief Implementation of the @ref HttpResponseCreator.

@@ -93,7 +96,7 @@ private:
/// @brief Creates HTTP response.
///
/// This method generates a response with the JSON body copied
/// from the request.
///
/// @param request Pointer to the HTTP request.

@@ -103,7 +106,6 @@ private:
// Request must always be JSON.
PostHttpRequestJsonPtr request_json =
boost::dynamic_pointer_cast<PostHttpRequestJson>(request);
if (!request_json) {
return(createStockHttpResponse(request, HttpStatusCode::BAD_REQUEST));
}

@@ -114,6 +116,7 @@ private:
return(createStockHttpResponse(request, HttpStatusCode::BAD_REQUEST));
}

+// Create the response.
HttpResponseJsonPtr response(new HttpResponseJson(request->getHttpVersion(),
HttpStatusCode::OK));

@@ -138,7 +141,7 @@ public:
}
};

-/// @brief Test fixture class for testing multi-threaded HTTP client.
+/// @brief Test fixture class for testing threading modes of HTTP client.
class MtHttpClientTest : public ::testing::Test {
public:

@@ -162,7 +165,7 @@ public:
}
}

-/// @brief Callback function invoke upon test timeout.
+/// @brief Callback function to invoke upon test timeout.
///
/// It stops the IO service and reports test timeout.
///

@@ -174,11 +177,12 @@ public:
io_service_.stop();
}

-/// @brief Runs test's IOService until the desired number of have been carried out.
+/// @brief Runs the test's IOService until the desired number of requests
+/// have been carried out or the test fails.
void runIOService() {
// Loop until the clients are done, an error occurs, or the time runs out.
while (clientRRs_.size() < num_requests_) {
-// Always call restart() before we call run();
+// Always call reset() before we call run();
io_service_.get_io_service().reset();

// Run until a client stops the service.

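The reset()-before-run() idiom in the loop above deserves a note: run() returns once the service is stopped (or runs out of work) and keeps returning immediately until the stopped flag is cleared, which is what reset() does (newer Asio spells it restart()). A generic Boost.Asio illustration of the same loop, outside any Kea types (runUntil and done are invented names for this sketch):

    #include <boost/asio.hpp>
    #include <functional>

    // Re-run the service until the caller-supplied predicate says we are done.
    // Mirrors the shape of MtHttpClientTest::runIOService(): a client handler
    // calls stop(), run() returns, and reset() re-arms the service.
    void runUntil(boost::asio::io_service& io, const std::function<bool()>& done) {
        while (!done()) {
            io.reset();    // clear the "stopped" flag left by the previous stop()
            io.run();      // block until some handler calls io.stop()
        }
    }
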
@@ -186,7 +190,7 @@ public:
}
}

-/// @brief Creates HTTP request with JSON body.
+/// @brief Creates an HTTP request with JSON body.
///
/// It includes a JSON parameter with a specified value.
///

@@ -206,7 +210,6 @@ public:
request->setBodyAsJson(body);

try {
request->finalize();
} catch (const std::exception& ex) {
ADD_FAILURE() << "failed to create request: " << ex.what();
}

@@ -224,7 +227,7 @@ public:
/// threads in the pool. At that point, the handler will unblock
/// until all threads have finished preparing the response and are
/// ready to return. The handler will notify all pending threads
/// and invoke stop() on the test's main IO service thread.
///
/// @param sequence value for the integer element, "sequence",
/// to send in the request.

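The rendezvous described above is a standard condition_variable pattern; the following stripped-down illustration (all names invented for this sketch, no Kea types) shows the shape of what the test handler does with mutex_, cv_ and the in-progress counter.

    #include <condition_variable>
    #include <mutex>

    std::mutex mtx;
    std::condition_variable cv;
    size_t in_progress = 0;

    // Called from each client thread's completion handler: block until
    // 'expected' threads have a request in progress, then proceed together.
    void rendezvous(size_t expected) {
        std::unique_lock<std::mutex> lck(mtx);
        if (++in_progress == expected) {
            cv.notify_all();    // the last arrival releases everyone
        } else {
            cv.wait(lck, [&] { return in_progress >= expected; });
        }
    }
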
@@ -236,17 +239,16 @@ public:
// Initiate request to the server.
PostHttpRequestJsonPtr request_json = createRequest("sequence", sequence);
HttpResponseJsonPtr response_json = boost::make_shared<HttpResponseJson>();
ASSERT_NO_THROW(client_->asyncSendRequest(url, TlsContextPtr(),
request_json, response_json,
[this, request_json, response_json](const boost::system::error_code& ec,
-const HttpResponsePtr&/* response*/,
+const HttpResponsePtr&,
const std::string&) {
// Bail on an error.
ASSERT_FALSE(ec) << "asyncSendRequest failed, ec: " << ec;

+// Wait here until we have as many in progress as we have threads.
{
std::unique_lock<std::mutex> lck(mutex_);
++num_in_progress_;

@@ -272,6 +274,7 @@ public:
clientRR->request_ = request_json;
clientRR->response_ = response_json;

+// Wait here until we have as many ready to finish as we have threads.
{
std::unique_lock<std::mutex> lck(mutex_);
num_finished_++;

@@ -297,7 +300,7 @@ public:
/// @brief Starts one or more HTTP requests via HttpClient to a test listener.
///
/// This function creates an HttpClient with the given number
/// of threads. It then initiates the given number of HTTP requests. Each
/// request carries a single integer element, "sequence", in its body.
/// The response is expected to be this same element echoed back.
/// Then it iteratively runs the test's IOService until all

@@ -319,16 +322,16 @@ public:
/// @param num_requests
void threadRequestAndReceive(size_t num_threads, size_t num_requests) {
// First we make sure the parameter rules apply.
ASSERT_TRUE((num_threads == 0) || (num_requests < num_threads)
|| (num_requests % num_threads == 0));
num_threads_ = num_threads;
num_requests_ = num_requests;

// Make a factory
factory_.reset(new TestHttpResponseCreatorFactory());

// Need to create a Listener on
listener_.reset(new HttpListener(io_service_,
IOAddress(SERVER_ADDRESS), SERVER_PORT,
TlsContextPtr(), factory_,
HttpListener::RequestTimeout(10000),

@@ -467,7 +470,7 @@ public:
/// @brief Condition variable used to make client threads wait
/// until the number of in-progress requests reaches the number
/// of client requests.
std::condition_variable cv_;
};