This is a better match for what they do, and the general "cpool" var/function prefix works well. The pool now handles very long hostnames correctly. The following changes have been made:

* `struct connectdata`, i.e. connections, keeps new members named `destination` and `destination_len` that fully specify interface+port+hostname of where the connection is going. This is used in the pool for "bundling" of connections with the same destination. There is no longer a limit on the length.
* Locking: all locks are taken inside conncache.c when calling into the pool and released on return. This eliminates the hazard of callers having to keep track of them.
* `struct connectbundle` is now internal to the pool. It is no longer referenced by a connection.
* `bundle->multiuse` no longer exists. The HTTP/2, HTTP/3 and TLS filters no longer need to set it. Instead, the multi checks on leaving MSTATE_CONNECT or MSTATE_CONNECTING whether the connection is now multiplexed and new, i.e. not `conn->bits.reuse`. In that case the processing of pending handles is triggered.
* The pool's init is given a callback to invoke on every connection being discarded. This allows the cleanups in `Curl_disconnect` to run, wherever the decision to retire a connection is made.
* Several pool operations can now be fully done with one call. Pruning dead connections, upkeep and checks on pool limits can now directly discard connections and no longer need to return them to the caller for that (thanks to the callback described above).
* Finding a connection for reuse is now done via `Curl_cpool_find()`; the caller provides callbacks to evaluate the connection candidates (see the sketch below this description).
* `Curl_cpool_check_limits()` now directly uses the max values that may be set in the transfer's multi. No need to pass them around. `Curl_multi_max_host_connections()` and `Curl_multi_max_total_connections()` are gone.
* Add method `Curl_node_llist()` to get the llist a node is in. Used in cpool to verify that connections are indeed in the list (or not in any list), as they need to be.

I left conncache.[ch] as is for now and also did not touch the documentation. If we update that outside the feature window, we can do it in a separate PR.

Multi-thread safety is not achieved by this PR, but since more details of how pools operate are now "internal", it is a better starting point for getting there in the future.

Closes #14662
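To make the callback-driven shape concrete, here is a minimal, self-contained sketch of two of the patterns listed above: a pool that is handed a discard callback at init time, and a find call where the caller only supplies the reuse policy via a callback while the pool does its own locking. All names here (`conn_pool`, `cpool_init`, `cpool_find`, `cpool_prune_dead`, ...) are illustrative assumptions for this sketch, not the actual internals of conncache.c.

```c
/* Hypothetical sketch only; names do not match libcurl's internals. */
#include <stdbool.h>
#include <string.h>
#include <pthread.h>

struct pooled_conn {
  struct pooled_conn *next;
  char *destination;      /* interface+port+hostname key, no length limit */
  bool multiplexed;
  bool dead;
};

/* invoked by the pool on every connection it decides to discard, so the
   owner's cleanup (think Curl_disconnect) always runs */
typedef void cpool_discard_cb(struct pooled_conn *conn, void *userdata);

/* evaluator for candidates during reuse lookup: return true to take it */
typedef bool cpool_match_cb(struct pooled_conn *candidate, void *userdata);

struct conn_pool {
  struct pooled_conn *head;
  pthread_mutex_t lock;        /* taken inside pool calls, never by callers */
  cpool_discard_cb *discard;   /* set once at init */
  void *discard_data;
};

static void cpool_init(struct conn_pool *pool,
                       cpool_discard_cb *discard, void *userdata)
{
  pool->head = NULL;
  pool->discard = discard;
  pool->discard_data = userdata;
  pthread_mutex_init(&pool->lock, NULL);
}

/* Find a reusable connection for `destination`. Locking happens in here;
   the caller only decides, via `match`, which candidate is acceptable. */
static struct pooled_conn *cpool_find(struct conn_pool *pool,
                                      const char *destination,
                                      cpool_match_cb *match, void *userdata)
{
  struct pooled_conn *c, *found = NULL;

  pthread_mutex_lock(&pool->lock);
  for(c = pool->head; c; c = c->next) {
    if(!strcmp(c->destination, destination) && match(c, userdata)) {
      found = c;
      break;
    }
  }
  pthread_mutex_unlock(&pool->lock);
  return found;
}

/* Prune dead connections in one call: the pool discards them itself via
   the init-time callback instead of handing them back to the caller. */
static void cpool_prune_dead(struct conn_pool *pool)
{
  struct pooled_conn **pnext;

  pthread_mutex_lock(&pool->lock);
  pnext = &pool->head;
  while(*pnext) {
    struct pooled_conn *c = *pnext;
    if(c->dead) {
      *pnext = c->next;                      /* unlink from the pool */
      pool->discard(c, pool->discard_data);  /* owner cleanup + free */
    }
    else
      pnext = &c->next;
  }
  pthread_mutex_unlock(&pool->lock);
}
```

The point of this shape, as in the PR: callers never touch the pool's lock and never get connections handed back merely to dispose of them.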
/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/
#include "test.h"

#include "memdebug.h"

static const char *ldata_names[] = {
  "NONE",
  "SHARE",
  "COOKIE",
  "DNS",
  "SESSION",
  "CONNECT",
  "PSL",
  "HSTS",
  "NULL",
};

static void my_lock(CURL *handle, curl_lock_data data,
                    curl_lock_access laccess, void *useptr)
{
  (void)handle;
  (void)laccess;
  (void)useptr;
  printf("-> Mutex lock %s\n", ldata_names[data]);
}

static void my_unlock(CURL *handle, curl_lock_data data, void *useptr)
{
  (void)handle;
  (void)useptr;
  printf("<- Mutex unlock %s\n", ldata_names[data]);
}

/* test function */
CURLcode test(char *URL)
{
  CURLcode res = CURLE_OK;
  CURLSH *share = NULL;
  int i;

  global_init(CURL_GLOBAL_ALL);

  share = curl_share_init();
  if(!share) {
    fprintf(stderr, "curl_share_init() failed\n");
    goto test_cleanup;
  }

  curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_CONNECT);
  curl_share_setopt(share, CURLSHOPT_LOCKFUNC, my_lock);
  curl_share_setopt(share, CURLSHOPT_UNLOCKFUNC, my_unlock);

  /* Loop the transfer and cleanup the handle properly every lap. This will
     still reuse connections since the pool is in the shared object! */

  for(i = 0; i < 3; i++) {
    CURL *curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, URL);

      /* use the share object */
      curl_easy_setopt(curl, CURLOPT_SHARE, share);

      /* Perform the request, res will get the return code */
      res = curl_easy_perform(curl);

      /* always cleanup */
      curl_easy_cleanup(curl);

      /* Check for errors */
      if(res != CURLE_OK) {
        fprintf(stderr, "curl_easy_perform() failed: %s\n",
                curl_easy_strerror(res));
        goto test_cleanup;
      }
    }
  }

test_cleanup:
  curl_share_cleanup(share);
  curl_global_cleanup();

  return res;
}