code: language cleanup in comments

Based on the standards and guidelines we use for our documentation.

 - expand contractions (they're => they are etc)
 - host name => hostname
 - file name => filename
 - user name => username
 - man page => manpage
 - run-time => runtime
 - set-up => setup
 - back-end => backend
 - a HTTP => an HTTP
 - two spaces after a period => one space after a period

Closes #14073
Daniel Stenberg 2024-07-01 16:47:21 +02:00
parent 9b683577e1
commit c074ba64a8
No known key found for this signature in database
GPG Key ID: 5CC908FDB71E12C2
213 changed files with 1719 additions and 1715 deletions

View File

@ -53,6 +53,7 @@ file name\b:filename
file names\b:filenames
\buser name\b:username
\buser names\b:usernames
\bpass phrase:passphrase
didn't:did not
doesn't:does not
won't:will not
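As an aside, the entries above use a simple `pattern:replacement` format, with `\b` marking a word boundary. The sketch below is an illustrative checker only; it assumes plain substring matching and ignores the `\b` markers, so it is not the project's actual checking script:

```c
#include <stdio.h>
#include <string.h>

/* illustrative subset of the phrase:replacement pairs listed above */
static const char *pairs[][2] = {
  {"pass phrase", "passphrase"},
  {"user name",   "username"},
  {"doesn't",     "does not"},
  {"won't",       "will not"},
};

int main(void)
{
  char line[1024];
  int lineno = 0;
  while(fgets(line, sizeof(line), stdin)) {
    size_t i;
    lineno++;
    for(i = 0; i < sizeof(pairs) / sizeof(pairs[0]); i++) {
      if(strstr(line, pairs[i][0]))
        printf("line %d: use \"%s\" instead of \"%s\"\n",
               lineno, pairs[i][1], pairs[i][0]);
    }
  }
  return 0;
}
```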

View File

@ -578,12 +578,12 @@ SPDX-License-Identifier: curl
## Certificates
In the HTTPS world, you use certificates to validate that you are the one
you claim to be, as an addition to normal passwords. Curl supports client-
side certificates. All certificates are locked with a pass phrase, which you
need to enter before the certificate can be used by curl. The pass phrase
can be specified on the command line or if not, entered interactively when
curl queries for it. Use a certificate with curl on an HTTPS server like:
In the HTTPS world, you use certificates to validate that you are the one you
claim to be, as an addition to normal passwords. Curl supports client- side
certificates. All certificates are locked with a passphrase, which you need
to enter before the certificate can be used by curl. The passphrase can be
specified on the command line or if not, entered interactively when curl
queries for it. Use a certificate with curl on an HTTPS server like:
curl --cert mycert.pem https://secure.example.com

View File

@ -35,9 +35,9 @@ CURLcode curl_easy_setopt(CURL *handle, CURLOPT_PROXY_KEYPASSWD, char *pwd);
This option is for connecting to an HTTPS proxy, not an HTTPS server.
Pass a pointer to a null-terminated string as parameter. It is used as the
password required to use the CURLOPT_PROXY_SSLKEY(3) private key. You
never need a pass phrase to load a certificate but you need one to load your
private key.
password required to use the CURLOPT_PROXY_SSLKEY(3) private key. You never
need a passphrase to load a certificate but you need one to load your private
key.
The application does not have to keep the string around after setting this
option.
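For orientation, here is a minimal sketch of how this option is typically set together with the proxy certificate and key options; the proxy address, file names and passphrase are placeholders, not values taken from this change:

```c
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
    /* placeholder HTTPS proxy */
    curl_easy_setopt(curl, CURLOPT_PROXY, "https://proxy.example.com:443");
    /* client certificate and private key presented to the proxy */
    curl_easy_setopt(curl, CURLOPT_PROXY_SSLCERT, "proxy-cert.pem");
    curl_easy_setopt(curl, CURLOPT_PROXY_SSLKEY, "proxy-key.pem");
    /* the passphrase unlocks the private key, not the certificate */
    curl_easy_setopt(curl, CURLOPT_PROXY_KEYPASSWD, "secret");
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}
```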

View File

@ -8,7 +8,7 @@ SPDX-License-Identifier: curl
Public include files for libcurl, external users.
They're all placed in the curl subdirectory here for better fit in any kind of
They are all placed in the curl subdirectory here for better fit in any kind of
environment. You must include files from here using...
#include <curl/curl.h>

View File

@ -59,7 +59,7 @@
#endif
#include "curlver.h" /* libcurl version defines */
#include "system.h" /* determine things run-time */
#include "system.h" /* determine things runtime */
#include <stdio.h>
#include <limits.h>
@ -77,7 +77,7 @@
#if !(defined(_WINSOCKAPI_) || defined(_WINSOCK_H) || \
defined(__LWIP_OPT_H__) || defined(LWIP_HDR_OPT_H))
/* The check above prevents the winsock2 inclusion if winsock.h already was
included, since they can't co-exist without problems */
included, since they cannot co-exist without problems */
#include <winsock2.h>
#include <ws2tcpip.h>
#endif
@ -358,13 +358,13 @@ typedef long (*curl_chunk_bgn_callback)(const void *transfer_info,
download of an individual chunk finished.
Note! After this callback was set then it have to be called FOR ALL chunks.
Even if downloading of this chunk was skipped in CHUNK_BGN_FUNC.
This is the reason why we don't need "transfer_info" parameter in this
This is the reason why we do not need "transfer_info" parameter in this
callback and we are not interested in "remains" parameter too. */
typedef long (*curl_chunk_end_callback)(void *ptr);
/* return codes for FNMATCHFUNCTION */
#define CURL_FNMATCHFUNC_MATCH 0 /* string corresponds to the pattern */
#define CURL_FNMATCHFUNC_NOMATCH 1 /* pattern doesn't match the string */
#define CURL_FNMATCHFUNC_NOMATCH 1 /* pattern does not match the string */
#define CURL_FNMATCHFUNC_FAIL 2 /* an error occurred */
/* callback type for wildcard downloading pattern matching. If the
@ -376,7 +376,7 @@ typedef int (*curl_fnmatch_callback)(void *ptr,
/* These are the return codes for the seek callbacks */
#define CURL_SEEKFUNC_OK 0
#define CURL_SEEKFUNC_FAIL 1 /* fail the entire transfer */
#define CURL_SEEKFUNC_CANTSEEK 2 /* tell libcurl seeking can't be done, so
#define CURL_SEEKFUNC_CANTSEEK 2 /* tell libcurl seeking cannot be done, so
libcurl might try other means instead */
typedef int (*curl_seek_callback)(void *instream,
curl_off_t offset,
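To make the return codes above concrete, a sketch of a seek callback backed by a plain FILE pointer follows; the function name and the CURLOPT_SEEKFUNCTION/CURLOPT_SEEKDATA wiring shown in the trailing comment are illustrative assumptions, not code from this change:

```c
#include <stdio.h>
#include <curl/curl.h>

/* seek callback sketch: reposition a FILE* and report the result with
   the CURL_SEEKFUNC_* codes listed above */
static int my_seek(void *instream, curl_off_t offset, int origin)
{
  FILE *fp = (FILE *)instream;
  if(fseek(fp, (long)offset, origin) == 0)
    return CURL_SEEKFUNC_OK;
  return CURL_SEEKFUNC_CANTSEEK; /* libcurl may try other means instead */
}

/* installed with:
   curl_easy_setopt(curl, CURLOPT_SEEKFUNCTION, my_seek);
   curl_easy_setopt(curl, CURLOPT_SEEKDATA, fp); */
```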
@ -547,17 +547,17 @@ typedef enum {
CURLE_WRITE_ERROR, /* 23 */
CURLE_OBSOLETE24, /* 24 - NOT USED */
CURLE_UPLOAD_FAILED, /* 25 - failed upload "command" */
CURLE_READ_ERROR, /* 26 - couldn't open/read from file */
CURLE_READ_ERROR, /* 26 - could not open/read from file */
CURLE_OUT_OF_MEMORY, /* 27 */
CURLE_OPERATION_TIMEDOUT, /* 28 - the timeout time was reached */
CURLE_OBSOLETE29, /* 29 - NOT USED */
CURLE_FTP_PORT_FAILED, /* 30 - FTP PORT operation failed */
CURLE_FTP_COULDNT_USE_REST, /* 31 - the REST command failed */
CURLE_OBSOLETE32, /* 32 - NOT USED */
CURLE_RANGE_ERROR, /* 33 - RANGE "command" didn't work */
CURLE_RANGE_ERROR, /* 33 - RANGE "command" did not work */
CURLE_HTTP_POST_ERROR, /* 34 */
CURLE_SSL_CONNECT_ERROR, /* 35 - wrong when connecting with SSL */
CURLE_BAD_DOWNLOAD_RESUME, /* 36 - couldn't resume download */
CURLE_BAD_DOWNLOAD_RESUME, /* 36 - could not resume download */
CURLE_FILE_COULDNT_READ_FILE, /* 37 */
CURLE_LDAP_CANNOT_BIND, /* 38 */
CURLE_LDAP_SEARCH_FAILED, /* 39 */
@ -581,9 +581,9 @@ typedef enum {
CURLE_RECV_ERROR, /* 56 - failure in receiving network data */
CURLE_OBSOLETE57, /* 57 - NOT IN USE */
CURLE_SSL_CERTPROBLEM, /* 58 - problem with the local certificate */
CURLE_SSL_CIPHER, /* 59 - couldn't use specified cipher */
CURLE_SSL_CIPHER, /* 59 - could not use specified cipher */
CURLE_PEER_FAILED_VERIFICATION, /* 60 - peer's certificate or fingerprint
wasn't verified fine */
was not verified fine */
CURLE_BAD_CONTENT_ENCODING, /* 61 - Unrecognized/bad encoding */
CURLE_OBSOLETE62, /* 62 - NOT IN USE since 7.82.0 */
CURLE_FILESIZE_EXCEEDED, /* 63 - Maximum file size exceeded */
@ -612,7 +612,7 @@ typedef enum {
CURLE_SSL_SHUTDOWN_FAILED, /* 80 - Failed to shut down the SSL
connection */
CURLE_AGAIN, /* 81 - socket is not ready for send/recv,
wait till it's ready and try again (Added
wait till it is ready and try again (Added
in 7.18.2) */
CURLE_SSL_CRL_BADFILE, /* 82 - could not load CRL file, missing or
wrong format (Added in 7.19.0) */
@ -868,7 +868,7 @@ enum curl_khstat {
CURLKHSTAT_FINE_ADD_TO_FILE,
CURLKHSTAT_FINE,
CURLKHSTAT_REJECT, /* reject the connection, return an error */
CURLKHSTAT_DEFER, /* do not accept it, but we can't answer right now.
CURLKHSTAT_DEFER, /* do not accept it, but we cannot answer right now.
Causes a CURLE_PEER_FAILED_VERIFICATION error but the
connection will be left intact etc */
CURLKHSTAT_FINE_REPLACE, /* accept and replace the wrong key */
@ -1088,7 +1088,7 @@ typedef CURLSTScode (*curl_hstswrite_callback)(CURL *easy,
#define CURLOPT(na,t,nu) na = t + nu
#define CURLOPTDEPRECATED(na,t,nu,v,m) na CURL_DEPRECATED(v,m) = t + nu
/* CURLOPT aliases that make no run-time difference */
/* CURLOPT aliases that make no runtime difference */
/* 'char *' argument to a string with a trailing zero */
#define CURLOPTTYPE_STRINGPOINT CURLOPTTYPE_OBJECTPOINT
@ -1326,7 +1326,7 @@ typedef enum {
/* Set the krb4/5 security level, this also enables krb4/5 awareness. This
* is a string, 'clear', 'safe', 'confidential' or 'private'. If the string
* is set but doesn't match one of these, 'private' will be used. */
* is set but does not match one of these, 'private' will be used. */
CURLOPT(CURLOPT_KRBLEVEL, CURLOPTTYPE_STRINGPOINT, 63),
/* Set if we should verify the peer in ssl handshake, set 1 to verify. */
@ -1358,12 +1358,12 @@ typedef enum {
/* 73 = OBSOLETE */
/* Set to explicitly use a new connection for the upcoming transfer.
Do not use this unless you're absolutely sure of this, as it makes the
Do not use this unless you are absolutely sure of this, as it makes the
operation slower and is less friendly for the network. */
CURLOPT(CURLOPT_FRESH_CONNECT, CURLOPTTYPE_LONG, 74),
/* Set to explicitly forbid the upcoming transfer's connection to be reused
when done. Do not use this unless you're absolutely sure of this, as it
when done. Do not use this unless you are absolutely sure of this, as it
makes the operation slower and is less friendly for the network. */
CURLOPT(CURLOPT_FORBID_REUSE, CURLOPTTYPE_LONG, 75),
@ -1844,7 +1844,7 @@ typedef enum {
future libcurl release.
libcurl will ask for the compressed methods it knows of, and if that
isn't any, it will not ask for transfer-encoding at all even if this
is not any, it will not ask for transfer-encoding at all even if this
option is set to 1.
*/
@ -1946,7 +1946,7 @@ typedef enum {
/* Service Name */
CURLOPT(CURLOPT_SERVICE_NAME, CURLOPTTYPE_STRINGPOINT, 236),
/* Wait/don't wait for pipe/mutex to clarify */
/* Wait/do not wait for pipe/mutex to clarify */
CURLOPT(CURLOPT_PIPEWAIT, CURLOPTTYPE_LONG, 237),
/* Set the protocol used when curl is given a URL without a protocol */
@ -2274,9 +2274,9 @@ typedef enum {
/* These enums are for use with the CURLOPT_HTTP_VERSION option. */
enum {
CURL_HTTP_VERSION_NONE, /* setting this means we don't care, and that we'd
like the library to choose the best possible
for us! */
CURL_HTTP_VERSION_NONE, /* setting this means we do not care, and that we
would like the library to choose the best
possible for us! */
CURL_HTTP_VERSION_1_0, /* please use HTTP 1.0 in the request */
CURL_HTTP_VERSION_1_1, /* please use HTTP 1.1 in the request */
CURL_HTTP_VERSION_2_0, /* please use HTTP 2 in the request */
@ -2838,7 +2838,7 @@ CURL_EXTERN time_t curl_getdate(const char *p, const time_t *unused);
for with CURLOPT_CERTINFO / CURLINFO_CERTINFO */
struct curl_certinfo {
int num_of_certs; /* number of certificates with information */
struct curl_slist **certinfo; /* for each index in this array, there's a
struct curl_slist **certinfo; /* for each index in this array, there is a
linked list with textual information for a
certificate in the format "name:content".
eg "Subject:foo", "Issuer:bar", etc. */
@ -3029,7 +3029,7 @@ typedef enum {
} CURLSHcode;
typedef enum {
CURLSHOPT_NONE, /* don't use */
CURLSHOPT_NONE, /* do not use */
CURLSHOPT_SHARE, /* specify a data type to share */
CURLSHOPT_UNSHARE, /* specify which data type to stop sharing */
CURLSHOPT_LOCKFUNC, /* pass in a 'curl_lock_function' pointer */
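A short sketch of how these share options are used in practice; sharing only the DNS cache is an arbitrary illustrative choice and the URL is a placeholder:

```c
#include <curl/curl.h>

int main(void)
{
  CURLSH *share = curl_share_init();
  CURL *curl = curl_easy_init();
  if(share && curl) {
    /* share the DNS cache with any other handle using this share object */
    curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);
    curl_easy_setopt(curl, CURLOPT_SHARE, share);
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
    curl_easy_perform(curl);
  }
  if(curl)
    curl_easy_cleanup(curl); /* easy handles go before the share object */
  if(share)
    curl_share_cleanup(share);
  return 0;
}
```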
@ -3238,7 +3238,7 @@ CURL_EXTERN CURLcode curl_easy_pause(CURL *handle, int bitmask);
#include "websockets.h"
#include "mprintf.h"
/* the typechecker doesn't work in C++ (yet) */
/* the typechecker does not work in C++ (yet) */
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && \
((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) && \
!defined(__cplusplus) && !defined(CURL_DISABLE_TYPECHECK)

View File

@ -24,7 +24,7 @@
*
***************************************************************************/
/*
This is an "external" header file. Don't give away any internals here!
This is an "external" header file. Do not give away any internals here!
GOALS
@ -66,7 +66,7 @@ typedef enum {
CURLM_OK,
CURLM_BAD_HANDLE, /* the passed-in handle is not a valid CURLM handle */
CURLM_BAD_EASY_HANDLE, /* an easy handle was not good/valid */
CURLM_OUT_OF_MEMORY, /* if you ever get this, you're in deep sh*t */
CURLM_OUT_OF_MEMORY, /* if you ever get this, you are in deep sh*t */
CURLM_INTERNAL_ERROR, /* this is a libcurl bug */
CURLM_BAD_SOCKET, /* the passed in socket argument did not match */
CURLM_UNKNOWN_OPTION, /* curl_multi_setopt() with unsupported option */
@ -109,7 +109,7 @@ struct CURLMsg {
typedef struct CURLMsg CURLMsg;
/* Based on poll(2) structure and values.
* We don't use pollfd and POLL* constants explicitly
* We do not use pollfd and POLL* constants explicitly
* to cover platforms without poll(). */
#define CURL_WAIT_POLLIN 0x0001
#define CURL_WAIT_POLLPRI 0x0002
@ -205,7 +205,7 @@ CURL_EXTERN CURLMcode curl_multi_wakeup(CURLM *multi_handle);
/*
* Name: curl_multi_perform()
*
* Desc: When the app thinks there's data available for curl it calls this
* Desc: When the app thinks there is data available for curl it calls this
* function to read/write whatever there is right now. This returns
* as soon as the reads and writes are done. This function does not
* require that there actually is data available for reading or that
@ -236,7 +236,7 @@ CURL_EXTERN CURLMcode curl_multi_cleanup(CURLM *multi_handle);
/*
* Name: curl_multi_info_read()
*
* Desc: Ask the multi handle if there's any messages/informationals from
* Desc: Ask the multi handle if there is any messages/informationals from
* the individual transfers. Messages include informationals such as
* error code from the transfer or just the fact that a transfer is
* completed. More details on these should be written down as well.
@ -253,7 +253,7 @@ CURL_EXTERN CURLMcode curl_multi_cleanup(CURLM *multi_handle);
* we will provide the particular "transfer handle" in that struct
* and that should/could/would be used in subsequent
* curl_easy_getinfo() calls (or similar). The point being that we
* must never expose complex structs to applications, as then we'll
* must never expose complex structs to applications, as then we will
* undoubtably get backwards compatibility problems in the future.
*
* Returns: A pointer to a filled-in struct, or NULL if it failed or ran out
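The description above corresponds to a common draining loop; the helper below is only a sketch and assumes the easy handles were added to the multi handle elsewhere:

```c
#include <curl/curl.h>

/* drain completion messages from a multi handle */
static void drain_messages(CURLM *multi)
{
  CURLMsg *msg;
  int msgs_left;
  while((msg = curl_multi_info_read(multi, &msgs_left))) {
    if(msg->msg == CURLMSG_DONE) {
      /* msg->easy_handle is the transfer that finished,
         msg->data.result its CURLcode */
      curl_multi_remove_handle(multi, msg->easy_handle);
      curl_easy_cleanup(msg->easy_handle);
    }
  }
}
```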

View File

@ -402,7 +402,7 @@
# define CURL_PULL_SYS_SOCKET_H 1
#else
/* generic "safe guess" on old 32 bit style */
/* generic "safe guess" on old 32-bit style */
# define CURL_TYPEOF_CURL_OFF_T long
# define CURL_FORMAT_CURL_OFF_T "ld"
# define CURL_FORMAT_CURL_OFF_TU "lu"

View File

@ -37,8 +37,8 @@
* order to work around gcc bug #32061. It affects only gcc 4.3.x/4.4.x
* when compiling with -Wlogical-op.
*
* To add an option that uses the same type as an existing option, you'll just
* need to extend the appropriate _curl_*_option macro
* To add an option that uses the same type as an existing option, you will
* just need to extend the appropriate _curl_*_option macro
*/
#define curl_easy_setopt(handle, option, value) \
__extension__({ \
@ -245,7 +245,7 @@ CURLWARNING(_curl_easy_getinfo_err_curl_off_t,
/* To add a new option to one of the groups, just add
* (option) == CURLOPT_SOMETHING
* to the or-expression. If the option takes a long or curl_off_t, you don't
* to the or-expression. If the option takes a long or curl_off_t, you do not
* have to do anything
*/
@ -678,7 +678,7 @@ typedef CURLcode (*_curl_ssl_ctx_callback4)(CURL *, const void *,
const void *);
#ifdef HEADER_SSL_H
/* hack: if we included OpenSSL's ssl.h, we know about SSL_CTX
* this will of course break if we're included before OpenSSL headers...
* this will of course break if we are included before OpenSSL headers...
*/
typedef CURLcode (*_curl_ssl_ctx_callback5)(CURL *, SSL_CTX *, void *);
typedef CURLcode (*_curl_ssl_ctx_callback6)(CURL *, SSL_CTX *, const void *);

View File

@ -102,7 +102,7 @@ typedef enum {
#define CURLU_GET_EMPTY (1<<14) /* allow empty queries and fragments
when extracting the URL or the
components */
#define CURLU_NO_GUESS_SCHEME (1<<15) /* for get, don't accept a guess */
#define CURLU_NO_GUESS_SCHEME (1<<15) /* for get, do not accept a guess */
typedef struct Curl_URL CURLU;

View File

@ -430,7 +430,7 @@ static bool hostcompare(const char *host, const char *check)
if(hlen && (host[hlen - 1] == '.'))
hlen--;
if(hlen != clen)
/* they can't match if they have different lengths */
/* they cannot match if they have different lengths */
return FALSE;
return strncasecompare(host, check, hlen);
}
@ -477,7 +477,7 @@ static time_t altsvc_debugtime(void *unused)
* Curl_altsvc_parse() takes an incoming alt-svc response header and stores
* the data correctly in the cache.
*
* 'value' points to the header *value*. That's contents to the right of the
* 'value' points to the header *value*. That is contents to the right of the
* header name.
*
* Currently this function rejects invalid data without returning an error.
@ -651,7 +651,7 @@ CURLcode Curl_altsvc_parse(struct Curl_easy *data,
}
else
break;
/* after the double quote there can be a comma if there's another
/* after the double quote there can be a comma if there is another
string or a semicolon if no more */
if(*p == ',') {
/* comma means another alternative is presented */

View File

@ -117,7 +117,7 @@ void Curl_amiga_cleanup(void)
#ifdef CURLRES_AMIGA
/*
* Because we need to handle the different cases in hostip4.c at run-time,
* Because we need to handle the different cases in hostip4.c at runtime,
* not at compile-time, based on what was detected in Curl_amiga_init(),
* we replace it completely with our own as to not complicate the baseline
* code. Assumes malloc/calloc/free are thread safe because Curl_he2ai()

View File

@ -77,7 +77,7 @@ static const char * const telnetoptions[]=
#define CURL_GA 249 /* Go Ahead, reverse the line */
#define CURL_SB 250 /* SuBnegotiation */
#define CURL_WILL 251 /* Our side WILL use this option */
#define CURL_WONT 252 /* Our side WON'T use this option */
#define CURL_WONT 252 /* Our side will not use this option */
#define CURL_DO 253 /* DO use this option! */
#define CURL_DONT 254 /* DON'T use this option! */
#define CURL_IAC 255 /* Interpret As Command */

View File

@ -65,7 +65,7 @@
# define CARES_STATICLIB
#endif
#include <ares.h>
#include <ares_version.h> /* really old c-ares didn't include this by
#include <ares_version.h> /* really old c-ares did not include this by
itself */
#if ARES_VERSION >= 0x010500
@ -113,7 +113,7 @@ struct thread_data {
obtaining a "definitive" one. For old c-ares without getaddrinfo.
This is intended to equal the c-ares default timeout. cURL always uses that
default value. Unfortunately, c-ares doesn't expose its default timeout in
default value. Unfortunately, c-ares does not expose its default timeout in
its API, but it is officially documented as 5 seconds.
See query_completed_cb() for an explanation of how this is used.
@ -250,12 +250,12 @@ void Curl_resolver_cancel(struct Curl_easy *data)
}
/*
* We're equivalent to Curl_resolver_cancel() for the c-ares resolver. We
* We are equivalent to Curl_resolver_cancel() for the c-ares resolver. We
* never block.
*/
void Curl_resolver_kill(struct Curl_easy *data)
{
/* We don't need to check the resolver state because we can be called safely
/* We do not need to check the resolver state because we can be called safely
at any time and we always do the same thing. */
Curl_resolver_cancel(data);
}
@ -280,7 +280,7 @@ static void destroy_async_data(struct Curl_async *async)
/*
* Curl_resolver_getsock() is called when someone from the outside world
* (using curl_multi_fdset()) wants to get our fd_set setup and we're talking
* (using curl_multi_fdset()) wants to get our fd_set setup and we are talking
* with ares. The caller must make sure that this function is only called when
* we have a working ares channel.
*
@ -359,7 +359,7 @@ static int waitperform(struct Curl_easy *data, timediff_t timeout_ms)
if(!nfds)
/* Call ares_process() unconditionally here, even if we simply timed out
above, as otherwise the ares name resolve won't timeout! */
above, as otherwise the ares name resolve will not timeout! */
ares_process_fd((ares_channel)data->state.async.resolver, ARES_SOCKET_BAD,
ARES_SOCKET_BAD);
else {
@ -394,8 +394,8 @@ CURLcode Curl_resolver_is_resolved(struct Curl_easy *data,
return CURLE_UNRECOVERABLE_POLL;
#ifndef HAVE_CARES_GETADDRINFO
/* Now that we've checked for any last minute results above, see if there are
any responses still pending when the EXPIRE_HAPPY_EYEBALLS_DNS timer
/* Now that we have checked for any last minute results above, see if there
are any responses still pending when the EXPIRE_HAPPY_EYEBALLS_DNS timer
expires. */
if(res
&& res->num_pending
@ -523,7 +523,7 @@ CURLcode Curl_resolver_wait_resolv(struct Curl_easy *data,
*entry = data->state.async.dns;
if(result)
/* close the connection, since we can't return failure here without
/* close the connection, since we cannot return failure here without
cleaning up this connection properly. */
connclose(data->conn, "c-ares resolve failed");
@ -603,46 +603,46 @@ static void query_completed_cb(void *arg, /* (struct connectdata *) */
/* If there are responses still pending, we presume they must be the
complementary IPv4 or IPv6 lookups that we started in parallel in
Curl_resolver_getaddrinfo() (for Happy Eyeballs). If we've got a
Curl_resolver_getaddrinfo() (for Happy Eyeballs). If we have got a
"definitive" response from one of a set of parallel queries, we need to
think about how long we're willing to wait for more responses. */
think about how long we are willing to wait for more responses. */
if(res->num_pending
/* Only these c-ares status values count as "definitive" for these
purposes. For example, ARES_ENODATA is what we expect when there is
no IPv6 entry for a domain name, and that's not a reason to get more
no IPv6 entry for a domain name, and that is not a reason to get more
aggressive in our timeouts for the other response. Other errors are
either a result of bad input (which should affect all parallel
requests), local or network conditions, non-definitive server
responses, or us cancelling the request. */
&& (status == ARES_SUCCESS || status == ARES_ENOTFOUND)) {
/* Right now, there can only be up to two parallel queries, so don't
/* Right now, there can only be up to two parallel queries, so do not
bother handling any other cases. */
DEBUGASSERT(res->num_pending == 1);
/* It's possible that one of these parallel queries could succeed
quickly, but the other could always fail or timeout (when we're
/* it is possible that one of these parallel queries could succeed
quickly, but the other could always fail or timeout (when we are
talking to a pool of DNS servers that can only successfully resolve
IPv4 address, for example).
It's also possible that the other request could always just take
it is also possible that the other request could always just take
longer because it needs more time or only the second DNS server can
fulfill it successfully. But, to align with the philosophy of Happy
Eyeballs, we don't want to wait _too_ long or users will think
requests are slow when IPv6 lookups don't actually work (but IPv4 ones
do).
Eyeballs, we do not want to wait _too_ long or users will think
requests are slow when IPv6 lookups do not actually work (but IPv4
ones do).
So, now that we have a usable answer (some IPv4 addresses, some IPv6
addresses, or "no such domain"), we start a timeout for the remaining
pending responses. Even though it is typical that this resolved
request came back quickly, that needn't be the case. It might be that
this completing request didn't get a result from the first DNS server
or even the first round of the whole DNS server pool. So it could
already be quite some time after we issued the DNS queries in the
first place. Without modifying c-ares, we can't know exactly where in
its retry cycle we are. We could guess based on how much time has
gone by, but it doesn't really matter. Happy Eyeballs tells us that,
given usable information in hand, we simply don't want to wait "too
much longer" after we get a result.
this completing request did not get a result from the first DNS
server or even the first round of the whole DNS server pool. So it
could already be quite some time after we issued the DNS queries in
the first place. Without modifying c-ares, we cannot know exactly
where in its retry cycle we are. We could guess based on how much
time has gone by, but it does not really matter. Happy Eyeballs tells
us that, given usable information in hand, we simply do not want to
wait "too much longer" after we get a result.
We simply wait an additional amount of time equal to the default
c-ares query timeout. That is enough time for a typical parallel
@ -653,7 +653,7 @@ static void query_completed_cb(void *arg, /* (struct connectdata *) */
request, which seems bearable. The downside is that c-ares might race
with us to issue one more retry just before we give up, but it seems
better to "waste" that request instead of trying to guess the perfect
timeout to prevent it. After all, we don't even know where in the
timeout to prevent it. After all, we do not even know where in the
c-ares retry cycle each request is.
*/
res->happy_eyeballs_dns_time = Curl_now();
@ -849,7 +849,7 @@ CURLcode Curl_set_dns_servers(struct Curl_easy *data,
/* If server is NULL or empty, this would purge all DNS servers
* from ares library, which will cause any and all queries to fail.
* So, just return OK if none are configured and don't actually make
* So, just return OK if none are configured and do not actually make
* any changes to c-ares. This lets c-ares use its defaults, which
* it gets from the OS (for instance from /etc/resolv.conf on Linux).
*/

View File

@ -286,7 +286,7 @@ static CURLcode getaddrinfo_complete(struct Curl_easy *data)
result = Curl_addrinfo_callback(data, tsd->sock_error, tsd->res);
/* The tsd->res structure has been copied to async.dns and perhaps the DNS
cache. Set our copy to NULL so destroy_thread_sync_data doesn't free it.
cache. Set our copy to NULL so destroy_thread_sync_data does not free it.
*/
tsd->res = NULL;
@ -790,7 +790,7 @@ void Curl_resolver_kill(struct Curl_easy *data)
{
struct thread_data *td = data->state.async.tdata;
/* If we're still resolving, we must wait for the threads to fully clean up,
/* If we are still resolving, we must wait for the threads to fully clean up,
unfortunately. Otherwise, we can simply cancel to clean up any resolver
data. */
#ifdef _WIN32

View File

@ -161,7 +161,7 @@ struct Curl_addrinfo *Curl_resolver_getaddrinfo(struct Curl_easy *data,
int *waitp);
#ifndef CURLRES_ASYNCH
/* convert these functions if an asynch resolver isn't used */
/* convert these functions if an asynch resolver is not used */
#define Curl_resolver_cancel(x) Curl_nop_stmt
#define Curl_resolver_kill(x) Curl_nop_stmt
#define Curl_resolver_is_resolved(x,y) CURLE_COULDNT_RESOLVE_HOST

View File

@ -48,7 +48,7 @@ void Curl_bufref_init(struct bufref *br)
}
/*
* Free the buffer and re-init the necessary fields. It doesn't touch the
* Free the buffer and re-init the necessary fields. It does not touch the
* 'signature' field and thus this buffer reference can be reused.
*/

View File

@ -324,7 +324,7 @@ static CURLcode empty_header(struct Curl_easy *data)
result = hyper_each_header(data, NULL, 0, NULL, 0) ?
CURLE_WRITE_ERROR : CURLE_OK;
if(result)
failf(data, "hyperstream: couldn't pass blank header");
failf(data, "hyperstream: could not pass blank header");
/* Hyper does chunked decoding itself. If it was added during
* response header processing, remove it again. */
Curl_cwriter_remove_by_name(data, "chunked");
@ -421,7 +421,7 @@ CURLcode Curl_hyper_stream(struct Curl_easy *data,
data->req.done = TRUE;
infof(data, "hyperstream is done");
if(!k->bodywritten) {
/* hyper doesn't always call the body write callback */
/* hyper does not always call the body write callback */
result = Curl_http_firstwrite(data);
}
break;
@ -439,7 +439,7 @@ CURLcode Curl_hyper_stream(struct Curl_easy *data,
*didwhat = KEEP_RECV;
if(!resp) {
failf(data, "hyperstream: couldn't get response");
failf(data, "hyperstream: could not get response");
return CURLE_RECV_ERROR;
}
@ -462,7 +462,7 @@ CURLcode Curl_hyper_stream(struct Curl_easy *data,
headers = hyper_response_headers(resp);
if(!headers) {
failf(data, "hyperstream: couldn't get response headers");
failf(data, "hyperstream: could not get response headers");
result = CURLE_RECV_ERROR;
break;
}
@ -505,7 +505,7 @@ CURLcode Curl_hyper_stream(struct Curl_easy *data,
resp_body = hyper_response_body(resp);
if(!resp_body) {
failf(data, "hyperstream: couldn't get response body");
failf(data, "hyperstream: could not get response body");
result = CURLE_RECV_ERROR;
break;
}
@ -669,7 +669,7 @@ static int uploadstreamed(void *userdata, hyper_context *ctx,
goto out;
}
/* increasing the writebytecount here is a little premature but we
don't know exactly when the body is sent */
do not know exactly when the body is sent */
data->req.writebytecount += fillcount;
Curl_pgrsSetUploadCounter(data, data->req.writebytecount);
rc = HYPER_POLL_READY;
@ -772,7 +772,7 @@ static void http1xx_cb(void *arg, struct hyper_response *resp)
if(!result) {
headers = hyper_response_headers(resp);
if(!headers) {
failf(data, "hyperstream: couldn't get 1xx response headers");
failf(data, "hyperstream: could not get 1xx response headers");
result = CURLE_RECV_ERROR;
}
}

View File

@ -181,8 +181,8 @@ static void h1_tunnel_go_state(struct Curl_cfilter *cf,
data->info.httpcode = 0; /* clear it as it might've been used for the
proxy */
/* If a proxy-authorization header was used for the proxy, then we should
make sure that it isn't accidentally used for the document request
after we've connected. So let's free and clear it here. */
make sure that it is not accidentally used for the document request
after we have connected. So let's free and clear it here. */
Curl_safefree(data->state.aptr.proxyuserpwd);
#ifdef USE_HYPER
data->state.hconnect = FALSE;
@ -221,8 +221,8 @@ static CURLcode start_CONNECT(struct Curl_cfilter *cf,
int http_minor;
CURLcode result;
/* This only happens if we've looped here due to authentication
reasons, and we don't really use the newly cloned URL here
/* This only happens if we have looped here due to authentication
reasons, and we do not really use the newly cloned URL here
then. Just free() it. */
Curl_safefree(data->req.newurl);
@ -421,7 +421,7 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
if(ts->cl) {
/* A Content-Length based body: simply count down the counter
and make sure to break out of the loop when we're done! */
and make sure to break out of the loop when we are done! */
ts->cl--;
if(ts->cl <= 0) {
ts->keepon = KEEPON_DONE;
@ -439,7 +439,7 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
if(result)
return result;
if(Curl_httpchunk_is_done(data, &ts->ch)) {
/* we're done reading chunks! */
/* we are done reading chunks! */
infof(data, "chunk reading DONE");
ts->keepon = KEEPON_DONE;
}
@ -474,7 +474,7 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
if(result)
return result;
/* Newlines are CRLF, so the CR is ignored as the line isn't
/* Newlines are CRLF, so the CR is ignored as the line is not
really terminated until the LF comes. Treat a following CR
as end-of-headers as well.*/
@ -497,7 +497,7 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
}
else {
/* without content-length or chunked encoding, we
can't keep the connection alive since the close is
cannot keep the connection alive since the close is
the end signal so we bail out at once instead */
CURL_TRC_CF(data, cf, "CONNECT: no content-length or chunked");
ts->keepon = KEEPON_DONE;
@ -517,7 +517,7 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
return result;
Curl_dyn_reset(&ts->rcvbuf);
} /* while there's buffer left and loop is requested */
} /* while there is buffer left and loop is requested */
if(error)
result = CURLE_RECV_ERROR;
@ -665,8 +665,8 @@ static CURLcode start_CONNECT(struct Curl_cfilter *cf,
goto error;
}
/* This only happens if we've looped here due to authentication
reasons, and we don't really use the newly cloned URL here
/* This only happens if we have looped here due to authentication
reasons, and we do not really use the newly cloned URL here
then. Just free() it. */
Curl_safefree(data->req.newurl);
@ -954,7 +954,7 @@ static CURLcode H1_CONNECT(struct Curl_cfilter *cf,
DEBUGASSERT(ts->tunnel_state == H1_TUNNEL_RESPONSE);
if(data->info.httpproxycode/100 != 2) {
/* a non-2xx response and we have no next url to try. */
/* a non-2xx response and we have no next URL to try. */
Curl_safefree(data->req.newurl);
/* failure, close this connection to avoid reuse */
streamclose(conn, "proxy CONNECT failure");
@ -1033,9 +1033,9 @@ static void cf_h1_proxy_adjust_pollset(struct Curl_cfilter *cf,
* and not waiting on something, we are tunneling. */
curl_socket_t sock = Curl_conn_cf_get_socket(cf, data);
if(ts) {
/* when we've sent a CONNECT to a proxy, we should rather either
/* when we have sent a CONNECT to a proxy, we should rather either
wait for the socket to become readable to be able to get the
response headers or if we're still sending the request, wait
response headers or if we are still sending the request, wait
for write. */
if(tunnel_want_send(ts))
Curl_pollset_set_out_only(data, ps, sock);

View File

@ -162,8 +162,8 @@ static void h2_tunnel_go_state(struct Curl_cfilter *cf,
CURL_TRC_CF(data, cf, "[%d] new tunnel state 'failed'", ts->stream_id);
ts->state = new_state;
/* If a proxy-authorization header was used for the proxy, then we should
make sure that it isn't accidentally used for the document request
after we've connected. So let's free and clear it here. */
make sure that it is not accidentally used for the document request
after we have connected. So let's free and clear it here. */
Curl_safefree(data->state.aptr.proxyuserpwd);
break;
}
@ -1266,7 +1266,7 @@ static ssize_t h2_handle_tunnel_close(struct Curl_cfilter *cf,
if(ctx->tunnel.error == NGHTTP2_REFUSED_STREAM) {
CURL_TRC_CF(data, cf, "[%d] REFUSED_STREAM, try again on a new "
"connection", ctx->tunnel.stream_id);
connclose(cf->conn, "REFUSED_STREAM"); /* don't use this anymore */
connclose(cf->conn, "REFUSED_STREAM"); /* do not use this anymore */
*err = CURLE_RECV_ERROR; /* trigger Curl_retry_request() later */
return -1;
}
@ -1359,7 +1359,7 @@ static ssize_t cf_h2_proxy_recv(struct Curl_cfilter *cf,
result = proxy_h2_progress_egress(cf, data);
if(result == CURLE_AGAIN) {
/* pending data to send, need to be called again. Ideally, we'd
/* pending data to send, need to be called again. Ideally, we would
* monitor the socket for POLLOUT, but we might not be in SENDING
* transfer state any longer and are unable to make this happen.
*/
@ -1542,8 +1542,8 @@ static bool proxy_h2_connisalive(struct Curl_cfilter *cf,
return FALSE;
if(*input_pending) {
/* This happens before we've sent off a request and the connection is
not in use by any other transfer, there shouldn't be any data here,
/* This happens before we have sent off a request and the connection is
not in use by any other transfer, there should not be any data here,
only "protocol frames" */
CURLcode result;
ssize_t nread = -1;

View File

@ -555,7 +555,7 @@ CURLcode Curl_cf_https_setup(struct Curl_easy *data,
if(data->state.httpwant == CURL_HTTP_VERSION_3ONLY) {
result = Curl_conn_may_http3(data, conn);
if(result) /* can't do it */
if(result) /* cannot do it */
goto out;
try_h3 = TRUE;
try_h21 = FALSE;

View File

@ -394,7 +394,7 @@ CURLcode Curl_socket_open(struct Curl_easy *data,
struct Curl_sockaddr_ex dummy;
if(!addr)
/* if the caller doesn't want info back, use a local temp copy */
/* if the caller does not want info back, use a local temp copy */
addr = &dummy;
Curl_sock_assign_addr(addr, ai, transport);
@ -611,7 +611,7 @@ static CURLcode bindlocal(struct Curl_easy *data, struct connectdata *conn,
*/
if(setsockopt(sockfd, SOL_SOCKET, SO_BINDTODEVICE,
iface, (curl_socklen_t)strlen(iface) + 1) == 0) {
/* This is often "errno 1, error: Operation not permitted" if you're
/* This is often "errno 1, error: Operation not permitted" if you are
* not running as root or another suitable privileged user. If it
* succeeds it means the parameter was a valid interface and not an IP
* address. Return immediately.
@ -818,8 +818,8 @@ static bool verifyconnect(curl_socket_t sockfd, int *error)
* Gisle Vanem could reproduce the former problems with this function, but
* could avoid them by adding this SleepEx() call below:
*
* "I don't have Rational Quantify, but the hint from his post was
* ntdll::NtRemoveIoCompletion(). So I'd assume the SleepEx (or maybe
* "I do not have Rational Quantify, but the hint from his post was
* ntdll::NtRemoveIoCompletion(). I would assume the SleepEx (or maybe
* just Sleep(0) would be enough?) would release whatever
* mutex/critical-section the ntdll call is waiting on.
*
@ -837,14 +837,14 @@ static bool verifyconnect(curl_socket_t sockfd, int *error)
if(0 != getsockopt(sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &errSize))
err = SOCKERRNO;
#ifdef _WIN32_WCE
/* Old WinCE versions don't support SO_ERROR */
/* Old WinCE versions do not support SO_ERROR */
if(WSAENOPROTOOPT == err) {
SET_SOCKERRNO(0);
err = 0;
}
#endif
#if defined(EBADIOCTL) && defined(__minix)
/* Minix 3.1.x doesn't support getsockopt on UDP sockets */
/* Minix 3.1.x does not support getsockopt on UDP sockets */
if(EBADIOCTL == err) {
SET_SOCKERRNO(0);
err = 0;
@ -854,7 +854,7 @@ static bool verifyconnect(curl_socket_t sockfd, int *error)
/* we are connected, awesome! */
rc = TRUE;
else
/* This wasn't a successful connect */
/* This was not a successful connect */
rc = FALSE;
if(error)
*error = err;
@ -1153,8 +1153,8 @@ static CURLcode cf_socket_open(struct Curl_cfilter *cf,
DEBUGASSERT(ctx->sock == CURL_SOCKET_BAD);
ctx->started_at = Curl_now();
#ifdef SOCK_NONBLOCK
/* Don't tuck SOCK_NONBLOCK into socktype when opensocket callback is set
* because we wouldn't know how socketype is about to be used in the
/* Do not tuck SOCK_NONBLOCK into socktype when opensocket callback is set
* because we would not know how socketype is about to be used in the
* callback, SOCK_NONBLOCK might get factored out before calling socket().
*/
if(!data->set.fopensocket)
@ -1878,7 +1878,7 @@ static CURLcode cf_udp_setup_quic(struct Curl_cfilter *cf,
/* Currently, cf->ctx->sock is always non-blocking because the only
* caller to cf_udp_setup_quic() is cf_udp_connect() that passes the
* non-blocking socket created by cf_socket_open() to it. Thus, we
* don't need to call curlx_nonblock() in cf_udp_setup_quic() anymore.
* do not need to call curlx_nonblock() in cf_udp_setup_quic() anymore.
*/
switch(ctx->addr.family) {
#if defined(__linux__) && defined(IP_MTU_DISCOVER)

View File

@ -85,10 +85,10 @@ struct easy_pollset;
* the pollset. Filters, whose filter "below" is not connected, should
* also do no adjustments.
*
* Examples: a TLS handshake, while ongoing, might remove POLL_IN
* when it needs to write, or vice versa. A HTTP/2 filter might remove
* POLL_OUT when a stream window is exhausted and a WINDOW_UPDATE needs
* to be received first and add instead POLL_IN.
* Examples: a TLS handshake, while ongoing, might remove POLL_IN when it
* needs to write, or vice versa. An HTTP/2 filter might remove POLL_OUT when
* a stream window is exhausted and a WINDOW_UPDATE needs to be received first
* and add instead POLL_IN.
*
* @param cf the filter to ask
* @param data the easy handle the pollset is about

View File

@ -657,7 +657,7 @@ static void connc_discard_conn(struct conncache *connc,
* handler belonging to the connection. Protocols like 'file:' rely on
* being invoked to clean up their allocations in the easy handle.
* When a connection comes from the cache, the transfer is no longer
* there and we use the cache's own closure handle.
* there and we use the cache is own closure handle.
*/
struct Curl_easy *data = last_data? last_data : connc->closure_handle;
bool done = FALSE;
@ -667,7 +667,7 @@ static void connc_discard_conn(struct conncache *connc,
DEBUGASSERT(!conn->bundle);
/*
* If this connection isn't marked to force-close, leave it open if there
* If this connection is not marked to force-close, leave it open if there
* are other users of it
*/
if(CONN_INUSE(conn) && !aborted) {
@ -945,7 +945,7 @@ void Curl_conncache_multi_perform(struct Curl_multi *multi)
* disassociated from an easy handle.
*
* This function MUST NOT reset state in the Curl_easy struct if that
* isn't strictly bound to the life-time of *this* particular connection.
* is not strictly bound to the life-time of *this* particular connection.
*
*/
static void connc_disconnect(struct Curl_easy *data,

View File

@ -137,7 +137,7 @@ void Curl_conncache_print(struct conncache *connc);
* Tear down the connection. If `aborted` is FALSE, the connection
* will be shut down first before discarding. If the shutdown
* is not immediately complete, the connection
* will be placed into the cache's shutdown queue.
* will be placed into the cache is shutdown queue.
*/
void Curl_conncache_disconnect(struct Curl_easy *data,
struct connectdata *conn,

View File

@ -90,7 +90,7 @@
/*
* Curl_timeleft() returns the amount of milliseconds left allowed for the
* transfer/connection. If the value is 0, there's no timeout (ie there's
* transfer/connection. If the value is 0, there is no timeout (ie there is
* infinite time left). If the value is negative, the timeout time has already
* elapsed.
* @param data the transfer to check on
@ -360,7 +360,7 @@ void Curl_conncontrol(struct connectdata *conn,
#endif
)
{
/* close if a connection, or a stream that isn't multiplexed. */
/* close if a connection, or a stream that is not multiplexed. */
/* This function will be called both before and after this connection is
associated with a transfer. */
bool closeit, is_multiplex;
@ -508,7 +508,7 @@ static void baller_initiate(struct Curl_cfilter *cf,
CURLcode result;
/* Don't close a previous cfilter yet to ensure that the next IP's
/* Do not close a previous cfilter yet to ensure that the next IP's
socket gets a different file descriptor, which can prevent bugs when
the curl_multi_socket_action interface is used with certain select()
replacements such as kqueue. */
@ -788,7 +788,7 @@ evaluate:
}
/*
* Connect to the given host with timeout, proxy or remote doesn't matter.
* Connect to the given host with timeout, proxy or remote does not matter.
* There might be more than one IP address to try out.
*/
static CURLcode start_connect(struct Curl_cfilter *cf,
@ -1001,7 +1001,7 @@ static CURLcode cf_he_connect(struct Curl_cfilter *cf,
CF_CTRL_CONN_INFO_UPDATE, 0, NULL);
if(cf->conn->handler->protocol & PROTO_FAMILY_SSH)
Curl_pgrsTime(data, TIMER_APPCONNECT); /* we're connected already */
Curl_pgrsTime(data, TIMER_APPCONNECT); /* we are connected already */
Curl_verboseconnect(data, cf->conn, cf->sockindex);
data->info.numconnects++; /* to track the # of connections made */
}

View File

@ -32,7 +32,7 @@
struct Curl_dns_entry;
struct ip_quadruple;
/* generic function that returns how much time there's left to run, according
/* generic function that returns how much time there is left to run, according
to the timeouts set */
timediff_t Curl_timeleft(struct Curl_easy *data,
struct curltime *nowp,
@ -45,7 +45,7 @@ timediff_t Curl_timeleft(struct Curl_easy *data,
void Curl_shutdown_start(struct Curl_easy *data, int sockindex,
struct curltime *nowp);
/* return how much time there's left to shutdown the connection at
/* return how much time there is left to shutdown the connection at
* sockindex. */
timediff_t Curl_shutdown_timeleft(struct connectdata *conn, int sockindex,
struct curltime *nowp);

View File

@ -192,7 +192,7 @@ static CURLcode inflate_stream(struct Curl_easy *data,
zp->zlib_init != ZLIB_GZIP_INFLATING)
return exit_zlib(data, z, &zp->zlib_init, CURLE_WRITE_ERROR);
/* Dynamically allocate a buffer for decompression because it's uncommonly
/* Dynamically allocate a buffer for decompression because it is uncommonly
large to hold on the stack */
decomp = malloc(DSIZ);
if(!decomp)
@ -246,7 +246,7 @@ static CURLcode inflate_stream(struct Curl_easy *data,
to fix and continue anyway */
if(zp->zlib_init == ZLIB_INIT) {
/* Do not use inflateReset2(): only available since zlib 1.2.3.4. */
(void) inflateEnd(z); /* don't care about the return code */
(void) inflateEnd(z); /* do not care about the return code */
if(inflateInit2(z, -MAX_WBITS) == Z_OK) {
z->next_in = orig_in;
z->avail_in = nread;
@ -266,7 +266,7 @@ static CURLcode inflate_stream(struct Curl_easy *data,
}
free(decomp);
/* We're about to leave this call so the `nread' data bytes won't be seen
/* We are about to leave this call so the `nread' data bytes will not be seen
again. If we are in a state that would wrongly allow restart in raw mode
at the next call, assume output has already started. */
if(nread && zp->zlib_init == ZLIB_INIT)
@ -388,7 +388,7 @@ static gzip_status check_gzip_header(unsigned char const *data, ssize_t len,
flags = data[3];
if(method != Z_DEFLATED || (flags & RESERVED) != 0) {
/* Can't handle this compression method or unknown flag */
/* cannot handle this compression method or unknown flag */
return GZIP_BAD;
}
@ -474,10 +474,10 @@ static CURLcode gzip_do_write(struct Curl_easy *data,
return exit_zlib(data, z, &zp->zlib_init, CURLE_WRITE_ERROR);
#else
/* This next mess is to get around the potential case where there isn't
/* This next mess is to get around the potential case where there is not
* enough data passed in to skip over the gzip header. If that happens, we
* malloc a block and copy what we have then wait for the next call. If
* there still isn't enough (this is definitely a worst-case scenario), we
* there still is not enough (this is definitely a worst-case scenario), we
* make the block bigger, copy the next part in and keep waiting.
*
* This is only required with zlib versions < 1.2.0.4 as newer versions
@ -499,11 +499,11 @@ static CURLcode gzip_do_write(struct Curl_easy *data,
break;
case GZIP_UNDERFLOW:
/* We need more data so we can find the end of the gzip header. It's
/* We need more data so we can find the end of the gzip header. it is
* possible that the memory block we malloc here will never be freed if
* the transfer abruptly aborts after this point. Since it's unlikely
* the transfer abruptly aborts after this point. Since it is unlikely
* that circumstances will be right for this code path to be followed in
* the first place, and it's even more unlikely for a transfer to fail
* the first place, and it is even more unlikely for a transfer to fail
* immediately afterwards, it should seldom be a problem.
*/
z->avail_in = (uInt) nbytes;
@ -513,7 +513,7 @@ static CURLcode gzip_do_write(struct Curl_easy *data,
}
memcpy(z->next_in, buf, z->avail_in);
zp->zlib_init = ZLIB_GZIP_HEADER; /* Need more gzip header data state */
/* We don't have any data to inflate yet */
/* We do not have any data to inflate yet */
return CURLE_OK;
case GZIP_BAD:
@ -540,14 +540,14 @@ static CURLcode gzip_do_write(struct Curl_easy *data,
case GZIP_OK:
/* This is the zlib stream data */
free(z->next_in);
/* Don't point into the malloced block since we just freed it */
/* Do not point into the malloced block since we just freed it */
z->next_in = (Bytef *) buf + hlen + nbytes - z->avail_in;
z->avail_in = z->avail_in - (uInt)hlen;
zp->zlib_init = ZLIB_GZIP_INFLATING; /* Inflating stream state */
break;
case GZIP_UNDERFLOW:
/* We still don't have any data to inflate! */
/* We still do not have any data to inflate! */
return CURLE_OK;
case GZIP_BAD:
@ -572,11 +572,11 @@ static CURLcode gzip_do_write(struct Curl_easy *data,
}
if(z->avail_in == 0) {
/* We don't have any data to inflate; wait until next time */
/* We do not have any data to inflate; wait until next time */
return CURLE_OK;
}
/* We've parsed the header, now uncompress the data */
/* We have parsed the header, now uncompress the data */
return inflate_stream(data, writer, type, ZLIB_GZIP_INFLATING);
#endif
}
@ -966,7 +966,7 @@ static const struct Curl_cwtype *find_unencode_writer(const char *name,
return NULL;
}
/* Set-up the unencoding stack from the Content-Encoding header value.
/* Setup the unencoding stack from the Content-Encoding header value.
* See RFC 7231 section 3.1.2.2. */
CURLcode Curl_build_unencoding_stack(struct Curl_easy *data,
const char *enclist, int is_transfer)

View File

@ -61,7 +61,7 @@ struct Cookies *Curl_cookie_getlist(struct CookieInfo *cookie,
boolean informs the cookie if a secure connection is achieved or
not.
It shall only return cookies that haven't expired.
It shall only return cookies that have not expired.
Example set of cookies:
@ -150,7 +150,7 @@ static bool cookie_tailmatch(const char *cookie_domain,
}
/*
* matching cookie path and url path
* matching cookie path and URL path
* RFC6265 5.1.4 Paths and Path-Match
*/
static bool pathmatch(const char *cookie_path, const char *request_uri)
@ -386,11 +386,11 @@ static void remove_expired(struct CookieInfo *cookies)
/*
* If the earliest expiration timestamp in the jar is in the future we can
* skip scanning the whole jar and instead exit early as there won't be any
* cookies to evict. If we need to evict however, reset the next_expiration
* counter in order to track the next one. In case the recorded first
* expiration is the max offset, then perform the safe fallback of checking
* all cookies.
* skip scanning the whole jar and instead exit early as there will not be
* any cookies to evict. If we need to evict however, reset the
* next_expiration counter in order to track the next one. In case the
* recorded first expiration is the max offset, then perform the safe
* fallback of checking all cookies.
*/
if(now < cookies->next_expiration &&
cookies->next_expiration != CURL_OFF_T_MAX)
@ -415,7 +415,7 @@ static void remove_expired(struct CookieInfo *cookies)
}
else {
/*
* If this cookie has an expiration timestamp earlier than what we've
* If this cookie has an expiration timestamp earlier than what we have
* seen so far then record it for the next round of expirations.
*/
if(co->expires && co->expires < cookies->next_expiration)
@ -510,7 +510,7 @@ Curl_cookie_add(struct Curl_easy *data,
/* First, alloc and init a new struct for it */
co = calloc(1, sizeof(struct Cookie));
if(!co)
return NULL; /* bail out if we're this low on memory */
return NULL; /* bail out if we are this low on memory */
if(httpheader) {
/* This line was read off an HTTP-header */
@ -648,7 +648,7 @@ Curl_cookie_add(struct Curl_easy *data,
else if((nlen == 8) && strncasecompare("httponly", namep, 8))
co->httponly = TRUE;
else if(sep)
/* there was a '=' so we're not done parsing this field */
/* there was a '=' so we are not done parsing this field */
done = FALSE;
}
if(done)
@ -682,7 +682,7 @@ Curl_cookie_add(struct Curl_easy *data,
#ifndef USE_LIBPSL
/*
* Without PSL we don't know when the incoming cookie is set on a
* Without PSL we do not know when the incoming cookie is set on a
* TLD or otherwise "protected" suffix. To reduce risk, we require a
* dot OR the exact hostname being "localhost".
*/
@ -781,7 +781,7 @@ Curl_cookie_add(struct Curl_easy *data,
}
/*
* Else, this is the second (or more) name we don't know about!
* Else, this is the second (or more) name we do not know about!
*/
}
else {
@ -836,7 +836,7 @@ Curl_cookie_add(struct Curl_easy *data,
}
/*
* If we didn't get a cookie name, or a bad one, the this is an illegal
* If we did not get a cookie name, or a bad one, the this is an illegal
* line so bail out.
*/
if(badcookie || !co->name) {
@ -869,7 +869,7 @@ Curl_cookie_add(struct Curl_easy *data,
}
if(lineptr[0]=='#') {
/* don't even try the comments */
/* do not even try the comments */
free(co);
return NULL;
}
@ -909,7 +909,7 @@ Curl_cookie_add(struct Curl_easy *data,
case 2:
/* The file format allows the path field to remain not filled in */
if(strcmp("TRUE", ptr) && strcmp("FALSE", ptr)) {
/* only if the path doesn't look like a boolean option! */
/* only if the path does not look like a boolean option! */
co->path = strdup(ptr);
if(!co->path)
badcookie = TRUE;
@ -921,7 +921,7 @@ Curl_cookie_add(struct Curl_easy *data,
}
break;
}
/* this doesn't look like a path, make one up! */
/* this does not look like a path, make one up! */
co->path = strdup("/");
if(!co->path)
badcookie = TRUE;
@ -1004,7 +1004,7 @@ Curl_cookie_add(struct Curl_easy *data,
if(!c->running && /* read from a file */
c->newsession && /* clean session cookies */
!co->expires) { /* this is a session cookie since it doesn't expire! */
!co->expires) { /* this is a session cookie since it does not expire! */
freecookie(co);
return NULL;
}
@ -1025,7 +1025,7 @@ Curl_cookie_add(struct Curl_easy *data,
#ifdef USE_LIBPSL
/*
* Check if the domain is a Public Suffix and if yes, ignore the cookie. We
* must also check that the data handle isn't NULL since the psl code will
* must also check that the data handle is not NULL since the psl code will
* dereference it.
*/
if(data && (domain && co->domain && !Curl_host_is_ipnum(co->domain))) {
@ -1125,10 +1125,10 @@ Curl_cookie_add(struct Curl_easy *data,
if(replace_old && !co->livecookie && clist->livecookie) {
/*
* Both cookies matched fine, except that the already present cookie is
* "live", which means it was set from a header, while the new one was
* read from a file and thus isn't "live". "live" cookies are preferred
* so the new cookie is freed.
* Both cookies matched fine, except that the already present cookie
* is "live", which means it was set from a header, while the new one
* was read from a file and thus is not "live". "live" cookies are
* preferred so the new cookie is freed.
*/
freecookie(co);
return NULL;
@ -1179,7 +1179,7 @@ Curl_cookie_add(struct Curl_easy *data,
}
/*
* Now that we've added a new cookie to the jar, update the expiration
* Now that we have added a new cookie to the jar, update the expiration
* tracker in case it is the next one to expire.
*/
if(co->expires && (co->expires < c->next_expiration))
@ -1212,12 +1212,12 @@ struct CookieInfo *Curl_cookie_init(struct Curl_easy *data,
FILE *handle = NULL;
if(!inc) {
/* we didn't get a struct, create one */
/* we did not get a struct, create one */
c = calloc(1, sizeof(struct CookieInfo));
if(!c)
return NULL; /* failed to get memory */
/*
* Initialize the next_expiration time to signal that we don't have enough
* Initialize the next_expiration time to signal that we do not have enough
* information yet.
*/
c->next_expiration = CURL_OFF_T_MAX;
@ -1272,7 +1272,7 @@ struct CookieInfo *Curl_cookie_init(struct Curl_easy *data,
}
data->state.cookie_engine = TRUE;
}
c->running = TRUE; /* now, we're running */
c->running = TRUE; /* now, we are running */
return c;
}
@ -1368,7 +1368,7 @@ fail:
* should send to the server if used now. The secure boolean informs the cookie
* if a secure connection is achieved or not.
*
* It shall only return cookies that haven't expired.
* It shall only return cookies that have not expired.
*/
struct Cookie *Curl_cookie_getlist(struct Curl_easy *data,
struct CookieInfo *c,
@ -1394,7 +1394,7 @@ struct Cookie *Curl_cookie_getlist(struct Curl_easy *data,
co = c->cookies[myhash];
while(co) {
/* if the cookie requires we're secure we must only continue if we are! */
/* if the cookie requires we are secure we must only continue if we are! */
if(co->secure?secure:TRUE) {
/* now check if the domain is correct */

View File

@ -75,7 +75,7 @@ struct CookieInfo {
/** Limits for INCOMING cookies **/
/* The longest we allow a line to be when reading a cookie from a HTTP header
/* The longest we allow a line to be when reading a cookie from an HTTP header
or from a cookie jar */
#define MAX_COOKIE_LINE 5000

View File

@ -367,7 +367,7 @@ struct namebuff {
/*
* Curl_ip2addr()
*
* This function takes an internet address, in binary form, as input parameter
* This function takes an Internet address, in binary form, as input parameter
* along with its address family and the string version of the address, and it
* returns a Curl_addrinfo chain filled in correctly with information for the
* given address/host
@ -519,7 +519,7 @@ struct Curl_addrinfo *Curl_unix2addr(const char *path, bool *longpath,
*
* This is strictly for memory tracing and are using the same style as the
* family otherwise present in memdebug.c. I put these ones here since they
* require a bunch of structs I didn't want to include in memdebug.c
* require a bunch of structs I did not want to include in memdebug.c
*/
void
@ -543,7 +543,7 @@ curl_dbg_freeaddrinfo(struct addrinfo *freethis,
*
* This is strictly for memory tracing and are using the same style as the
* family otherwise present in memdebug.c. I put these ones here since they
* require a bunch of structs I didn't want to include in memdebug.c
* require a bunch of structs I did not want to include in memdebug.c
*/
int

View File

@ -44,9 +44,9 @@
/*
* Curl_addrinfo is our internal struct definition that we use to allow
* consistent internal handling of this data. We use this even when the
* system provides an addrinfo structure definition. And we use this for
* all sorts of IPv4 and IPV6 builds.
* consistent internal handling of this data. We use this even when the system
* provides an addrinfo structure definition. We use this for all sorts of
* IPv4 and IPV6 builds.
*/
struct Curl_addrinfo {


@ -728,7 +728,7 @@ ${SIZEOF_TIME_T_CODE}
/* if GSASL is in use */
#cmakedefine USE_GSASL 1
/* Define to 1 if you don't want the OpenSSL configuration to be loaded
/* Define to 1 if you do not want the OpenSSL configuration to be loaded
automatically */
#cmakedefine CURL_DISABLE_OPENSSL_AUTO_LOAD_CONFIG 1


@ -36,7 +36,7 @@
* Curl_des_set_odd_parity()
*
* This is used to apply odd parity to the given byte array. It is typically
* used by when a cryptography engine doesn't have its own version.
* used by when a cryptography engine does not have its own version.
*
* The function is a port of the Java based oddParity() function over at:
*
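For reference, "odd parity" here means every byte ends up with an odd number of set bits, using the least significant bit of each byte as the parity bit. A minimal standalone sketch of that idea (illustrative only, not the curl implementation):

static void set_odd_parity_sketch(unsigned char *bytes, size_t len)
{
  size_t i;
  for(i = 0; i < len; i++) {
    unsigned char b = bytes[i];
    int ones = 0;
    int bit;
    for(bit = 1; bit < 8; bit++)  /* count the seven data bits */
      ones += (b >> bit) & 1;
    /* force bit 0 so the total number of set bits in the byte is odd */
    bytes[i] = (unsigned char)((b & 0xFE) | ((ones & 1) ? 0 : 1));
  }
}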


@ -30,7 +30,7 @@
* Curl_read16_le()
*
* This function converts a 16-bit integer from the little endian format, as
* used in the incoming package to whatever endian format we're using
* used in the incoming package to whatever endian format we are using
* natively.
*
* Parameters:
@ -49,7 +49,7 @@ unsigned short Curl_read16_le(const unsigned char *buf)
* Curl_read32_le()
*
* This function converts a 32-bit integer from the little endian format, as
* used in the incoming package to whatever endian format we're using
* used in the incoming package to whatever endian format we are using
* natively.
*
* Parameters:
@ -68,7 +68,7 @@ unsigned int Curl_read32_le(const unsigned char *buf)
* Curl_read16_be()
*
* This function converts a 16-bit integer from the big endian format, as
* used in the incoming package to whatever endian format we're using
* used in the incoming package to whatever endian format we are using
* natively.
*
* Parameters:
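As a point of reference, such byte-order helpers boil down to assembling the bytes in the documented order; a hedged sketch of the 16-bit variants:

/* little endian: least significant byte first in the buffer */
static unsigned short read16_le_sketch(const unsigned char *buf)
{
  return (unsigned short)(buf[0] | (buf[1] << 8));
}

/* big endian: most significant byte first in the buffer */
static unsigned short read16_be_sketch(const unsigned char *buf)
{
  return (unsigned short)((buf[0] << 8) | buf[1]);
}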


@ -49,9 +49,10 @@ char *curlx_convert_wchar_to_UTF8(const wchar_t *str_w);
* Allocated memory should be free'd with curlx_unicodefree().
*
* Note: Because these are curlx functions their memory usage is not tracked
* by the curl memory tracker memdebug. You'll notice that curlx function-like
* macros call free and strdup in parentheses, eg (strdup)(ptr), and that's to
* ensure that the curl memdebug override macros do not replace them.
* by the curl memory tracker memdebug. You will notice that curlx
* function-like macros call free and strdup in parentheses, eg (strdup)(ptr),
* and that is to ensure that the curl memdebug override macros do not replace
* them.
*/
#if defined(UNICODE) && defined(_WIN32)


@ -110,7 +110,7 @@
#elif defined(USE_WIN32_CRYPTO)
# include <wincrypt.h>
#else
# error "Can't compile NTLM support without a crypto library with DES."
# error "cannot compile NTLM support without a crypto library with DES."
# define CURL_NTLM_NOT_SUPPORTED
#endif
@ -150,7 +150,7 @@ static void extend_key_56_to_64(const unsigned char *key_56, char *key)
#if defined(USE_OPENSSL_DES) || defined(USE_WOLFSSL)
/*
* Turns a 56 bit key into the 64 bit, odd parity key and sets the key. The
* Turns a 56-bit key into a 64-bit, odd parity key and sets the key. The
* key schedule ks is also set.
*/
static void setup_des_key(const unsigned char *key_56,
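The 56-bit to 64-bit expansion mentioned above spreads seven key bytes over eight output bytes so that bit 0 of every output byte is left for the DES parity bit; a sketch of that bit shuffling (assumed layout, shown for illustration):

static void extend_key_56_to_64_sketch(const unsigned char *key_56,
                                       unsigned char *key)
{
  key[0] = key_56[0];
  key[1] = (unsigned char)(((key_56[0] << 7) & 0xFF) | (key_56[1] >> 1));
  key[2] = (unsigned char)(((key_56[1] << 6) & 0xFF) | (key_56[2] >> 2));
  key[3] = (unsigned char)(((key_56[2] << 5) & 0xFF) | (key_56[3] >> 3));
  key[4] = (unsigned char)(((key_56[3] << 4) & 0xFF) | (key_56[4] >> 4));
  key[5] = (unsigned char)(((key_56[4] << 3) & 0xFF) | (key_56[5] >> 5));
  key[6] = (unsigned char)(((key_56[5] << 2) & 0xFF) | (key_56[6] >> 6));
  key[7] = (unsigned char)((key_56[6] << 1) & 0xFF);
  /* odd parity is applied to all eight bytes afterwards */
}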


@ -236,7 +236,7 @@ static CURLcode rtmp_connect(struct Curl_easy *data, bool *done)
r->m_sb.sb_socket = (int)conn->sock[FIRSTSOCKET];
/* We have to know if it's a write before we send the
/* We have to know if it is a write before we send the
* connect request packet
*/
if(data->state.upload)


@ -50,7 +50,7 @@
#ifdef _WIN32
/*
* Don't include unneeded stuff in Windows headers to avoid compiler
* Do not include unneeded stuff in Windows headers to avoid compiler
* warnings and macro clashes.
* Make sure to define this macro before including any Windows headers.
*/
@ -308,7 +308,7 @@
/*
* Use getaddrinfo to resolve the IPv4 address literal. If the current network
* interface doesn't support IPv4, but supports IPv6, NAT64, and DNS64,
* interface does not support IPv4, but supports IPv6, NAT64, and DNS64,
* performing this task will result in a synthesized IPv6 address.
*/
#if defined(__APPLE__) && !defined(USE_ARES)
@ -470,7 +470,7 @@
#endif
/*
* Default sizeof(off_t) in case it hasn't been defined in config file.
* Default sizeof(off_t) in case it has not been defined in config file.
*/
#ifndef SIZEOF_OFF_T
@ -537,7 +537,7 @@
#endif
#ifndef SIZE_T_MAX
/* some limits.h headers have this defined, some don't */
/* some limits.h headers have this defined, some do not */
#if defined(SIZEOF_SIZE_T) && (SIZEOF_SIZE_T > 4)
#define SIZE_T_MAX 18446744073709551615U
#else
@ -546,7 +546,7 @@
#endif
#ifndef SSIZE_T_MAX
/* some limits.h headers have this defined, some don't */
/* some limits.h headers have this defined, some do not */
#if defined(SIZEOF_SIZE_T) && (SIZEOF_SIZE_T > 4)
#define SSIZE_T_MAX 9223372036854775807
#else
@ -555,7 +555,7 @@
#endif
/*
* Arg 2 type for gethostname in case it hasn't been defined in config file.
* Arg 2 type for gethostname in case it has not been defined in config file.
*/
#ifndef GETHOSTNAME_TYPE_ARG2
@ -770,7 +770,7 @@
#endif
/*
* shutdown() flags for systems that don't define them
* shutdown() flags for systems that do not define them
*/
#ifndef SHUT_RD
@ -818,7 +818,7 @@ endings either CRLF or LF so 't' is appropriate.
#define FOPEN_APPENDTEXT "a"
#endif
/* for systems that don't detect this in configure */
/* for systems that do not detect this in configure */
#ifndef CURL_SA_FAMILY_T
# if defined(HAVE_SA_FAMILY_T)
# define CURL_SA_FAMILY_T sa_family_t


@ -106,7 +106,7 @@
#endif
/*
* Definition of timeval struct for platforms that don't have it.
* Definition of timeval struct for platforms that do not have it.
*/
#ifndef HAVE_STRUCT_TIMEVAL
@ -130,7 +130,7 @@ struct timeval {
#if defined(__minix)
/* Minix doesn't support recv on TCP sockets */
/* Minix does not support recv on TCP sockets */
#define sread(x,y,z) (ssize_t)read((RECV_TYPE_ARG1)(x), \
(RECV_TYPE_ARG2)(y), \
(RECV_TYPE_ARG3)(z))
@ -143,7 +143,7 @@ struct timeval {
*
* HAVE_RECV is defined if you have a function named recv()
* which is used to read incoming data from sockets. If your
* function has another name then don't define HAVE_RECV.
* function has another name then do not define HAVE_RECV.
*
* If HAVE_RECV is defined then RECV_TYPE_ARG1, RECV_TYPE_ARG2,
* RECV_TYPE_ARG3, RECV_TYPE_ARG4 and RECV_TYPE_RETV must also
@ -151,7 +151,7 @@ struct timeval {
*
* HAVE_SEND is defined if you have a function named send()
* which is used to write outgoing data on a connected socket.
* If yours has another name then don't define HAVE_SEND.
* If yours has another name then do not define HAVE_SEND.
*
* If HAVE_SEND is defined then SEND_TYPE_ARG1, SEND_QUAL_ARG2,
* SEND_TYPE_ARG2, SEND_TYPE_ARG3, SEND_TYPE_ARG4 and
@ -170,7 +170,7 @@ struct timeval {
#if defined(__minix)
/* Minix doesn't support send on TCP sockets */
/* Minix does not support send on TCP sockets */
#define swrite(x,y,z) (ssize_t)write((SEND_TYPE_ARG1)(x), \
(SEND_TYPE_ARG2)(y), \
(SEND_TYPE_ARG3)(z))
@ -226,7 +226,7 @@ struct timeval {
/*
* 'bool' exists on platforms with <stdbool.h>, i.e. C99 platforms.
* On non-C99 platforms there's no bool, so define an enum for that.
* On non-C99 platforms there is no bool, so define an enum for that.
* On C99 platforms 'false' and 'true' also exist. Enum uses a
* global namespace though, so use bool_false and bool_true.
*/


@ -270,9 +270,9 @@ Curl_sha512_256_finish(unsigned char *digest,
* ** written by Evgeny Grin (Karlson2k) for GNU libmicrohttpd. ** *
* ** The author ported the code to libcurl. The ported code is provided ** *
* ** under curl license. ** *
* ** This is a minimal version with minimal optimisations. Performance ** *
* ** This is a minimal version with minimal optimizations. Performance ** *
* ** can be significantly improved. Big-endian store and load macros ** *
* ** are obvious targets for optimisation. ** */
* ** are obvious targets for optimization. ** */
#ifdef __GNUC__
# if defined(__has_attribute) && defined(__STDC_VERSION__)
@ -328,7 +328,7 @@ MHDx_rotr64(curl_uint64_t value, unsigned int bits)
bits %= 64;
if(0 == bits)
return value;
/* Defined in a form which modern compiler could optimise. */
/* Defined in a form which modern compiler could optimize. */
return (value >> bits) | (value << (64 - bits));
}
@ -474,7 +474,7 @@ MHDx_sha512_256_transform(curl_uint64_t H[SHA512_256_HASH_SIZE_WORDS],
See FIPS PUB 180-4 section 5.2.2, 6.7, 6.4. */
curl_uint64_t W[16];
/* 'Ch' and 'Maj' macro functions are defined with widely-used optimisation.
/* 'Ch' and 'Maj' macro functions are defined with widely-used optimization.
See FIPS PUB 180-4 formulae 4.8, 4.9. */
#define Sha512_Ch(x,y,z) ( (z) ^ ((x) & ((y) ^ (z))) )
#define Sha512_Maj(x,y,z) ( ((x) & (y)) ^ ((z) & ((x) ^ (y))) )


@ -228,8 +228,8 @@ static CURLcode cw_out_ptr_flush(struct cw_out_ctx *ctx,
if(CURL_WRITEFUNC_PAUSE == nwritten) {
if(data->conn && data->conn->handler->flags & PROTOPT_NONETWORK) {
/* Protocols that work without network cannot be paused. This is
actually only FILE:// just now, and it can't pause since the
transfer isn't done using the "normal" procedure. */
actually only FILE:// just now, and it cannot pause since the
transfer is not done using the "normal" procedure. */
failf(data, "Write callback asked for PAUSE when not supported");
return CURLE_WRITE_ERROR;
}


@ -455,9 +455,9 @@ struct Curl_addrinfo *Curl_doh(struct Curl_easy *data,
* TODO: Figure out the conditions under which we want to make
* a request for an HTTPS RR when we are not doing ECH. For now,
* making this request breaks a bunch of DoH tests, e.g. test2100,
* where the additional request doesn't match the pre-cooked data
* files, so there's a bit of work attached to making the request
* in a non-ECH use-case. For the present, we'll only make the
* where the additional request does not match the pre-cooked data
* files, so there is a bit of work attached to making the request
* in a non-ECH use-case. For the present, we will only make the
* request when ECH is enabled in the build and is being used for
* the curl operation.
*/
@ -531,7 +531,7 @@ static unsigned int get32bit(const unsigned char *doh, unsigned int index)
/* avoid undefined behavior by casting to unsigned before shifting
24 bits, possibly into the sign bit. codegen is same, but
ub sanitizer won't be upset */
ub sanitizer will not be upset */
return ((unsigned)doh[0] << 24) | ((unsigned)doh[1] << 16) |
((unsigned)doh[2] << 8) | doh[3];
}
@ -923,7 +923,7 @@ static CURLcode doh2ai(const struct dohentry *de, const char *hostname,
CURL_SA_FAMILY_T addrtype;
if(de->addr[i].type == DNS_TYPE_AAAA) {
#ifndef USE_IPV6
/* we can't handle IPv6 addresses */
/* we cannot handle IPv6 addresses */
continue;
#else
ss_size = sizeof(struct sockaddr_in6);
@ -1046,7 +1046,7 @@ UNITTEST void de_cleanup(struct dohentry *d)
*
* The input buffer pointer will be modified so it points to
* just after the end of the DNS name encoding on output. (And
* that's why it's an "unsigned char **" :-)
* that is why it is an "unsigned char **" :-)
*/
static CURLcode local_decode_rdata_name(unsigned char **buf, size_t *remaining,
char **dnsname)
@ -1105,7 +1105,7 @@ static CURLcode local_decode_rdata_alpn(unsigned char *rrval, size_t len,
* output is comma-sep list of the strings
* implementations may or may not handle quoting of comma within
* string values, so we might see a comma within the wire format
* version of a string, in which case we'll precede that by a
* version of a string, in which case we will precede that by a
* backslash - same goes for a backslash character, and of course
* we need to use two backslashes in strings when we mean one;-)
*/
@ -1154,7 +1154,7 @@ err:
#ifdef DEBUGBUILD
static CURLcode test_alpn_escapes(void)
{
/* we'll use an example from draft-ietf-dnsop-svcb, figure 10 */
/* we will use an example from draft-ietf-dnsop-svcb, figure 10 */
static unsigned char example[] = {
0x08, /* length 8 */
0x66, 0x5c, 0x6f, 0x6f, 0x2c, 0x62, 0x61, 0x72, /* value "f\\oo,bar" */
@ -1185,7 +1185,7 @@ static CURLcode Curl_doh_decode_httpsrr(unsigned char *rrval, size_t len,
char *dnsname = NULL;
#ifdef DEBUGBUILD
/* a few tests of escaping, shouldn't be here but ok for now */
/* a few tests of escaping, should not be here but ok for now */
if(test_alpn_escapes() != CURLE_OK)
return CURLE_OUT_OF_MEMORY;
#endif
@ -1349,7 +1349,7 @@ CURLcode Curl_doh_is_resolved(struct Curl_easy *data,
if(Curl_trc_ft_is_verbose(data, &Curl_doh_trc)) {
infof(data, "[DoH] Host name: %s", dohp->host);
infof(data, "[DoH] hostname: %s", dohp->host);
showdoh(data, &de);
}


@ -51,7 +51,7 @@ void Curl_dyn_init(struct dynbuf *s, size_t toobig)
}
/*
* free the buffer and re-init the necessary fields. It doesn't touch the
* free the buffer and re-init the necessary fields. It does not touch the
* 'init' field and thus this buffer can be reused to add data to again.
*/
void Curl_dyn_free(struct dynbuf *s)
@ -71,7 +71,7 @@ static CURLcode dyn_nappend(struct dynbuf *s,
size_t a = s->allc;
size_t fit = len + indx + 1; /* new string + old string + zero byte */
/* try to detect if there's rubbish in the struct */
/* try to detect if there is rubbish in the struct */
DEBUGASSERT(s->init == DYNINIT);
DEBUGASSERT(s->toobig);
DEBUGASSERT(indx < s->toobig);
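A rough usage sketch of the dynbuf API discussed in these comments, assuming the init/add/ptr/len/free calls declared in dynbuf.h (fragment, not verbatim curl code):

#include "dynbuf.h"  /* curl-internal header */

static CURLcode dynbuf_demo(void)
{
  struct dynbuf buf;
  CURLcode result;

  Curl_dyn_init(&buf, 1024);     /* second argument caps the total size */
  result = Curl_dyn_add(&buf, "hello, ");
  if(!result)
    result = Curl_dyn_add(&buf, "world");
  if(!result) {
    /* Curl_dyn_ptr() returns a null-terminated view of the content */
    const char *s = Curl_dyn_ptr(&buf);
    size_t n = Curl_dyn_len(&buf);
    (void)s;
    (void)n;
  }
  Curl_dyn_free(&buf);           /* the struct can then be reused */
  return result;
}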


@ -155,14 +155,14 @@ CURLcode Curl_dynhds_cadd(struct dynhds *dynhds,
const char *name, const char *value);
/**
* Add a single header from a HTTP/1.1 formatted line at the end. Line
* Add a single header from an HTTP/1.1 formatted line at the end. Line
* may contain a delimiting \r\n or just \n. Any characters after
* that will be ignored.
*/
CURLcode Curl_dynhds_h1_cadd_line(struct dynhds *dynhds, const char *line);
/**
* Add a single header from a HTTP/1.1 formatted line at the end. Line
* Add a single header from an HTTP/1.1 formatted line at the end. Line
* may contain a delimiting \r\n or just \n. Any characters after
* that will be ignored.
*/


@ -242,7 +242,7 @@ CURLcode curl_global_init_mem(long flags, curl_malloc_callback m,
global_init_lock();
if(initialized) {
/* Already initialized, don't do it again, but bump the variable anyway to
/* Already initialized, do not do it again, but bump the variable anyway to
work like curl_global_init() and require the same amount of cleanup
calls. */
initialized++;
@ -268,7 +268,8 @@ CURLcode curl_global_init_mem(long flags, curl_malloc_callback m,
/**
* curl_global_cleanup() globally cleanups curl, uses the value of
* "easy_init_flags" to determine what needs to be cleaned up and what doesn't.
* "easy_init_flags" to determine what needs to be cleaned up and what does
* not.
*/
void curl_global_cleanup(void)
{
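The counting behavior described above means that every successful curl_global_init() must eventually be matched by one curl_global_cleanup(); a minimal application-side sketch:

#include <curl/curl.h>

int main(void)
{
  if(curl_global_init(CURL_GLOBAL_DEFAULT))
    return 1;                /* global initialization failed */

  /* ... create handles and run transfers here ... */

  curl_global_cleanup();     /* one cleanup per successful init */
  return 0;
}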
@ -627,7 +628,7 @@ static CURLcode wait_or_timeout(struct Curl_multi *multi, struct events *ev)
if(mcode)
return CURLE_URL_MALFORMAT;
/* we don't really care about the "msgs_in_queue" value returned in the
/* we do not really care about the "msgs_in_queue" value returned in the
second argument */
msg = curl_multi_info_read(multi, &pollrc);
if(msg) {
@ -656,7 +657,7 @@ static CURLcode easy_events(struct Curl_multi *multi)
return wait_or_timeout(multi, &evs);
}
#else /* DEBUGBUILD */
/* when not built with debug, this function doesn't exist */
/* when not built with debug, this function does not exist */
#define easy_events(x) CURLE_NOT_BUILT_IN
#endif
@ -706,7 +707,7 @@ static CURLcode easy_transfer(struct Curl_multi *multi)
* easy handle, destroys the multi handle and returns the easy handle's return
* code.
*
* REALITY: it can't just create and destroy the multi handle that easily. It
* REALITY: it cannot just create and destroy the multi handle that easily. It
* needs to keep it around since if this easy handle is used again by this
* function, the same multi handle must be reused so that the same pools and
* caches can be used.
@ -768,7 +769,7 @@ static CURLcode easy_perform(struct Curl_easy *data, bool events)
/* run the transfer */
result = events ? easy_events(multi) : easy_transfer(multi);
/* ignoring the return code isn't nice, but atm we can't really handle
/* ignoring the return code is not nice, but atm we cannot really handle
a failure here, room for future improvement! */
(void)curl_multi_remove_handle(multi, data);
@ -1090,7 +1091,7 @@ CURLcode curl_easy_pause(struct Curl_easy *data, int action)
bool keep_changed, unpause_read, not_all_paused;
if(!GOOD_EASY_HANDLE(data) || !data->conn)
/* crazy input, don't continue */
/* crazy input, do not continue */
return CURLE_BAD_FUNCTION_ARGUMENT;
if(Curl_is_in_callback(data))


@ -42,7 +42,7 @@ static struct curl_easyoption *lookup(const char *name, CURLoption id)
}
else {
if((o->id == id) && !(o->flags & CURLOT_FLAG_ALIAS))
/* don't match alias options */
/* do not match alias options */
return o;
}
o++;


@ -177,11 +177,11 @@ static CURLcode file_connect(struct Curl_easy *data, bool *done)
return result;
#ifdef DOS_FILESYSTEM
/* If the first character is a slash, and there's
/* If the first character is a slash, and there is
something that looks like a drive at the beginning of
the path, skip the slash. If we remove the initial
slash in all cases, paths without drive letters end up
relative to the current directory which isn't how
relative to the current directory which is not how
browsers work.
Some browsers accept | instead of : as the drive letter
@ -308,7 +308,7 @@ static CURLcode file_upload(struct Curl_easy *data)
bool eos = FALSE;
/*
* Since FILE: doesn't do the full init, we need to provide some extra
* Since FILE: does not do the full init, we need to provide some extra
* assignments here.
*/
@ -331,7 +331,7 @@ static CURLcode file_upload(struct Curl_easy *data)
fd = open(file->path, mode, data->set.new_file_perms);
if(fd < 0) {
failf(data, "Can't open %s for writing", file->path);
failf(data, "cannot open %s for writing", file->path);
return CURLE_WRITE_ERROR;
}
@ -343,7 +343,7 @@ static CURLcode file_upload(struct Curl_easy *data)
if(data->state.resume_from < 0) {
if(fstat(fd, &file_stat)) {
close(fd);
failf(data, "Can't get the size of %s", file->path);
failf(data, "cannot get the size of %s", file->path);
return CURLE_WRITE_ERROR;
}
data->state.resume_from = (curl_off_t)file_stat.st_size;
@ -413,7 +413,7 @@ out:
* file_do() is the protocol-specific function for the do-phase, separated
* from the connect-phase above. Other protocols merely setup the transfer in
* the do-phase, to have it done in the main transfer loop but since some
* platforms we support don't allow select()ing etc on file handles (as
* platforms we support do not allow select()ing etc on file handles (as
* opposed to sockets) we instead perform the whole do-operation in this
* function.
*/
@ -518,7 +518,7 @@ static CURLcode file_do(struct Curl_easy *data, bool *done)
* of the stream if the filesize could be determined */
if(data->state.resume_from < 0) {
if(!fstated) {
failf(data, "Can't get the size of file.");
failf(data, "cannot get the size of file.");
return CURLE_READ_ERROR;
}
data->state.resume_from += (curl_off_t)statbuf.st_size;
@ -526,7 +526,7 @@ static CURLcode file_do(struct Curl_easy *data, bool *done)
if(data->state.resume_from > 0) {
/* We check explicitly if we have a start offset, because
* expected_size may be -1 if we don't know how large the file is,
* expected_size may be -1 if we do not know how large the file is,
* in which case we should not adjust it. */
if(data->state.resume_from <= expected_size)
expected_size -= data->state.resume_from;
@ -570,7 +570,7 @@ static CURLcode file_do(struct Curl_easy *data, bool *done)
if(!S_ISDIR(statbuf.st_mode)) {
while(!result) {
ssize_t nread;
/* Don't fill a whole buffer if we want less than all data */
/* Do not fill a whole buffer if we want less than all data */
size_t bytestoread;
if(size_known) {


@ -216,8 +216,8 @@ CURLFORMcode FormAdd(struct curl_httppost **httppost,
struct curl_forms *forms = NULL;
char *array_value = NULL; /* value read from an array */
/* This is a state variable, that if TRUE means that we're parsing an
array that we got passed to us. If FALSE we're parsing the input
/* This is a state variable, that if TRUE means that we are parsing an
array that we got passed to us. If FALSE we are parsing the input
va_list arguments. */
bool array_state = FALSE;
@ -260,7 +260,7 @@ CURLFORMcode FormAdd(struct curl_httppost **httppost,
switch(option) {
case CURLFORM_ARRAY:
if(array_state)
/* we don't support an array from within an array */
/* we do not support an array from within an array */
return_value = CURL_FORMADD_ILLEGAL_ARRAY;
else {
forms = va_arg(params, struct curl_forms *);
@ -429,7 +429,7 @@ CURLFORMcode FormAdd(struct curl_httppost **httppost,
array_state?array_value:va_arg(params, char *);
if(userp) {
current_form->userp = userp;
current_form->value = userp; /* this isn't strictly true but we
current_form->value = userp; /* this is not strictly true but we
derive a value from this later on
and we need this non-NULL to be
accepted as a fine form part */
@ -599,7 +599,7 @@ CURLFORMcode FormAdd(struct curl_httppost **httppost,
}
if(!(form->flags & HTTPPOST_PTRNAME) &&
(form == first_form) ) {
/* Note that there's small risk that form->name is NULL here if the
/* Note that there is small risk that form->name is NULL here if the
app passed in a bad combo, so we better check for that first. */
if(form->name) {
/* copy name (without strdup; possibly not null-terminated) */
@ -880,7 +880,7 @@ CURLcode Curl_getformdata(struct Curl_easy *data,
if(post->flags & (HTTPPOST_FILENAME | HTTPPOST_READFILE)) {
if(!strcmp(file->contents, "-")) {
/* There are a few cases where the code below won't work; in
/* There are a few cases where the code below will not work; in
particular, freopen(stdin) by the caller is not guaranteed
to result as expected. This feature has been kept for backward
compatibility: use of "-" pseudo filename should be avoided. */

lib/ftp.c

@ -648,7 +648,7 @@ static CURLcode InitiateTransfer(struct Curl_easy *data)
return result;
if(conn->proto.ftpc.state_saved == FTP_STOR) {
/* When we know we're uploading a specified file, we can get the file
/* When we know we are uploading a specified file, we can get the file
size prior to the actual upload. */
Curl_pgrsSetUploadSize(data, data->state.infilesize);
@ -673,7 +673,7 @@ static CURLcode InitiateTransfer(struct Curl_easy *data)
*
* AllowServerConnect()
*
* When we've issue the PORT command, we have told the server to connect to
* When we have issue the PORT command, we have told the server to connect to
* us. This function checks whether data connection is established if so it is
* accepted.
*
@ -805,7 +805,7 @@ CURLcode Curl_GetFTPResponse(struct Curl_easy *data,
{
/*
* We cannot read just one byte per read() and then go back to select() as
* the OpenSSL read() doesn't grok that properly.
* the OpenSSL read() does not grok that properly.
*
* Alas, read as much as possible, split up into lines, use the ending
* line in a response or continue reading. */
@ -848,16 +848,16 @@ CURLcode Curl_GetFTPResponse(struct Curl_easy *data,
*
* A caution here is that the ftp_readresp() function has a cache that may
* contain pieces of a response from the previous invoke and we need to
* make sure we don't just wait for input while there is unhandled data in
* make sure we do not just wait for input while there is unhandled data in
* that cache. But also, if the cache is there, we call ftp_readresp() and
* the cache wasn't good enough to continue we must not just busy-loop
* the cache was not good enough to continue we must not just busy-loop
* around this function.
*
*/
if(Curl_dyn_len(&pp->recvbuf) && (cache_skip < 2)) {
/*
* There's a cache left since before. We then skipping the wait for
* There is a cache left since before. We then skipping the wait for
* socket action, unless this is the same cache like the previous round
* as then the cache was deemed not enough to act on and we then need to
* wait for more data anyway.
@ -894,7 +894,7 @@ CURLcode Curl_GetFTPResponse(struct Curl_easy *data,
*nreadp += nread;
} /* while there's buffer left and loop is requested */
} /* while there is buffer left and loop is requested */
pp->pending_resp = FALSE;
@ -947,7 +947,7 @@ static int ftp_domore_getsock(struct Curl_easy *data,
CURL_TRC_FTP(data, "[%s] ftp_domore_getsock()", FTP_DSTATE(data));
if(FTP_STOP == ftpc->state) {
/* if stopped and still in this state, then we're also waiting for a
/* if stopped and still in this state, then we are also waiting for a
connect on the secondary connection */
DEBUGASSERT(conn->sock[SECONDARYSOCKET] != CURL_SOCKET_BAD ||
(conn->cfilter[SECONDARYSOCKET] &&
@ -1244,7 +1244,7 @@ static CURLcode ftp_state_use_port(struct Curl_easy *data,
goto out;
}
port = port_min;
possibly_non_local = FALSE; /* don't try this again */
possibly_non_local = FALSE; /* do not try this again */
continue;
}
if(error != EADDRINUSE && error != EACCES) {
@ -1443,7 +1443,7 @@ static CURLcode ftp_state_prepare_transfer(struct Curl_easy *data)
struct connectdata *conn = data->conn;
if(ftp->transfer != PPTRANSFER_BODY) {
/* doesn't transfer any data */
/* does not transfer any data */
/* still possibly do PRE QUOTE jobs */
ftp_state(data, FTP_RETR_PREQUOTE);
@ -1589,13 +1589,13 @@ static CURLcode ftp_state_list(struct Curl_easy *data)
static CURLcode ftp_state_retr_prequote(struct Curl_easy *data)
{
/* We've sent the TYPE, now we must send the list of prequote strings */
/* We have sent the TYPE, now we must send the list of prequote strings */
return ftp_state_quote(data, TRUE, FTP_RETR_PREQUOTE);
}
static CURLcode ftp_state_stor_prequote(struct Curl_easy *data)
{
/* We've sent the TYPE, now we must send the list of prequote strings */
/* We have sent the TYPE, now we must send the list of prequote strings */
return ftp_state_quote(data, TRUE, FTP_STOR_PREQUOTE);
}
@ -1607,7 +1607,7 @@ static CURLcode ftp_state_type(struct Curl_easy *data)
struct ftp_conn *ftpc = &conn->proto.ftpc;
/* If we have selected NOBODY and HEADER, it means that we only want file
information. Which in FTP can't be much more than the file size and
information. Which in FTP cannot be much more than the file size and
date. */
if(data->req.no_body && ftpc->file &&
ftp_need_type(conn, data->state.prefer_ascii)) {
@ -1667,13 +1667,13 @@ static CURLcode ftp_state_ul_setup(struct Curl_easy *data,
if((data->state.resume_from && !sizechecked) ||
((data->state.resume_from > 0) && sizechecked)) {
/* we're about to continue the uploading of a file */
/* we are about to continue the uploading of a file */
/* 1. get already existing file's size. We use the SIZE command for this
which may not exist in the server! The SIZE command is not in
RFC959. */
/* 2. This used to set REST. But since we can do append, we
don't another ftp command. We just skip the source file
do not another ftp command. We just skip the source file
offset and then we APPEND the rest on the file instead */
/* 3. pass file-size number of bytes in the source file */
@ -1706,7 +1706,7 @@ static CURLcode ftp_state_ul_setup(struct Curl_easy *data,
failf(data, "Could not seek stream");
return CURLE_FTP_COULDNT_USE_REST;
}
/* seekerr == CURL_SEEKFUNC_CANTSEEK (can't seek to offset) */
/* seekerr == CURL_SEEKFUNC_CANTSEEK (cannot seek to offset) */
do {
char scratch[4*1024];
size_t readthisamountnow =
@ -1737,15 +1737,15 @@ static CURLcode ftp_state_ul_setup(struct Curl_easy *data,
/* no data to transfer */
Curl_xfer_setup_nop(data);
/* Set ->transfer so that we won't get any error in
* ftp_done() because we didn't transfer anything! */
/* Set ->transfer so that we will not get any error in
* ftp_done() because we did not transfer anything! */
ftp->transfer = PPTRANSFER_NONE;
ftp_state(data, FTP_STOP);
return CURLE_OK;
}
}
/* we've passed, proceed as normal */
/* we have passed, proceed as normal */
} /* resume_from */
result = Curl_pp_sendf(data, &ftpc->pp, append?"APPE %s":"STOR %s",
@ -1843,7 +1843,7 @@ static CURLcode ftp_state_quote(struct Curl_easy *data,
behavior.
In addition: asking for the size for 'TYPE A' transfers is not
constructive since servers don't report the converted size. So
constructive since servers do not report the converted size. So
skip it.
*/
result = Curl_pp_sendf(data, &ftpc->pp, "RETR %s", ftpc->file);
@ -1881,7 +1881,7 @@ static CURLcode ftp_epsv_disable(struct Curl_easy *data,
&& !(conn->bits.tunnel_proxy || conn->bits.socksproxy)
#endif
) {
/* We can't disable EPSV when doing IPv6, so this is instead a fail */
/* We cannot disable EPSV when doing IPv6, so this is instead a fail */
failf(data, "Failed EPSV attempt, exiting");
return CURLE_WEIRD_SERVER_REPLY;
}
@ -2045,7 +2045,7 @@ static CURLcode ftp_state_pasv_resp(struct Curl_easy *data,
if(conn->bits.proxy) {
/*
* This connection uses a proxy and we need to connect to the proxy again
* here. We don't want to rely on a former host lookup that might've
* here. We do not want to rely on a former host lookup that might've
* expired now, instead we remake the lookup here and now!
*/
const char * const host_name = conn->bits.socksproxy ?
@ -2060,7 +2060,7 @@ static CURLcode ftp_state_pasv_resp(struct Curl_easy *data,
connectport = (unsigned short)conn->primary.remote_port;
if(!addr) {
failf(data, "Can't resolve proxy host %s:%hu", host_name, connectport);
failf(data, "cannot resolve proxy host %s:%hu", host_name, connectport);
return CURLE_COULDNT_RESOLVE_PROXY;
}
}
@ -2087,7 +2087,8 @@ static CURLcode ftp_state_pasv_resp(struct Curl_easy *data,
connectport = ftpc->newport; /* we connect to the remote port */
if(!addr) {
failf(data, "Can't resolve new host %s:%hu", ftpc->newhost, connectport);
failf(data, "cannot resolve new host %s:%hu",
ftpc->newhost, connectport);
return CURLE_FTP_CANT_GET_HOST;
}
}
@ -2097,7 +2098,7 @@ static CURLcode ftp_state_pasv_resp(struct Curl_easy *data,
CURL_CF_SSL_ENABLE : CURL_CF_SSL_DISABLE);
if(result) {
Curl_resolv_unlock(data, addr); /* we're done using this address */
Curl_resolv_unlock(data, addr); /* we are done using this address */
if(ftpc->count1 == 0 && ftpcode == 229)
return ftp_epsv_disable(data, conn);
@ -2115,7 +2116,7 @@ static CURLcode ftp_state_pasv_resp(struct Curl_easy *data,
/* this just dumps information about this second connection */
ftp_pasv_verbose(data, addr->addr, ftpc->newhost, connectport);
Curl_resolv_unlock(data, addr); /* we're done using this address */
Curl_resolv_unlock(data, addr); /* we are done using this address */
Curl_safefree(conn->secondaryhostname);
conn->secondary_port = ftpc->newport;
@ -2203,7 +2204,7 @@ static CURLcode client_write_header(struct Curl_easy *data,
* call to Curl_client_write() so it does the right thing.
*
* Notice that we cannot enable this flag for FTP in general,
* as an FTP transfer might involve a HTTP proxy connection and
* as an FTP transfer might involve an HTTP proxy connection and
* headers from CONNECT should not automatically be part of the
* output. */
CURLcode result;
@ -2370,17 +2371,17 @@ static CURLcode ftp_state_retr(struct Curl_easy *data,
/* We always (attempt to) get the size of downloads, so it is done before
this even when not doing resumes. */
if(filesize == -1) {
infof(data, "ftp server doesn't support SIZE");
/* We couldn't get the size and therefore we can't know if there really
infof(data, "ftp server does not support SIZE");
/* We could not get the size and therefore we cannot know if there really
is a part of the file left to get, although the server will just
close the connection when we start the connection so it won't cause
close the connection when we start the connection so it will not cause
us any harm, just not make us exit as nicely. */
}
else {
/* We got a file size report, so we check that there actually is a
part of the file left to get, or else we go home. */
if(data->state.resume_from< 0) {
/* We're supposed to download the last abs(from) bytes */
/* We are supposed to download the last abs(from) bytes */
if(filesize < -data->state.resume_from) {
failf(data, "Offset (%" CURL_FORMAT_CURL_OFF_T
") was beyond file size (%" CURL_FORMAT_CURL_OFF_T ")",
@ -2409,8 +2410,8 @@ static CURLcode ftp_state_retr(struct Curl_easy *data,
Curl_xfer_setup_nop(data);
infof(data, "File already completely downloaded");
/* Set ->transfer so that we won't get any error in ftp_done()
* because we didn't transfer the any file */
/* Set ->transfer so that we will not get any error in ftp_done()
* because we did not transfer the any file */
ftp->transfer = PPTRANSFER_NONE;
ftp_state(data, FTP_STOP);
return CURLE_OK;
@ -2618,7 +2619,7 @@ static CURLcode ftp_state_get_resp(struct Curl_easy *data,
!data->set.ignorecl &&
(ftp->downloadsize < 1)) {
/*
* It seems directory listings either don't show the size or very
* It seems directory listings either do not show the size or very
* often uses size 0 anyway. ASCII transfers may very well turn out
* that the transferred amount of data is not the same as this line
* tells, why using this number in those cases only confuses us.
@ -2689,7 +2690,7 @@ static CURLcode ftp_state_get_resp(struct Curl_easy *data,
else {
if((instate == FTP_LIST) && (ftpcode == 450)) {
/* simply no matching files in the dir listing */
ftp->transfer = PPTRANSFER_NONE; /* don't download anything */
ftp->transfer = PPTRANSFER_NONE; /* do not download anything */
ftp_state(data, FTP_STOP); /* this phase is over */
}
else {
@ -2862,7 +2863,7 @@ static CURLcode ftp_statemachine(struct Curl_easy *data,
#endif
if(data->set.use_ssl && !conn->bits.ftp_use_control_ssl) {
/* We don't have a SSL/TLS control connection yet, but FTPS is
/* We do not have a SSL/TLS control connection yet, but FTPS is
requested. Try a FTPS connection now */
ftpc->count3 = 0;
@ -2879,7 +2880,7 @@ static CURLcode ftp_statemachine(struct Curl_easy *data,
default:
failf(data, "unsupported parameter to CURLOPT_FTPSSLAUTH: %d",
(int)data->set.ftpsslauth);
return CURLE_UNKNOWN_OPTION; /* we don't know what to do */
return CURLE_UNKNOWN_OPTION; /* we do not know what to do */
}
result = Curl_pp_sendf(data, &ftpc->pp, "AUTH %s",
ftpauth[ftpc->count1]);
@ -3074,7 +3075,7 @@ static CURLcode ftp_statemachine(struct Curl_easy *data,
data->state.most_recent_ftp_entrypath = ftpc->entrypath;
}
else {
/* couldn't get the path */
/* could not get the path */
Curl_dyn_free(&out);
infof(data, "Failed to figure out path");
}
@ -3173,7 +3174,7 @@ static CURLcode ftp_statemachine(struct Curl_easy *data,
else {
/* return failure */
failf(data, "Server denied you to change to the given directory");
ftpc->cwdfail = TRUE; /* don't remember this path as we failed
ftpc->cwdfail = TRUE; /* do not remember this path as we failed
to enter it */
result = CURLE_REMOTE_ACCESS_DENIED;
}
@ -3378,7 +3379,7 @@ static CURLcode ftp_done(struct Curl_easy *data, CURLcode status,
case CURLE_REMOTE_FILE_NOT_FOUND:
case CURLE_WRITE_ERROR:
/* the connection stays alive fine even though this happened */
case CURLE_OK: /* doesn't affect the control connection's status */
case CURLE_OK: /* does not affect the control connection's status */
if(!premature)
break;
@ -3444,7 +3445,7 @@ static CURLcode ftp_done(struct Curl_easy *data, CURLcode status,
/* free the dir tree and file parts */
freedirs(ftpc);
/* shut down the socket to inform the server we're done */
/* shut down the socket to inform the server we are done */
#ifdef _WIN32_WCE
shutdown(conn->sock[SECONDARYSOCKET], 2); /* SD_BOTH */
@ -3538,9 +3539,9 @@ static CURLcode ftp_done(struct Curl_easy *data, CURLcode status,
if((-1 != data->req.size) &&
(data->req.size != data->req.bytecount) &&
#ifdef CURL_DO_LINEEND_CONV
/* Most FTP servers don't adjust their file SIZE response for CRLFs, so
* we'll check to see if the discrepancy can be explained by the number
* of CRLFs we've changed to LFs.
/* Most FTP servers do not adjust their file SIZE response for CRLFs,
* so we will check to see if the discrepancy can be explained by the
* number of CRLFs we have changed to LFs.
*/
((data->req.size + data->state.crlf_conversions) !=
data->req.bytecount) &&
@ -3675,7 +3676,7 @@ static CURLcode ftp_nb_type(struct Curl_easy *data,
* ftp_pasv_verbose()
*
* This function only outputs some informationals about this second connection
* when we've issued a PASV command before and thus we have connected to a
* when we have issued a PASV command before and thus we have connected to a
* possibly new IP address.
*
*/
@ -3716,7 +3717,7 @@ static CURLcode ftp_do_more(struct Curl_easy *data, int *completep)
* complete */
struct FTP *ftp = NULL;
/* if the second connection isn't done yet, wait for it to have
/* if the second connection is not done yet, wait for it to have
* connected to the remote host. When using proxy tunneling, this
* means the tunnel needs to have been establish. However, we
* can not expect the remote host to talk to us in any way yet.
@ -3744,20 +3745,20 @@ static CURLcode ftp_do_more(struct Curl_easy *data, int *completep)
*completep = (int)complete;
/* if we got an error or if we don't wait for a data connection return
/* if we got an error or if we do not wait for a data connection return
immediately */
if(result || !ftpc->wait_data_conn)
return result;
/* if we reach the end of the FTP state machine here, *complete will be
TRUE but so is ftpc->wait_data_conn, which says we need to wait for the
data connection and therefore we're not actually complete */
data connection and therefore we are not actually complete */
*completep = 0;
}
if(ftp->transfer <= PPTRANSFER_INFO) {
/* a transfer is about to take place, or if not a file name was given
so we'll do a SIZE on it later and then we need the right TYPE first */
/* a transfer is about to take place, or if not a filename was given so we
will do a SIZE on it later and then we need the right TYPE first */
if(ftpc->wait_data_conn) {
bool serv_conned;
@ -3796,7 +3797,7 @@ static CURLcode ftp_do_more(struct Curl_easy *data, int *completep)
result = Curl_range(data);
if(result == CURLE_OK && data->req.maxdownload >= 0) {
/* Don't check for successful transfer */
/* Do not check for successful transfer */
ftpc->dont_check = TRUE;
}
@ -3960,7 +3961,7 @@ static CURLcode init_wc_data(struct Curl_easy *data)
if(data->set.ftp_filemethod == FTPFILE_NOCWD)
data->set.ftp_filemethod = FTPFILE_MULTICWD;
/* try to parse ftp url */
/* try to parse ftp URL */
result = ftp_parse_url_path(data);
if(result) {
goto fail;
@ -4078,7 +4079,7 @@ static CURLcode wc_statemach(struct Curl_easy *data)
if(result)
return result;
/* we don't need the Curl_fileinfo of first file anymore */
/* we do not need the Curl_fileinfo of first file anymore */
Curl_llist_remove(&wildcard->filelist, wildcard->filelist.head, NULL);
if(wildcard->filelist.size == 0) { /* remains only one file to down. */
@ -4233,7 +4234,7 @@ static CURLcode ftp_disconnect(struct Curl_easy *data,
bad in any way, sending quit and waiting around here will make the
disconnect wait in vain and cause more problems than we need to.
ftp_quit() will check the state of ftp->ctl_valid. If it's ok it
ftp_quit() will check the state of ftp->ctl_valid. If it is ok it
will try to send the QUIT command, otherwise it will just return.
*/
if(dead_connection)
@ -4362,7 +4363,7 @@ CURLcode ftp_parse_url_path(struct Curl_easy *data)
++compLen;
/* we skip empty path components, like "x//y" since the FTP command
CWD requires a parameter and a non-existent parameter a) doesn't
CWD requires a parameter and a non-existent parameter a) does not
work on many servers and b) has no effect on the others. */
if(compLen > 0) {
char *comp = Curl_memdup0(curPos, compLen);
@ -4439,7 +4440,7 @@ static CURLcode ftp_dophase_done(struct Curl_easy *data, bool connected)
/* no data to transfer */
Curl_xfer_setup_nop(data);
else if(!connected)
/* since we didn't connect now, we want do_more to get called */
/* since we did not connect now, we want do_more to get called */
conn->bits.do_more = TRUE;
ftpc->ctl_valid = TRUE; /* seems good */
@ -4544,10 +4545,10 @@ static CURLcode ftp_setup_connection(struct Curl_easy *data,
}
data->req.p.ftp = ftp;
ftp->path = &data->state.up.path[1]; /* don't include the initial slash */
ftp->path = &data->state.up.path[1]; /* do not include the initial slash */
/* FTP URLs support an extension like ";type=<typecode>" that
* we'll try to get now! */
* we will try to get now! */
type = strstr(ftp->path, ";type=");
if(!type)


@ -61,7 +61,7 @@ enum {
FTP_STOR_PREQUOTE,
FTP_POSTQUOTE,
FTP_CWD, /* change dir */
FTP_MKD, /* if the dir didn't exist */
FTP_MKD, /* if the dir did not exist */
FTP_MDTM, /* to figure out the datestamp */
FTP_TYPE, /* to set type when doing a head-like request */
FTP_LIST_TYPE, /* set type when about to do a dir list */


@ -37,7 +37,7 @@ static char *GetEnv(const char *variable)
return NULL;
#elif defined(_WIN32)
/* This uses Windows API instead of C runtime getenv() to get the environment
variable since some changes aren't always visible to the latter. #4774 */
variable since some changes are not always visible to the latter. #4774 */
char *buf = NULL;
char *tmp;
DWORD bufsize;
@ -54,8 +54,8 @@ static char *GetEnv(const char *variable)
buf = tmp;
bufsize = rc;
/* It's possible for rc to be 0 if the variable was found but empty.
Since getenv doesn't make that distinction we ignore it as well. */
/* It is possible for rc to be 0 if the variable was found but empty.
Since getenv does not make that distinction we ignore it as well. */
rc = GetEnvironmentVariableA(variable, buf, bufsize);
if(!rc || rc == bufsize || rc > max) {
free(buf);


@ -209,9 +209,9 @@ static CURLcode gopher_do(struct Curl_easy *data, bool *done)
if(!timeout_ms)
timeout_ms = TIMEDIFF_T_MAX;
/* Don't busyloop. The entire loop thing is a work-around as it causes a
/* Do not busyloop. The entire loop thing is a work-around as it causes a
BLOCKING behavior which is a NO-NO. This function should rather be
split up in a do and a doing piece where the pieces that aren't
split up in a do and a doing piece where the pieces that are not
possible to send now will be sent in the doing function repeatedly
until the entire request is sent.
*/


@ -139,7 +139,7 @@ void *Curl_hash_add2(struct Curl_hash *h, void *key, size_t key_len, void *p,
/* Insert the data in the hash. If there already was a match in the hash, that
* data is replaced. This function also "lazily" allocates the table if
* needed, as it isn't done in the _init function (anymore).
* needed, as it is not done in the _init function (anymore).
*
* @unittest: 1305
* @unittest: 1602


@ -114,7 +114,7 @@ CURLHcode curl_easy_header(CURL *easy,
break;
}
}
if(!e) /* this shouldn't happen */
if(!e) /* this should not happen */
return CURLHE_MISSING;
}
/* this is the name we want */
@ -302,7 +302,7 @@ CURLcode Curl_headers_push(struct Curl_easy *data, const char *header,
/* line folding, append value to the previous header's value */
return unfold_value(data, header, hlen);
else {
/* Can't unfold without a previous header. Instead of erroring, just
/* Cannot unfold without a previous header. Instead of erroring, just
pass the leading blanks. */
while(hlen && ISBLANK(*header)) {
header++;
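Since this file backs the public curl_easy_header() API, a short usage sketch of that call (run after a performed transfer, error handling shortened):

#include <stdio.h>
#include <curl/curl.h>

static void show_content_type(CURL *easy)
{
  struct curl_header *h;
  /* index 0, origin CURLH_HEADER, request -1 means the most recent one */
  if(curl_easy_header(easy, "Content-Type", 0, CURLH_HEADER, -1, &h) ==
     CURLHE_OK)
    printf("%s: %s\n", h->name, h->value);
}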


@ -42,7 +42,7 @@
* Generic HMAC algorithm.
*
* This module computes HMAC digests based on any hash function. Parameters
* and computing procedures are set-up dynamically at HMAC computation context
* and computing procedures are setup dynamically at HMAC computation context
* initialization.
*/
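For reference, the generic construction such a module computes is the RFC 2104 one, sketched here as a comment:

/*
 * HMAC(K, m) = H( (K' XOR opad) || H( (K' XOR ipad) || m ) )
 *
 * where H is the chosen hash function, K' is the key padded (or first
 * hashed) down to the hash block size, ipad is the byte 0x36 repeated
 * and opad is the byte 0x5c repeated.
 */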


@ -84,8 +84,8 @@
* source file are these:
*
* CURLRES_IPV6 - this host has getaddrinfo() and family, and thus we use
* that. The host may not be able to resolve IPv6, but we don't really have to
* take that into account. Hosts that aren't IPv6-enabled have CURLRES_IPV4
* that. The host may not be able to resolve IPv6, but we do not really have to
* take that into account. Hosts that are not IPv6-enabled have CURLRES_IPV4
* defined.
*
* CURLRES_ARES - is defined if libcurl is built to use c-ares for
@ -238,7 +238,7 @@ void Curl_hostcache_prune(struct Curl_easy *data)
int timeout = data->set.dns_cache_timeout;
if(!data->dns.hostcache)
/* NULL hostcache means we can't do it */
/* NULL hostcache means we cannot do it */
return;
if(data->share)
@ -283,14 +283,14 @@ static struct Curl_dns_entry *fetch_addr(struct Curl_easy *data,
size_t entry_len = create_hostcache_id(hostname, 0, port,
entry_id, sizeof(entry_id));
/* See if it's already in our dns cache */
/* See if it is already in our dns cache */
dns = Curl_hash_pick(data->dns.hostcache, entry_id, entry_len + 1);
/* No entry found in cache, check if we might have a wildcard entry */
if(!dns && data->state.wildcard_resolve) {
entry_len = create_hostcache_id("*", 1, port, entry_id, sizeof(entry_id));
/* See if it's already in our dns cache */
/* See if it is already in our dns cache */
dns = Curl_hash_pick(data->dns.hostcache, entry_id, entry_len + 1);
}
@ -329,7 +329,7 @@ static struct Curl_dns_entry *fetch_addr(struct Curl_easy *data,
}
if(!found) {
infof(data, "Hostname in DNS cache doesn't have needed family, zapped");
infof(data, "Hostname in DNS cache does not have needed family, zapped");
dns = NULL; /* the memory deallocation is being handled by the hash */
Curl_hash_delete(data->dns.hostcache, entry_id, entry_len + 1);
}
@ -349,7 +349,7 @@ static struct Curl_dns_entry *fetch_addr(struct Curl_easy *data,
* Returns the Curl_dns_entry entry pointer or NULL if not in the cache.
*
* The returned data *MUST* be "unlocked" with Curl_resolv_unlock() after
* use, or we'll leak memory!
* use, or we will leak memory!
*/
struct Curl_dns_entry *
Curl_fetch_addr(struct Curl_easy *data,
@ -602,7 +602,7 @@ static struct Curl_addrinfo *get_localhost(int port, const char *name)
bool Curl_ipv6works(struct Curl_easy *data)
{
if(data) {
/* the nature of most system is that IPv6 status doesn't come and go
/* the nature of most system is that IPv6 status does not come and go
during a program's lifetime so we only probe the first time and then we
have the info kept for fast reuse */
DEBUGASSERT(data);
@ -618,7 +618,7 @@ bool Curl_ipv6works(struct Curl_easy *data)
/* probe to see if we have a working IPv6 stack */
curl_socket_t s = socket(PF_INET6, SOCK_DGRAM, 0);
if(s == CURL_SOCKET_BAD)
/* an IPv6 address was requested but we can't get/use one */
/* an IPv6 address was requested but we cannot get/use one */
ipv6_works = 0;
else {
ipv6_works = 1;
@ -662,11 +662,11 @@ static bool tailmatch(const char *full, const char *part)
/*
* Curl_resolv() is the main name resolve function within libcurl. It resolves
* a name and returns a pointer to the entry in the 'entry' argument (if one
* is provided). This function might return immediately if we're using asynch
* is provided). This function might return immediately if we are using asynch
* resolves. See the return codes.
*
* The cache entry we return will get its 'inuse' counter increased when this
* function is used. You MUST call Curl_resolv_unlock() later (when you're
* function is used. You MUST call Curl_resolv_unlock() later (when you are
* done using this struct) to decrease the counter again.
*
* Return codes:
@ -813,7 +813,7 @@ enum resolve_t Curl_resolv(struct Curl_easy *data,
if(respwait) {
/* the response to our resolve call will come asynchronously at
a later time, good or bad */
/* First, check that we haven't received the info by now */
/* First, check that we have not received the info by now */
result = Curl_resolv_check(data, &dns);
if(result) /* error detected */
return CURLRESOLV_ERROR;
@ -864,11 +864,11 @@ void alarmfunc(int sig)
/*
* Curl_resolv_timeout() is the same as Curl_resolv() but specifies a
* timeout. This function might return immediately if we're using asynch
* timeout. This function might return immediately if we are using asynch
* resolves. See the return codes.
*
* The cache entry we return will get its 'inuse' counter increased when this
* function is used. You MUST call Curl_resolv_unlock() later (when you're
* function is used. You MUST call Curl_resolv_unlock() later (when you are
* done using this struct) to decrease the counter again.
*
* If built with a synchronous resolver and use of signals is not
@ -934,7 +934,7 @@ enum resolve_t Curl_resolv_timeout(struct Curl_easy *data,
will generate a signal and we will siglongjmp() from that here.
This technique has problems (see alarmfunc).
This should be the last thing we do before calling Curl_resolv(),
as otherwise we'd have to worry about variables that get modified
as otherwise we would have to worry about variables that get modified
before we invoke Curl_resolv() (and thus use "volatile"). */
curl_simple_lock_lock(&curl_jmpenv_lock);
@ -955,7 +955,7 @@ enum resolve_t Curl_resolv_timeout(struct Curl_easy *data,
keep_copysig = TRUE; /* yes, we have a copy */
sigact.sa_handler = alarmfunc;
#ifdef SA_RESTART
/* HPUX doesn't have SA_RESTART but defaults to that behavior! */
/* HPUX does not have SA_RESTART but defaults to that behavior! */
sigact.sa_flags &= ~SA_RESTART;
#endif
/* now set the new struct */
@ -1022,7 +1022,7 @@ clean_up:
((alarm_set >= 0x80000000) && (prev_alarm < 0x80000000)) ) {
/* if the alarm time-left reached zero or turned "negative" (counted
with unsigned values), we should fire off a SIGALRM here, but we
won't, and zero would be to switch it off so we never set it to
will not, and zero would be to switch it off so we never set it to
less than 1! */
alarm(1);
rc = CURLRESOLV_TIMEDOUT;
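The alarm-based timeout technique these comments describe follows the classic POSIX sigsetjmp()/alarm() pattern; a simplified generic sketch (not curl's code, and without the signal-safety caveats noted above):

#include <setjmp.h>
#include <signal.h>
#include <unistd.h>

static sigjmp_buf timeout_jmp;

static void on_alarm(int sig)
{
  (void)sig;
  siglongjmp(timeout_jmp, 1);   /* jump back out of the blocking call */
}

static int blocking_call_with_timeout(unsigned int seconds)
{
  int timed_out = 0;

  if(sigsetjmp(timeout_jmp, 1))
    timed_out = 1;              /* arrived here via siglongjmp() */
  else {
    signal(SIGALRM, on_alarm);
    alarm(seconds);             /* deliver SIGALRM after 'seconds' */
    /* ... perform the blocking name resolve here ... */
  }
  alarm(0);                     /* switch off any pending alarm */
  return timed_out;
}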
@ -1150,7 +1150,7 @@ CURLcode Curl_loadhostpairs(struct Curl_easy *data)
if(data->share)
Curl_share_lock(data, CURL_LOCK_DATA_DNS, CURL_LOCK_ACCESS_SINGLE);
/* delete entry, ignore if it didn't exist */
/* delete entry, ignore if it did not exist */
Curl_hash_delete(data->dns.hostcache, entry_id, entry_len + 1);
if(data->share)
@ -1264,7 +1264,7 @@ err:
if(data->share)
Curl_share_lock(data, CURL_LOCK_DATA_DNS, CURL_LOCK_ACCESS_SINGLE);
/* See if it's already in our dns cache */
/* See if it is already in our dns cache */
dns = Curl_hash_pick(data->dns.hostcache, entry_id, entry_len + 1);
if(dns) {
@ -1362,7 +1362,7 @@ static void show_resolve_info(struct Curl_easy *data,
if(!result)
result = Curl_dyn_add(d, buf);
if(result) {
infof(data, "too many IP, can't show");
infof(data, "too many IP, cannot show");
goto fail;
}
}


@ -80,7 +80,7 @@ struct Curl_https_rrinfo {
char *alpns; /* keytag = 1 */
bool no_def_alpn; /* keytag = 2 */
/*
* we don't support ports (keytag = 3) as we don't support
* we do not support ports (keytag = 3) as we do not support
* port-switching yet
*/
unsigned char *ipv4hints; /* keytag = 4 */
@ -97,7 +97,7 @@ struct Curl_dns_entry {
#ifdef USE_HTTPSRR
struct Curl_https_rrinfo *hinfo;
#endif
/* timestamp == 0 -- permanent CURLOPT_RESOLVE entry (doesn't time out) */
/* timestamp == 0 -- permanent CURLOPT_RESOLVE entry (does not time out) */
time_t timestamp;
/* use-counter, use Curl_resolv_unlock to release reference */
long inuse;
@ -114,7 +114,7 @@ bool Curl_host_is_ipnum(const char *hostname);
* and port.
*
* The returned data *MUST* be "unlocked" with Curl_resolv_unlock() after
* use, or we'll leak memory!
* use, or we will leak memory!
*/
/* return codes */
enum resolve_t {
@ -200,7 +200,7 @@ void Curl_printable_address(const struct Curl_addrinfo *ip,
* Returns the Curl_dns_entry entry pointer or NULL if not in the cache.
*
* The returned data *MUST* be "unlocked" with Curl_resolv_unlock() after
* use, or we'll leak memory!
* use, or we will leak memory!
*/
struct Curl_dns_entry *
Curl_fetch_addr(struct Curl_easy *data,


@ -62,7 +62,7 @@ bool Curl_ipvalid(struct Curl_easy *data, struct connectdata *conn)
{
(void)data;
if(conn->ip_version == CURL_IPRESOLVE_V6)
/* An IPv6 address was requested and we can't get/use one */
/* An IPv6 address was requested and we cannot get/use one */
return FALSE;
return TRUE; /* OK, proceed */
@ -193,8 +193,8 @@ struct Curl_addrinfo *Curl_ipv4_resolve_r(const char *hostname,
* small. Previous versions are known to return ERANGE for the same
* problem.
*
* This wouldn't be such a big problem if older versions wouldn't
* sometimes return EAGAIN on a common failure case. Alas, we can't
* This would not be such a big problem if older versions would not
* sometimes return EAGAIN on a common failure case. Alas, we cannot
* assume that EAGAIN *or* ERANGE means ERANGE for any given version of
* glibc.
*
@ -210,9 +210,9 @@ struct Curl_addrinfo *Curl_ipv4_resolve_r(const char *hostname,
* gethostbyname_r() in glibc:
*
* In glibc 2.2.5 the interface is different (this has also been
* discovered in glibc 2.1.1-6 as shipped by Redhat 6). What I can't
* discovered in glibc 2.1.1-6 as shipped by Redhat 6). What I cannot
* explain, is that tests performed on glibc 2.2.4-34 and 2.2.4-32
* (shipped/upgraded by Redhat 7.2) don't show this behavior!
* (shipped/upgraded by Redhat 7.2) do not show this behavior!
*
* In this "buggy" version, the return code is -1 on error and 'errno'
* is set to the ERANGE or EAGAIN code. Note that 'errno' is not a
@ -223,7 +223,7 @@ struct Curl_addrinfo *Curl_ipv4_resolve_r(const char *hostname,
#elif defined(HAVE_GETHOSTBYNAME_R_3)
/* AIX, Digital Unix/Tru64, HPUX 10, more? */
/* For AIX 4.3 or later, we don't use gethostbyname_r() at all, because of
/* For AIX 4.3 or later, we do not use gethostbyname_r() at all, because of
* the plain fact that it does not return unique full buffers on each
* call, but instead several of the pointers in the hostent structs will
* point to the same actual data! This have the unfortunate down-side that
@ -237,7 +237,7 @@ struct Curl_addrinfo *Curl_ipv4_resolve_r(const char *hostname,
*
* Troels Walsted Hansen helped us work this out on March 3rd, 2003.
*
* [*] = much later we've found out that it isn't at all "completely
* [*] = much later we have found out that it is not at all "completely
* thread-safe", but at least the gethostbyname() function is.
*/
@ -253,7 +253,7 @@ struct Curl_addrinfo *Curl_ipv4_resolve_r(const char *hostname,
(struct hostent *)buf,
(struct hostent_data *)((char *)buf +
sizeof(struct hostent)));
h_errnop = SOCKERRNO; /* we don't deal with this, but set it anyway */
h_errnop = SOCKERRNO; /* we do not deal with this, but set it anyway */
}
else
res = -1; /* failure, too smallish buffer size */
@ -263,8 +263,8 @@ struct Curl_addrinfo *Curl_ipv4_resolve_r(const char *hostname,
h = buf; /* result expected in h */
/* This is the worst kind of the different gethostbyname_r() interfaces.
* Since we don't know how big buffer this particular lookup required,
* we can't realloc down the huge alloc without doing closer analysis of
* Since we do not know how big buffer this particular lookup required,
* we cannot realloc down the huge alloc without doing closer analysis of
* the returned data. Thus, we always use CURL_HOSTENT_SIZE for every
* name lookup. Fixing this would require an extra malloc() and then
* calling Curl_addrinfo_copy() that subsequent realloc()s down the new
@ -280,7 +280,7 @@ struct Curl_addrinfo *Curl_ipv4_resolve_r(const char *hostname,
#else /* (HAVE_GETADDRINFO && HAVE_GETADDRINFO_THREADSAFE) ||
HAVE_GETHOSTBYNAME_R */
/*
* Here is code for platforms that don't have a thread safe
* Here is code for platforms that do not have a thread safe
* getaddrinfo() nor gethostbyname_r() function or for which
* gethostbyname() is the preferred one.
*/


@ -393,7 +393,7 @@ CURLcode Curl_hsts_save(struct Curl_easy *data, struct hsts *h,
free(tempstore);
skipsave:
if(data->set.hsts_write) {
/* if there's a write callback */
/* if there is a write callback */
struct curl_index i; /* count */
i.total = h->list.size;
i.index = 0;

View File

@ -410,9 +410,9 @@ static CURLcode http_perhapsrewind(struct Curl_easy *data,
curl_off_t upload_remain = (expectsend >= 0)? (expectsend - bytessent) : -1;
bool little_upload_remains = (upload_remain >= 0 && upload_remain < 2000);
bool needs_rewind = Curl_creader_needs_rewind(data);
/* By default, we'd like to abort the transfer when little or
* unknown amount remains. But this may be overridden by authentications
* further below! */
/* By default, we would like to abort the transfer when little or unknown
* amount remains. This may be overridden by authentications further
* below! */
bool abort_upload = (!data->req.upload_done && !little_upload_remains);
const char *ongoing_auth = NULL;
@ -475,7 +475,7 @@ static CURLcode http_perhapsrewind(struct Curl_easy *data,
/* We decided to abort the ongoing transfer */
streamclose(conn, "Mid-auth HTTP and much data left to send");
/* FIXME: questionable manipulation here, can we do this differently? */
data->req.size = 0; /* don't download any more than 0 bytes */
data->req.size = 0; /* do not download any more than 0 bytes */
}
return CURLE_OK;
}
@ -548,7 +548,7 @@ CURLcode Curl_http_auth_act(struct Curl_easy *data)
/* no (known) authentication available,
authentication is not "done" yet and
no authentication seems to be required and
we didn't try HEAD or GET */
we did not try HEAD or GET */
if((data->state.httpreq != HTTPREQ_GET) &&
(data->state.httpreq != HTTPREQ_HEAD)) {
data->req.newurl = strdup(data->state.url); /* clone URL */
@ -738,13 +738,13 @@ Curl_http_output_auth(struct Curl_easy *data,
if(authhost->want && !authhost->picked)
/* The app has selected one or more methods, but none has been picked
so far by a server round-trip. Then we set the picked one to the
want one, and if this is one single bit it'll be used instantly. */
want one, and if this is one single bit it will be used instantly. */
authhost->picked = authhost->want;
if(authproxy->want && !authproxy->picked)
/* The app has selected one or more methods, but none has been picked so
far by a proxy round-trip. Then we set the picked one to the want one,
and if this is one single bit it'll be used instantly. */
and if this is one single bit it will be used instantly. */
authproxy->picked = authproxy->want;
#ifndef CURL_DISABLE_PROXY
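The want/picked distinction above maps directly to CURLOPT_HTTPAUTH on the application side. A hedged sketch; the credentials are placeholders:

    #include <curl/curl.h>

    static void pick_auth(CURL *h)
    {
      curl_easy_setopt(h, CURLOPT_USERPWD, "user:secret");

      /* one single bit set: libcurl can use it instantly on the first request */
      curl_easy_setopt(h, CURLOPT_HTTPAUTH, (long)CURLAUTH_BASIC);

      /* several bits set: libcurl waits for the 401 before picking one:
         curl_easy_setopt(h, CURLOPT_HTTPAUTH,
                          (long)(CURLAUTH_BASIC | CURLAUTH_DIGEST)); */
    }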
@ -759,7 +759,7 @@ Curl_http_output_auth(struct Curl_easy *data,
#else
(void)proxytunnel;
#endif /* CURL_DISABLE_PROXY */
/* we have no proxy so let's pretend we're done authenticating
/* we have no proxy so let's pretend we are done authenticating
with it */
authproxy->done = TRUE;
@ -933,7 +933,7 @@ CURLcode Curl_http_input_auth(struct Curl_easy *data, bool proxy,
authp->avail |= CURLAUTH_DIGEST;
/* We call this function on input Digest headers even if Digest
* authentication isn't activated yet, as we need to store the
* authentication is not activated yet, as we need to store the
* incoming data from this header in case we are going to use
* Digest */
result = Curl_input_digest(data, proxy, auth);
@ -952,7 +952,7 @@ CURLcode Curl_http_input_auth(struct Curl_easy *data, bool proxy,
authp->avail |= CURLAUTH_BASIC;
if(authp->picked == CURLAUTH_BASIC) {
/* We asked for Basic authentication but got a 40X back
anyway, which basically means our name+password isn't
anyway, which basically means our name+password is not
valid. */
authp->avail = CURLAUTH_NONE;
infof(data, "Authentication problem. Ignoring this.");
@ -968,7 +968,7 @@ CURLcode Curl_http_input_auth(struct Curl_easy *data, bool proxy,
authp->avail |= CURLAUTH_BEARER;
if(authp->picked == CURLAUTH_BEARER) {
/* We asked for Bearer authentication but got a 40X back
anyway, which basically means our token isn't valid. */
anyway, which basically means our token is not valid. */
authp->avail = CURLAUTH_NONE;
infof(data, "Authentication problem. Ignoring this.");
data->state.authproblem = TRUE;
@ -988,7 +988,7 @@ CURLcode Curl_http_input_auth(struct Curl_easy *data, bool proxy,
/* there may be multiple methods on one line, so keep reading */
while(*auth && *auth != ',') /* read up to the next comma */
auth++;
if(*auth == ',') /* if we're on a comma, skip it */
if(*auth == ',') /* if we are on a comma, skip it */
auth++;
while(*auth && ISSPACE(*auth))
auth++;
@ -1011,8 +1011,8 @@ static bool http_should_fail(struct Curl_easy *data, int httpcode)
DEBUGASSERT(data->conn);
/*
** If we haven't been asked to fail on error,
** don't fail.
** If we have not been asked to fail on error,
** do not fail.
*/
if(!data->set.http_fail_on_error)
return FALSE;
@ -1032,7 +1032,7 @@ static bool http_should_fail(struct Curl_easy *data, int httpcode)
return FALSE;
/*
** Any code >= 400 that's not 401 or 407 is always
** Any code >= 400 that is not 401 or 407 is always
** a terminal error
*/
if((httpcode != 401) && (httpcode != 407))
@ -1044,22 +1044,19 @@ static bool http_should_fail(struct Curl_easy *data, int httpcode)
DEBUGASSERT((httpcode == 401) || (httpcode == 407));
/*
** Examine the current authentication state to see if this
** is an error. The idea is for this function to get
** called after processing all the headers in a response
** message. So, if we've been to asked to authenticate a
** particular stage, and we've done it, we're OK. But, if
** we're already completely authenticated, it's not OK to
** get another 401 or 407.
** Examine the current authentication state to see if this is an error. The
** idea is for this function to get called after processing all the headers
 ** in a response message. So, if we have been asked to authenticate a
** particular stage, and we have done it, we are OK. If we are already
** completely authenticated, it is not OK to get another 401 or 407.
**
** It is possible for authentication to go stale such that
** the client needs to reauthenticate. Once that info is
** available, use it here.
** It is possible for authentication to go stale such that the client needs
** to reauthenticate. Once that info is available, use it here.
*/
/*
** Either we're not authenticating, or we're supposed to
** be authenticating something else. This is an error.
** Either we are not authenticating, or we are supposed to be authenticating
** something else. This is an error.
*/
if((httpcode == 401) && !data->state.aptr.user)
return TRUE;
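http_should_fail() implements the CURLOPT_FAILONERROR behavior. A hedged application-side sketch of what the rules above mean in practice; the URL is a placeholder:

    #include <curl/curl.h>

    static long fetch_or_fail(CURL *h, const char *url)
    {
      long code = 0;
      curl_easy_setopt(h, CURLOPT_URL, url);
      curl_easy_setopt(h, CURLOPT_FAILONERROR, 1L);
      if(curl_easy_perform(h) == CURLE_HTTP_RETURNED_ERROR)
        curl_easy_getinfo(h, CURLINFO_RESPONSE_CODE, &code);
      return code; /* non-zero: a >= 400 status stopped the transfer */
    }

As the comment notes, 401 and 407 only count as terminal once authentication for that stage has already been completed.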
@ -1098,7 +1095,7 @@ Curl_compareheader(const char *headerline, /* line to check */
DEBUGASSERT(content);
if(!strncasecompare(headerline, header, hlen))
return FALSE; /* doesn't start with header */
return FALSE; /* does not start with header */
/* pass the header */
start = &headerline[hlen];
@ -1110,11 +1107,11 @@ Curl_compareheader(const char *headerline, /* line to check */
/* find the end of the header line */
end = strchr(start, '\r'); /* lines end with CRLF */
if(!end) {
/* in case there's a non-standard compliant line here */
/* in case there is a non-standard compliant line here */
end = strchr(start, '\n');
if(!end)
/* hm, there's no line ending here, use the zero byte! */
/* hm, there is no line ending here, use the zero byte! */
end = strchr(start, '\0');
}
@ -1145,7 +1142,7 @@ CURLcode Curl_http_connect(struct Curl_easy *data, bool *done)
}
/* this returns the socket to wait for in the DO and DOING state for the multi
interface and then we're always _sending_ a request and thus we wait for
interface and then we are always _sending_ a request and thus we wait for
the single socket to become writable only */
int Curl_http_getsock_do(struct Curl_easy *data,
struct connectdata *conn,
@ -1167,7 +1164,7 @@ CURLcode Curl_http_done(struct Curl_easy *data,
{
struct connectdata *conn = data->conn;
/* Clear multipass flag. If authentication isn't done yet, then it will get
/* Clear multipass flag. If authentication is not done yet, then it will get
* a chance to be set back to true when we output the next auth header */
data->state.authhost.multipass = FALSE;
data->state.authproxy.multipass = FALSE;
@ -1185,8 +1182,8 @@ CURLcode Curl_http_done(struct Curl_easy *data,
(data->req.bytecount +
data->req.headerbytecount -
data->req.deductheadercount) <= 0) {
/* If this connection isn't simply closed to be retried, AND nothing was
read from the HTTP server (that counts), this can't be right so we
/* If this connection is not simply closed to be retried, AND nothing was
read from the HTTP server (that counts), this cannot be right so we
return an error here */
failf(data, "Empty reply from server");
/* Mark it as closed to avoid the "left intact" message */
@ -1345,7 +1342,7 @@ CURLcode Curl_dynhds_add_custom(struct Curl_easy *data,
DEBUGASSERT(name && value);
if(data->state.aptr.host &&
/* a Host: header was sent already, don't pass on any custom Host:
/* a Host: header was sent already, do not pass on any custom Host:
header as that will produce *two* in the same request! */
hd_name_eq(name, namelen, STRCONST("Host:")))
;
@ -1358,18 +1355,18 @@ CURLcode Curl_dynhds_add_custom(struct Curl_easy *data,
hd_name_eq(name, namelen, STRCONST("Content-Type:")))
;
else if(data->req.authneg &&
/* while doing auth neg, don't allow the custom length since
/* while doing auth neg, do not allow the custom length since
we will force length zero then */
hd_name_eq(name, namelen, STRCONST("Content-Length:")))
;
else if(data->state.aptr.te &&
/* when asking for Transfer-Encoding, don't pass on a custom
/* when asking for Transfer-Encoding, do not pass on a custom
Connection: */
hd_name_eq(name, namelen, STRCONST("Connection:")))
;
else if((conn->httpversion >= 20) &&
hd_name_eq(name, namelen, STRCONST("Transfer-Encoding:")))
/* HTTP/2 doesn't support chunked requests */
/* HTTP/2 does not support chunked requests */
;
else if((hd_name_eq(name, namelen, STRCONST("Authorization:")) ||
hd_name_eq(name, namelen, STRCONST("Cookie:"))) &&
@ -1491,8 +1488,9 @@ CURLcode Curl_add_custom_headers(struct Curl_easy *data,
char *compare = semicolonp ? semicolonp : headers->data;
if(data->state.aptr.host &&
/* a Host: header was sent already, don't pass on any custom Host:
header as that will produce *two* in the same request! */
/* a Host: header was sent already, do not pass on any custom
Host: header as that will produce *two* in the same
request! */
checkprefix("Host:", compare))
;
else if(data->state.httpreq == HTTPREQ_POST_FORM &&
@ -1504,18 +1502,18 @@ CURLcode Curl_add_custom_headers(struct Curl_easy *data,
checkprefix("Content-Type:", compare))
;
else if(data->req.authneg &&
/* while doing auth neg, don't allow the custom length since
/* while doing auth neg, do not allow the custom length since
we will force length zero then */
checkprefix("Content-Length:", compare))
;
else if(data->state.aptr.te &&
/* when asking for Transfer-Encoding, don't pass on a custom
/* when asking for Transfer-Encoding, do not pass on a custom
Connection: */
checkprefix("Connection:", compare))
;
else if((conn->httpversion >= 20) &&
checkprefix("Transfer-Encoding:", compare))
/* HTTP/2 doesn't support chunked requests */
/* HTTP/2 does not support chunked requests */
;
else if((checkprefix("Authorization:", compare) ||
checkprefix("Cookie:", compare)) &&
@ -1756,7 +1754,7 @@ CURLcode Curl_http_host(struct Curl_easy *data, struct connectdata *conn)
(conn->remote_port == PORT_HTTPS)) ||
((conn->given->protocol&(CURLPROTO_HTTP|CURLPROTO_WS)) &&
(conn->remote_port == PORT_HTTP)) )
/* if(HTTPS on port 443) OR (HTTP on port 80) then don't include
/* if(HTTPS on port 443) OR (HTTP on port 80) then do not include
the port number in the host string */
aptr->host = aprintf("Host: %s%s%s\r\n", conn->bits.ipv6_ip?"[":"",
host, conn->bits.ipv6_ip?"]":"");
@ -1766,7 +1764,7 @@ CURLcode Curl_http_host(struct Curl_easy *data, struct connectdata *conn)
conn->remote_port);
if(!aptr->host)
/* without Host: we can't make a nice request */
/* without Host: we cannot make a nice request */
return CURLE_OUT_OF_MEMORY;
}
return CURLE_OK;
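A small sketch, outside of curl, of the Host: formatting rule the hunk above applies: brackets around IPv6 literals, and no port number when the port is the scheme default.

    #include <stdio.h>
    #include <stdbool.h>

    static void format_host(char *buf, size_t len, const char *host,
                            bool ipv6_ip, int port, int default_port)
    {
      if(port == default_port)  /* default port: leave the number out */
        snprintf(buf, len, "Host: %s%s%s\r\n",
                 ipv6_ip ? "[" : "", host, ipv6_ip ? "]" : "");
      else
        snprintf(buf, len, "Host: %s%s%s:%d\r\n",
                 ipv6_ip ? "[" : "", host, ipv6_ip ? "]" : "", port);
    }

    /* format_host(buf, sizeof(buf), "2001:db8::1", true, 8443, 443)
       produces "Host: [2001:db8::1]:8443" */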
@ -1817,7 +1815,7 @@ CURLcode Curl_http_target(struct Curl_easy *data,
}
if(strcasecompare("http", data->state.up.scheme)) {
/* when getting HTTP, we don't want the userinfo the URL */
 /* when getting HTTP, we do not want the userinfo in the URL */
uc = curl_url_set(h, CURLUPART_USER, NULL, 0);
if(uc) {
curl_url_cleanup(h);
@ -1838,7 +1836,7 @@ CURLcode Curl_http_target(struct Curl_easy *data,
curl_url_cleanup(h);
/* target or url */
/* target or URL */
result = Curl_dyn_add(r, data->set.str[STRING_TARGET]?
data->set.str[STRING_TARGET]:url);
free(url);
@ -2041,7 +2039,7 @@ static CURLcode http_resume(struct Curl_easy *data, Curl_HttpReq httpreq)
if(data->state.resume_from < 0) {
/*
* This is meant to get the size of the present remote-file by itself.
* We don't support this now. Bail out!
* We do not support this now. Bail out!
*/
data->state.resume_from = 0;
}
@ -2126,7 +2124,7 @@ static CURLcode addexpect(struct Curl_easy *data, struct dynbuf *r,
if(data->req.upgr101 != UPGR101_INIT)
return CURLE_OK;
/* For really small puts we don't use Expect: headers at all, and for
/* For really small puts we do not use Expect: headers at all, and for
the somewhat bigger ones we allow the app to disable it. Just make
sure that the expect100header is always set to the preferred value
here. */
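The "allow the app to disable it" remark refers to the usual custom-header trick; a hedged sketch:

    #include <curl/curl.h>

    static struct curl_slist *disable_expect(CURL *h)
    {
      struct curl_slist *hdrs = curl_slist_append(NULL, "Expect:");
      curl_easy_setopt(h, CURLOPT_HTTPHEADER, hdrs); /* empty value removes it */
      return hdrs; /* free with curl_slist_free_all() after the transfer */
    }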
@ -2178,7 +2176,7 @@ CURLcode Curl_http_req_complete(struct Curl_easy *data,
case HTTPREQ_POST_MIME:
#endif
/* We only set Content-Length and allow a custom Content-Length if
we don't upload data chunked, as RFC2616 forbids us to set both
we do not upload data chunked, as RFC2616 forbids us to set both
kinds of headers (Transfer-Encoding: chunked and Content-Length).
We do not override a custom "Content-Length" header, but during
authentication negotiation that header is suppressed.
@ -2187,7 +2185,7 @@ CURLcode Curl_http_req_complete(struct Curl_easy *data,
(data->req.authneg ||
!Curl_checkheaders(data, STRCONST("Content-Length")))) {
/* we allow replacing this header if not during auth negotiation,
although it isn't very wise to actually set your own */
although it is not very wise to actually set your own */
result = Curl_dyn_addf(r,
"Content-Length: %" CURL_FORMAT_CURL_OFF_T
"\r\n", req_clen);
@ -2323,7 +2321,7 @@ CURLcode Curl_http_range(struct Curl_easy *data,
{
if(data->state.use_range) {
/*
* A range is selected. We use different headers whether we're downloading
* A range is selected. We use different headers whether we are downloading
* or uploading and we always let customized headers override our internal
* ones if any such are specified.
*/
@ -2341,7 +2339,7 @@ CURLcode Curl_http_range(struct Curl_easy *data,
free(data->state.aptr.rangeline);
if(data->set.set_resume_from < 0) {
/* Upload resume was asked for, but we don't know the size of the
/* Upload resume was asked for, but we do not know the size of the
remote part so we tell the server (and act accordingly) that we
upload the whole file (again) */
data->state.aptr.rangeline =
@ -2385,12 +2383,12 @@ CURLcode Curl_http_firstwrite(struct Curl_easy *data)
if(data->req.newurl) {
if(conn->bits.close) {
/* Abort after the headers if "follow Location" is set
and we're set to close anyway. */
and we are set to close anyway. */
k->keepon &= ~KEEP_RECV;
k->done = TRUE;
return CURLE_OK;
}
/* We have a new url to load, but since we want to be able to reuse this
/* We have a new URL to load, but since we want to be able to reuse this
connection properly, we read the full response in "ignore more" */
k->ignorebody = TRUE;
infof(data, "Ignoring the response-body");
@ -2401,7 +2399,7 @@ CURLcode Curl_http_firstwrite(struct Curl_easy *data)
if(k->size == data->state.resume_from) {
/* The resume point is at the end of file, consider this fine even if it
doesn't allow resume from here. */
does not allow resume from here. */
infof(data, "The entire document is already downloaded");
streamclose(conn, "already downloaded");
/* Abort download */
@ -2410,10 +2408,10 @@ CURLcode Curl_http_firstwrite(struct Curl_easy *data)
return CURLE_OK;
}
/* we wanted to resume a download, although the server doesn't seem to
* support this and we did this with a GET (if it wasn't a GET we did a
/* we wanted to resume a download, although the server does not seem to
* support this and we did this with a GET (if it was not a GET we did a
* POST or PUT resume) */
failf(data, "HTTP server doesn't seem to support "
failf(data, "HTTP server does not seem to support "
"byte ranges. Cannot resume.");
return CURLE_RANGE_ERROR;
}
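The resume checks above are triggered by the application asking for a transfer offset. A hedged sketch; URL and offset are placeholders:

    #include <curl/curl.h>

    static void resume_download(CURL *h, curl_off_t already_have)
    {
      curl_easy_setopt(h, CURLOPT_URL, "https://example.com/big.iso");
      curl_easy_setopt(h, CURLOPT_RESUME_FROM_LARGE, already_have);
      /* or request an explicit byte range instead:
         curl_easy_setopt(h, CURLOPT_RANGE, "1048576-"); */
    }

If the server ignores the Range request, the transfer ends with CURLE_RANGE_ERROR as handled above.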
@ -2425,7 +2423,7 @@ CURLcode Curl_http_firstwrite(struct Curl_easy *data)
if(!Curl_meets_timecondition(data, k->timeofdoc)) {
k->done = TRUE;
/* We're simulating an HTTP 304 from server so we return
/* We are simulating an HTTP 304 from server so we return
what should have been returned from the server */
data->info.httpcode = 304;
infof(data, "Simulate an HTTP 304 response");
@ -2447,7 +2445,7 @@ CURLcode Curl_transferencode(struct Curl_easy *data)
/* When we are to insert a TE: header in the request, we must also insert
TE in a Connection: header, so we need to merge the custom provided
Connection: header and prevent the original to get sent. Note that if
the user has inserted his/her own TE: header we don't do this magic
the user has inserted his/her own TE: header we do not do this magic
but then assume that the user will handle it all! */
char *cptr = Curl_checkheaders(data, STRCONST("Connection"));
#define TE_HEADER "TE: gzip\r\n"
@ -2693,7 +2691,7 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done)
if(!(conn->handler->flags&PROTOPT_SSL) &&
conn->httpversion < 20 &&
(data->state.httpwant == CURL_HTTP_VERSION_2)) {
/* append HTTP2 upgrade magic stuff to the HTTP request if it isn't done
/* append HTTP2 upgrade magic stuff to the HTTP request if it is not done
over SSL */
result = Curl_http2_request_upgrade(&req, data);
if(result) {
@ -3017,13 +3015,13 @@ CURLcode Curl_http_header(struct Curl_easy *data,
* connection will be kept alive for our pleasure.
* Default action for 1.0 is to close.
*/
connkeep(conn, "Proxy-Connection keep-alive"); /* don't close */
connkeep(conn, "Proxy-Connection keep-alive"); /* do not close */
infof(data, "HTTP/1.0 proxy connection set to keep alive");
}
else if((conn->httpversion == 11) && conn->bits.httpproxy &&
HD_IS_AND_SAYS(hd, hdlen, "Proxy-Connection:", "close")) {
/*
* We get an HTTP/1.1 response from a proxy and it says it'll
* We get an HTTP/1.1 response from a proxy and it says it will
* close down after this transfer.
*/
connclose(conn, "Proxy-Connection: asked to close after done");
@ -3148,7 +3146,7 @@ CURLcode Curl_http_header(struct Curl_easy *data,
if(result)
return result;
if(!k->chunk && data->set.http_transfer_encoding) {
/* if this isn't chunked, only close can signal the end of this
/* if this is not chunked, only close can signal the end of this
* transfer as Content-Length is said not to be trusted for
* transfer-encoding! */
connclose(conn, "HTTP/1.1 transfer-encoding without chunks");
@ -3220,7 +3218,7 @@ CURLcode Curl_http_statusline(struct Curl_easy *data,
/*
* This code executes as part of processing the header. As a
* result, it's not totally clear how to interpret the
* result, it is not totally clear how to interpret the
* response code yet as that depends on what other headers may
* be present. 401 and 407 may be errors, but may be OK
* depending on how authentication is working. Other codes
@ -3311,7 +3309,7 @@ static CURLcode verify_header(struct Curl_easy *data,
/* the first "header" is the status-line and it has no colon */
return CURLE_OK;
if(((hd[0] == ' ') || (hd[0] == '\t')) && k->headerline > 2)
/* line folding, can't happen on line 2 */
/* line folding, cannot happen on line 2 */
;
else {
ptr = memchr(hd, ':', hdlen);
@ -3581,7 +3579,7 @@ static CURLcode http_on_response(struct Curl_easy *data,
case HTTPREQ_POST_MIME:
/* We got an error response. If this happened before the whole
* request body has been sent we stop sending and mark the
* connection for closure after we've read the entire response.
* connection for closure after we have read the entire response.
*/
if(!Curl_req_done_sending(data)) {
if((k->httpcode == 417) && Curl_http_exp100_is_selected(data)) {
@ -3644,7 +3642,7 @@ static CURLcode http_on_response(struct Curl_easy *data,
k->download_done = TRUE;
/* If max download size is *zero* (nothing) we already have
nothing and can safely return ok now! But for HTTP/2, we'd
nothing and can safely return ok now! But for HTTP/2, we would
like to call http2_handle_stream_close to properly close a
stream. In order to do this, we keep reading until we
close the stream. */
@ -3709,14 +3707,14 @@ static CURLcode http_rw_hd(struct Curl_easy *data,
or else we consider this to be the body right away! */
bool fine_statusline = FALSE;
k->httpversion = 0; /* Don't know yet */
k->httpversion = 0; /* Do not know yet */
if(data->conn->handler->protocol & PROTO_FAMILY_HTTP) {
/*
* https://datatracker.ietf.org/doc/html/rfc7230#section-3.1.2
*
* The response code is always a three-digit number in HTTP as the spec
* says. We allow any three-digit number here, but we cannot make
* guarantees on future behaviors since it isn't within the protocol.
* guarantees on future behaviors since it is not within the protocol.
*/
const char *p = hd;
@ -4459,7 +4457,7 @@ static CURLcode cr_exp100_read(struct Curl_easy *data,
*eos = FALSE;
return CURLE_OK;
}
/* we've waited long enough, continue anyway */
/* we have waited long enough, continue anyway */
http_exp100_continue(data, reader);
infof(data, "Done waiting for 100-continue");
FALLTHROUGH();

View File

@ -146,7 +146,7 @@ CURLcode Curl_http_auth_act(struct Curl_easy *data);
selected to use no auth at all. Ie, we actively select no auth, as opposed
to not having one selected. The other CURLAUTH_* defines are present in the
public curl/curl.h header. */
#define CURLAUTH_PICKNONE (1<<30) /* don't use auth */
#define CURLAUTH_PICKNONE (1<<30) /* do not use auth */
/* MAX_INITIAL_POST_SIZE indicates the number of bytes that will make the POST
data get included in the initial data chunk sent to the server. If the
@ -235,7 +235,7 @@ struct httpreq {
};
/**
* Create a HTTP request struct.
* Create an HTTP request struct.
*/
CURLcode Curl_http_req_make(struct httpreq **preq,
const char *method, size_t m_len,
@ -285,7 +285,7 @@ struct http_resp {
};
/**
* Create a HTTP response struct.
* Create an HTTP response struct.
*/
CURLcode Curl_http_resp_make(struct http_resp **presp,
int status,

View File

@ -83,11 +83,10 @@
/* spare chunks we keep for a full window */
#define H2_STREAM_POOL_SPARES (H2_STREAM_WINDOW_SIZE / H2_CHUNK_SIZE)
/* We need to accommodate the max number of streams with their window
* sizes on the overall connection. Streams might become PAUSED which
* will block their received QUOTA in the connection window. And if we
* run out of space, the server is blocked from sending us any data.
* See #10988 for an issue with this. */
/* We need to accommodate the max number of streams with their window sizes on
* the overall connection. Streams might become PAUSED which will block their
* received QUOTA in the connection window. If we run out of space, the server
* is blocked from sending us any data. See #10988 for an issue with this. */
#define HTTP2_HUGE_WINDOW_SIZE (100 * H2_STREAM_WINDOW_SIZE)
#define H2_SETTINGS_IV_LEN 3
@ -610,8 +609,8 @@ static bool http2_connisalive(struct Curl_cfilter *cf, struct Curl_easy *data,
return FALSE;
if(*input_pending) {
/* This happens before we've sent off a request and the connection is
not in use by any other transfer, there shouldn't be any data here,
/* This happens before we have sent off a request and the connection is
not in use by any other transfer, there should not be any data here,
only "protocol frames" */
CURLcode result;
ssize_t nread = -1;
@ -1701,7 +1700,7 @@ static ssize_t http2_handle_stream_close(struct Curl_cfilter *cf,
if(stream->error == NGHTTP2_REFUSED_STREAM) {
CURL_TRC_CF(data, cf, "[%d] REFUSED_STREAM, try again on a new "
"connection", stream->id);
connclose(cf->conn, "REFUSED_STREAM"); /* don't use this anymore */
connclose(cf->conn, "REFUSED_STREAM"); /* do not use this anymore */
data->state.refused_stream = TRUE;
*err = CURLE_RECV_ERROR; /* trigger Curl_retry_request() later */
return -1;
@ -1808,7 +1807,7 @@ static void h2_pri_spec(struct cf_h2_ctx *ctx,
}
/*
* Check if there's been an update in the priority /
 * Check if there has been an update in the priority /
* dependency settings and if so it submits a PRIORITY frame with the updated
* info.
* Flush any out data pending in the network buffer.
@ -2006,7 +2005,7 @@ static ssize_t cf_h2_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
out:
result = h2_progress_egress(cf, data);
if(result == CURLE_AGAIN) {
/* pending data to send, need to be called again. Ideally, we'd
/* pending data to send, need to be called again. Ideally, we would
* monitor the socket for POLLOUT, but we might not be in SENDING
* transfer state any longer and are unable to make this happen.
*/
@ -2774,7 +2773,7 @@ bool Curl_http2_may_switch(struct Curl_easy *data,
data->state.httpwant == CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE) {
#ifndef CURL_DISABLE_PROXY
if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) {
/* We don't support HTTP/2 proxies yet. Also it's debatable
/* We do not support HTTP/2 proxies yet. Also it is debatable
whether or not this setting should apply to HTTP/2 proxies. */
infof(data, "Ignoring HTTP/2 prior knowledge due to proxy");
return FALSE;
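The prior-knowledge mode tested here comes from a single application option; a hedged sketch:

    #include <curl/curl.h>

    static void want_h2_prior_knowledge(CURL *h)
    {
      /* only meaningful for cleartext http:// URLs and, as the code notes,
         ignored when the request goes through an HTTP/1 proxy */
      curl_easy_setopt(h, CURLOPT_HTTP_VERSION,
                       (long)CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE);
    }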
@ -2798,7 +2797,7 @@ CURLcode Curl_http2_switch(struct Curl_easy *data,
if(result)
return result;
conn->httpversion = 20; /* we know we're on HTTP/2 now */
conn->httpversion = 20; /* we know we are on HTTP/2 now */
conn->bits.multiplex = TRUE; /* at least potentially multiplexed */
conn->bundle->multiuse = BUNDLE_MULTIPLEX;
Curl_multi_connchanged(data->multi);
@ -2822,7 +2821,7 @@ CURLcode Curl_http2_switch_at(struct Curl_cfilter *cf, struct Curl_easy *data)
return result;
cf_h2 = cf->next;
cf->conn->httpversion = 20; /* we know we're on HTTP/2 now */
cf->conn->httpversion = 20; /* we know we are on HTTP/2 now */
cf->conn->bits.multiplex = TRUE; /* at least potentially multiplexed */
cf->conn->bundle->multiuse = BUNDLE_MULTIPLEX;
Curl_multi_connchanged(data->multi);
@ -2875,7 +2874,7 @@ CURLcode Curl_http2_upgrade(struct Curl_easy *data,
" after upgrade: len=%zu", nread);
}
conn->httpversion = 20; /* we know we're on HTTP/2 now */
conn->httpversion = 20; /* we know we are on HTTP/2 now */
conn->bits.multiplex = TRUE; /* at least potentially multiplexed */
conn->bundle->multiuse = BUNDLE_MULTIPLEX;
Curl_multi_connchanged(data->multi);

View File

@ -620,7 +620,7 @@ CURLcode Curl_output_aws_sigv4(struct Curl_easy *data, bool proxy)
":%" MAX_SIGV4_LEN_TXT "s",
provider0, provider1, region, service);
if(!provider0[0]) {
failf(data, "first aws-sigv4 provider can't be empty");
failf(data, "first aws-sigv4 provider cannot be empty");
result = CURLE_BAD_FUNCTION_ARGUMENT;
goto fail;
}
@ -810,7 +810,7 @@ CURLcode Curl_output_aws_sigv4(struct Curl_easy *data, bool proxy)
"SignedHeaders=%s, "
"Signature=%s\r\n"
/*
* date_header is added here, only if it wasn't
* date_header is added here, only if it was not
* user-specified (using CURLOPT_HTTPHEADER).
* date_header includes \r\n
*/
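The provider/region/service fields parsed above come from the CURLOPT_AWS_SIGV4 string. A hedged usage sketch; bucket name and keys are placeholders:

    #include <curl/curl.h>

    static void sign_s3_request(CURL *h)
    {
      curl_easy_setopt(h, CURLOPT_URL,
                       "https://example-bucket.s3.amazonaws.com/file");
      /* provider0:provider1:region:service - provider0 must not be empty */
      curl_easy_setopt(h, CURLOPT_AWS_SIGV4, "aws:amz:us-east-1:s3");
      curl_easy_setopt(h, CURLOPT_USERPWD, "ACCESS_KEY:SECRET_KEY");
    }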

View File

@ -182,7 +182,7 @@ static CURLcode httpchunk_readwrite(struct Curl_easy *data,
case CHUNK_LF:
/* waiting for the LF after a chunk size */
if(*buf == 0x0a) {
/* we're now expecting data to come, unless size was zero! */
/* we are now expecting data to come, unless size was zero! */
if(0 == ch->datasize) {
ch->state = CHUNK_TRAILER; /* now check for trailers */
}
@ -289,9 +289,9 @@ static CURLcode httpchunk_readwrite(struct Curl_easy *data,
break;
}
else {
/* no trailer, we're on the final CRLF pair */
/* no trailer, we are on the final CRLF pair */
ch->state = CHUNK_TRAILER_POSTCR;
break; /* don't advance the pointer */
break; /* do not advance the pointer */
}
}
else {
@ -344,7 +344,7 @@ static CURLcode httpchunk_readwrite(struct Curl_easy *data,
blen--;
(*pconsumed)++;
/* Record the length of any data left in the end of the buffer
even if there's no more chunks to read */
 even if there are no more chunks to read */
ch->datasize = blen;
ch->state = CHUNK_DONE;
CURL_TRC_WRITE(data, "http_chunk, response complete");
@ -470,7 +470,7 @@ const struct Curl_cwtype Curl_httpchunk_unencoder = {
sizeof(struct chunked_writer)
};
/* max length of a HTTP chunk that we want to generate */
/* max length of an HTTP chunk that we want to generate */
#define CURL_CHUNKED_MINLEN (1024)
#define CURL_CHUNKED_MAXLEN (64 * 1024)
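For reference while reading the parser states above, a hedged example of the chunked wire format being decoded: a hexadecimal size line, the data, a CRLF, then a zero-sized chunk ending the body.

    static const char chunked_body[] =
      "4\r\n"        /* chunk size in hex */
      "Wiki\r\n"     /* 4 bytes of payload, then CRLF */
      "6\r\n"
      "pedia \r\n"   /* 6 bytes, note the trailing space */
      "0\r\n"        /* last chunk has size zero */
      "\r\n";        /* optional trailer headers would appear before this */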

View File

@ -38,7 +38,7 @@ struct connectdata;
#define CHUNK_MAXNUM_LEN (SIZEOF_CURL_OFF_T * 2)
typedef enum {
/* await and buffer all hexadecimal digits until we get one that isn't a
/* await and buffer all hexadecimal digits until we get one that is not a
hexadecimal digit. When done, we go CHUNK_LF */
CHUNK_HEX,
@ -54,9 +54,9 @@ typedef enum {
big deal. */
CHUNK_POSTLF,
/* Used to mark that we're out of the game. NOTE: that there's a 'datasize'
field in the struct that will tell how many bytes that were not passed to
the client in the end of the last buffer! */
/* Used to mark that we are out of the game. NOTE: there is a 'datasize'
field in the struct that tells how many bytes were not passed to the
client in the end of the last buffer! */
CHUNK_STOP,
/* At this point optional trailer headers can be found, unless the next line

View File

@ -95,7 +95,7 @@ CURLcode Curl_input_negotiate(struct Curl_easy *data, struct connectdata *conn,
Curl_http_auth_cleanup_negotiate(conn);
}
else if(state != GSS_AUTHNONE) {
/* The server rejected our authentication and hasn't supplied any more
/* The server rejected our authentication and has not supplied any more
negotiation mechanisms */
Curl_http_auth_cleanup_negotiate(conn);
return CURLE_LOGIN_DENIED;
@ -218,7 +218,7 @@ CURLcode Curl_output_negotiate(struct Curl_easy *data,
if(*state == GSS_AUTHDONE || *state == GSS_AUTHSUCC) {
/* connection is already authenticated,
* don't send a header in future requests */
* do not send a header in future requests */
authp->done = TRUE;
}

View File

@ -200,7 +200,7 @@ CURLcode Curl_output_ntlm(struct Curl_easy *data, bool proxy)
Curl_bufref_init(&ntlmmsg);
/* connection is already authenticated, don't send a header in future
/* connection is already authenticated, do not send a header in future
* requests so go directly to NTLMSTATE_LAST */
if(*state == NTLMSTATE_TYPE3)
*state = NTLMSTATE_LAST;

View File

@ -512,7 +512,7 @@ static CURLcode imap_perform_login(struct Curl_easy *data,
char *passwd;
/* Check we have a username and password to authenticate with and end the
connect phase if we don't */
connect phase if we do not */
if(!data->state.aptr.user) {
imap_state(data, IMAP_STOP);
@ -612,7 +612,7 @@ static CURLcode imap_perform_authentication(struct Curl_easy *data,
saslprogress progress;
/* Check if already authenticated OR if there is enough data to authenticate
with and end the connect phase if we don't */
with and end the connect phase if we do not */
if(imapc->preauth ||
!Curl_sasl_can_authenticate(&imapc->sasl, data)) {
imap_state(data, IMAP_STOP);
@ -1187,7 +1187,7 @@ static CURLcode imap_state_fetch_resp(struct Curl_easy *data,
chunk = (size_t)size;
if(!chunk) {
/* no size, we're done with the data */
/* no size, we are done with the data */
imap_state(data, IMAP_STOP);
return CURLE_OK;
}
@ -1225,7 +1225,7 @@ static CURLcode imap_state_fetch_resp(struct Curl_easy *data,
}
}
else {
/* We don't know how to parse this line */
/* We do not know how to parse this line */
failf(data, "Failed to parse FETCH response.");
result = CURLE_WEIRD_SERVER_REPLY;
}
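A hedged application-side sketch of the FETCH path handled above: the credentials feed the login states and the UID in the URL selects the message whose untagged FETCH response is parsed here. Server name and credentials are placeholders.

    #include <curl/curl.h>

    static void fetch_message(CURL *h)
    {
      curl_easy_setopt(h, CURLOPT_URL, "imaps://mail.example.com/INBOX/;UID=1");
      curl_easy_setopt(h, CURLOPT_USERNAME, "user");
      curl_easy_setopt(h, CURLOPT_PASSWORD, "secret");
      curl_easy_perform(h); /* the message body goes to the write callback */
    }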

View File

@ -168,7 +168,7 @@ static char *inet_ntop6(const unsigned char *src, char *dst, size_t size)
*tp++ = ':';
*tp++ = '\0';
/* Check for overflow, copy, and we're done.
/* Check for overflow, copy, and we are done.
*/
if((size_t)(tp - tmp) > size) {
errno = ENOSPC;
@ -185,10 +185,9 @@ static char *inet_ntop6(const unsigned char *src, char *dst, size_t size)
* Returns NULL on error and errno set with the specific
* error, EAFNOSUPPORT or ENOSPC.
*
* On Windows we store the error in the thread errno, not
* in the winsock error code. This is to avoid losing the
* actual last winsock error. So when this function returns
* NULL, check errno not SOCKERRNO.
* On Windows we store the error in the thread errno, not in the winsock error
* code. This is to avoid losing the actual last winsock error. When this
* function returns NULL, check errno not SOCKERRNO.
*/
char *Curl_inet_ntop(int af, const void *src, char *buf, size_t size)
{

View File

@ -48,7 +48,7 @@
#endif
/*
* WARNING: Don't even consider trying to compile this on a system where
* WARNING: Do not even consider trying to compile this on a system where
* sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX.
*/
@ -61,12 +61,12 @@ static int inet_pton6(const char *src, unsigned char *dst);
* to network format (which is usually some kind of binary format).
* return:
* 1 if the address was valid for the specified address family
* 0 if the address wasn't valid (`dst' is untouched in this case)
* 0 if the address was not valid (`dst' is untouched in this case)
* -1 if some other error occurred (`dst' is untouched in this case, too)
* notice:
* On Windows we store the error in the thread errno, not
* in the winsock error code. This is to avoid losing the
* actual last winsock error. So when this function returns
* actual last winsock error. When this function returns
* -1, check errno not SOCKERRNO.
* author:
* Paul Vixie, 1996.
@ -92,7 +92,7 @@ Curl_inet_pton(int af, const char *src, void *dst)
* return:
* 1 if `src' is a valid dotted quad, else 0.
* notice:
* does not touch `dst' unless it's returning 1.
* does not touch `dst' unless it is returning 1.
* author:
* Paul Vixie, 1996.
*/
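The 1 / 0 / -1 convention documented above matches the standard inet_pton(); a short usage sketch of that convention:

    #include <arpa/inet.h>

    static int parse_v6(const char *text, unsigned char out[16])
    {
      int rc = inet_pton(AF_INET6, text, out);
      if(rc == 1)
        return 0;   /* valid address, 'out' holds the binary form */
      if(rc == 0)
        return -1;  /* not a valid address, 'out' left untouched */
      return -2;    /* rc == -1: unsupported address family, errno is set */
    }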
@ -147,7 +147,7 @@ inet_pton4(const char *src, unsigned char *dst)
* return:
* 1 if `src' is a valid [RFC1884 2.2] address, else 0.
* notice:
* (1) does not touch `dst' unless it's returning 1.
* (1) does not touch `dst' unless it is returning 1.
* (2) :: in a full address is silently ignored.
* credit:
* inspired by Mark Andrews.
@ -221,7 +221,7 @@ inet_pton6(const char *src, unsigned char *dst)
if(colonp) {
/*
* Since some memmove()'s erroneously fail to handle
* overlapping regions, we'll do the shift by hand.
* overlapping regions, we will do the shift by hand.
*/
const ssize_t n = tp - colonp;
ssize_t i;

View File

@ -178,7 +178,7 @@ krb5_encode(void *app_data, const void *from, int length, int level, void **to)
if(maj != GSS_S_COMPLETE)
return -1;
/* malloc a new buffer, in case gss_release_buffer doesn't work as
/* malloc a new buffer, in case gss_release_buffer does not work as
expected */
*to = malloc(enc.length);
if(!*to)
@ -227,7 +227,7 @@ krb5_auth(void *app_data, struct Curl_easy *data, struct connectdata *conn)
/* this loop will execute twice (once for service, once for host) */
for(;;) {
/* this really shouldn't be repeated here, but can't help it */
/* this really should not be repeated here, but cannot help it */
if(service == srv_host) {
result = ftpsend(data, conn, "AUTH GSSAPI");
if(result)
@ -329,7 +329,7 @@ krb5_auth(void *app_data, struct Curl_easy *data, struct connectdata *conn)
size_t len = Curl_dyn_len(&pp->recvbuf);
p = Curl_dyn_ptr(&pp->recvbuf);
if((len < 4) || (p[0] != '2' && p[0] != '3')) {
infof(data, "Server didn't accept auth data");
infof(data, "Server did not accept auth data");
ret = AUTH_ERROR;
break;
}
@ -878,7 +878,7 @@ static CURLcode choose_mech(struct Curl_easy *data, struct connectdata *conn)
if(ret != AUTH_CONTINUE) {
if(ret != AUTH_OK) {
/* Mechanism has dumped the error to stderr, don't error here. */
/* Mechanism has dumped the error to stderr, do not error here. */
return CURLE_USE_SSL_FAILED;
}
DEBUGASSERT(ret == AUTH_OK);

View File

@ -375,7 +375,7 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done)
if(ldap_ssl) {
#ifdef HAVE_LDAP_SSL
#ifdef USE_WIN32_LDAP
/* Win32 LDAP SDK doesn't support insecure mode without CA! */
/* Win32 LDAP SDK does not support insecure mode without CA! */
server = ldap_sslinit(host, (curl_ldap_num_t)conn->primary.remote_port, 1);
ldap_set_option(server, LDAP_OPT_SSL, LDAP_OPT_ON);
#else

View File

@ -38,8 +38,8 @@ CURLcode Curl_macos_init(void)
/*
* The automagic conversion from IPv4 literals to IPv6 literals only
* works if the SCDynamicStoreCopyProxies system function gets called
* first. As Curl currently doesn't support system-wide HTTP proxies, we
* therefore don't use any value this function might return.
* first. As Curl currently does not support system-wide HTTP proxies, we
* therefore do not use any value this function might return.
*
* This function is only available on macOS and is not needed for
* IPv4-only builds, hence the conditions for defining

View File

@ -217,7 +217,7 @@ static void MD4_Final(unsigned char *result, MD4_CTX *ctx)
}
#else
/* When no other crypto library is available, or the crypto library doesn't
/* When no other crypto library is available, or the crypto library does not
 * support MD4, we use this code segment, an implementation of it
*
* This is an OpenSSL-compatible implementation of the RSA Data Security, Inc.
@ -239,13 +239,13 @@ static void MD4_Final(unsigned char *result, MD4_CTX *ctx)
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
* There is ABSOLUTELY NO WARRANTY, express or implied.
*
* (This is a heavily cut-down "BSD license".)
*
* This differs from Colin Plumb's older public domain implementation in that
* no exactly 32-bit integer data type is required (any 32-bit or wider
* unsigned integer data type will do), there's no compile-time endianness
* unsigned integer data type will do), there is no compile-time endianness
* configuration, and the function prototypes match OpenSSL's. No code from
* Colin Plumb's implementation has been reused; this comment merely compares
* the properties of the two independent implementations.
@ -294,7 +294,7 @@ static void MD4_Final(unsigned char *result, MD4_CTX *ctx);
*
* The check for little-endian architectures that tolerate unaligned
* memory accesses is just an optimization. Nothing will break if it
* doesn't work.
* does not work.
*/
#if defined(__i386__) || defined(__x86_64__) || defined(__vax__)
#define MD4_SET(n) \

View File

@ -172,7 +172,7 @@ static void my_md5_final(unsigned char *digest, my_md5_ctx *ctx)
/* For Apple operating systems: CommonCrypto has the functions we need.
These functions are available on Tiger and later, as well as iOS 2.0
and later. If you're building for an older cat, well, sorry.
and later. If you are building for an older cat, well, sorry.
Declaring the functions as static like this seems to be a bit more
reliable than defining COMMON_DIGEST_FOR_OPENSSL on older cats. */
@ -264,13 +264,13 @@ static void my_md5_final(unsigned char *digest, my_md5_ctx *ctx)
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
* There is ABSOLUTELY NO WARRANTY, express or implied.
*
* (This is a heavily cut-down "BSD license".)
*
* This differs from Colin Plumb's older public domain implementation in that
* no exactly 32-bit integer data type is required (any 32-bit or wider
* unsigned integer data type will do), there's no compile-time endianness
* unsigned integer data type will do), there is no compile-time endianness
* configuration, and the function prototypes match OpenSSL's. No code from
* Colin Plumb's implementation has been reused; this comment merely compares
* the properties of the two independent implementations.
@ -324,7 +324,7 @@ static void my_md5_final(unsigned char *result, my_md5_ctx *ctx);
*
* The check for little-endian architectures that tolerate unaligned
* memory accesses is just an optimization. Nothing will break if it
* doesn't work.
* does not work.
*/
#if defined(__i386__) || defined(__x86_64__) || defined(__vax__)
#define MD5_SET(n) \

View File

@ -30,7 +30,7 @@
#include "urldata.h"
#define MEMDEBUG_NODEFINES /* don't redefine the standard functions */
#define MEMDEBUG_NODEFINES /* do not redefine the standard functions */
/* The last 3 #include files should be in this order */
#include "curl_printf.h"
@ -44,7 +44,7 @@ struct memdebug {
double d;
void *p;
} mem[1];
/* I'm hoping this is the thing with the strictest alignment
/* I am hoping this is the thing with the strictest alignment
* requirements. That also means we waste some space :-( */
};
@ -53,7 +53,7 @@ struct memdebug {
* remain so. For advanced analysis, record a log file and write perl scripts
* to analyze them!
*
* Don't use these with multithreaded test programs!
* Do not use these with multithreaded test programs!
*/
FILE *curl_dbg_logfile = NULL;
@ -84,7 +84,7 @@ void curl_dbg_memdebug(const char *logname)
else
curl_dbg_logfile = stderr;
#ifdef MEMDEBUG_LOG_SYNC
/* Flush the log file after every line so the log isn't lost in a crash */
/* Flush the log file after every line so the log is not lost in a crash */
if(curl_dbg_logfile)
setbuf(curl_dbg_logfile, (char *)NULL);
#endif
@ -103,7 +103,7 @@ void curl_dbg_memlimit(long limit)
}
}
/* returns TRUE if this isn't allowed! */
/* returns TRUE if this is not allowed! */
static bool countcheck(const char *func, int line, const char *source)
{
/* if source is NULL, then the call is made internally and this check

View File

@ -1147,7 +1147,7 @@ static void mime_subparts_free(void *ptr)
curl_mime *mime = (curl_mime *) ptr;
if(mime && mime->parent) {
mime->parent->freefunc = NULL; /* Be sure we won't be called again. */
mime->parent->freefunc = NULL; /* Be sure we will not be called again. */
cleanup_part_content(mime->parent); /* Avoid dangling pointer in part. */
}
curl_mime_free(mime);
@ -1159,7 +1159,7 @@ static void mime_subparts_unbind(void *ptr)
curl_mime *mime = (curl_mime *) ptr;
if(mime && mime->parent) {
mime->parent->freefunc = NULL; /* Be sure we won't be called again. */
mime->parent->freefunc = NULL; /* Be sure we will not be called again. */
cleanup_part_content(mime->parent); /* Avoid dangling pointer in part. */
mime->parent = NULL;
}
@ -1186,7 +1186,7 @@ void curl_mime_free(curl_mime *mime)
curl_mimepart *part;
if(mime) {
mime_subparts_unbind(mime); /* Be sure it's not referenced anymore. */
mime_subparts_unbind(mime); /* Be sure it is not referenced anymore. */
while(mime->firstpart) {
part = mime->firstpart;
mime->firstpart = part->nextpart;
@ -1554,7 +1554,7 @@ CURLcode Curl_mime_set_subparts(curl_mimepart *part,
while(root->parent && root->parent->parent)
root = root->parent->parent;
if(subparts == root) {
/* Can't add as a subpart of itself. */
/* Cannot add as a subpart of itself. */
return CURLE_BAD_FUNCTION_ARGUMENT;
}
}
@ -1771,7 +1771,7 @@ CURLcode Curl_mime_prepare_headers(struct Curl_easy *data,
curl_slist_free_all(part->curlheaders);
part->curlheaders = NULL;
/* Be sure we won't access old headers later. */
/* Be sure we will not access old headers later. */
if(part->state.state == MIMESTATE_CURLHEADERS)
mimesetstate(&part->state, MIMESTATE_CURLHEADERS, NULL);
@ -2072,7 +2072,7 @@ static CURLcode cr_mime_resume_from(struct Curl_easy *data,
return CURLE_PARTIAL_FILE;
}
}
/* we've passed, proceed as normal */
/* we have passed, proceed as normal */
}
return CURLE_OK;
}
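The self-reference check above guards the public subparts API. A hedged sketch of normal use, where one mime structure is attached under another and later freed through its new owner:

    #include <curl/curl.h>

    static void post_multipart(CURL *h)
    {
      curl_mime *outer = curl_mime_init(h);
      curl_mime *inner = curl_mime_init(h);
      curl_mimepart *part;

      part = curl_mime_addpart(inner);
      curl_mime_name(part, "field");
      curl_mime_data(part, "value", CURL_ZERO_TERMINATED);

      part = curl_mime_addpart(outer);
      curl_mime_subparts(part, inner); /* 'inner' is now owned by 'outer' */

      curl_easy_setopt(h, CURLOPT_MIMEPOST, outer);
      curl_easy_perform(h);
      curl_mime_free(outer);           /* frees 'inner' as well */
    }

Passing a mime handle as a subpart of itself, directly or through an ancestor, is the case rejected with CURLE_BAD_FUNCTION_ARGUMENT above.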

View File

@ -863,7 +863,7 @@ number:
str = (char *)iptr->val.str;
if(!str) {
/* Write null string if there's space. */
/* Write null string if there is space. */
if(prec == -1 || prec >= (int) sizeof(nilstr) - 1) {
str = nilstr;
len = sizeof(nilstr) - 1;
@ -1040,7 +1040,7 @@ static int addbyter(unsigned char outc, void *f)
{
struct nsprintf *infop = f;
if(infop->length < infop->max) {
/* only do this if we haven't reached max length yet */
/* only do this if we have not reached max length yet */
*infop->buffer++ = (char)outc; /* store */
infop->length++; /* we are now one byte larger */
return 0; /* fputc() returns like this on success */
@ -1062,10 +1062,10 @@ int curl_mvsnprintf(char *buffer, size_t maxlength, const char *format,
if(info.max) {
/* we terminate this with a zero byte */
if(info.max == info.length) {
/* we're at maximum, scrap the last letter */
/* we are at maximum, scrap the last letter */
info.buffer[-1] = 0;
DEBUGASSERT(retcode);
retcode--; /* don't count the nul byte */
retcode--; /* do not count the nul byte */
}
else
info.buffer[0] = 0;
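A hedged illustration of the truncation rule implemented above: the buffer is always null-terminated within the given maximum, and the return value does not count that nul byte.

    #include <curl/mprintf.h>

    static void truncation_demo(void)
    {
      char small[8];
      int n = curl_msnprintf(small, sizeof(small), "%s", "hello world");
      /* 'small' now holds "hello w" plus the terminating nul;
         'n' does not include that nul byte */
      (void)n;
    }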

View File

@ -135,7 +135,7 @@ static void init_completed(struct Curl_easy *data)
{
/* this is a completed transfer */
/* Important: reset the conn pointer so that we don't point to memory
/* Important: reset the conn pointer so that we do not point to memory
that could be freed anytime */
Curl_detach_connection(data);
Curl_expire_clear(data); /* stop all timers */
@ -175,7 +175,7 @@ static void mstate(struct Curl_easy *data, CURLMstate state
#endif
if(oldstate == state)
/* don't bother when the new state is the same as the old state */
/* do not bother when the new state is the same as the old state */
return;
data->mstate = state;
@ -191,7 +191,7 @@ static void mstate(struct Curl_easy *data, CURLMstate state
#endif
if(state == MSTATE_COMPLETED) {
/* changing to COMPLETED means there's one less easy handle 'alive' */
/* changing to COMPLETED means there is one less easy handle 'alive' */
DEBUGASSERT(data->multi->num_alive > 0);
data->multi->num_alive--;
if(!data->multi->num_alive) {
@ -354,11 +354,11 @@ static size_t hash_fd(void *key, size_t key_length, size_t slots_num)
* "Some tests at 7000 and 9000 connections showed that the socket hash lookup
* is somewhat of a bottle neck. Its current implementation may be a bit too
* limiting. It simply has a fixed-size array, and on each entry in the array
* it has a linked list with entries. So the hash only checks which list to
* scan through. The code I had used so for used a list with merely 7 slots
* (as that is what the DNS hash uses) but with 7000 connections that would
* make an average of 1000 nodes in each list to run through. I upped that to
* 97 slots (I believe a prime is suitable) and noticed a significant speed
* it has a linked list with entries. The hash only checks which list to scan
 * through. The code I had used so far used a list with merely 7 slots (as
* that is what the DNS hash uses) but with 7000 connections that would make
* an average of 1000 nodes in each list to run through. I upped that to 97
* slots (I believe a prime is suitable) and noticed a significant speed
* increase. I need to reconsider the hash implementation or use a rather
* large default value like this. At 9000 connections I was still below 10us
* per call."
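The socket hash discussed above serves the curl_multi_socket_action() API. A hedged sketch of the application side that feeds it events; the event-loop integration itself is omitted:

    #include <curl/curl.h>

    static int sock_cb(CURL *easy, curl_socket_t s, int what, void *userp,
                       void *socketp)
    {
      /* add, modify or remove 's' in the event loop according to 'what'
         (CURL_POLL_IN, CURL_POLL_OUT, CURL_POLL_REMOVE, ...) */
      (void)easy; (void)s; (void)what; (void)userp; (void)socketp;
      return 0;
    }

    static void use_socket_api(CURLM *multi)
    {
      curl_multi_setopt(multi, CURLMOPT_SOCKETFUNCTION, sock_cb);
      /* later, when the loop reports activity on a socket:
         int running = 0;
         curl_multi_socket_action(multi, s, bitmask, &running); */
    }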
@ -552,10 +552,10 @@ CURLMcode curl_multi_add_handle(struct Curl_multi *multi,
Curl_llist_init(&data->state.timeoutlist, NULL);
/*
* No failure allowed in this function beyond this point. And no
* modification of easy nor multi handle allowed before this except for
* potential multi's connection cache growing which won't be undone in this
* function no matter what.
* No failure allowed in this function beyond this point. No modification of
* easy nor multi handle allowed before this except for potential multi's
* connection cache growing which will not be undone in this function no
* matter what.
*/
if(data->set.errorbuffer)
data->set.errorbuffer[0] = 0;
@ -692,8 +692,8 @@ static CURLcode multi_done(struct Curl_easy *data,
case CURLE_ABORTED_BY_CALLBACK:
case CURLE_READ_ERROR:
case CURLE_WRITE_ERROR:
/* When we're aborted due to a callback return code it basically have to
be counted as premature as there is trouble ahead if we don't. We have
/* When we are aborted due to a callback return code it basically has to
be counted as premature as there is trouble ahead if we do not. We have
many callbacks and protocols work differently, we could potentially do
this more fine-grained in the future. */
premature = TRUE;
@ -757,8 +757,8 @@ static CURLcode multi_done(struct Curl_easy *data,
restrictions in our or the server's end
if premature is TRUE, it means this connection was said to be DONE before
the entire request operation is complete and thus we can't know in what
state it is for reusing, so we're forced to close it. In a perfect world
the entire request operation is complete and thus we cannot know in what
state it is for reusing, so we are forced to close it. In a perfect world
we can add code that keep track of if we really must close it here or not,
but currently we have no such detail knowledge.
*/
@ -880,7 +880,7 @@ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi,
/* multi_done() clears the association between the easy handle and the
connection.
Note that this ignores the return code simply because there's
Note that this ignores the return code simply because there is
nothing really useful to do with it anyway! */
(void)multi_done(data, data->result, premature);
}
@ -914,7 +914,7 @@ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi,
what we want */
data->mstate = MSTATE_COMPLETED;
/* This ignores the return code even in case of problems because there's
/* This ignores the return code even in case of problems because there is
nothing more to do about that, here */
(void)singlesocket(multi, easy); /* to let the application know what sockets
that vanish with this handle */
@ -926,7 +926,7 @@ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi,
/* This removes a handle that was part the multi interface that used
CONNECT_ONLY, that connection is now left alive but since this handle
has bits.close set nothing can use that transfer anymore and it is
forbidden from reuse. And this easy handle cannot find the connection
forbidden from reuse. This easy handle cannot find the connection
anymore once removed from the multi handle
Better close the connection here, at once.
@ -953,12 +953,12 @@ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi,
#endif
/* as this was using a shared connection cache we clear the pointer to that
since we're not part of that multi handle anymore */
since we are not part of that multi handle anymore */
data->state.conn_cache = NULL;
data->multi = NULL; /* clear the association to this multi handle */
/* make sure there's no pending message in the queue sent from this easy
/* make sure there is no pending message in the queue sent from this easy
handle */
for(e = multi->msglist.head; e; e = e->next) {
struct Curl_message *msg = e->ptr;
@ -1225,7 +1225,7 @@ CURLMcode curl_multi_fdset(struct Curl_multi *multi,
for(i = 0; i < ps.num; i++) {
if(!FDSET_SOCK(ps.sockets[i]))
/* pretend it doesn't exist */
/* pretend it does not exist */
continue;
if(ps.actions[i] & CURL_POLL_IN)
FD_SET(ps.sockets[i], read_fd_set);
@ -1282,7 +1282,7 @@ out:
}
#ifdef USE_WINSOCK
/* Reset FD_WRITE for TCP sockets. Nothing is actually sent. UDP sockets can't
/* Reset FD_WRITE for TCP sockets. Nothing is actually sent. UDP sockets cannot
* be reset this way because an empty datagram would be sent. #9203
*
* "On Windows the internal state of FD_WRITE as returned from
@ -1528,7 +1528,7 @@ static CURLMcode multi_wait(struct Curl_multi *multi,
#endif
long sleep_ms = 0;
/* Avoid busy-looping when there's nothing particular to wait for */
/* Avoid busy-looping when there is nothing particular to wait for */
if(!curl_multi_timeout(multi, &sleep_ms) && sleep_ms) {
if(sleep_ms > timeout_ms)
sleep_ms = timeout_ms;
@ -1606,7 +1606,7 @@ CURLMcode curl_multi_wakeup(struct Curl_multi *multi)
The write socket is set to non-blocking, this way this function
cannot block, making it safe to call even from the same thread
that will call curl_multi_wait(). If swrite() returns that it
would block, it's considered successful because it means that
would block, it is considered successful because it means that
previous calls to this function will wake up the poll(). */
if(wakeup_write(multi->wakeup_pair[1], buf, sizeof(buf)) < 0) {
int err = SOCKERRNO;
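A hedged sketch of the public API this backs: one thread drives transfers and waits, while another can interrupt that wait with curl_multi_wakeup(), which the non-blocking write above makes safe to call concurrently.

    #include <curl/curl.h>

    static void drive_transfers(CURLM *multi)
    {
      int still_running = 0;
      do {
        int numfds = 0;
        curl_multi_perform(multi, &still_running);
        curl_multi_poll(multi, NULL, 0, 1000, &numfds); /* wait up to 1s */
      } while(still_running);
    }

    /* from another thread, curl_multi_wakeup(multi) makes the poll above
       return early */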
@ -1670,7 +1670,7 @@ CURLMcode Curl_multi_add_perform(struct Curl_multi *multi,
if(!rc) {
struct SingleRequest *k = &data->req;
/* pass in NULL for 'conn' here since we don't want to init the
/* pass in NULL for 'conn' here since we do not want to init the
connection, only this transfer */
Curl_init_do(data, NULL);
@ -1702,7 +1702,7 @@ static CURLcode multi_do(struct Curl_easy *data, bool *done)
* second connection.
*
* 'complete' can return 0 for incomplete, 1 for done and -1 for go back to
* DOING state there's more work to do!
 * DOING state as there is more work to do!
*/
static CURLcode multi_do_more(struct Curl_easy *data, int *complete)
@ -1830,10 +1830,10 @@ static CURLcode protocol_connect(struct Curl_easy *data,
&& conn->bits.protoconnstart) {
/* We already are connected, get back. This may happen when the connect
worked fine in the first call, like when we connect to a local server
or proxy. Note that we don't know if the protocol is actually done.
or proxy. Note that we do not know if the protocol is actually done.
Unless this protocol doesn't have any protocol-connect callback, as
then we know we're done. */
Unless this protocol does not have any protocol-connect callback, as
then we know we are done. */
if(!conn->handler->connecting)
*protocol_done = TRUE;
@ -1850,7 +1850,7 @@ static CURLcode protocol_connect(struct Curl_easy *data,
else
*protocol_done = TRUE;
/* it has started, possibly even completed but that knowledge isn't stored
/* it has started, possibly even completed but that knowledge is not stored
in this bit! */
if(!result)
conn->bits.protoconnstart = TRUE;
@ -1969,7 +1969,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
if(!result) {
*nowp = Curl_pgrsTime(data, TIMER_POSTQUEUE);
if(async)
/* We're now waiting for an asynchronous name lookup */
/* We are now waiting for an asynchronous name lookup */
multistate(data, MSTATE_RESOLVING);
else {
/* after the connect has been sent off, go WAITCONNECT unless the
@ -2158,7 +2158,8 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
Curl_set_in_callback(data, false);
if(prereq_rc != CURL_PREREQFUNC_OK) {
failf(data, "operation aborted by pre-request callback");
/* failure in pre-request callback - don't do any other processing */
/* failure in pre-request callback - do not do any other
processing */
result = CURLE_ABORTED_BY_CALLBACK;
Curl_posttransfer(data);
multi_done(data, result, FALSE);
@ -2190,7 +2191,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
/* skip some states if it is important */
multi_done(data, CURLE_OK, FALSE);
/* if there's no connection left, skip the DONE state */
/* if there is no connection left, skip the DONE state */
multistate(data, data->conn ?
MSTATE_DONE : MSTATE_COMPLETED);
rc = CURLM_CALL_MULTI_PERFORM;
@ -2206,13 +2207,13 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
/* after DO, go DO_DONE... or DO_MORE */
else if(data->conn->bits.do_more) {
/* we're supposed to do more, but we need to sit down, relax
/* we are supposed to do more, but we need to sit down, relax
and wait a little while first */
multistate(data, MSTATE_DOING_MORE);
rc = CURLM_CALL_MULTI_PERFORM;
}
else {
/* we're done with the DO, now DID */
/* we are done with the DO, now DID */
multistate(data, MSTATE_DID);
rc = CURLM_CALL_MULTI_PERFORM;
}
@ -2255,12 +2256,12 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
}
}
else {
/* done didn't return OK or SEND_ERROR */
/* done did not return OK or SEND_ERROR */
result = drc;
}
}
else {
/* Have error handler disconnect conn if we can't retry */
/* Have error handler disconnect conn if we cannot retry */
stream_error = TRUE;
}
free(newurl);
@ -2327,7 +2328,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
/* Check if we can move pending requests to send pipe */
process_pending_handles(multi); /* multiplexed */
/* Only perform the transfer if there's a good socket to work with.
/* Only perform the transfer if there is a good socket to work with.
Having both BAD is a signal to skip immediately to DONE */
if((data->conn->sockfd != CURL_SOCKET_BAD) ||
(data->conn->writesockfd != CURL_SOCKET_BAD))
@ -2469,7 +2470,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
if(result) {
/*
* The transfer phase returned error, we mark the connection to get
* closed to prevent being reused. This is because we can't possibly
* closed to prevent being reused. This is because we cannot possibly
* know if the connection is in a good shape or not now. Unless it is
* a protocol which uses two "channels" like FTP, as then the error
* happened in the data connection.
@ -2512,8 +2513,8 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
else {
/* after the transfer is done, go DONE */
/* but first check to see if we got a location info even though we're
not following redirects */
/* but first check to see if we got a location info even though we
are not following redirects */
if(data->req.location) {
free(newurl);
newurl = data->req.location;
@ -2533,8 +2534,8 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
}
else if(data->state.select_bits && !Curl_xfer_is_blocked(data)) {
/* This avoids CURLM_CALL_MULTI_PERFORM so that a very fast transfer
won't get stuck on this transfer at the expense of other concurrent
transfers */
will not get stuck on this transfer at the expense of other
concurrent transfers */
Curl_expire(data, 0, EXPIRE_RUN_NOW);
}
free(newurl);
@ -2570,8 +2571,8 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
}
}
#endif
/* after we have DONE what we're supposed to do, go COMPLETED, and
it doesn't matter what the multi_done() returned! */
/* after we have DONE what we are supposed to do, go COMPLETED, and
it does not matter what the multi_done() returned! */
multistate(data, MSTATE_COMPLETED);
break;
@ -2606,7 +2607,7 @@ statemachine_end:
if(data->mstate < MSTATE_COMPLETED) {
if(result) {
/*
* If an error was returned, and we aren't in completed state now,
* If an error was returned, and we are not in completed state now,
* then we go to completed and consider this transfer aborted.
*/
@ -2618,12 +2619,12 @@ statemachine_end:
if(data->conn) {
if(stream_error) {
/* Don't attempt to send data over a connection that timed out */
/* Do not attempt to send data over a connection that timed out */
bool dead_connection = result == CURLE_OPERATION_TIMEDOUT;
struct connectdata *conn = data->conn;
/* This is where we make sure that the conn pointer is reset.
We don't have to do this in every case block above where a
We do not have to do this in every case block above where a
failure is detected */
Curl_detach_connection(data);
@ -2642,7 +2643,7 @@ statemachine_end:
multistate(data, MSTATE_COMPLETED);
rc = CURLM_CALL_MULTI_PERFORM;
}
/* if there's still a connection to use, call the progress function */
/* if there is still a connection to use, call the progress function */
else if(data->conn && Curl_pgrsUpdate(data)) {
/* aborted due to progress callback return code must close the
connection */
@ -2923,7 +2924,7 @@ static CURLMcode singlesocket(struct Curl_multi *multi,
}
}
else {
/* this is a socket we didn't have before, add it to the hash! */
/* this is a socket we did not have before, add it to the hash! */
entry = sh_addentry(&multi->sockhash, s);
if(!entry)
/* fatal */
@ -3057,7 +3058,7 @@ CURLcode Curl_updatesocket(struct Curl_easy *data)
void Curl_multi_closed(struct Curl_easy *data, curl_socket_t s)
{
if(data) {
/* if there's still an easy handle associated with this connection */
/* if there is still an easy handle associated with this connection */
struct Curl_multi *multi = data->multi;
if(multi) {
/* this is set if this connection is part of a handle that is added to
@ -3176,7 +3177,7 @@ static CURLMcode multi_socket(struct Curl_multi *multi,
struct Curl_sh_entry *entry = sh_getentry(&multi->sockhash, s);
if(!entry) {
/* Unmatched socket, we can't act on it but we ignore this fact. In
/* Unmatched socket, we cannot act on it but we ignore this fact. In
real-world tests it has been proved that libevent can in fact give
the application actions even though the socket was just previously
asked to get removed, so thus we better survive stray socket actions
@ -3198,18 +3199,18 @@ static CURLMcode multi_socket(struct Curl_multi *multi,
DEBUGASSERT(data->magic == CURLEASY_MAGIC_NUMBER);
if(data->conn && !(data->conn->handler->flags & PROTOPT_DIRLOCK))
/* set socket event bitmask if they're not locked */
/* set socket event bitmask if they are not locked */
data->state.select_bits |= (unsigned char)ev_bitmask;
Curl_expire(data, 0, EXPIRE_RUN_NOW);
}
/* Now we fall-through and do the timer-based stuff, since we don't want
/* Now we fall-through and do the timer-based stuff, since we do not want
to force the user to have to deal with timeouts as long as at least
one connection in fact has traffic. */
data = NULL; /* set data to NULL again to avoid calling
multi_runsingle() in case there's no need to */
multi_runsingle() in case there is no need to */
now = Curl_now(); /* get a newer time since the multi_runsingle() loop
may have taken some time */
}
@ -3252,7 +3253,7 @@ static CURLMcode multi_socket(struct Curl_multi *multi,
}
}
/* Check if there's one (more) expired timer to deal with! This function
/* Check if there is one (more) expired timer to deal with! This function
extracts a matching node if there is one */
multi->timetree = Curl_splaygetbest(now, multi->timetree, &t);
@ -3406,8 +3407,8 @@ static CURLMcode multi_timeout(struct Curl_multi *multi,
if(Curl_splaycomparekeys(multi->timetree->key, now) > 0) {
/* some time left before expiration */
timediff_t diff = Curl_timediff_ceil(multi->timetree->key, now);
/* this should be safe even on 32-bit archs, as we don't use that overly
long timeouts */
/* this should be safe even on 32-bit archs, as we do not use such
overly long timeouts */
*timeout_ms = (long)diff;
}
else
@ -3451,7 +3452,7 @@ CURLMcode Curl_update_timer(struct Curl_multi *multi)
static const struct curltime none = {0, 0};
if(Curl_splaycomparekeys(none, multi->timer_lastcall)) {
multi->timer_lastcall = none;
/* there's no timeout now but there was one previously, tell the app to
/* there is no timeout now but there was one previously, tell the app to
disable it */
set_in_callback(multi, TRUE);
rc = multi->timer_cb(multi, -1, multi->timer_userp);
@ -3596,7 +3597,7 @@ void Curl_expire(struct Curl_easy *data, timediff_t milli, expire_id id)
if(diff > 0) {
/* The current splay tree entry is sooner than this new expiry time.
We don't need to update our splay tree entry. */
We do not need to update our splay tree entry. */
return;
}
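
For context, the rule the last comment describes can be reduced to a few
lines: an already stored, sooner expiry time wins and a later one is simply
ignored. A minimal standalone sketch, with hypothetical names rather than
curl's splay tree code:

    struct expiry {
      long long ms; /* absolute expiry time in milliseconds */
      int set;      /* non-zero once an expiry time is stored */
    };

    /* only replace the stored expiry when the new one fires sooner */
    static void expire_at(struct expiry *e, long long new_ms)
    {
      if(e->set && e->ms <= new_ms)
        return; /* the current entry already fires sooner, keep it */
      e->ms = new_ms;
      e->set = 1;
    }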


@ -167,7 +167,7 @@ struct Curl_multi {
#endif
unsigned int max_concurrent_streams;
unsigned int maxconnects; /* if >0, a fixed limit of the maximum number of
entries we're allowed to grow the connection
entries we are allowed to grow the connection
cache to */
#define IPV6_UNKNOWN 0
#define IPV6_DEAD 1


@ -237,7 +237,7 @@ static int parsenetrc(const char *host,
else if(strcasecompare("password", tok))
state_password = 1;
else if(strcasecompare("machine", tok)) {
/* ok, there's machine here go => */
/* ok, there is machine here go => */
state = HOSTFOUND;
state_our_login = FALSE;
}
@ -277,7 +277,7 @@ out:
/*
* @unittest: 1304
*
* *loginp and *passwordp MUST be allocated if they aren't NULL when passed
* *loginp and *passwordp MUST be allocated if they are not NULL when passed
* in.
*/
int Curl_parsenetrc(const char *host, char **loginp, char **passwordp,


@ -27,7 +27,7 @@
#include "curl_setup.h"
#ifndef CURL_DISABLE_NETRC
/* returns -1 on failure, 0 if the host is found, 1 is the host isn't found */
/* returns -1 on failure, 0 if the host is found, 1 if the host is not found */
int Curl_parsenetrc(const char *host, char **loginp,
char **passwordp, char *filename);
/* Assume: (*passwordp)[0]=0, host[0] != 0.


@ -53,7 +53,7 @@ int curlx_nonblock(curl_socket_t sockfd, /* operate on this */
if(flags < 0)
return -1;
/* Check if the current file status flags have already satisfied
* the request, if so, it's no need to call fcntl() to replicate it.
* the request, if so, there is no need to call fcntl() to replicate it.
*/
if(!!(flags & O_NONBLOCK) == !!nonblock)
return 0;
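
The check above skips a redundant fcntl(F_SETFL) call when O_NONBLOCK is
already in the requested state. A minimal standalone sketch of the same idea,
assuming a POSIX fcntl() build (names are illustrative, not curl's):

    #include <fcntl.h>

    static int set_nonblock(int fd, int nonblock)
    {
      int flags = fcntl(fd, F_GETFL, 0);
      if(flags < 0)
        return -1;
      /* already in the requested state, nothing to change */
      if(!!(flags & O_NONBLOCK) == !!nonblock)
        return 0;
      return fcntl(fd, F_SETFL,
                   nonblock ? (flags | O_NONBLOCK) : (flags & ~O_NONBLOCK));
    }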


@ -124,7 +124,7 @@ bool Curl_check_noproxy(const char *name, const char *no_proxy)
char hostip[128];
/*
* If we don't have a hostname at all, like for example with a FILE
* If we do not have a hostname at all, like for example with a FILE
* transfer, we have nothing to interrogate the noproxy list with.
*/
if(!name || name[0] == '\0')
@ -142,7 +142,7 @@ bool Curl_check_noproxy(const char *name, const char *no_proxy)
if(!strcmp("*", no_proxy))
return TRUE;
/* NO_PROXY was specified and it wasn't just an asterisk */
/* NO_PROXY was specified and it was not just an asterisk */
if(name[0] == '[') {
char *endptr;
@ -256,7 +256,7 @@ bool Curl_check_noproxy(const char *name, const char *no_proxy)
while(*p == ',')
p++;
} /* while(*p) */
} /* NO_PROXY was specified and it wasn't just an asterisk */
} /* NO_PROXY was specified and it was not just an asterisk */
return FALSE;
}
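
The function above decides whether a hostname should bypass the proxy. A much
reduced sketch of that kind of matching, handling only the "*" wildcard and
plain suffix entries (hypothetical helper; the real code also deals with IP
addresses and CIDR ranges):

    #include <stdbool.h>
    #include <string.h>
    #include <strings.h>

    static bool bypass_proxy(const char *host, const char *no_proxy)
    {
      size_t hlen;
      if(!host || !*host || !no_proxy || !*no_proxy)
        return false;
      if(!strcmp(no_proxy, "*"))
        return true; /* match everything */
      hlen = strlen(host);
      while(*no_proxy) {
        const char *end = strchr(no_proxy, ',');
        size_t len = end ? (size_t)(end - no_proxy) : strlen(no_proxy);
        if(len && len <= hlen &&
           !strncasecmp(host + hlen - len, no_proxy, len))
          return true; /* the entry matches the end of the hostname */
        no_proxy += len;
        while(*no_proxy == ',')
          no_proxy++;
      }
      return false;
    }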


@ -1152,7 +1152,7 @@ ldapsb_tls_remove(Sockbuf_IO_Desc *sbiod)
return 0;
}
/* We don't need to do anything because libcurl does it already */
/* We do not need to do anything because libcurl does it already */
static int
ldapsb_tls_close(Sockbuf_IO_Desc *sbiod)
{


@ -244,7 +244,7 @@ static int checkmonth(const char *check, size_t len)
}
/* return the time zone offset between GMT and the input one, in number
of seconds or -1 if the timezone wasn't found/legal */
of seconds or -1 if the timezone was not found/legal */
static int checktz(const char *check, size_t len)
{
@ -265,7 +265,7 @@ static int checktz(const char *check, size_t len)
static void skip(const char **date)
{
/* skip everything that aren't letters or digits */
/* skip everything that is not a letter or a digit */
while(**date && !ISALNUM(**date))
(*date)++;
}


@ -119,7 +119,7 @@ CURLcode Curl_pp_statemach(struct Curl_easy *data,
interval_ms);
if(block) {
/* if we didn't wait, we don't have to spend time on this now */
/* if we did not wait, we do not have to spend time on this now */
if(Curl_pgrsUpdate(data))
result = CURLE_ABORTED_BY_CALLBACK;
else
@ -179,7 +179,7 @@ CURLcode Curl_pp_vsendf(struct Curl_easy *data,
DEBUGASSERT(pp->sendthis == NULL);
if(!conn)
/* can't send without a connection! */
/* cannot send without a connection! */
return CURLE_SEND_ERROR;
Curl_dyn_reset(&pp->sendbuf);
@ -329,7 +329,7 @@ CURLcode Curl_pp_readresp(struct Curl_easy *data,
char *nl = memchr(line, '\n', Curl_dyn_len(&pp->recvbuf));
if(nl) {
/* a newline is CRLF in pp-talk, so the CR is ignored as
the line isn't really terminated until the LF comes */
the line is not really terminated until the LF comes */
size_t length = nl - line + 1;
/* output debug output if that is requested */
@ -372,7 +372,7 @@ CURLcode Curl_pp_readresp(struct Curl_easy *data,
break;
}
} while(1); /* while there's buffer left to scan */
} while(1); /* while there is buffer left to scan */
pp->pending_resp = FALSE;
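
As the comment notes, a response line in this protocol talk only counts as
complete once the LF arrives; a preceding CR is simply carried along. A tiny
sketch of that check (illustrative, not curl's buffer handling):

    #include <string.h>

    /* length of the first complete line, or 0 if none has arrived yet */
    static size_t next_line_len(const char *buf, size_t len)
    {
      const char *nl = memchr(buf, '\n', len);
      return nl ? (size_t)(nl - buf + 1) : 0;
    }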


@ -37,7 +37,7 @@ struct connectdata;
typedef enum {
PPTRANSFER_BODY, /* yes do transfer a body */
PPTRANSFER_INFO, /* do still go through to get info/headers */
PPTRANSFER_NONE /* don't get anything and don't get info */
PPTRANSFER_NONE /* do not get anything and do not get info */
} curl_pp_transfer;
/*
@ -83,7 +83,7 @@ struct pingpong {
* Curl_pp_statemach()
*
* called repeatedly until done. Set 'wait' to make it wait a while on the
* socket if there's no traffic.
* socket if there is no traffic.
*/
CURLcode Curl_pp_statemach(struct Curl_easy *data, struct pingpong *pp,
bool block, bool disconnecting);


@ -406,7 +406,7 @@ static CURLcode pop3_perform_user(struct Curl_easy *data,
CURLcode result = CURLE_OK;
/* Check we have a username and password to authenticate with and end the
connect phase if we don't */
connect phase if we do not */
if(!data->state.aptr.user) {
pop3_state(data, POP3_STOP);
@ -440,7 +440,7 @@ static CURLcode pop3_perform_apop(struct Curl_easy *data,
char secret[2 * MD5_DIGEST_LEN + 1];
/* Check we have a username and password to authenticate with and end the
connect phase if we don't */
connect phase if we do not */
if(!data->state.aptr.user) {
pop3_state(data, POP3_STOP);
@ -550,7 +550,7 @@ static CURLcode pop3_perform_authentication(struct Curl_easy *data,
saslprogress progress = SASL_IDLE;
/* Check we have enough data to authenticate with and end the
connect phase if we don't */
connect phase if we do not */
if(!Curl_sasl_can_authenticate(&pop3c->sasl, data)) {
pop3_state(data, POP3_STOP);
return result;
@ -758,7 +758,7 @@ static CURLcode pop3_state_capa_resp(struct Curl_easy *data, int pop3code,
}
}
else {
/* Clear text is supported when CAPA isn't recognised */
/* Clear text is supported when CAPA is not recognised */
if(pop3code != '+')
pop3c->authtypes |= POP3_TYPE_CLEARTEXT;
@ -931,7 +931,7 @@ static CURLcode pop3_state_command_resp(struct Curl_easy *data,
pop3c->eob = 2;
/* But since this initial CR LF pair is not part of the actual body, we set
the strip counter here so that these bytes won't be delivered. */
the strip counter here so that these bytes will not be delivered. */
pop3c->strip = 2;
if(pop3->transfer == PPTRANSFER_BODY) {
@ -1477,7 +1477,7 @@ CURLcode Curl_pop3_write(struct Curl_easy *data, const char *str, size_t nread)
pop3c->eob++;
if(i) {
/* Write out the body part that didn't match */
/* Write out the body part that did not match */
result = Curl_client_write(data, CLIENTWRITE_BODY, &str[last],
i - last);
@ -1490,7 +1490,7 @@ CURLcode Curl_pop3_write(struct Curl_easy *data, const char *str, size_t nread)
else if(pop3c->eob == 3)
pop3c->eob++;
else
/* If the character match wasn't at position 0 or 3 then restart the
/* If the character match was not at position 0 or 3 then restart the
pattern matching */
pop3c->eob = 1;
break;
@ -1499,7 +1499,7 @@ CURLcode Curl_pop3_write(struct Curl_easy *data, const char *str, size_t nread)
if(pop3c->eob == 1 || pop3c->eob == 4)
pop3c->eob++;
else
/* If the character match wasn't at position 1 or 4 then start the
/* If the character match was not at position 1 or 4 then start the
search again */
pop3c->eob = 0;
break;
@ -1513,7 +1513,7 @@ CURLcode Curl_pop3_write(struct Curl_easy *data, const char *str, size_t nread)
pop3c->eob = 0;
}
else
/* If the character match wasn't at position 2 then start the search
/* If the character match was not at position 2 then start the search
again */
pop3c->eob = 0;
break;
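
The eob counter discussed in these comments tracks how far into the POP3
end-of-body marker, the five byte sequence CRLF "." CRLF, the scanner has
matched so far. A simplified standalone matcher built on the same principle
(hypothetical; it ignores the start-of-body and dot-stuffing special cases
the real code handles):

    static const char EOB[] = "\r\n.\r\n";

    /* feed one byte, return the new match position (5 means marker found) */
    static size_t eob_advance(size_t matched, char c)
    {
      if(c == EOB[matched])
        return matched + 1;
      /* mismatch: a CR can still start a new candidate marker */
      return (c == '\r') ? 1 : 0;
    }

Feeding the body bytes through eob_advance() one at a time reaches 5 exactly
when the terminating sequence has been seen.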


@ -82,13 +82,13 @@ static char *max5data(curl_off_t bytes, char *max5)
msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "k", bytes/ONE_KILOBYTE);
else if(bytes < CURL_OFF_T_C(100) * ONE_MEGABYTE)
/* 'XX.XM' is good as long as we're less than 100 megs */
/* 'XX.XM' is good as long as we are less than 100 megs */
msnprintf(max5, 6, "%2" CURL_FORMAT_CURL_OFF_T ".%0"
CURL_FORMAT_CURL_OFF_T "M", bytes/ONE_MEGABYTE,
(bytes%ONE_MEGABYTE) / (ONE_MEGABYTE/CURL_OFF_T_C(10)) );
else if(bytes < CURL_OFF_T_C(10000) * ONE_MEGABYTE)
/* 'XXXXM' is good until we're at 10000MB or above */
/* 'XXXXM' is good until we are at 10000MB or above */
msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "M", bytes/ONE_MEGABYTE);
else if(bytes < CURL_OFF_T_C(100) * ONE_GIGABYTE)
@ -109,7 +109,7 @@ static char *max5data(curl_off_t bytes, char *max5)
/* up to 10000PB, display without decimal: XXXXP */
msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "P", bytes/ONE_PETABYTE);
/* 16384 petabytes (16 exabytes) is the maximum a 64 bit unsigned number can
/* 16384 petabytes (16 exabytes) is the maximum a 64-bit unsigned number can
hold, but our data type is signed so 8192PB will be the maximum. */
return max5;
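
The ranges named in the comments, at most five characters of output with the
unit growing alongside the number, can be pictured with a simplified cousin
of the function above. This sketch stops at gigabytes and uses hypothetical
names:

    #include <stdio.h>

    static void human5(long long bytes, char out[6])
    {
      const long long MB = 1024 * 1024, GB = 1024 * MB;
      if(bytes < 100000)
        snprintf(out, 6, "%5lld", bytes);
      else if(bytes < 100 * MB)
        /* 'XX.XM' works while below 100 megabytes */
        snprintf(out, 6, "%2lld.%lldM", bytes / MB, (bytes % MB) / (MB / 10));
      else if(bytes < 10000 * MB)
        /* 'XXXXM' works up to 10000 megabytes */
        snprintf(out, 6, "%4lldM", bytes / MB);
      else
        snprintf(out, 6, "%4lldG", bytes / GB);
    }
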
@ -140,7 +140,7 @@ int Curl_pgrsDone(struct Curl_easy *data)
if(!(data->progress.flags & PGRS_HIDE) &&
!data->progress.callback)
/* only output if we don't use a progress callback and we're not
/* only output if we do not use a progress callback and we are not
* hidden */
fprintf(data->set.err, "\n");
@ -204,7 +204,7 @@ void Curl_pgrsTimeWas(struct Curl_easy *data, timerid timer,
case TIMER_STARTTRANSFER:
delta = &data->progress.t_starttransfer;
/* prevent updating t_starttransfer unless:
* 1) this is the first time we're setting t_starttransfer
* 1) this is the first time we are setting t_starttransfer
* 2) a redirect has occurred since the last time t_starttransfer was set
* This prevents repeated invocations of the function from incorrectly
* changing the t_starttransfer time.
@ -265,7 +265,7 @@ void Curl_pgrsStartNow(struct Curl_easy *data)
/*
* This is used to handle speed limits, calculating how many milliseconds to
* wait until we're back under the speed limit, if needed.
* wait until we are back under the speed limit, if needed.
*
* The way it works is by having a "starting point" (time & amount of data
* transferred by then) used in the speed computation, to be used instead of
@ -336,7 +336,7 @@ CURLcode Curl_pgrsSetDownloadCounter(struct Curl_easy *data, curl_off_t size)
*/
void Curl_ratelimit(struct Curl_easy *data, struct curltime now)
{
/* don't set a new stamp unless the time since last update is long enough */
/* do not set a new stamp unless the time since last update is long enough */
if(data->set.max_recv_speed) {
if(Curl_timediff(now, data->progress.dl_limit_start) >=
MIN_RATE_LIMIT_PERIOD) {
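
The speed-limit handling described above boils down to comparing how long the
transferred amount should have taken at the configured limit with how long it
actually took. A rough sketch of that arithmetic (hypothetical helper, not
curl's code):

    /* milliseconds still to wait before we are back under the limit */
    static long ratelimit_wait_ms(long long bytes_since_start,
                                  long long limit_bytes_per_sec,
                                  long long elapsed_ms)
    {
      long long needed_ms;
      if(limit_bytes_per_sec <= 0)
        return 0;
      needed_ms = (bytes_since_start * 1000) / limit_bytes_per_sec;
      return (needed_ms > elapsed_ms) ? (long)(needed_ms - elapsed_ms) : 0;
    }
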
@ -399,7 +399,7 @@ static curl_off_t trspeed(curl_off_t size, /* number of bytes */
return CURL_OFF_T_MAX;
}
/* returns TRUE if it's time to show the progress meter */
/* returns TRUE if it is time to show the progress meter */
static bool progress_calc(struct Curl_easy *data, struct curltime now)
{
bool timetoshow = FALSE;
@ -431,10 +431,10 @@ static bool progress_calc(struct Curl_easy *data, struct curltime now)
/* figure out how many index entries of data we have stored in our speeder
array. With N_ENTRIES filled in, we have about N_ENTRIES-1 seconds of
transfer. Imagine, after one second we have filled in two entries,
after two seconds we've filled in three entries etc. */
after two seconds we have filled in three entries etc. */
countindex = ((p->speeder_c >= CURR_TIME)? CURR_TIME:p->speeder_c) - 1;
/* first of all, we don't do this if there's no counted seconds yet */
/* first of all, we do not do this if there are no counted seconds yet */
if(countindex) {
int checkindex;
timediff_t span_ms;
@ -594,7 +594,7 @@ int Curl_pgrsUpdate(struct Curl_easy *data)
if(!(data->progress.flags & PGRS_HIDE)) {
if(data->set.fxferinfo) {
int result;
/* There's a callback set, call that */
/* There is a callback set, call that */
Curl_set_in_callback(data, true);
result = data->set.fxferinfo(data->set.progress_client,
data->progress.size_dl,


@ -151,7 +151,7 @@ static CURLcode randit(struct Curl_easy *data, unsigned int *rnd)
#if defined(RANDOM_FILE) && !defined(_WIN32)
if(!seeded) {
/* if there's a random file to read a seed from, use it */
/* if there is a random file to read a seed from, use it */
int fd = open(RANDOM_FILE, O_RDONLY);
if(fd > -1) {
/* read random data into the randseed variable */
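
Reading a seed from a random device as the code above does can be sketched
like this, assuming a POSIX system where "/dev/urandom" exists (illustrative
only, error handling trimmed down):

    #include <fcntl.h>
    #include <unistd.h>

    static unsigned int seed_from_device(void)
    {
      unsigned int seed = 0;
      int fd = open("/dev/urandom", O_RDONLY);
      if(fd > -1) {
        (void)read(fd, &seed, sizeof(seed));
        close(fd);
      }
      return seed;
    }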


@ -41,7 +41,7 @@
int Curl_rename(const char *oldpath, const char *newpath)
{
#ifdef _WIN32
/* rename() on Windows doesn't overwrite, so we can't use it here.
/* rename() on Windows does not overwrite, so we cannot use it here.
MoveFileEx() will overwrite and is usually atomic, however it fails
when there are open handles to the file. */
const int max_wait_ms = 1000;
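
The max_wait_ms value hints at the approach: retry MoveFileEx() a few times
with a short pause in between, in case the competing open handle disappears
within a moment. A hedged sketch of such a retry loop, not curl's
implementation:

    #ifdef _WIN32
    #include <windows.h>

    static int rename_with_retry(const char *oldpath, const char *newpath)
    {
      int attempt;
      for(attempt = 0; attempt < 10; attempt++) {
        if(MoveFileExA(oldpath, newpath, MOVEFILE_REPLACE_EXISTING))
          return 0;   /* success */
        Sleep(100);   /* wait a moment, then try again */
      }
      return -1;      /* still failing after roughly a second */
    }
    #endif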


@ -204,7 +204,7 @@ static CURLcode xfer_send(struct Curl_easy *data,
}
}
#endif
/* Make sure this doesn't send more body bytes than what the max send
/* Make sure this does not send more body bytes than what the max send
speed says. The headers do not count to the max speed. */
if(data->set.max_send_speed) {
size_t body_bytes = blen - hds_len;
@ -255,7 +255,7 @@ static CURLcode req_set_upload_done(struct Curl_easy *data)
{
DEBUGASSERT(!data->req.upload_done);
data->req.upload_done = TRUE;
data->req.keepon &= ~(KEEP_SEND|KEEP_SEND_TIMED); /* we're done sending */
data->req.keepon &= ~(KEEP_SEND|KEEP_SEND_TIMED); /* we are done sending */
Curl_creader_done(data, data->req.upload_aborted);


@ -54,7 +54,7 @@ enum upgrade101 {
* Request specific data in the easy handle (Curl_easy). Previously,
* these members were on the connectdata struct but since a conn struct may
* now be shared between different Curl_easys, we store connection-specific
* data here. This struct only keeps stuff that's interesting for *this*
* data here. This struct only keeps stuff that is interesting for *this*
* request, as it will be cleared between multiple ones
*/
struct SingleRequest {
@ -68,7 +68,7 @@ struct SingleRequest {
unsigned int headerbytecount; /* received server headers (not CONNECT
headers) */
unsigned int allheadercount; /* all received headers (server + CONNECT) */
unsigned int deductheadercount; /* this amount of bytes doesn't count when
unsigned int deductheadercount; /* this amount of bytes does not count when
we check if anything has been transferred
at the end of a connection. We use this
counter to make only a 100 reply (without


@ -79,7 +79,7 @@ static unsigned int rtsp_conncheck(struct Curl_easy *data,
unsigned int checks_to_perform);
/* this returns the socket to wait for in the DO and DOING state for the multi
interface and then we're always _sending_ a request and thus we wait for
interface and then we are always _sending_ a request and thus we wait for
the single socket to become writable only */
static int rtsp_getsock_do(struct Curl_easy *data, struct connectdata *conn,
curl_socket_t *socks)
@ -261,7 +261,7 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done)
* Since all RTSP requests are included here, there is no need to
* support custom requests like HTTP.
**/
data->req.no_body = TRUE; /* most requests don't contain a body */
data->req.no_body = TRUE; /* most requests do not contain a body */
switch(rtspreq) {
default:
failf(data, "Got invalid RTSP request");
@ -952,7 +952,7 @@ CURLcode Curl_rtsp_parseheader(struct Curl_easy *data, const char *header)
/* Find the end of Session ID
*
* Allow any non whitespace content, up to the field separator or end of
* line. RFC 2326 isn't 100% clear on the session ID and for example
* line. RFC 2326 is not 100% clear on the session ID and for example
* gstreamer does URL-encoded session IDs not covered by the standard.
*/
end = start;


@ -33,7 +33,7 @@
#endif
#if !defined(HAVE_SELECT) && !defined(HAVE_POLL_FINE)
#error "We can't compile without select() or poll() support."
#error "We cannot compile without select() or poll() support."
#endif
#ifdef MSDOS
@ -85,7 +85,7 @@ int Curl_wait_ms(timediff_t timeout_ms)
#if TIMEDIFF_T_MAX >= ULONG_MAX
if(timeout_ms >= ULONG_MAX)
timeout_ms = ULONG_MAX-1;
/* don't use ULONG_MAX, because that is equal to INFINITE */
/* do not use ULONG_MAX, because that is equal to INFINITE */
#endif
Sleep((ULONG)timeout_ms);
#else
@ -135,7 +135,7 @@ static int our_select(curl_socket_t maxfd, /* highest socket number */
struct timeval *ptimeout;
#ifdef USE_WINSOCK
/* WinSock select() can't handle zero events. See the comment below. */
/* WinSock select() cannot handle zero events. See the comment below. */
if((!fds_read || fds_read->fd_count == 0) &&
(!fds_write || fds_write->fd_count == 0) &&
(!fds_err || fds_err->fd_count == 0)) {
@ -148,14 +148,14 @@ static int our_select(curl_socket_t maxfd, /* highest socket number */
#ifdef USE_WINSOCK
/* WinSock select() must not be called with an fd_set that contains zero
fd flags, or it will return WSAEINVAL. But, it also can't be called
fd flags, or it will return WSAEINVAL. But, it also cannot be called
with no fd_sets at all! From the documentation:
Any two of the parameters, readfds, writefds, or exceptfds, can be
given as null. At least one must be non-null, and any non-null
descriptor set must contain at least one handle to a socket.
It is unclear why WinSock doesn't just handle this for us instead of
It is unclear why WinSock does not just handle this for us instead of
calling this an error. Luckily, with WinSock, we can _also_ ask how
many bits are set on an fd_set. So, let's just check it beforehand.
*/
@ -361,7 +361,7 @@ int Curl_poll(struct pollfd ufds[], unsigned int nfds, timediff_t timeout_ms)
}
/*
Note also that WinSock ignores the first argument, so we don't worry
Note also that WinSock ignores the first argument, so we do not worry
about the fact that maxfd is computed incorrectly with WinSock (since
curl_socket_t is unsigned in such cases and thus -1 is the largest
value).
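
The "check it beforehand" idea from the comment above can be written as a
tiny helper that only hands an fd_set to select() when it actually carries a
socket and passes NULL otherwise; the zero-events case handled earlier
already guarantees that at least one set is non-empty. A sketch under those
assumptions:

    #ifdef USE_WINSOCK
    /* return the set itself when it holds at least one socket, else NULL */
    static fd_set *usable_fdset(fd_set *set)
    {
      return (set && set->fd_count) ? set : NULL;
    }

    /* usage:
       select(0, usable_fdset(fds_read), usable_fdset(fds_write),
              usable_fdset(fds_err), ptimeout); */
    #endif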


@ -708,8 +708,8 @@ static CURLcode cr_in_read(struct Curl_easy *data,
case CURL_READFUNC_PAUSE:
if(data->conn->handler->flags & PROTOPT_NONETWORK) {
/* protocols that work without network cannot be paused. This is
actually only FILE:// just now, and it can't pause since the transfer
isn't done using the "normal" procedure. */
actually only FILE:// just now, and it cannot pause since the transfer
is not done using the "normal" procedure. */
failf(data, "Read callback asked for PAUSE when not supported");
return CURLE_READ_ERROR;
}
@ -786,7 +786,7 @@ static CURLcode cr_in_resume_from(struct Curl_easy *data,
failf(data, "Could not seek stream");
return CURLE_READ_ERROR;
}
/* when seekerr == CURL_SEEKFUNC_CANTSEEK (can't seek to offset) */
/* when seekerr == CURL_SEEKFUNC_CANTSEEK (cannot seek to offset) */
do {
char scratch[4*1024];
size_t readthisamountnow =
@ -820,7 +820,7 @@ static CURLcode cr_in_resume_from(struct Curl_easy *data,
return CURLE_PARTIAL_FILE;
}
}
/* we've passed, proceed as normal */
/* we have passed, proceed as normal */
return CURLE_OK;
}
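
When the seek callback answers CURL_SEEKFUNC_CANTSEEK, the resume offset has
to be reached by reading data and throwing it away, which is what the scratch
buffer loop above is for. A standalone sketch of that fallback using stdio
(hypothetical helper):

    #include <stdio.h>

    /* consume 'offset' bytes from 'in'; -1 if the data runs out early */
    static int discard_until(FILE *in, long long offset)
    {
      char scratch[4 * 1024];
      while(offset > 0) {
        size_t want = (offset > (long long)sizeof(scratch)) ?
                      sizeof(scratch) : (size_t)offset;
        size_t got = fread(scratch, 1, want, in);
        if(!got)
          return -1;
        offset -= (long long)got;
      }
      return 0;
    }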
@ -872,7 +872,7 @@ static CURLcode cr_in_rewind(struct Curl_easy *data,
}
/* no callback set or failure above, makes us fail at once */
failf(data, "necessary data rewind wasn't possible");
failf(data, "necessary data rewind was not possible");
return CURLE_SEND_FAIL_REWIND;
}
return CURLE_OK;
@ -1018,7 +1018,7 @@ static CURLcode cr_lc_read(struct Curl_easy *data,
return result;
start = i + 1;
if(!data->set.crlf && (data->state.infilesize != -1)) {
/* we're here only because FTP is in ASCII mode...
/* we are here only because FTP is in ASCII mode...
bump infilesize for the LF we just added */
data->state.infilesize++;
/* comment: this might work for FTP, but in HTTP we could not change


@ -352,7 +352,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
break;
case CURLOPT_FAILONERROR:
/*
* Don't output the >=400 error code HTML-page, but instead only
* Do not output the >=400 error code HTML-page, but instead only
* return error.
*/
data->set.http_fail_on_error = (0 != va_arg(param, long));
@ -622,7 +622,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
*
* If the encoding is set to "" we use an Accept-Encoding header that
* encompasses all the encodings we support.
* If the encoding is set to NULL we don't send an Accept-Encoding header
* If the encoding is set to NULL we do not send an Accept-Encoding header
* and ignore a received Content-Encoding header.
*
*/
@ -686,7 +686,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
case CURLOPT_POST:
/* Does this option serve a purpose anymore? Yes it does, when
CURLOPT_POSTFIELDS isn't used and the POST data is read off the
CURLOPT_POSTFIELDS is not used and the POST data is read off the
callback! */
if(va_arg(param, long)) {
data->set.method = HTTPREQ_POST;
@ -805,7 +805,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
data->state.cookielist = NULL;
if(!data->share || !data->share->cookies) {
/* throw away all existing cookies if this isn't a shared cookie
/* throw away all existing cookies if this is not a shared cookie
container */
Curl_cookie_clearall(data->cookies);
Curl_cookie_cleanup(data->cookies);
@ -817,7 +817,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
case CURLOPT_COOKIEJAR:
/*
* Set cookie file name to dump all cookies to when we're done.
* Set cookie filename to dump all cookies to when we are done.
*/
result = Curl_setstropt(&data->set.str[STRING_COOKIEJAR],
va_arg(param, char *));
@ -1047,7 +1047,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
auth &= ~CURLAUTH_DIGEST_IE; /* unset ie digest bit */
}
/* switch off bits we can't support */
/* switch off bits we cannot support */
#ifndef USE_NTLM
auth &= ~CURLAUTH_NTLM; /* no NTLM support */
#endif
@ -1079,7 +1079,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
result = Curl_setstropt(&data->set.str[STRING_CUSTOMREQUEST],
va_arg(param, char *));
/* we don't set
/* we do not set
data->set.method = HTTPREQ_CUSTOM;
here, we continue as if we were using the already set type
and this just changes the actual request keyword */
@ -1125,7 +1125,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
auth |= CURLAUTH_DIGEST; /* set standard digest bit */
auth &= ~CURLAUTH_DIGEST_IE; /* unset ie digest bit */
}
/* switch off bits we can't support */
/* switch off bits we cannot support */
#ifndef USE_NTLM
auth &= ~CURLAUTH_NTLM; /* no NTLM support */
#endif
@ -1155,7 +1155,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
* Set proxy server:port to use as proxy.
*
* If the proxy is set to "" (and CURLOPT_SOCKS_PROXY is set to "" or NULL)
* we explicitly say that we don't want to use a proxy
* we explicitly say that we do not want to use a proxy
* (even though there might be environment variables saying so).
*
* Setting it to NULL, means no proxy but allows the environment variables
@ -1169,7 +1169,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
/*
* Set proxy server:port to use as SOCKS proxy.
*
* If the proxy is set to "" or NULL we explicitly say that we don't want
* If the proxy is set to "" or NULL we explicitly say that we do not want
* to use the socks proxy.
*/
result = Curl_setstropt(&data->set.str[STRING_PRE_PROXY],
@ -1581,7 +1581,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
* Prefix the HOST with dash (-) to _remove_ the entry from the cache.
*
* This API can remove any entry from the DNS cache, but only entries
* that aren't actually in use right now will be pruned immediately.
* that are not actually in use right now will be pruned immediately.
*/
data->set.resolve = va_arg(param, struct curl_slist *);
data->state.resolve = data->set.resolve;
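
On the application side, the behavior described above is driven with a
curl_slist of "host:port:address" strings. A short example in the spirit of
the documented usage (the address is a placeholder):

    #include <curl/curl.h>

    int main(void)
    {
      /* use 192.0.2.1 for example.com:443 without asking DNS */
      struct curl_slist *host =
        curl_slist_append(NULL, "example.com:443:192.0.2.1");
      CURL *curl = curl_easy_init();
      if(curl) {
        curl_easy_setopt(curl, CURLOPT_RESOLVE, host);
        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
        curl_easy_perform(curl);
        curl_easy_cleanup(curl);
      }
      curl_slist_free_all(host);
      return 0;
    }

An entry prefixed with a dash, such as "-example.com:443", instead asks for
that entry to be removed from the cache, as the comment above points out.
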
@ -1690,7 +1690,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
*/
data->set.fdebug = va_arg(param, curl_debug_callback);
/*
* if the callback provided is NULL, it'll use the default callback
* if the callback provided is NULL, it will use the default callback
*/
break;
case CURLOPT_DEBUGDATA:
@ -1986,7 +1986,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
arg = va_arg(param, long);
/* Obviously people are not reading documentation and too many thought
this argument took a boolean when it wasn't and misused it.
this argument took a boolean when it was not and misused it.
Treat 1 and 2 the same */
data->set.ssl.primary.verifyhost = !!(arg & 3);
@ -2249,7 +2249,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
case CURLOPT_BUFFERSIZE:
/*
* The application kindly asks for a differently sized receive buffer.
* If it seems reasonable, we'll use it.
* If it seems reasonable, we will use it.
*/
arg = va_arg(param, long);
@ -2629,7 +2629,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
data->set.http_te_skip = (0 == va_arg(param, long));
break;
#else
return CURLE_NOT_BUILT_IN; /* hyper doesn't support */
return CURLE_NOT_BUILT_IN; /* hyper does not support */
#endif
case CURLOPT_HTTP_CONTENT_DECODING:
@ -2710,7 +2710,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
}
case CURLOPT_DEFAULT_PROTOCOL:
/* Set the protocol to use when the URL doesn't include any protocol */
/* Set the protocol to use when the URL does not include any protocol */
result = Curl_setstropt(&data->set.str[STRING_DEFAULT_PROTOCOL],
va_arg(param, char *));
break;


@ -78,7 +78,7 @@
# error "_UNICODE is defined but UNICODE is not defined"
# endif
/*
* Don't include unneeded stuff in Windows headers to avoid compiler
* Do not include unneeded stuff in Windows headers to avoid compiler
* warnings and macro clashes.
* Make sure to define this macro before including any Windows headers.
*/


@ -65,7 +65,7 @@ curl_share_setopt(struct Curl_share *share, CURLSHoption option, ...)
return CURLSHE_INVALID;
if(share->dirty)
/* don't allow setting options while one or more handles are already
/* do not allow setting options while one or more handles are already
using this share */
return CURLSHE_IN_USE;
@ -269,7 +269,7 @@ Curl_share_lock(struct Curl_easy *data, curl_lock_data type,
if(share->lockfunc) /* only call this if set! */
share->lockfunc(data, type, accesstype, share->clientdata);
}
/* else if we don't share this, pretend successful lock */
/* else if we do not share this, pretend successful lock */
return CURLSHE_OK;
}
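
On the application side, the lock and unlock callbacks referred to above are
installed on a share object and only come into play for data the share
actually covers. A sketch wiring them to a mutex, with pthread used purely as
an assumption about the application's threading:

    #include <curl/curl.h>
    #include <pthread.h>

    static pthread_mutex_t share_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void lock_cb(CURL *handle, curl_lock_data data,
                        curl_lock_access locktype, void *userp)
    {
      (void)handle; (void)data; (void)locktype; (void)userp;
      pthread_mutex_lock(&share_mutex);
    }

    static void unlock_cb(CURL *handle, curl_lock_data data, void *userp)
    {
      (void)handle; (void)data; (void)userp;
      pthread_mutex_unlock(&share_mutex);
    }

    /* usage:
       CURLSH *share = curl_share_init();
       curl_share_setopt(share, CURLSHOPT_LOCKFUNC, lock_cb);
       curl_share_setopt(share, CURLSHOPT_UNLOCKFUNC, unlock_cb);
       curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_COOKIE);
       curl_easy_setopt(easy, CURLOPT_SHARE, share); */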

Some files were not shown because too many files have changed in this diff.