http2: support HTTP/2 to forward proxies, non-tunneling

- with `--proxy-http2` allow h2 ALPN negotiation to
  forward proxies
- applies to http: requests against a https: proxy only,
  as https: requests will auto-tunnel
- adding a HTTP/1 request parser in http1.c
- removed h2h3.c
- using new request parser in nghttp2 and all h3 backends
- adding test 2603 for request parser
- adding h2 proxy test cases to test_10_*

scorecard.py: request scoring accidentally always ran curl
with '-v'. Removed that; expect roughly doubled numbers.

labeller: added http1.* and h2-proxy sources to detection

Closes #10967
This commit is contained in:
Stefan Eissing 2023-04-14 11:38:14 +02:00 committed by Daniel Stenberg
parent fb1d62ff07
commit fc2f1e547a
No known key found for this signature in database
GPG Key ID: 5CC908FDB71E12C2
28 changed files with 1522 additions and 824 deletions

3
.github/labeler.yml vendored
View File

@ -156,8 +156,9 @@ HTTP:
- all: ['docs/libcurl/opts/CURLOPT_TRAILER*'] - all: ['docs/libcurl/opts/CURLOPT_TRAILER*']
- all: ['docs/libcurl/opts/CURLOPT_TRANSFER_ENCODING*'] - all: ['docs/libcurl/opts/CURLOPT_TRANSFER_ENCODING*']
- all: ['lib/cf-https*'] - all: ['lib/cf-https*']
- all: ['lib/cf-h1*']
- all: ['lib/cf-h2*']
- all: ['lib/cookie.*'] - all: ['lib/cookie.*']
- all: ['lib/h2h3.*']
- all: ['lib/http*'] - all: ['lib/http*']
- all: ['tests/http*'] - all: ['tests/http*']
- all: ['tests/http-server.pl'] - all: ['tests/http-server.pl']

View File

@ -153,7 +153,6 @@ LIB_CFILES = \
getenv.c \ getenv.c \
getinfo.c \ getinfo.c \
gopher.c \ gopher.c \
h2h3.c \
hash.c \ hash.c \
headers.c \ headers.c \
hmac.c \ hmac.c \
@ -164,6 +163,7 @@ LIB_CFILES = \
hostsyn.c \ hostsyn.c \
hsts.c \ hsts.c \
http.c \ http.c \
http1.c \
http2.c \ http2.c \
http_chunks.c \ http_chunks.c \
http_digest.c \ http_digest.c \
@ -296,12 +296,12 @@ LIB_HFILES = \
ftplistparser.h \ ftplistparser.h \
getinfo.h \ getinfo.h \
gopher.h \ gopher.h \
h2h3.h \
hash.h \ hash.h \
headers.h \ headers.h \
hostip.h \ hostip.h \
hsts.h \ hsts.h \
http.h \ http.h \
http1.h \
http2.h \ http2.h \
http_chunks.h \ http_chunks.h \
http_digest.h \ http_digest.h \

View File

@ -142,6 +142,21 @@ static size_t chunk_skip(struct buf_chunk *chunk, size_t amount)
return n; return n;
} }
/* Move any unread data in `chunk` to the start of its buffer so the
 * space before `r_offset` becomes writable again. */
static void chunk_shift(struct buf_chunk *chunk)
{
  size_t unread;

  if(!chunk->r_offset)
    return; /* data already starts at offset 0, nothing to do */

  if(chunk_is_empty(chunk)) {
    /* nothing pending, just reset both offsets */
    chunk->r_offset = chunk->w_offset = 0;
    return;
  }

  unread = chunk->w_offset - chunk->r_offset;
  /* source and destination may overlap, use memmove */
  memmove(chunk->x.data, chunk->x.data + chunk->r_offset, unread);
  chunk->r_offset = 0;
  chunk->w_offset = unread;
}
static void chunk_list_free(struct buf_chunk **anchor) static void chunk_list_free(struct buf_chunk **anchor)
{ {
struct buf_chunk *chunk; struct buf_chunk *chunk;
@ -479,6 +494,13 @@ void Curl_bufq_skip(struct bufq *q, size_t amount)
} }
} }
/* Skip `amount` bytes of pending data in `q`, then compact the tail
 * chunk so that further writes find room in it. */
void Curl_bufq_skip_and_shift(struct bufq *q, size_t amount)
{
  struct buf_chunk *tail;

  Curl_bufq_skip(q, amount);
  tail = q->tail;
  if(tail)
    chunk_shift(tail);
}
ssize_t Curl_bufq_pass(struct bufq *q, Curl_bufq_writer *writer, ssize_t Curl_bufq_pass(struct bufq *q, Curl_bufq_writer *writer,
void *writer_ctx, CURLcode *err) void *writer_ctx, CURLcode *err)
{ {

View File

@ -214,6 +214,12 @@ bool Curl_bufq_peek_at(struct bufq *q, size_t offset,
*/ */
void Curl_bufq_skip(struct bufq *q, size_t amount); void Curl_bufq_skip(struct bufq *q, size_t amount);
/**
* Same as `skip` but shift tail data to the start afterwards,
* so that further writes will find room in tail.
*/
void Curl_bufq_skip_and_shift(struct bufq *q, size_t amount);
typedef ssize_t Curl_bufq_writer(void *writer_ctx, typedef ssize_t Curl_bufq_writer(void *writer_ctx,
const unsigned char *buf, size_t len, const unsigned char *buf, size_t len,
CURLcode *err); CURLcode *err);

View File

@ -34,7 +34,7 @@
#include "bufq.h" #include "bufq.h"
#include "dynbuf.h" #include "dynbuf.h"
#include "dynhds.h" #include "dynhds.h"
#include "h2h3.h" #include "http1.h"
#include "http_proxy.h" #include "http_proxy.h"
#include "multiif.h" #include "multiif.h"
#include "cf-h2-proxy.h" #include "cf-h2-proxy.h"
@ -648,8 +648,8 @@ static int on_header(nghttp2_session *session, const nghttp2_frame *frame,
return 0; return 0;
} }
if(namelen == sizeof(H2H3_PSEUDO_STATUS) - 1 && if(namelen == sizeof(HTTP_PSEUDO_STATUS) - 1 &&
memcmp(H2H3_PSEUDO_STATUS, name, namelen) == 0) { memcmp(HTTP_PSEUDO_STATUS, name, namelen) == 0) {
int http_status; int http_status;
struct http_resp *resp; struct http_resp *resp;
@ -783,60 +783,28 @@ static CURLcode h2_submit(int32_t *pstream_id,
nghttp2_data_source_read_callback read_callback, nghttp2_data_source_read_callback read_callback,
void *read_ctx) void *read_ctx)
{ {
struct dynhds h2_headers;
nghttp2_nv *nva = NULL; nghttp2_nv *nva = NULL;
unsigned int i; unsigned int i;
int32_t stream_id = -1; int32_t stream_id = -1;
size_t nheader, j; size_t nheader;
CURLcode result = CURLE_OUT_OF_MEMORY; CURLcode result;
(void)cf; (void)cf;
nheader = req->headers.hds_len + 1; /* ":method" is a MUST */ Curl_dynhds_init(&h2_headers, 0, DYN_HTTP_REQUEST);
if(req->scheme) result = Curl_http_req_to_h2(&h2_headers, req, data);
++nheader; if(result)
if(req->authority)
++nheader;
if(req->path)
++nheader;
nva = malloc(sizeof(nghttp2_nv) * nheader);
if(!nva)
goto out; goto out;
nva[0].name = (unsigned char *)H2H3_PSEUDO_METHOD; nheader = Curl_dynhds_count(&h2_headers);
nva[0].namelen = sizeof(H2H3_PSEUDO_METHOD) - 1; nva = malloc(sizeof(nghttp2_nv) * nheader);
nva[0].value = (unsigned char *)req->method; if(!nva) {
nva[0].valuelen = strlen(req->method); result = CURLE_OUT_OF_MEMORY;
nva[0].flags = NGHTTP2_NV_FLAG_NONE; goto out;
i = 1;
if(req->scheme) {
nva[i].name = (unsigned char *)H2H3_PSEUDO_SCHEME;
nva[i].namelen = sizeof(H2H3_PSEUDO_SCHEME) - 1;
nva[i].value = (unsigned char *)req->scheme;
nva[i].valuelen = strlen(req->scheme);
nva[i].flags = NGHTTP2_NV_FLAG_NONE;
++i;
}
if(req->authority) {
nva[i].name = (unsigned char *)H2H3_PSEUDO_AUTHORITY;
nva[i].namelen = sizeof(H2H3_PSEUDO_AUTHORITY) - 1;
nva[i].value = (unsigned char *)req->authority;
nva[i].valuelen = strlen(req->authority);
nva[i].flags = NGHTTP2_NV_FLAG_NONE;
++i;
}
if(req->path) {
nva[i].name = (unsigned char *)H2H3_PSEUDO_PATH;
nva[i].namelen = sizeof(H2H3_PSEUDO_PATH) - 1;
nva[i].value = (unsigned char *)req->path;
nva[i].valuelen = strlen(req->path);
nva[i].flags = NGHTTP2_NV_FLAG_NONE;
++i;
} }
for(j = 0; i < nheader; i++, j++) { for(i = 0; i < nheader; ++i) {
struct dynhds_entry *e = Curl_dynhds_getn(&req->headers, j); struct dynhds_entry *e = Curl_dynhds_getn(&h2_headers, i);
if(!e)
break;
nva[i].name = (unsigned char *)e->name; nva[i].name = (unsigned char *)e->name;
nva[i].namelen = e->namelen; nva[i].namelen = e->namelen;
nva[i].value = (unsigned char *)e->value; nva[i].value = (unsigned char *)e->value;
@ -866,7 +834,8 @@ static CURLcode h2_submit(int32_t *pstream_id,
result = CURLE_OK; result = CURLE_OK;
out: out:
Curl_safefree(nva); free(nva);
Curl_dynhds_free(&h2_headers);
*pstream_id = stream_id; *pstream_id = stream_id;
return result; return result;
} }
@ -881,7 +850,9 @@ static CURLcode submit_CONNECT(struct Curl_cfilter *cf,
infof(data, "Establish HTTP/2 proxy tunnel to %s", ts->authority); infof(data, "Establish HTTP/2 proxy tunnel to %s", ts->authority);
result = Curl_http_req_make(&req, "CONNECT", NULL, ts->authority, NULL); result = Curl_http_req_make(&req, "CONNECT", sizeof("CONNECT")-1,
NULL, 0, ts->authority, strlen(ts->authority),
NULL, 0);
if(result) if(result)
goto out; goto out;

View File

@ -34,7 +34,7 @@
static struct dynhds_entry * static struct dynhds_entry *
entry_new(const char *name, size_t namelen, entry_new(const char *name, size_t namelen,
const char *value, size_t valuelen) const char *value, size_t valuelen, int opts)
{ {
struct dynhds_entry *e; struct dynhds_entry *e;
char *p; char *p;
@ -50,9 +50,35 @@ entry_new(const char *name, size_t namelen,
e->value = p += namelen + 1; /* leave a \0 at the end of name */ e->value = p += namelen + 1; /* leave a \0 at the end of name */
memcpy(p, value, valuelen); memcpy(p, value, valuelen);
e->valuelen = valuelen; e->valuelen = valuelen;
if(opts & DYNHDS_OPT_LOWERCASE)
Curl_strntolower(e->name, e->name, e->namelen);
return e; return e;
} }
/* Create a replacement entry carrying `e`'s name and `e`'s value with
 * `value` appended, joined by a single space (used for folded HTTP/1
 * header continuation lines). Returns the new entry or NULL on
 * allocation failure. The caller still owns `e` and must free it. */
static struct dynhds_entry *
entry_append(struct dynhds_entry *e,
             const char *value, size_t valuelen)
{
  struct dynhds_entry *e2;
  /* new value length: old value + one joining space + appended part */
  size_t valuelen2 = e->valuelen + 1 + valuelen;
  char *p;

  DEBUGASSERT(value);
  /* single allocation: struct, then name + '\0', then value + '\0' */
  e2 = calloc(1, sizeof(*e) + e->namelen + valuelen2 + 2);
  if(!e2)
    return NULL;
  /* name string sits directly behind the struct */
  e2->name = p = ((char *)e2) + sizeof(*e2);
  memcpy(p, e->name, e->namelen);
  e2->namelen = e->namelen;
  e2->value = p += e->namelen + 1; /* leave a \0 at the end of name */
  memcpy(p, e->value, e->valuelen);
  p += e->valuelen;
  p[0] = ' '; /* join old and appended value with a single space */
  memcpy(p + 1, value, valuelen);
  e2->valuelen = valuelen2;
  return e2;
}
static void entry_free(struct dynhds_entry *e) static void entry_free(struct dynhds_entry *e)
{ {
free(e); free(e);
@ -67,6 +93,7 @@ void Curl_dynhds_init(struct dynhds *dynhds, size_t max_entries,
dynhds->hds_len = dynhds->hds_allc = dynhds->strs_len = 0; dynhds->hds_len = dynhds->hds_allc = dynhds->strs_len = 0;
dynhds->max_entries = max_entries; dynhds->max_entries = max_entries;
dynhds->max_strs_size = max_strs_size; dynhds->max_strs_size = max_strs_size;
dynhds->opts = 0;
} }
void Curl_dynhds_free(struct dynhds *dynhds) void Curl_dynhds_free(struct dynhds *dynhds)
@ -102,6 +129,11 @@ size_t Curl_dynhds_count(struct dynhds *dynhds)
return dynhds->hds_len; return dynhds->hds_len;
} }
void Curl_dynhds_set_opts(struct dynhds *dynhds, int opts)
{
dynhds->opts = opts;
}
struct dynhds_entry *Curl_dynhds_getn(struct dynhds *dynhds, size_t n) struct dynhds_entry *Curl_dynhds_getn(struct dynhds *dynhds, size_t n)
{ {
DEBUGASSERT(dynhds); DEBUGASSERT(dynhds);
@ -150,7 +182,7 @@ CURLcode Curl_dynhds_add(struct dynhds *dynhds,
if(dynhds->strs_len + namelen + valuelen > dynhds->max_strs_size) if(dynhds->strs_len + namelen + valuelen > dynhds->max_strs_size)
return CURLE_OUT_OF_MEMORY; return CURLE_OUT_OF_MEMORY;
entry = entry_new(name, namelen, value, valuelen); entry = entry_new(name, namelen, value, valuelen, dynhds->opts);
if(!entry) if(!entry)
goto out; goto out;
@ -203,33 +235,65 @@ CURLcode Curl_dynhds_cset(struct dynhds *dynhds,
return Curl_dynhds_set(dynhds, name, strlen(name), value, strlen(value)); return Curl_dynhds_set(dynhds, name, strlen(name), value, strlen(value));
} }
CURLcode Curl_dynhds_h1_cadd_line(struct dynhds *dynhds, const char *line) CURLcode Curl_dynhds_h1_add_line(struct dynhds *dynhds,
const char *line, size_t line_len)
{ {
const char *p; const char *p;
const char *name; const char *name;
size_t namelen; size_t namelen;
const char *value; const char *value;
size_t valuelen; size_t valuelen, i;
if(!line) if(!line || !line_len)
return CURLE_OK;
if((line[0] == ' ') || (line[0] == '\t')) {
struct dynhds_entry *e, *e2;
/* header continuation, yikes! */
if(!dynhds->hds_len)
return CURLE_BAD_FUNCTION_ARGUMENT;
while(line_len && ISBLANK(line[0])) {
++line;
--line_len;
}
if(!line_len)
return CURLE_BAD_FUNCTION_ARGUMENT;
e = dynhds->hds[dynhds->hds_len-1];
e2 = entry_append(e, line, line_len);
if(!e2)
return CURLE_OUT_OF_MEMORY;
dynhds->hds[dynhds->hds_len-1] = e2;
entry_free(e);
return CURLE_OK; return CURLE_OK;
p = strchr(line, ':');
if(!p) {
return CURLE_BAD_FUNCTION_ARGUMENT;
} }
else {
p = memchr(line, ':', line_len);
if(!p)
return CURLE_BAD_FUNCTION_ARGUMENT;
name = line;
namelen = p - line;
p++; /* move past the colon */
for(i = namelen + 1; i < line_len; ++i, ++p) {
if(!ISBLANK(*p))
break;
}
value = p;
valuelen = line_len - i;
name = line; p = memchr(value, '\r', valuelen);
namelen = p - line; if(!p)
p++; /* move past the colon */ p = memchr(value, '\n', valuelen);
while(ISBLANK(*p)) if(p)
p++; valuelen = (size_t)(p - value);
value = p;
p = strchr(value, '\r');
if(!p)
p = strchr(value, '\n');
valuelen = p? ((size_t)(p - value)) : strlen(value);
return Curl_dynhds_add(dynhds, name, namelen, value, valuelen); return Curl_dynhds_add(dynhds, name, namelen, value, valuelen);
}
}
CURLcode Curl_dynhds_h1_cadd_line(struct dynhds *dynhds, const char *line)
{
return Curl_dynhds_h1_add_line(dynhds, line, line? strlen(line) : 0);
} }
size_t Curl_dynhds_count_name(struct dynhds *dynhds, size_t Curl_dynhds_count_name(struct dynhds *dynhds,

View File

@ -48,8 +48,12 @@ struct dynhds {
size_t max_entries; /* size limit number of entries */ size_t max_entries; /* size limit number of entries */
size_t strs_len; /* length of all strings */ size_t strs_len; /* length of all strings */
size_t max_strs_size; /* max length of all strings */ size_t max_strs_size; /* max length of all strings */
int opts;
}; };
#define DYNHDS_OPT_NONE (0)
#define DYNHDS_OPT_LOWERCASE (1 << 0)
/** /**
* Init for use on first time or after a reset. * Init for use on first time or after a reset.
* Allow `max_entries` headers to be added, 0 for unlimited. * Allow `max_entries` headers to be added, 0 for unlimited.
@ -73,6 +77,12 @@ void Curl_dynhds_reset(struct dynhds *dynhds);
*/ */
size_t Curl_dynhds_count(struct dynhds *dynhds); size_t Curl_dynhds_count(struct dynhds *dynhds);
/**
* Set the options to use, replacing any existing ones.
* This will not have an effect on already existing headers.
*/
void Curl_dynhds_set_opts(struct dynhds *dynhds, int opts);
/** /**
* Return the n-th header entry or NULL if it does not exist. * Return the n-th header entry or NULL if it does not exist.
*/ */
@ -140,11 +150,18 @@ CURLcode Curl_dynhds_cset(struct dynhds *dynhds,
/** /**
* Add a single header from a HTTP/1.1 formatted line at the end. Line * Add a single header from a HTTP/1.1 formatted line at the end. Line
* may contain a delimiting \r\n or just \n. And characters after * may contain a delimiting \r\n or just \n. Any characters after
* that will be ignored. * that will be ignored.
*/ */
CURLcode Curl_dynhds_h1_cadd_line(struct dynhds *dynhds, const char *line); CURLcode Curl_dynhds_h1_cadd_line(struct dynhds *dynhds, const char *line);
/**
* Add a single header from a HTTP/1.1 formatted line at the end. Line
* may contain a delimiting \r\n or just \n. Any characters after
* that will be ignored.
*/
CURLcode Curl_dynhds_h1_add_line(struct dynhds *dynhds,
const char *line, size_t line_len);
/** /**
* Add the headers to the given `dynbuf` in HTTP/1.1 format with * Add the headers to the given `dynbuf` in HTTP/1.1 format with

View File

@ -1,316 +0,0 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* SPDX-License-Identifier: curl
*
***************************************************************************/
#include "curl_setup.h"
#include "urldata.h"
#include "h2h3.h"
#include "transfer.h"
#include "sendf.h"
#include "strcase.h"
/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
#include "memdebug.h"
/*
* Curl_pseudo_headers() creates the array with pseudo headers to be
* used in an HTTP/2 or HTTP/3 request.
*/
#if defined(USE_NGHTTP2) || defined(ENABLE_QUIC)
/* Index where :authority header field will appear in request header
field list. */
#define AUTHORITY_DST_IDX 3
/* USHRT_MAX is 65535 == 0xffff */
#define HEADER_OVERFLOW(x) \
(x.namelen > 0xffff || x.valuelen > 0xffff - x.namelen)
/*
* Check header memory for the token "trailers".
* Parse the tokens as separated by comma and surrounded by whitespace.
* Returns TRUE if found or FALSE if not.
*/
static bool contains_trailers(const char *p, size_t len)
{
  const char *end = p + len;
  for(;;) {
    /* skip optional whitespace before a token */
    for(; p != end && (*p == ' ' || *p == '\t'); ++p)
      ;
    /* too little room left for the "trailers" token to fit? */
    if(p == end || (size_t)(end - p) < sizeof("trailers") - 1)
      return FALSE;
    if(strncasecompare("trailers", p, sizeof("trailers") - 1)) {
      p += sizeof("trailers") - 1;
      /* the match only counts when followed by whitespace, a comma or
         the end of the value */
      for(; p != end && (*p == ' ' || *p == '\t'); ++p)
        ;
      if(p == end || *p == ',')
        return TRUE;
    }
    /* skip to next comma-separated token */
    for(; p != end && *p != ','; ++p)
      ;
    if(p == end)
      return FALSE;
    ++p;
  }
}
typedef enum {
  /* Send header to server */
  HEADERINST_FORWARD,
  /* Don't send header to server */
  HEADERINST_IGNORE,
  /* Discard header, and replace it with "te: trailers" */
  HEADERINST_TE_TRAILERS
} header_instruction;

/* Decides how to treat given header field. Connection-specific fields
   are not allowed in HTTP/2; dispatch on name length so at most a few
   case-insensitive comparisons are needed. */
static header_instruction inspect_header(const char *name, size_t namelen,
                                         const char *value, size_t valuelen) {
  switch(namelen) {
  case 2:
    if(!strncasecompare("te", name, namelen))
      return HEADERINST_FORWARD;
    /* "TE" may only carry the "trailers" token in HTTP/2 */
    return contains_trailers(value, valuelen) ?
           HEADERINST_TE_TRAILERS : HEADERINST_IGNORE;
  case 7:
    return strncasecompare("upgrade", name, namelen) ?
           HEADERINST_IGNORE : HEADERINST_FORWARD;
  case 10:
    return (strncasecompare("connection", name, namelen) ||
            strncasecompare("keep-alive", name, namelen)) ?
           HEADERINST_IGNORE : HEADERINST_FORWARD;
  case 16:
    return strncasecompare("proxy-connection", name, namelen) ?
           HEADERINST_IGNORE : HEADERINST_FORWARD;
  case 17:
    return strncasecompare("transfer-encoding", name, namelen) ?
           HEADERINST_IGNORE : HEADERINST_FORWARD;
  default:
    return HEADERINST_FORWARD;
  }
}
/* Parse a serialized HTTP/1.1 request head in [mem, mem+len) and build
 * the pseudo-header array (:method, :path, :scheme, optionally
 * :authority) plus regular headers for HTTP/2/3 submission. On success
 * `*hp` receives the allocated struct (free with Curl_pseudo_free());
 * `*hdrlen`, when given, receives the number of bytes consumed. */
CURLcode Curl_pseudo_headers(struct Curl_easy *data,
                             const char *mem, /* the request */
                             const size_t len /* size of request */,
                             size_t* hdrlen /* opt size of headers read */,
                             struct h2h3req **hp)
{
  struct connectdata *conn = data->conn;
  size_t nheader = 0;
  size_t i;
  size_t authority_idx;
  char *hdbuf = (char *)mem;
  char *end, *line_end;
  struct h2h3pseudo *nva = NULL;
  struct h2h3req *hreq = NULL;
  char *vptr;

  /* Calculate number of headers contained in [mem, mem + len). Assumes a
     correctly generated HTTP header field block. */
  for(i = 1; i < len; ++i) {
    if(hdbuf[i] == '\n' && hdbuf[i - 1] == '\r') {
      ++nheader;
      ++i;
    }
  }
  if(nheader < 2) {
    goto fail;
  }
  /* We counted additional 2 \r\n in the first and last line. We need 3
     new headers: :method, :path and :scheme. Therefore we need one
     more space. */
  nheader += 1;
  hreq = malloc(sizeof(struct h2h3req) +
                sizeof(struct h2h3pseudo) * (nheader - 1));
  if(!hreq) {
    goto fail;
  }
  nva = &hreq->header[0];

  /* Extract :method, :path from request line
     We do line endings with CRLF so checking for CR is enough */
  line_end = memchr(hdbuf, '\r', len);
  if(!line_end) {
    goto fail;
  }

  /* Method does not contain spaces */
  end = memchr(hdbuf, ' ', line_end - hdbuf);
  if(!end || end == hdbuf)
    goto fail;
  nva[0].name = H2H3_PSEUDO_METHOD;
  nva[0].namelen = sizeof(H2H3_PSEUDO_METHOD) - 1;
  nva[0].value = hdbuf;
  nva[0].valuelen = (size_t)(end - hdbuf);
  hdbuf = end + 1;

  /* Path may contain spaces so scan backwards */
  end = NULL;
  for(i = (size_t)(line_end - hdbuf); i; --i) {
    if(hdbuf[i - 1] == ' ') {
      end = &hdbuf[i - 1];
      break;
    }
  }
  if(!end || end == hdbuf)
    goto fail;
  nva[1].name = H2H3_PSEUDO_PATH;
  nva[1].namelen = sizeof(H2H3_PSEUDO_PATH) - 1;
  nva[1].value = hdbuf;
  nva[1].valuelen = (end - hdbuf);

  /* :scheme: prefer a user-supplied ":scheme" header, otherwise derive
     it from whether the connection uses SSL */
  nva[2].name = H2H3_PSEUDO_SCHEME;
  nva[2].namelen = sizeof(H2H3_PSEUDO_SCHEME) - 1;
  vptr = Curl_checkheaders(data, STRCONST(H2H3_PSEUDO_SCHEME));
  if(vptr) {
    vptr += sizeof(H2H3_PSEUDO_SCHEME);
    while(*vptr && ISBLANK(*vptr))
      vptr++;
    nva[2].value = vptr;
    infof(data, "set pseudo header %s to %s", H2H3_PSEUDO_SCHEME, vptr);
  }
  else {
    if(conn->handler->flags & PROTOPT_SSL)
      nva[2].value = "https";
    else
      nva[2].value = "http";
  }
  nva[2].valuelen = strlen((char *)nva[2].value);

  authority_idx = 0;
  i = 3;
  while(i < nheader) {
    size_t hlen;

    hdbuf = line_end + 2;

    /* check for next CR, but only within the piece of data left in the given
       buffer */
    line_end = memchr(hdbuf, '\r', len - (hdbuf - (char *)mem));
    if(!line_end || (line_end == hdbuf))
      goto fail;

    /* header continuation lines are not supported */
    if(*hdbuf == ' ' || *hdbuf == '\t')
      goto fail;

    for(end = hdbuf; end < line_end && *end != ':'; ++end)
      ;
    if(end == hdbuf || end == line_end)
      goto fail;
    hlen = end - hdbuf;

    if(hlen == 4 && strncasecompare("host", hdbuf, 4)) {
      /* "Host" becomes the :authority pseudo header */
      authority_idx = i;
      nva[i].name = H2H3_PSEUDO_AUTHORITY;
      nva[i].namelen = sizeof(H2H3_PSEUDO_AUTHORITY) - 1;
    }
    else {
      nva[i].namelen = (size_t)(end - hdbuf);
      /* Lower case the header name for HTTP/3 */
      Curl_strntolower((char *)hdbuf, hdbuf, nva[i].namelen);
      nva[i].name = hdbuf;
    }
    hdbuf = end + 1;
    while(*hdbuf == ' ' || *hdbuf == '\t')
      ++hdbuf;
    end = line_end;

    switch(inspect_header((const char *)nva[i].name, nva[i].namelen, hdbuf,
                          end - hdbuf)) {
    case HEADERINST_IGNORE:
      /* skip header fields prohibited by HTTP/2 specification. */
      --nheader;
      continue;
    case HEADERINST_TE_TRAILERS:
      nva[i].value = "trailers";
      nva[i].valuelen = sizeof("trailers") - 1;
      break;
    default:
      nva[i].value = hdbuf;
      nva[i].valuelen = (end - hdbuf);
    }
    ++i;
  }

  /* :authority must come before non-pseudo header fields */
  if(authority_idx && authority_idx != AUTHORITY_DST_IDX) {
    struct h2h3pseudo authority = nva[authority_idx];
    for(i = authority_idx; i > AUTHORITY_DST_IDX; --i) {
      nva[i] = nva[i - 1];
    }
    nva[i] = authority;
  }

  /* Warn stream may be rejected if cumulative length of headers is too
     large. */
#define MAX_ACC 60000 /* <64KB to account for some overhead */
  {
    size_t acc = 0;
    for(i = 0; i < nheader; ++i) {
      acc += nva[i].namelen + nva[i].valuelen;

      infof(data, "h2h3 [%.*s: %.*s]",
            (int)nva[i].namelen, nva[i].name,
            (int)nva[i].valuelen, nva[i].value);
    }
    if(acc > MAX_ACC) {
      infof(data, "http_request: Warning: The cumulative length of all "
            "headers exceeds %d bytes and that could cause the "
            "stream to be rejected.", MAX_ACC);
    }
  }

  if(hdrlen) {
    /* Skip trailing CRLF: `end` points at the last header line's CR,
       +4 steps over "\r\n\r\n" ending the header block */
    end += 4;
    *hdrlen = end - mem;
  }

  hreq->entries = nheader;
  *hp = hreq;
  return CURLE_OK;

fail:
  /* NOTE(review): parse failures also surface as OUT_OF_MEMORY here —
     confirm that is intended */
  free(hreq);
  return CURLE_OUT_OF_MEMORY;
}
/* Free a request struct earlier created by Curl_pseudo_headers().
 * Safe to call with NULL. */
void Curl_pseudo_free(struct h2h3req *hp)
{
  free(hp);
}
#endif /* USE_NGHTTP2 or HTTP/3 enabled */

View File

@ -3136,7 +3136,17 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done)
DEBUGASSERT(Curl_conn_is_http3(data, conn, FIRSTSOCKET)); DEBUGASSERT(Curl_conn_is_http3(data, conn, FIRSTSOCKET));
break; break;
case CURL_HTTP_VERSION_2: case CURL_HTTP_VERSION_2:
DEBUGASSERT(Curl_conn_is_http2(data, conn, FIRSTSOCKET)); #ifndef CURL_DISABLE_PROXY
if(!Curl_conn_is_http2(data, conn, FIRSTSOCKET) &&
conn->bits.proxy && !conn->bits.tunnel_proxy
) {
result = Curl_http2_switch(data, conn, FIRSTSOCKET);
if(result)
return result;
}
else
#endif
DEBUGASSERT(Curl_conn_is_http2(data, conn, FIRSTSOCKET));
break; break;
case CURL_HTTP_VERSION_1_1: case CURL_HTTP_VERSION_1_1:
/* continue with HTTP/1.1 when explicitly requested */ /* continue with HTTP/1.1 when explicitly requested */
@ -4516,37 +4526,46 @@ out:
return result; return result;
} }
/* Portable stand-in for strndup(): duplicate the first `len` bytes of
 * `ptr` into a freshly allocated, '\0'-terminated string. Returns NULL
 * on allocation failure; the caller owns and frees the result. */
static char *my_strndup(const char *ptr, size_t len)
{
  char *dup = malloc(len + 1);

  if(dup) {
    memcpy(dup, ptr, len);
    dup[len] = '\0';
  }
  return dup;
}
CURLcode Curl_http_req_make(struct http_req **preq, CURLcode Curl_http_req_make(struct http_req **preq,
const char *method, const char *method, size_t m_len,
const char *scheme, const char *scheme, size_t s_len,
const char *authority, const char *authority, size_t a_len,
const char *path) const char *path, size_t p_len)
{ {
struct http_req *req; struct http_req *req;
CURLcode result = CURLE_OUT_OF_MEMORY; CURLcode result = CURLE_OUT_OF_MEMORY;
size_t mlen;
DEBUGASSERT(method); DEBUGASSERT(method);
mlen = strlen(method); if(m_len + 1 >= sizeof(req->method))
if(mlen + 1 >= sizeof(req->method))
return CURLE_BAD_FUNCTION_ARGUMENT; return CURLE_BAD_FUNCTION_ARGUMENT;
req = calloc(1, sizeof(*req)); req = calloc(1, sizeof(*req));
if(!req) if(!req)
goto out; goto out;
memcpy(req->method, method, mlen); memcpy(req->method, method, m_len);
if(scheme) { if(scheme) {
req->scheme = strdup(scheme); req->scheme = my_strndup(scheme, s_len);
if(!req->scheme) if(!req->scheme)
goto out; goto out;
} }
if(authority) { if(authority) {
req->authority = strdup(authority); req->authority = my_strndup(authority, a_len);
if(!req->authority) if(!req->authority)
goto out; goto out;
} }
if(path) { if(path) {
req->path = strdup(path); req->path = my_strndup(path, p_len);
if(!req->path) if(!req->path)
goto out; goto out;
} }
@ -4561,6 +4580,164 @@ out:
return result; return result;
} }
/* Derive `req->authority` from `url`, in the form
 * "[user[:password]@]host[:port]". Sets `req->authority` to NULL when
 * the URL has no host. Returns CURLE_OK on success.
 *
 * Fixes a bug where the password was retrieved into `user` (via
 * CURLUPART_PASSWORD with &user), leaking the user name and leaving
 * `pass` always NULL so the ":password" part was never added. */
static CURLcode req_assign_url_authority(struct http_req *req, CURLU *url)
{
  char *user, *pass, *host, *port;
  struct dynbuf buf;
  CURLUcode uc;
  CURLcode result = CURLE_URL_MALFORMAT;

  user = pass = host = port = NULL;
  Curl_dyn_init(&buf, DYN_HTTP_REQUEST);

  uc = curl_url_get(url, CURLUPART_HOST, &host, 0);
  if(uc && uc != CURLUE_NO_HOST)
    goto out;
  if(!host) {
    /* no host, no authority */
    req->authority = NULL;
    result = CURLE_OK;
    goto out;
  }

  uc = curl_url_get(url, CURLUPART_PORT, &port, CURLU_NO_DEFAULT_PORT);
  if(uc && uc != CURLUE_NO_PORT)
    goto out;
  uc = curl_url_get(url, CURLUPART_USER, &user, 0);
  if(uc && uc != CURLUE_NO_USER)
    goto out;
  if(user) {
    /* fixed: retrieve the password into `pass`, not `user` */
    uc = curl_url_get(url, CURLUPART_PASSWORD, &pass, 0);
    if(uc && uc != CURLUE_NO_PASSWORD)
      goto out;
    result = Curl_dyn_add(&buf, user);
    if(result)
      goto out;
    if(pass) {
      result = Curl_dyn_addf(&buf, ":%s", pass);
      if(result)
        goto out;
    }
    result = Curl_dyn_add(&buf, "@");
    if(result)
      goto out;
  }
  result = Curl_dyn_add(&buf, host);
  if(result)
    goto out;
  if(port) {
    result = Curl_dyn_addf(&buf, ":%s", port);
    if(result)
      goto out;
  }
  req->authority = strdup(Curl_dyn_ptr(&buf));
  if(!req->authority)
    goto out;
  result = CURLE_OK;

out:
  free(user);
  free(pass);
  free(host);
  free(port);
  Curl_dyn_free(&buf);
  return result;
}
/* Derive `req->path` from the path and query parts of `url`, in the
 * form "path[?query]". Leaves `req->path` at NULL when the URL has
 * neither part. Returns CURLE_OK on success. */
static CURLcode req_assign_url_path(struct http_req *req, CURLU *url)
{
  char *path, *query;
  struct dynbuf buf;
  CURLUcode uc;
  CURLcode result = CURLE_URL_MALFORMAT;

  path = query = NULL;
  Curl_dyn_init(&buf, DYN_HTTP_REQUEST);

  /* CURLU_PATH_AS_IS: take the path verbatim, no normalization */
  uc = curl_url_get(url, CURLUPART_PATH, &path, CURLU_PATH_AS_IS);
  if(uc)
    goto out;
  uc = curl_url_get(url, CURLUPART_QUERY, &query, 0);
  if(uc && uc != CURLUE_NO_QUERY)
    goto out;

  if(!path && !query) {
    req->path = NULL;
  }
  else if(path && !query) {
    /* path only: hand ownership over directly, no copy needed */
    req->path = path;
    path = NULL;
  }
  else {
    /* combine into "path?query" via a dynamic buffer */
    if(path) {
      result = Curl_dyn_add(&buf, path);
      if(result)
        goto out;
    }
    if(query) {
      result = Curl_dyn_addf(&buf, "?%s", query);
      if(result)
        goto out;
    }
    req->path = strdup(Curl_dyn_ptr(&buf));
    if(!req->path)
      goto out;
  }
  result = CURLE_OK;

out:
  free(path);
  free(query);
  Curl_dyn_free(&buf);
  return result;
}
/* Create a new `http_req` from `method` (`m_len` bytes, need not be
 * '\0'-terminated) and the parts of `url`. When the URL carries no
 * scheme, `scheme_default` (may be NULL) is used instead. On success
 * `*preq` holds the new request; on any failure it is set to NULL and
 * all partial allocations are released. */
CURLcode Curl_http_req_make2(struct http_req **preq,
                             const char *method, size_t m_len,
                             CURLU *url, const char *scheme_default)
{
  struct http_req *req;
  CURLcode result = CURLE_OUT_OF_MEMORY;
  CURLUcode uc;

  DEBUGASSERT(method);
  /* the method is stored in a fixed-size array inside the struct */
  if(m_len + 1 >= sizeof(req->method))
    return CURLE_BAD_FUNCTION_ARGUMENT;

  req = calloc(1, sizeof(*req));
  if(!req)
    goto out;
  memcpy(req->method, method, m_len);
  uc = curl_url_get(url, CURLUPART_SCHEME, &req->scheme, 0);
  /* NOTE(review): URL errors here surface as OUT_OF_MEMORY — confirm
     that is intended */
  if(uc && uc != CURLUE_NO_SCHEME)
    goto out;
  if(!req->scheme && scheme_default) {
    req->scheme = strdup(scheme_default);
    if(!req->scheme)
      goto out;
  }

  result = req_assign_url_authority(req, url);
  if(result)
    goto out;
  result = req_assign_url_path(req, url);
  if(result)
    goto out;

  Curl_dynhds_init(&req->headers, 0, DYN_H2_HEADERS);
  Curl_dynhds_init(&req->trailers, 0, DYN_H2_TRAILERS);
  result = CURLE_OK;

out:
  if(result && req)
    Curl_http_req_free(req);
  *preq = result? NULL : req;
  return result;
}
void Curl_http_req_free(struct http_req *req) void Curl_http_req_free(struct http_req *req)
{ {
if(req) { if(req) {
@ -4573,6 +4750,97 @@ void Curl_http_req_free(struct http_req *req)
} }
} }
/* name/length pair for the fixed lookup table below */
struct name_const {
  const char *name;
  size_t namelen;
};

/* Header fields that must not be forwarded in an HTTP/2 request (see
 * RFC 9113 ch. 8.2.2; "Host" is carried as :authority instead). Kept
 * sorted by ascending name length so the scan below can stop early. */
static struct name_const H2_NON_FIELD[] = {
  { STRCONST("Host") },
  { STRCONST("Upgrade") },
  { STRCONST("Connection") },
  { STRCONST("Keep-Alive") },
  { STRCONST("Proxy-Connection") },
  { STRCONST("Transfer-Encoding") },
};

/* Return TRUE when `name` (of `namelen` bytes) is a field that must be
 * dropped when converting a request to HTTP/2. */
static bool h2_non_field(const char *name, size_t namelen)
{
  size_t i;
  for(i = 0; i < sizeof(H2_NON_FIELD)/sizeof(H2_NON_FIELD[0]); ++i) {
    /* table is length-sorted: once entries are longer, none can match */
    if(namelen < H2_NON_FIELD[i].namelen)
      return FALSE;
    if(namelen == H2_NON_FIELD[i].namelen &&
       strcasecompare(H2_NON_FIELD[i].name, name))
      return TRUE;
  }
  return FALSE;
}
/* Convert `req` into the HTTP/2 header list in `h2_headers`: pseudo
 * headers (:method, :scheme, :authority, :path) first, then the regular
 * headers lower-cased, with connection-specific fields dropped and any
 * "Host" header promoted to :authority when none is set. */
CURLcode Curl_http_req_to_h2(struct dynhds *h2_headers,
                             struct http_req *req, struct Curl_easy *data)
{
  const char *scheme = NULL, *authority = NULL;
  struct dynhds_entry *e;
  size_t i;
  CURLcode result;

  DEBUGASSERT(req);
  DEBUGASSERT(h2_headers);

  if(req->scheme) {
    scheme = req->scheme;
  }
  else if(strcmp("CONNECT", req->method)) {
    /* not CONNECT: take a user-supplied ":scheme" header if present,
       else derive the scheme from the connection's SSL use */
    scheme = Curl_checkheaders(data, STRCONST(HTTP_PSEUDO_SCHEME));
    if(scheme) {
      scheme += sizeof(HTTP_PSEUDO_SCHEME);
      while(*scheme && ISBLANK(*scheme))
        scheme++;
      infof(data, "set pseudo header %s to %s", HTTP_PSEUDO_SCHEME, scheme);
    }
    else {
      scheme = (data->conn && data->conn->handler->flags & PROTOPT_SSL)?
                "https" : "http";
    }
  }

  if(req->authority) {
    authority = req->authority;
  }
  else {
    /* no explicit authority: fall back to a "Host" header if present */
    e = Curl_dynhds_get(&req->headers, STRCONST("Host"));
    if(e)
      authority = e->value;
  }

  Curl_dynhds_reset(h2_headers);
  /* HTTP/2 requires lowercase field names */
  Curl_dynhds_set_opts(h2_headers, DYNHDS_OPT_LOWERCASE);
  result = Curl_dynhds_add(h2_headers, STRCONST(HTTP_PSEUDO_METHOD),
                           req->method, strlen(req->method));
  if(!result && scheme) {
    result = Curl_dynhds_add(h2_headers, STRCONST(HTTP_PSEUDO_SCHEME),
                             scheme, strlen(scheme));
  }
  if(!result && authority) {
    result = Curl_dynhds_add(h2_headers, STRCONST(HTTP_PSEUDO_AUTHORITY),
                             authority, strlen(authority));
  }
  if(!result && req->path) {
    result = Curl_dynhds_add(h2_headers, STRCONST(HTTP_PSEUDO_PATH),
                             req->path, strlen(req->path));
  }
  /* forward the remaining headers, skipping connection-specific ones
     (and "Host", which was handled above) */
  for(i = 0; !result && i < Curl_dynhds_count(&req->headers); ++i) {
    e = Curl_dynhds_getn(&req->headers, i);
    if(!h2_non_field(e->name, e->namelen)) {
      result = Curl_dynhds_add(h2_headers, e->name, e->namelen,
                               e->value, e->valuelen);
    }
  }
  return result;
}
CURLcode Curl_http_resp_make(struct http_resp **presp, CURLcode Curl_http_resp_make(struct http_resp **presp,
int status, int status,
const char *description) const char *description)

View File

@ -260,6 +260,7 @@ Curl_http_output_auth(struct Curl_easy *data,
/* Decode HTTP status code string. */ /* Decode HTTP status code string. */
CURLcode Curl_http_decode_status(int *pstatus, const char *s, size_t len); CURLcode Curl_http_decode_status(int *pstatus, const char *s, size_t len);
/** /**
* All about a core HTTP request, excluding body and trailers * All about a core HTTP request, excluding body and trailers
*/ */
@ -276,13 +277,41 @@ struct http_req {
* Create a HTTP request struct. * Create a HTTP request struct.
*/ */
CURLcode Curl_http_req_make(struct http_req **preq, CURLcode Curl_http_req_make(struct http_req **preq,
const char *method, const char *method, size_t m_len,
const char *scheme, const char *scheme, size_t s_len,
const char *authority, const char *authority, size_t a_len,
const char *path); const char *path, size_t p_len);
CURLcode Curl_http_req_make2(struct http_req **preq,
const char *method, size_t m_len,
CURLU *url, const char *scheme_default);
void Curl_http_req_free(struct http_req *req); void Curl_http_req_free(struct http_req *req);
#define HTTP_PSEUDO_METHOD ":method"
#define HTTP_PSEUDO_SCHEME ":scheme"
#define HTTP_PSEUDO_AUTHORITY ":authority"
#define HTTP_PSEUDO_PATH ":path"
#define HTTP_PSEUDO_STATUS ":status"
/**
 * Create the list of HTTP/2 headers which represent the request,
 * using HTTP/2 pseudo headers preceding the `req->headers`.
 *
 * Applies the following transformations:
 * - if `authority` is set, any "Host" header is removed.
 * - if `authority` is unset and a "Host" header is present, use
 *   that as `authority` and remove "Host"
 * - removes any Connection header fields as defined in RFC 9113 ch. 8.2.2
 * - lower-cases the header field names
 *
 * @param h2_headers will contain the HTTP/2 headers on success
 * @param req        the request to transform
 * @param data       the handle to look up defaults like ':scheme' from
 */
CURLcode Curl_http_req_to_h2(struct dynhds *h2_headers,
struct http_req *req, struct Curl_easy *data);
/** /**
* All about a core HTTP response, excluding body and trailers * All about a core HTTP response, excluding body and trailers
*/ */

349
lib/http1.c Normal file
View File

@ -0,0 +1,349 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* SPDX-License-Identifier: curl
*
***************************************************************************/
#include "curl_setup.h"
#ifndef CURL_DISABLE_HTTP
#include "urldata.h"
#include <curl/curl.h>
#include "http.h"
#include "http1.h"
#include "urlapi-int.h"
/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
#include "memdebug.h"
#define MAX_URL_LEN (4*1024)
void Curl_h1_req_parse_init(struct h1_req_parser *parser, size_t max_line_len)
{
  /* Start from a clean slate. The scratch bufq stitches together lines
   * that arrive split across reads, so size it for one full line. */
  memset(parser, 0, sizeof(*parser));
  Curl_bufq_init(&parser->scratch, max_line_len, 1);
  parser->max_line_len = max_line_len;
}
void Curl_h1_req_parse_free(struct h1_req_parser *parser)
{
  /* Release the parsed request (if any) and the scratch buffer.
   * Passing NULL is a harmless no-op. */
  if(!parser)
    return;
  Curl_http_req_free(parser->req);
  parser->req = NULL;
  Curl_bufq_free(&parser->scratch);
  parser->done = FALSE;
}
/* Look for a complete line (terminated by LF) in `buf`. On success,
 * point `parser->line`/`line_len` at it (terminator excluded) and
 * return the total number of bytes the line occupies, terminator
 * included. On failure return -1 with *err set: CURLE_AGAIN when more
 * data may still complete the line, CURLE_URL_MALFORMAT when the line
 * exceeds `max_line_len` or (in STRICT mode) lacks a CRLF ending. */
static ssize_t detect_line(struct h1_req_parser *parser,
                           const char *buf, const size_t buflen, int options,
                           CURLcode *err)
{
  const char *eol;
  size_t full_len, eol_len;

  DEBUGASSERT(!parser->line);
  eol = memchr(buf, '\n', buflen);
  if(!eol) {
    /* no terminator yet; only an error if the limit is already blown */
    *err = (buflen > parser->max_line_len)? CURLE_URL_MALFORMAT : CURLE_AGAIN;
    return -1;
  }
  full_len = (size_t)(eol - buf) + 1;
  if(full_len > parser->max_line_len) {
    *err = CURLE_URL_MALFORMAT;
    return -1;
  }

  /* number of end-of-line bytes: CRLF = 2, bare LF = 1 */
  eol_len = ((full_len > 1) && (buf[full_len - 2] == '\r'))? 2 : 1;
  if((options & H1_PARSE_OPT_STRICT) && (eol_len != 2)) {
    /* strict parsing accepts CRLF line endings only */
    *err = CURLE_URL_MALFORMAT;
    return -1;
  }
  parser->line = buf;
  parser->line_len = full_len - eol_len;
  *err = CURLE_OK;
  return (ssize_t)full_len;
}
/* Advance the parser to the next line. Consumes the previously found
 * line (and its scratch bytes, if it came from scratch), then looks for
 * a new one either directly in `buf` or in the scratch buffer when a
 * partial line is carried over from an earlier call.
 * Returns the number of bytes consumed from `buf` (possibly 0), or -1
 * with *err set. *err == CURLE_AGAIN means "no complete line yet". */
static ssize_t next_line(struct h1_req_parser *parser,
                         const char *buf, const size_t buflen, int options,
                         CURLcode *err)
{
  ssize_t nread = 0, n;

  if(parser->line) {
    if(parser->scratch_skip) {
      /* last line was from scratch. Remove it now, since we are done
       * with it and look for the next one. */
      Curl_bufq_skip_and_shift(&parser->scratch, parser->scratch_skip);
      parser->scratch_skip = 0;
    }
    parser->line = NULL;
    parser->line_len = 0;
  }

  if(Curl_bufq_is_empty(&parser->scratch)) {
    /* nothing carried over: look for a line directly in the caller's
     * buffer. detect_line() sets parser->line on success. */
    nread = detect_line(parser, buf, buflen, options, err);
    if(nread < 0) {
      if(*err != CURLE_AGAIN)
        return -1;
      /* not a complete line, add to scratch for later revisit */
      nread = Curl_bufq_write(&parser->scratch,
                              (const unsigned char *)buf, buflen, err);
      return nread;
    }
    /* found one */
  }
  else {
    const char *sbuf;
    size_t sbuflen;

    /* scratch contains bytes from last attempt, add more to it */
    if(buflen) {
      const char *line_end;
      size_t add_len;
      ssize_t pos;

      /* only copy up to and including the first LF (if any), so scratch
       * holds at most one line's worth of caller data */
      line_end = memchr(buf, '\n', buflen);
      pos = line_end? (line_end - buf + 1) : -1;
      add_len = (pos >= 0)? (size_t)pos : buflen;
      nread = Curl_bufq_write(&parser->scratch,
                              (const unsigned char *)buf, add_len, err);
      if(nread < 0) {
        /* Unable to add anything to scratch is an error, since we should
         * have seen a line there then before. */
        if(*err == CURLE_AGAIN)
          *err = CURLE_URL_MALFORMAT;
        return -1;
      }
    }
    if(Curl_bufq_peek(&parser->scratch,
                      (const unsigned char **)&sbuf, &sbuflen)) {
      n = detect_line(parser, sbuf, sbuflen, options, err);
      if(n < 0 && *err != CURLE_AGAIN)
        return -1; /* real error */
      /* NOTE(review): on CURLE_AGAIN, n is -1 and this stores (size_t)-1;
       * appears harmless since parser->line stays NULL and the value is
       * overwritten before the skip above runs — confirm. */
      parser->scratch_skip = (size_t)n;
    }
    else {
      /* we SHOULD be able to peek at scratch data */
      DEBUGASSERT(0);
    }
  }
  return nread;
}
/* Parse the request line "METHOD TARGET HTTP_VERSION" held in
 * `parser->line` and create `parser->req` from it, classifying TARGET
 * as origin-, absolute-, authority- or asterisk-form (RFC 9112, 3.2).
 * `scheme_default` supplies the scheme when TARGET carries none.
 * Returns CURLE_OK on success, CURLE_URL_MALFORMAT on any syntax
 * problem (the default), CURLE_OUT_OF_MEMORY on allocation failure. */
static CURLcode start_req(struct h1_req_parser *parser,
                          const char *scheme_default, int options)
{
  const char *p, *m, *target, *hv, *scheme, *authority, *path;
  size_t m_len, target_len, hv_len, scheme_len, authority_len, path_len;
  size_t i;
  CURLU *url = NULL;
  CURLcode result = CURLE_URL_MALFORMAT; /* Use this as default fail */

  DEBUGASSERT(!parser->req);
  /* line must match: "METHOD TARGET HTTP_VERSION" */
  p = memchr(parser->line, ' ', parser->line_len);
  if(!p || p == parser->line)
    goto out;

  m = parser->line;
  m_len = p - parser->line;
  target = p + 1;
  target_len = hv_len = 0;
  hv = NULL;

  /* The HTTP version is whatever follows the LAST space; TARGET may
   * contain spaces (lenient parsing), so scan backwards. The lower
   * bound m_len + 1 keeps the METHOD separator itself from matching,
   * and indexing with i - 1 never reads past the logical line end
   * (index line_len holds the stripped CR/LF). */
  for(i = parser->line_len; i > m_len + 1; --i) {
    if(parser->line[i - 1] == ' ') {
      hv = &parser->line[i];
      /* true version length; was line_len - (i - 1) before, which
       * over-counted by one and let a trailing space slip past the
       * empty-version rejection below */
      hv_len = parser->line_len - i;
      target_len = (hv - target) - 1;
      break;
    }
  }
  /* no SPACE found or empty TARGET or empty HTTP_VERSION */
  if(!target_len || !hv_len)
    goto out;

  /* TODO: we do not check HTTP_VERSION for conformity, should
   * do that when STRICT option is supplied. */
  (void)hv;

  /* The TARGET can be (rfc 9112, ch. 3.2):
   * origin-form:     path + optional query
   * absolute-form:   absolute URI
   * authority-form:  host+port for CONNECT
   * asterisk-form:   '*' for OPTIONS
   *
   * from TARGET, we derive `scheme` `authority` `path`
   * origin-form            --        --        TARGET
   * absolute-form          URL*      URL*      URL*
   * authority-form         --        TARGET    --
   * asterisk-form          --        --        TARGET
   */
  scheme = authority = path = NULL;
  scheme_len = authority_len = path_len = 0;

  if(target_len == 1 && target[0] == '*') {
    /* asterisk-form */
    path = target;
    path_len = target_len;
  }
  else if((m_len == 7) && !strncmp("CONNECT", m, m_len)) {
    /* authority-form. The length check matters: strncmp() alone would
     * also accept any prefix of "CONNECT" (e.g. method "CON"). */
    authority = target;
    authority_len = target_len;
  }
  else if(target[0] == '/') {
    /* origin-form */
    path = target;
    path_len = target_len;
  }
  else {
    /* origin-form OR absolute-form */
    CURLUcode uc;
    char tmp[MAX_URL_LEN];

    /* default, unless we see an absolute URL */
    path = target;
    path_len = target_len;

    /* URL parser wants 0-termination */
    if(target_len >= sizeof(tmp))
      goto out;
    memcpy(tmp, target, target_len);
    tmp[target_len] = '\0';
    /* See if treating TARGET as an absolute URL makes sense */
    if(Curl_is_absolute_url(tmp, NULL, 0, FALSE)) {
      int url_options;

      url = curl_url();
      if(!url) {
        result = CURLE_OUT_OF_MEMORY;
        goto out;
      }
      url_options = (CURLU_NON_SUPPORT_SCHEME|
                     CURLU_PATH_AS_IS|
                     CURLU_NO_DEFAULT_PORT);
      if(!(options & H1_PARSE_OPT_STRICT))
        url_options |= CURLU_ALLOW_SPACE;
      uc = curl_url_set(url, CURLUPART_URL, tmp, url_options);
      if(uc) {
        goto out;
      }
    }
    if(!url && (options & H1_PARSE_OPT_STRICT)) {
      /* we should have an absolute URL or have seen `/` earlier */
      goto out;
    }
  }

  if(url) {
    result = Curl_http_req_make2(&parser->req, m, m_len, url, scheme_default);
  }
  else {
    if(!scheme && scheme_default) {
      scheme = scheme_default;
      scheme_len = strlen(scheme_default);
    }
    result = Curl_http_req_make(&parser->req, m, m_len, scheme, scheme_len,
                                authority, authority_len, path, path_len);
  }

out:
  curl_url_cleanup(url);
  return result;
}
/* Feed `buflen` bytes of an HTTP/1 request head into the parser.
 * Processes one line at a time: the first complete line becomes the
 * request via start_req(), subsequent lines become header fields, and
 * an empty line marks the end of the head (parser->done).
 * Returns the number of bytes consumed from `buf` (the caller re-offers
 * the rest later), or -1 with *err set on a real error. A mere "need
 * more data" is not an error: partial input is buffered internally and
 * the consumed count is returned with *err == CURLE_OK. */
ssize_t Curl_h1_req_parse_read(struct h1_req_parser *parser,
                               const char *buf, size_t buflen,
                               const char *scheme_default, int options,
                               CURLcode *err)
{
  ssize_t nread = 0, n;

  *err = CURLE_OK;
  while(!parser->done) {
    n = next_line(parser, buf, buflen, options, err);
    if(n < 0) {
      if(*err != CURLE_AGAIN) {
        nread = -1;
      }
      /* CURLE_AGAIN is absorbed here: report bytes consumed so far */
      *err = CURLE_OK;
      goto out;
    }

    /* Consume this line */
    nread += (size_t)n;
    buf += (size_t)n;
    buflen -= (size_t)n;

    if(!parser->line) {
      /* consumed bytes, but line not complete */
      if(!buflen)
        goto out;
    }
    else if(!parser->req) {
      /* first complete line: the request line */
      *err = start_req(parser, scheme_default, options);
      if(*err) {
        nread = -1;
        goto out;
      }
    }
    else if(parser->line_len == 0) {
      /* last, empty line, we are finished */
      if(!parser->req) {
        /* defensive: unreachable here since the branch above runs
         * first whenever parser->req is NULL */
        *err = CURLE_URL_MALFORMAT;
        nread = -1;
        goto out;
      }
      parser->done = TRUE;
      /* head fully parsed; scratch is no longer needed */
      Curl_bufq_free(&parser->scratch);
      /* last chance adjustments */
    }
    else {
      /* an ordinary header line, add it to the request's field list */
      *err = Curl_dynhds_h1_add_line(&parser->req->headers,
                                     parser->line, parser->line_len);
      if(*err) {
        nread = -1;
        goto out;
      }
    }
  }

out:
  return nread;
}
#endif /* !CURL_DISABLE_HTTP */

View File

@ -1,5 +1,5 @@
#ifndef HEADER_CURL_H2H3_H #ifndef HEADER_CURL_HTTP1_H
#define HEADER_CURL_H2H3_H #define HEADER_CURL_HTTP1_H
/*************************************************************************** /***************************************************************************
* _ _ ____ _ * _ _ ____ _
* Project ___| | | | _ \| | * Project ___| | | | _ \| |
@ -23,40 +23,37 @@
* SPDX-License-Identifier: curl * SPDX-License-Identifier: curl
* *
***************************************************************************/ ***************************************************************************/
#include "curl_setup.h" #include "curl_setup.h"
#define H2H3_PSEUDO_METHOD ":method" #ifndef CURL_DISABLE_HTTP
#define H2H3_PSEUDO_SCHEME ":scheme" #include "bufq.h"
#define H2H3_PSEUDO_AUTHORITY ":authority" #include "http.h"
#define H2H3_PSEUDO_PATH ":path"
#define H2H3_PSEUDO_STATUS ":status"
struct h2h3pseudo { #define H1_PARSE_OPT_NONE (0)
const char *name; #define H1_PARSE_OPT_STRICT (1 << 0)
size_t namelen;
const char *value; struct h1_req_parser {
size_t valuelen; struct http_req *req;
struct bufq scratch;
size_t scratch_skip;
const char *line;
size_t max_line_len;
size_t line_len;
bool done;
}; };
struct h2h3req { void Curl_h1_req_parse_init(struct h1_req_parser *parser, size_t max_line_len);
size_t entries; void Curl_h1_req_parse_free(struct h1_req_parser *parser);
struct h2h3pseudo header[1]; /* the array is allocated to contain entries */
};
/* ssize_t Curl_h1_req_parse_read(struct h1_req_parser *parser,
* Curl_pseudo_headers() creates the array with pseudo headers to be const char *buf, size_t buflen,
* used in an HTTP/2 or HTTP/3 request. Returns an allocated struct. const char *scheme_default, int options,
* Free it with Curl_pseudo_free(). CURLcode *err);
*/
CURLcode Curl_pseudo_headers(struct Curl_easy *data,
const char *request,
const size_t len,
size_t* hdrlen /* optional */,
struct h2h3req **hp);
/* CURLcode Curl_h1_req_dprint(const struct http_req *req,
* Curl_pseudo_free() frees a h2h3req struct. struct dynbuf *dbuf);
*/
void Curl_pseudo_free(struct h2h3req *hp);
#endif /* HEADER_CURL_H2H3_H */
#endif /* !CURL_DISABLE_HTTP */
#endif /* HEADER_CURL_HTTP1_H */

View File

@ -29,6 +29,7 @@
#include <nghttp2/nghttp2.h> #include <nghttp2/nghttp2.h>
#include "urldata.h" #include "urldata.h"
#include "bufq.h" #include "bufq.h"
#include "http1.h"
#include "http2.h" #include "http2.h"
#include "http.h" #include "http.h"
#include "sendf.h" #include "sendf.h"
@ -43,7 +44,6 @@
#include "strdup.h" #include "strdup.h"
#include "transfer.h" #include "transfer.h"
#include "dynbuf.h" #include "dynbuf.h"
#include "h2h3.h"
#include "headers.h" #include "headers.h"
/* The last 3 #include files should be in this order */ /* The last 3 #include files should be in this order */
#include "curl_printf.h" #include "curl_printf.h"
@ -120,7 +120,7 @@ struct cf_h2_ctx {
struct bufq outbufq; /* network output */ struct bufq outbufq; /* network output */
struct bufc_pool stream_bufcp; /* spares for stream buffers */ struct bufc_pool stream_bufcp; /* spares for stream buffers */
size_t drain_total; /* sum of all stream's UrlState.drain */ size_t drain_total; /* sum of all stream's UrlState drain */
int32_t goaway_error; int32_t goaway_error;
int32_t last_stream_id; int32_t last_stream_id;
BIT(conn_closed); BIT(conn_closed);
@ -191,7 +191,6 @@ struct stream_ctx {
struct bufq recvbuf; /* response buffer */ struct bufq recvbuf; /* response buffer */
struct bufq sendbuf; /* request buffer */ struct bufq sendbuf; /* request buffer */
struct dynhds resp_trailers; /* response trailer fields */ struct dynhds resp_trailers; /* response trailer fields */
size_t req_hds_len; /* amount of request header bytes in sendbuf. */
size_t resp_hds_len; /* amount of response header bytes in recvbuf */ size_t resp_hds_len; /* amount of response header bytes in recvbuf */
curl_off_t upload_left; /* number of request bytes left to upload */ curl_off_t upload_left; /* number of request bytes left to upload */
@ -243,7 +242,6 @@ static CURLcode http2_data_setup(struct Curl_cfilter *cf,
Curl_bufq_initp(&stream->recvbuf, &ctx->stream_bufcp, Curl_bufq_initp(&stream->recvbuf, &ctx->stream_bufcp,
H2_STREAM_RECV_CHUNKS, BUFQ_OPT_SOFT_LIMIT); H2_STREAM_RECV_CHUNKS, BUFQ_OPT_SOFT_LIMIT);
Curl_dynhds_init(&stream->resp_trailers, 0, DYN_H2_TRAILERS); Curl_dynhds_init(&stream->resp_trailers, 0, DYN_H2_TRAILERS);
stream->req_hds_len = 0;
stream->resp_hds_len = 0; stream->resp_hds_len = 0;
stream->bodystarted = FALSE; stream->bodystarted = FALSE;
stream->status_code = -1; stream->status_code = -1;
@ -773,7 +771,7 @@ static int set_transfer_url(struct Curl_easy *data,
if(!u) if(!u)
return 5; return 5;
v = curl_pushheader_byname(hp, H2H3_PSEUDO_SCHEME); v = curl_pushheader_byname(hp, HTTP_PSEUDO_SCHEME);
if(v) { if(v) {
uc = curl_url_set(u, CURLUPART_SCHEME, v, 0); uc = curl_url_set(u, CURLUPART_SCHEME, v, 0);
if(uc) { if(uc) {
@ -782,7 +780,7 @@ static int set_transfer_url(struct Curl_easy *data,
} }
} }
v = curl_pushheader_byname(hp, H2H3_PSEUDO_AUTHORITY); v = curl_pushheader_byname(hp, HTTP_PSEUDO_AUTHORITY);
if(v) { if(v) {
uc = curl_url_set(u, CURLUPART_HOST, v, 0); uc = curl_url_set(u, CURLUPART_HOST, v, 0);
if(uc) { if(uc) {
@ -791,7 +789,7 @@ static int set_transfer_url(struct Curl_easy *data,
} }
} }
v = curl_pushheader_byname(hp, H2H3_PSEUDO_PATH); v = curl_pushheader_byname(hp, HTTP_PSEUDO_PATH);
if(v) { if(v) {
uc = curl_url_set(u, CURLUPART_PATH, v, 0); uc = curl_url_set(u, CURLUPART_PATH, v, 0);
if(uc) { if(uc) {
@ -945,7 +943,6 @@ static CURLcode recvbuf_write_hds(struct Curl_cfilter *cf,
if(nwritten < 0) if(nwritten < 0)
return result; return result;
stream->resp_hds_len += (size_t)nwritten; stream->resp_hds_len += (size_t)nwritten;
/* TODO: make sure recvbuf is more flexible with overflow */
DEBUGASSERT((size_t)nwritten == blen); DEBUGASSERT((size_t)nwritten == blen);
return CURLE_OK; return CURLE_OK;
} }
@ -978,12 +975,8 @@ static CURLcode on_stream_frame(struct Curl_cfilter *cf,
} }
} }
if(frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { if(frame->hd.flags & NGHTTP2_FLAG_END_STREAM) {
/* Stream has ended. If there is pending data, ensure that read drain_this(cf, data);
will occur to consume it. */ Curl_expire(data, 0, EXPIRE_RUN_NOW);
if(!data->state.drain && !Curl_bufq_is_empty(&stream->recvbuf)) {
drain_this(cf, data);
Curl_expire(data, 0, EXPIRE_RUN_NOW);
}
} }
break; break;
case NGHTTP2_HEADERS: case NGHTTP2_HEADERS:
@ -1280,7 +1273,7 @@ static int on_header(nghttp2_session *session, const nghttp2_frame *frame,
if(frame->hd.type == NGHTTP2_PUSH_PROMISE) { if(frame->hd.type == NGHTTP2_PUSH_PROMISE) {
char *h; char *h;
if(!strcmp(H2H3_PSEUDO_AUTHORITY, (const char *)name)) { if(!strcmp(HTTP_PSEUDO_AUTHORITY, (const char *)name)) {
/* pseudo headers are lower case */ /* pseudo headers are lower case */
int rc = 0; int rc = 0;
char *check = aprintf("%s:%d", cf->conn->host.name, char *check = aprintf("%s:%d", cf->conn->host.name,
@ -1352,15 +1345,15 @@ static int on_header(nghttp2_session *session, const nghttp2_frame *frame,
return 0; return 0;
} }
if(namelen == sizeof(H2H3_PSEUDO_STATUS) - 1 && if(namelen == sizeof(HTTP_PSEUDO_STATUS) - 1 &&
memcmp(H2H3_PSEUDO_STATUS, name, namelen) == 0) { memcmp(HTTP_PSEUDO_STATUS, name, namelen) == 0) {
/* nghttp2 guarantees :status is received first and only once. */ /* nghttp2 guarantees :status is received first and only once. */
char buffer[32]; char buffer[32];
result = Curl_http_decode_status(&stream->status_code, result = Curl_http_decode_status(&stream->status_code,
(const char *)value, valuelen); (const char *)value, valuelen);
if(result) if(result)
return NGHTTP2_ERR_CALLBACK_FAILURE; return NGHTTP2_ERR_CALLBACK_FAILURE;
msnprintf(buffer, sizeof(buffer), H2H3_PSEUDO_STATUS ":%u\r", msnprintf(buffer, sizeof(buffer), HTTP_PSEUDO_STATUS ":%u\r",
stream->status_code); stream->status_code);
result = Curl_headers_push(data_s, buffer, CURLH_PSEUDO); result = Curl_headers_push(data_s, buffer, CURLH_PSEUDO);
if(result) if(result)
@ -1527,7 +1520,7 @@ static CURLcode http2_data_done_send(struct Curl_cfilter *cf,
if(!ctx || !ctx->h2 || !stream) if(!ctx || !ctx->h2 || !stream)
goto out; goto out;
DEBUGF(LOG_CF(data, cf, "[h2sid=%d] data done", stream->id)); DEBUGF(LOG_CF(data, cf, "[h2sid=%d] data done send", stream->id));
if(stream->upload_left) { if(stream->upload_left) {
/* If the stream still thinks there's data left to upload. */ /* If the stream still thinks there's data left to upload. */
if(stream->upload_left == -1) if(stream->upload_left == -1)
@ -1751,11 +1744,15 @@ static CURLcode h2_progress_ingress(struct Curl_cfilter *cf,
* it is time to stop due to connection close or us not processing * it is time to stop due to connection close or us not processing
* all network input */ * all network input */
while(!ctx->conn_closed && Curl_bufq_is_empty(&ctx->inbufq)) { while(!ctx->conn_closed && Curl_bufq_is_empty(&ctx->inbufq)) {
/* Also, when the stream exists, break the loop when it has become
* closed or its receive buffer is full */
stream = H2_STREAM_CTX(data); stream = H2_STREAM_CTX(data);
if(stream && (stream->closed || Curl_bufq_is_full(&stream->recvbuf))) if(stream && (stream->closed || Curl_bufq_is_full(&stream->recvbuf))) {
break; /* We would like to abort here and stop processing, so that
* the transfer loop can handle the data/close here. However,
* this may leave data in underlying buffers that will not
* be consumed. */
if(!cf->next || !cf->next->cft->has_data_pending(cf->next, data))
break;
}
nread = Curl_bufq_slurp(&ctx->inbufq, nw_in_reader, cf, &result); nread = Curl_bufq_slurp(&ctx->inbufq, nw_in_reader, cf, &result);
DEBUGF(LOG_CF(data, cf, "read %zd bytes nw data -> %zd, %d", DEBUGF(LOG_CF(data, cf, "read %zd bytes nw data -> %zd, %d",
@ -1851,6 +1848,130 @@ out:
return nread; return nread;
} }
static ssize_t h2_submit(struct stream_ctx **pstream,
struct Curl_cfilter *cf, struct Curl_easy *data,
const void *buf, size_t len, CURLcode *err)
{
struct cf_h2_ctx *ctx = cf->ctx;
struct stream_ctx *stream = NULL;
struct h1_req_parser h1;
struct dynhds h2_headers;
nghttp2_nv *nva = NULL;
size_t nheader, i;
nghttp2_data_provider data_prd;
int32_t stream_id;
nghttp2_priority_spec pri_spec;
ssize_t nwritten;
Curl_h1_req_parse_init(&h1, (4*1024));
Curl_dynhds_init(&h2_headers, 0, DYN_HTTP_REQUEST);
*err = http2_data_setup(cf, data, &stream);
if(*err) {
nwritten = -1;
goto out;
}
nwritten = Curl_h1_req_parse_read(&h1, buf, len, NULL, 0, err);
if(nwritten < 0)
goto out;
DEBUGASSERT(h1.done);
DEBUGASSERT(h1.req);
*err = Curl_http_req_to_h2(&h2_headers, h1.req, data);
if(*err) {
nwritten = -1;
goto out;
}
nheader = Curl_dynhds_count(&h2_headers);
nva = malloc(sizeof(nghttp2_nv) * nheader);
if(!nva) {
*err = CURLE_OUT_OF_MEMORY;
nwritten = -1;
goto out;
}
for(i = 0; i < nheader; ++i) {
struct dynhds_entry *e = Curl_dynhds_getn(&h2_headers, i);
nva[i].name = (unsigned char *)e->name;
nva[i].namelen = e->namelen;
nva[i].value = (unsigned char *)e->value;
nva[i].valuelen = e->valuelen;
nva[i].flags = NGHTTP2_NV_FLAG_NONE;
}
#define MAX_ACC 60000 /* <64KB to account for some overhead */
{
size_t acc = 0;
for(i = 0; i < nheader; ++i) {
acc += nva[i].namelen + nva[i].valuelen;
infof(data, "h2 [%.*s: %.*s]",
(int)nva[i].namelen, nva[i].name,
(int)nva[i].valuelen, nva[i].value);
}
if(acc > MAX_ACC) {
infof(data, "http_request: Warning: The cumulative length of all "
"headers exceeds %d bytes and that could cause the "
"stream to be rejected.", MAX_ACC);
}
}
h2_pri_spec(data, &pri_spec);
DEBUGF(LOG_CF(data, cf, "send request allowed %d (easy handle %p)",
nghttp2_session_check_request_allowed(ctx->h2), (void *)data));
switch(data->state.httpreq) {
case HTTPREQ_POST:
case HTTPREQ_POST_FORM:
case HTTPREQ_POST_MIME:
case HTTPREQ_PUT:
if(data->state.infilesize != -1)
stream->upload_left = data->state.infilesize;
else
/* data sending without specifying the data amount up front */
stream->upload_left = -1; /* unknown */
data_prd.read_callback = req_body_read_callback;
data_prd.source.ptr = NULL;
stream_id = nghttp2_submit_request(ctx->h2, &pri_spec, nva, nheader,
&data_prd, data);
break;
default:
stream->upload_left = 0; /* no request body */
stream_id = nghttp2_submit_request(ctx->h2, &pri_spec, nva, nheader,
NULL, data);
}
Curl_safefree(nva);
if(stream_id < 0) {
DEBUGF(LOG_CF(data, cf, "send: nghttp2_submit_request error (%s)%u",
nghttp2_strerror(stream_id), stream_id));
*err = CURLE_SEND_ERROR;
nwritten = -1;
goto out;
}
DEBUGF(LOG_CF(data, cf, "[h2sid=%d] cf_send(len=%zu) submit %s",
stream_id, len, data->state.url));
infof(data, "Using Stream ID: %u (easy handle %p)",
stream_id, (void *)data);
stream->id = stream_id;
out:
DEBUGF(LOG_CF(data, cf, "[h2sid=%d] submit -> %zd, %d",
stream? stream->id : -1, nwritten, *err));
*pstream = stream;
Curl_h1_req_parse_free(&h1);
Curl_dynhds_free(&h2_headers);
return nwritten;
}
static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data, static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
const void *buf, size_t len, CURLcode *err) const void *buf, size_t len, CURLcode *err)
{ {
@ -1860,17 +1981,11 @@ static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
* request. * request.
*/ */
struct cf_h2_ctx *ctx = cf->ctx; struct cf_h2_ctx *ctx = cf->ctx;
int rv;
struct stream_ctx *stream = H2_STREAM_CTX(data); struct stream_ctx *stream = H2_STREAM_CTX(data);
nghttp2_nv *nva = NULL;
size_t nheader;
nghttp2_data_provider data_prd;
int32_t stream_id;
nghttp2_priority_spec pri_spec;
CURLcode result;
struct h2h3req *hreq;
struct cf_call_data save; struct cf_call_data save;
int rv;
ssize_t nwritten; ssize_t nwritten;
CURLcode result;
CF_DATA_SAVE(save, cf, data); CF_DATA_SAVE(save, cf, data);
@ -1949,123 +2064,37 @@ static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
/* handled writing BODY for open stream. */ /* handled writing BODY for open stream. */
goto out; goto out;
} }
*err = http2_data_setup(cf, data, &stream);
if(*err) {
nwritten = -1;
goto out;
}
if(!stream->req_hds_len) {
/* first invocation carries the HTTP/1.1 formatted request headers.
* we remember that in case we EAGAIN this call, because the next
* invocation may have added request body data into the buffer. */
stream->req_hds_len = len;
DEBUGF(LOG_CF(data, cf, "cf_send, first submit (len=%zu, hds_len=%zu)",
len, stream->req_hds_len));
}
/* Stream has not been opened yet. `buf` is expected to contain
* `stream->req_hds_len` bytes of request headers. */
DEBUGF(LOG_CF(data, cf, "cf_send, submit %s (len=%zu, hds_len=%zu)",
data->state.url, len, stream->req_hds_len));
DEBUGASSERT(stream->req_hds_len <= len);
result = Curl_pseudo_headers(data, buf, stream->req_hds_len,
NULL, &hreq);
if(result) {
*err = result;
nwritten = -1;
goto out;
}
nheader = hreq->entries;
nva = malloc(sizeof(nghttp2_nv) * nheader);
if(!nva) {
Curl_pseudo_free(hreq);
*err = CURLE_OUT_OF_MEMORY;
nwritten = -1;
goto out;
}
else { else {
unsigned int i; nwritten = h2_submit(&stream, cf, data, buf, len, err);
for(i = 0; i < nheader; i++) { if(nwritten < 0) {
nva[i].name = (unsigned char *)hreq->header[i].name; goto out;
nva[i].namelen = hreq->header[i].namelen;
nva[i].value = (unsigned char *)hreq->header[i].value;
nva[i].valuelen = hreq->header[i].valuelen;
nva[i].flags = NGHTTP2_NV_FLAG_NONE;
} }
Curl_pseudo_free(hreq);
}
h2_pri_spec(data, &pri_spec); result = h2_progress_ingress(cf, data);
if(result) {
*err = result;
nwritten = -1;
goto out;
}
DEBUGF(LOG_CF(data, cf, "send request allowed %d (easy handle %p)", result = h2_progress_egress(cf, data);
nghttp2_session_check_request_allowed(ctx->h2), (void *)data)); if(result) {
*err = result;
nwritten = -1;
goto out;
}
switch(data->state.httpreq) { if(should_close_session(ctx)) {
case HTTPREQ_POST: DEBUGF(LOG_CF(data, cf, "send: nothing to do in this session"));
case HTTPREQ_POST_FORM: *err = CURLE_HTTP2;
case HTTPREQ_POST_MIME: nwritten = -1;
case HTTPREQ_PUT: goto out;
if(data->state.infilesize != -1) }
stream->upload_left = data->state.infilesize;
else
/* data sending without specifying the data amount up front */
stream->upload_left = -1; /* unknown */
data_prd.read_callback = req_body_read_callback;
data_prd.source.ptr = NULL;
stream_id = nghttp2_submit_request(ctx->h2, &pri_spec, nva, nheader,
&data_prd, data);
break;
default:
stream->upload_left = 0; /* no request body */
stream_id = nghttp2_submit_request(ctx->h2, &pri_spec, nva, nheader,
NULL, data);
}
Curl_safefree(nva);
if(stream_id < 0) {
DEBUGF(LOG_CF(data, cf, "send: nghttp2_submit_request error (%s)%u",
nghttp2_strerror(stream_id), stream_id));
*err = CURLE_SEND_ERROR;
nwritten = -1;
goto out;
}
DEBUGF(LOG_CF(data, cf, "[h2sid=%d] cf_send(len=%zu) submit %s",
stream_id, len, data->state.url));
infof(data, "Using Stream ID: %u (easy handle %p)",
stream_id, (void *)data);
stream->id = stream_id;
nwritten = stream->req_hds_len;
result = h2_progress_ingress(cf, data);
if(result) {
*err = result;
nwritten = -1;
goto out;
}
result = h2_progress_egress(cf, data);
if(result) {
*err = result;
nwritten = -1;
goto out;
}
if(should_close_session(ctx)) {
DEBUGF(LOG_CF(data, cf, "send: nothing to do in this session"));
*err = CURLE_HTTP2;
nwritten = -1;
goto out;
} }
out: out:
DEBUGF(LOG_CF(data, cf, "[h2sid=%d] cf_send -> %zd, %d", DEBUGF(LOG_CF(data, cf, "[h2sid=%d] cf_send -> %zd, %d",
stream->id, nwritten, *err)); stream? stream->id : -1, nwritten, *err));
CF_DATA_RESTORE(cf, save); CF_DATA_RESTORE(cf, save);
return nwritten; return nwritten;
} }

View File

@ -35,7 +35,7 @@
#include "cf-socket.h" #include "cf-socket.h"
#include "connect.h" #include "connect.h"
#include "progress.h" #include "progress.h"
#include "h2h3.h" #include "http1.h"
#include "curl_msh3.h" #include "curl_msh3.h"
#include "socketpair.h" #include "socketpair.h"
#include "vquic/vquic.h" #include "vquic/vquic.h"
@ -321,7 +321,7 @@ static void MSH3_CALL msh3_header_received(MSH3_REQUEST *Request,
msh3_lock_acquire(&stream->recv_lock); msh3_lock_acquire(&stream->recv_lock);
if((hd->NameLength == 7) && if((hd->NameLength == 7) &&
!strncmp(H2H3_PSEUDO_STATUS, (char *)hd->Name, 7)) { !strncmp(HTTP_PSEUDO_STATUS, (char *)hd->Name, 7)) {
char line[14]; /* status line is always 13 characters long */ char line[14]; /* status line is always 13 characters long */
size_t ncopy; size_t ncopy;
@ -548,36 +548,75 @@ static ssize_t cf_msh3_send(struct Curl_cfilter *cf, struct Curl_easy *data,
{ {
struct cf_msh3_ctx *ctx = cf->ctx; struct cf_msh3_ctx *ctx = cf->ctx;
struct stream_ctx *stream = H3_STREAM_CTX(data); struct stream_ctx *stream = H3_STREAM_CTX(data);
struct h2h3req *hreq; struct h1_req_parser h1;
size_t hdrlen = 0; struct dynhds h2_headers;
MSH3_HEADER *nva = NULL;
size_t nheader, i;
ssize_t nwritten = -1; ssize_t nwritten = -1;
struct cf_call_data save; struct cf_call_data save;
bool eos;
CF_DATA_SAVE(save, cf, data); CF_DATA_SAVE(save, cf, data);
Curl_h1_req_parse_init(&h1, (4*1024));
Curl_dynhds_init(&h2_headers, 0, DYN_HTTP_REQUEST);
/* Sizes must match for cast below to work" */ /* Sizes must match for cast below to work" */
DEBUGASSERT(stream); DEBUGASSERT(stream);
DEBUGASSERT(sizeof(MSH3_HEADER) == sizeof(struct h2h3pseudo));
DEBUGF(LOG_CF(data, cf, "req: send %zu bytes", len)); DEBUGF(LOG_CF(data, cf, "req: send %zu bytes", len));
if(!stream->req) { if(!stream->req) {
/* The first send on the request contains the headers and possibly some /* The first send on the request contains the headers and possibly some
data. Parse out the headers and create the request, then if there is data. Parse out the headers and create the request, then if there is
any data left over go ahead and send it too. */ any data left over go ahead and send it too. */
nwritten = Curl_h1_req_parse_read(&h1, buf, len, NULL, 0, err);
if(nwritten < 0)
goto out;
DEBUGASSERT(h1.done);
DEBUGASSERT(h1.req);
*err = Curl_pseudo_headers(data, buf, len, &hdrlen, &hreq); *err = Curl_http_req_to_h2(&h2_headers, h1.req, data);
if(*err) { if(*err) {
failf(data, "Curl_pseudo_headers failed"); nwritten = -1;
*err = CURLE_SEND_ERROR;
goto out; goto out;
} }
DEBUGF(LOG_CF(data, cf, "req: send %zu headers", hreq->entries)); nheader = Curl_dynhds_count(&h2_headers);
nva = malloc(sizeof(MSH3_HEADER) * nheader);
if(!nva) {
*err = CURLE_OUT_OF_MEMORY;
nwritten = -1;
goto out;
}
for(i = 0; i < nheader; ++i) {
struct dynhds_entry *e = Curl_dynhds_getn(&h2_headers, i);
nva[i].Name = e->name;
nva[i].NameLength = e->namelen;
nva[i].Value = e->value;
nva[i].ValueLength = e->valuelen;
}
switch(data->state.httpreq) {
case HTTPREQ_POST:
case HTTPREQ_POST_FORM:
case HTTPREQ_POST_MIME:
case HTTPREQ_PUT:
/* known request body size or -1 */
eos = FALSE;
break;
default:
/* there is not request body */
eos = TRUE;
stream->upload_done = TRUE;
break;
}
DEBUGF(LOG_CF(data, cf, "req: send %zu headers", nheader));
stream->req = MsH3RequestOpen(ctx->qconn, &msh3_request_if, data, stream->req = MsH3RequestOpen(ctx->qconn, &msh3_request_if, data,
(MSH3_HEADER*)hreq->header, hreq->entries, nva, nheader,
hdrlen == len ? MSH3_REQUEST_FLAG_FIN : eos ? MSH3_REQUEST_FLAG_FIN :
MSH3_REQUEST_FLAG_NONE); MSH3_REQUEST_FLAG_NONE);
Curl_pseudo_free(hreq);
if(!stream->req) { if(!stream->req) {
failf(data, "request open failed"); failf(data, "request open failed");
*err = CURLE_SEND_ERROR; *err = CURLE_SEND_ERROR;
@ -608,6 +647,9 @@ static ssize_t cf_msh3_send(struct Curl_cfilter *cf, struct Curl_easy *data,
out: out:
set_quic_expire(cf, data); set_quic_expire(cf, data);
free(nva);
Curl_h1_req_parse_free(&h1);
Curl_dynhds_free(&h2_headers);
CF_DATA_RESTORE(cf, save); CF_DATA_RESTORE(cf, save);
return nwritten; return nwritten;
} }

View File

@ -56,10 +56,10 @@
#include "progress.h" #include "progress.h"
#include "strerror.h" #include "strerror.h"
#include "dynbuf.h" #include "dynbuf.h"
#include "http1.h"
#include "select.h" #include "select.h"
#include "vquic.h" #include "vquic.h"
#include "vquic_int.h" #include "vquic_int.h"
#include "h2h3.h"
#include "vtls/keylog.h" #include "vtls/keylog.h"
#include "vtls/vtls.h" #include "vtls/vtls.h"
#include "curl_ngtcp2.h" #include "curl_ngtcp2.h"
@ -989,8 +989,8 @@ static int cf_ngtcp2_get_select_socks(struct Curl_cfilter *cf,
stream && nghttp3_conn_is_stream_writable(ctx->h3conn, stream->id)) stream && nghttp3_conn_is_stream_writable(ctx->h3conn, stream->id))
rv |= GETSOCK_WRITESOCK(0); rv |= GETSOCK_WRITESOCK(0);
DEBUGF(LOG_CF(data, cf, "get_select_socks -> %x (sock=%d)", /* DEBUGF(LOG_CF(data, cf, "get_select_socks -> %x (sock=%d)",
rv, (int)socks[0])); rv, (int)socks[0])); */
CF_DATA_RESTORE(cf, save); CF_DATA_RESTORE(cf, save);
return rv; return rv;
} }
@ -1540,49 +1540,65 @@ cb_h3_read_req_body(nghttp3_conn *conn, int64_t stream_id,
field list. */ field list. */
#define AUTHORITY_DST_IDX 3 #define AUTHORITY_DST_IDX 3
static CURLcode h3_stream_open(struct Curl_cfilter *cf, static ssize_t h3_stream_open(struct Curl_cfilter *cf,
struct Curl_easy *data, struct Curl_easy *data,
const void *mem, const void *buf, size_t len,
size_t len) CURLcode *err)
{ {
struct cf_ngtcp2_ctx *ctx = cf->ctx; struct cf_ngtcp2_ctx *ctx = cf->ctx;
struct stream_ctx *stream = NULL; struct stream_ctx *stream = NULL;
struct h1_req_parser h1;
struct dynhds h2_headers;
size_t nheader; size_t nheader;
CURLcode result = CURLE_OK;
nghttp3_nv *nva = NULL; nghttp3_nv *nva = NULL;
int rc = 0; int rc = 0;
unsigned int i; unsigned int i;
struct h2h3req *hreq = NULL; ssize_t nwritten = -1;
nghttp3_data_reader reader; nghttp3_data_reader reader;
nghttp3_data_reader *preader = NULL; nghttp3_data_reader *preader = NULL;
result = h3_data_setup(cf, data); Curl_h1_req_parse_init(&h1, (4*1024));
if(result) Curl_dynhds_init(&h2_headers, 0, DYN_HTTP_REQUEST);
*err = h3_data_setup(cf, data);
if(*err)
goto out; goto out;
stream = H3_STREAM_CTX(data); stream = H3_STREAM_CTX(data);
DEBUGASSERT(stream);
rc = ngtcp2_conn_open_bidi_stream(ctx->qconn, &stream->id, NULL); rc = ngtcp2_conn_open_bidi_stream(ctx->qconn, &stream->id, NULL);
if(rc) { if(rc) {
failf(data, "can get bidi streams"); failf(data, "can get bidi streams");
*err = CURLE_SEND_ERROR;
goto out; goto out;
} }
result = Curl_pseudo_headers(data, mem, len, NULL, &hreq); nwritten = Curl_h1_req_parse_read(&h1, buf, len, NULL, 0, err);
if(result) if(nwritten < 0)
goto out; goto out;
nheader = hreq->entries; DEBUGASSERT(h1.done);
DEBUGASSERT(h1.req);
*err = Curl_http_req_to_h2(&h2_headers, h1.req, data);
if(*err) {
nwritten = -1;
goto out;
}
nheader = Curl_dynhds_count(&h2_headers);
nva = malloc(sizeof(nghttp3_nv) * nheader); nva = malloc(sizeof(nghttp3_nv) * nheader);
if(!nva) { if(!nva) {
result = CURLE_OUT_OF_MEMORY; *err = CURLE_OUT_OF_MEMORY;
nwritten = -1;
goto out; goto out;
} }
for(i = 0; i < nheader; i++) { for(i = 0; i < nheader; ++i) {
nva[i].name = (unsigned char *)hreq->header[i].name; struct dynhds_entry *e = Curl_dynhds_getn(&h2_headers, i);
nva[i].namelen = hreq->header[i].namelen; nva[i].name = (unsigned char *)e->name;
nva[i].value = (unsigned char *)hreq->header[i].value; nva[i].namelen = e->namelen;
nva[i].valuelen = hreq->header[i].valuelen; nva[i].value = (unsigned char *)e->value;
nva[i].valuelen = e->valuelen;
nva[i].flags = NGHTTP3_NV_FLAG_NONE; nva[i].flags = NGHTTP3_NV_FLAG_NONE;
} }
@ -1604,8 +1620,21 @@ static CURLcode h3_stream_open(struct Curl_cfilter *cf,
rc = nghttp3_conn_submit_request(ctx->h3conn, stream->id, rc = nghttp3_conn_submit_request(ctx->h3conn, stream->id,
nva, nheader, preader, data); nva, nheader, preader, data);
if(rc) if(rc) {
switch(rc) {
case NGHTTP3_ERR_CONN_CLOSING:
DEBUGF(LOG_CF(data, cf, "h3sid[%"PRId64"] failed to send, "
"connection is closing", stream->id));
break;
default:
DEBUGF(LOG_CF(data, cf, "h3sid[%"PRId64"] failed to send -> %d (%s)",
stream->id, rc, ngtcp2_strerror(rc)));
break;
}
*err = CURLE_SEND_ERROR;
nwritten = -1;
goto out; goto out;
}
infof(data, "Using HTTP/3 Stream ID: %" PRId64 " (easy handle %p)", infof(data, "Using HTTP/3 Stream ID: %" PRId64 " (easy handle %p)",
stream->id, (void *)data); stream->id, (void *)data);
@ -1613,23 +1642,10 @@ static CURLcode h3_stream_open(struct Curl_cfilter *cf,
stream->id, data->state.url)); stream->id, data->state.url));
out: out:
if(stream && !result && rc) {
switch(rc) {
case NGHTTP3_ERR_CONN_CLOSING:
DEBUGF(LOG_CF(data, cf, "h3sid[%"PRId64"] failed to send, "
"connection is closing", stream->id));
result = CURLE_RECV_ERROR;
break;
default:
DEBUGF(LOG_CF(data, cf, "h3sid[%"PRId64"] failed to send -> %d (%s)",
stream->id, rc, ngtcp2_strerror(rc)));
result = CURLE_SEND_ERROR;
break;
}
}
free(nva); free(nva);
Curl_pseudo_free(hreq); Curl_h1_req_parse_free(&h1);
return result; Curl_dynhds_free(&h2_headers);
return nwritten;
} }
static ssize_t cf_ngtcp2_send(struct Curl_cfilter *cf, struct Curl_easy *data, static ssize_t cf_ngtcp2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
@ -1653,16 +1669,11 @@ static ssize_t cf_ngtcp2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
} }
if(!stream || stream->id < 0) { if(!stream || stream->id < 0) {
CURLcode result = h3_stream_open(cf, data, buf, len); sent = h3_stream_open(cf, data, buf, len, err);
if(result) { if(sent < 0) {
DEBUGF(LOG_CF(data, cf, "failed to open stream -> %d", result)); DEBUGF(LOG_CF(data, cf, "failed to open stream -> %d", *err));
sent = -1;
goto out; goto out;
} }
/* Assume that mem of length len only includes HTTP/1.1 style
header fields. In other words, it does not contain request
body. */
sent = len;
} }
else { else {
sent = Curl_bufq_write(&stream->sendbuf, buf, len, err); sent = Curl_bufq_write(&stream->sendbuf, buf, len, err);

View File

@ -40,11 +40,11 @@
#include "connect.h" #include "connect.h"
#include "progress.h" #include "progress.h"
#include "strerror.h" #include "strerror.h"
#include "http1.h"
#include "vquic.h" #include "vquic.h"
#include "vquic_int.h" #include "vquic_int.h"
#include "curl_quiche.h" #include "curl_quiche.h"
#include "transfer.h" #include "transfer.h"
#include "h2h3.h"
#include "vtls/openssl.h" #include "vtls/openssl.h"
#include "vtls/keylog.h" #include "vtls/keylog.h"
@ -187,7 +187,6 @@ static void cf_quiche_ctx_clear(struct cf_quiche_ctx *ctx)
struct stream_ctx { struct stream_ctx {
int64_t id; /* HTTP/3 protocol stream identifier */ int64_t id; /* HTTP/3 protocol stream identifier */
struct bufq recvbuf; /* h3 response */ struct bufq recvbuf; /* h3 response */
size_t req_hds_len; /* how many bytes in the first send are headers */
uint64_t error3; /* HTTP/3 stream error code */ uint64_t error3; /* HTTP/3 stream error code */
bool closed; /* TRUE on stream close */ bool closed; /* TRUE on stream close */
bool reset; /* TRUE on stream reset */ bool reset; /* TRUE on stream reset */
@ -373,7 +372,7 @@ static int cb_each_header(uint8_t *name, size_t name_len,
CURLcode result; CURLcode result;
(void)stream; (void)stream;
if((name_len == 7) && !strncmp(H2H3_PSEUDO_STATUS, (char *)name, 7)) { if((name_len == 7) && !strncmp(HTTP_PSEUDO_STATUS, (char *)name, 7)) {
result = write_resp_raw(x->cf, x->data, "HTTP/3 ", sizeof("HTTP/3 ") - 1); result = write_resp_raw(x->cf, x->data, "HTTP/3 ", sizeof("HTTP/3 ") - 1);
if(!result) if(!result)
result = write_resp_raw(x->cf, x->data, value, value_len); result = write_resp_raw(x->cf, x->data, value, value_len);
@ -876,52 +875,58 @@ out:
static ssize_t h3_open_stream(struct Curl_cfilter *cf, static ssize_t h3_open_stream(struct Curl_cfilter *cf,
struct Curl_easy *data, struct Curl_easy *data,
const void *mem, size_t len, const void *buf, size_t len,
CURLcode *err) CURLcode *err)
{ {
struct cf_quiche_ctx *ctx = cf->ctx; struct cf_quiche_ctx *ctx = cf->ctx;
struct stream_ctx *stream = H3_STREAM_CTX(data); struct stream_ctx *stream = H3_STREAM_CTX(data);
size_t nheader; size_t nheader, i;
int64_t stream3_id; int64_t stream3_id;
struct h1_req_parser h1;
struct dynhds h2_headers;
quiche_h3_header *nva = NULL; quiche_h3_header *nva = NULL;
struct h2h3req *hreq = NULL; ssize_t nwritten;
if(!stream) { if(!stream) {
*err = h3_data_setup(cf, data); *err = h3_data_setup(cf, data);
if(*err) if(*err) {
goto fail; nwritten = -1;
goto out;
}
stream = H3_STREAM_CTX(data); stream = H3_STREAM_CTX(data);
DEBUGASSERT(stream); DEBUGASSERT(stream);
} }
if(!stream->req_hds_len) { Curl_h1_req_parse_init(&h1, (4*1024));
stream->req_hds_len = len; /* fist call */ Curl_dynhds_init(&h2_headers, 0, DYN_HTTP_REQUEST);
}
else { DEBUGASSERT(stream);
/* subsequent attempt, we should get at least as many bytes as nwritten = Curl_h1_req_parse_read(&h1, buf, len, NULL, 0, err);
* in the first call as headers are either completely sent or not if(nwritten < 0)
* at all. */ goto out;
DEBUGASSERT(stream->req_hds_len <= len); DEBUGASSERT(h1.done);
DEBUGASSERT(h1.req);
*err = Curl_http_req_to_h2(&h2_headers, h1.req, data);
if(*err) {
nwritten = -1;
goto out;
} }
*err = Curl_pseudo_headers(data, mem, stream->req_hds_len, NULL, &hreq); nheader = Curl_dynhds_count(&h2_headers);
if(*err)
goto fail;
nheader = hreq->entries;
nva = malloc(sizeof(quiche_h3_header) * nheader); nva = malloc(sizeof(quiche_h3_header) * nheader);
if(!nva) { if(!nva) {
*err = CURLE_OUT_OF_MEMORY; *err = CURLE_OUT_OF_MEMORY;
goto fail; nwritten = -1;
goto out;
} }
else {
unsigned int i; for(i = 0; i < nheader; ++i) {
for(i = 0; i < nheader; i++) { struct dynhds_entry *e = Curl_dynhds_getn(&h2_headers, i);
nva[i].name = (unsigned char *)hreq->header[i].name; nva[i].name = (unsigned char *)e->name;
nva[i].name_len = hreq->header[i].namelen; nva[i].name_len = e->namelen;
nva[i].value = (unsigned char *)hreq->header[i].value; nva[i].value = (unsigned char *)e->value;
nva[i].value_len = hreq->header[i].valuelen; nva[i].value_len = e->valuelen;
}
} }
switch(data->state.httpreq) { switch(data->state.httpreq) {
@ -950,17 +955,20 @@ static ssize_t h3_open_stream(struct Curl_cfilter *cf,
data->state.url)); data->state.url));
stream_send_suspend(cf, data); stream_send_suspend(cf, data);
*err = CURLE_AGAIN; *err = CURLE_AGAIN;
goto fail; nwritten = -1;
goto out;
} }
else { else {
DEBUGF(LOG_CF(data, cf, "send_request(%s) -> %" PRId64, DEBUGF(LOG_CF(data, cf, "send_request(%s) -> %" PRId64,
data->state.url, stream3_id)); data->state.url, stream3_id));
} }
*err = CURLE_SEND_ERROR; *err = CURLE_SEND_ERROR;
goto fail; nwritten = -1;
goto out;
} }
DEBUGASSERT(stream->id == -1); DEBUGASSERT(stream->id == -1);
*err = CURLE_OK;
stream->id = stream3_id; stream->id = stream3_id;
stream->closed = FALSE; stream->closed = FALSE;
stream->reset = FALSE; stream->reset = FALSE;
@ -970,15 +978,11 @@ static ssize_t h3_open_stream(struct Curl_cfilter *cf,
DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] opened for %s", DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] opened for %s",
stream3_id, data->state.url)); stream3_id, data->state.url));
Curl_pseudo_free(hreq); out:
free(nva); free(nva);
*err = CURLE_OK; Curl_h1_req_parse_free(&h1);
return stream->req_hds_len; Curl_dynhds_free(&h2_headers);
return nwritten;
fail:
free(nva);
Curl_pseudo_free(hreq);
return -1;
} }
static ssize_t cf_quiche_send(struct Curl_cfilter *cf, struct Curl_easy *data, static ssize_t cf_quiche_send(struct Curl_cfilter *cf, struct Curl_easy *data,

View File

@ -1814,13 +1814,7 @@ static CURLcode cf_ssl_proxy_create(struct Curl_cfilter **pcf,
int httpwant = CURL_HTTP_VERSION_1_1; int httpwant = CURL_HTTP_VERSION_1_1;
#ifdef USE_HTTP2 #ifdef USE_HTTP2
if(conn->bits.tunnel_proxy && if(conn->http_proxy.proxytype == CURLPROXY_HTTPS2) {
((conn->http_proxy.proxytype == CURLPROXY_HTTPS2)
#ifdef DEBUGBUILD
|| getenv("CURL_PROXY_TUNNEL_H2")
#endif
)
) {
use_alpn = TRUE; use_alpn = TRUE;
httpwant = CURL_HTTP_VERSION_2; httpwant = CURL_HTTP_VERSION_2;
} }
@ -2042,7 +2036,7 @@ CURLcode Curl_alpn_set_negotiated(struct Curl_cfilter *cf,
int can_multi = 0; int can_multi = 0;
unsigned char *palpn = unsigned char *palpn =
#ifndef CURL_DISABLE_PROXY #ifndef CURL_DISABLE_PROXY
Curl_ssl_cf_is_proxy(cf)? (cf->conn->bits.tunnel_proxy && Curl_ssl_cf_is_proxy(cf))?
&cf->conn->proxy_alpn : &cf->conn->alpn &cf->conn->proxy_alpn : &cf->conn->alpn
#else #else
&cf->conn->alpn &cf->conn->alpn

View File

@ -250,7 +250,7 @@ test2400 test2401 test2402 test2403 \
\ \
test2500 test2501 test2502 test2503 \ test2500 test2501 test2502 test2503 \
\ \
test2600 test2601 test2602 \ test2600 test2601 test2602 test2603 \
\ \
test3000 test3001 test3002 test3003 test3004 test3005 test3006 test3007 \ test3000 test3001 test3002 test3003 test3004 test3005 test3006 test3007 \
test3008 test3009 test3010 test3011 test3012 test3013 test3014 test3015 \ test3008 test3009 test3010 test3011 test3012 test3013 test3014 test3015 \

22
tests/data/test2603 Normal file
View File

@ -0,0 +1,22 @@
<testcase>
<info>
<keywords>
unittest
http1
</keywords>
</info>
#
# Client-side
<client>
<server>
none
</server>
<features>
unittest
</features>
<name>
http1 parser unit tests
</name>
</client>
</testcase>

View File

@ -281,7 +281,7 @@ class ScoreCard:
if max_parallel > 1 else [] if max_parallel > 1 else []
self.info(f'{max_parallel}...') self.info(f'{max_parallel}...')
for i in range(sample_size): for i in range(sample_size):
curl = CurlClient(env=self.env) curl = CurlClient(env=self.env, silent=self._silent_curl)
r = curl.http_download(urls=[url], alpn_proto=proto, no_save=True, r = curl.http_download(urls=[url], alpn_proto=proto, no_save=True,
with_headers=False, with_headers=False,
extra_args=extra_args) extra_args=extra_args)
@ -459,13 +459,11 @@ class ScoreCard:
for key, val in sval.items(): for key, val in sval.items():
if 'errors' in val: if 'errors' in val:
errors.extend(val['errors']) errors.extend(val['errors'])
print(f' {dkey:<8} {skey:>8} ' line = f' {dkey:<8} {skey:>8} '
f'{self.fmt_reqs(sval["serial"]["speed"]):>12} ' for k in sval.keys():
f'{self.fmt_reqs(sval["par-6"]["speed"]):>12} ' line += f'{self.fmt_reqs(sval[k]["speed"]):>12} '
f'{self.fmt_reqs(sval["par-25"]["speed"]):>12} ' line += f' {"/".join(errors):<20}'
f'{self.fmt_reqs(sval["par-50"]["speed"]):>12} ' print(line)
f'{self.fmt_reqs(sval["par-100"]["speed"]):>12} '
f' {"/".join(errors):<20}')
def parse_size(s): def parse_size(s):

View File

@ -50,14 +50,6 @@ class TestProxy:
httpd.clear_extra_configs() httpd.clear_extra_configs()
httpd.reload() httpd.reload()
def set_tunnel_proto(self, proto):
if proto == 'h2':
os.environ['CURL_PROXY_TUNNEL_H2'] = '1'
return 'HTTP/2'
else:
os.environ.pop('CURL_PROXY_TUNNEL_H2', None)
return 'HTTP/1.1'
def get_tunnel_proto_used(self, r: ExecResult): def get_tunnel_proto_used(self, r: ExecResult):
for l in r.trace_lines: for l in r.trace_lines:
m = re.match(r'.* CONNECT tunnel: (\S+) negotiated$', l) m = re.match(r'.* CONNECT tunnel: (\S+) negotiated$', l)
@ -71,37 +63,60 @@ class TestProxy:
curl = CurlClient(env=env) curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json' url = f'http://localhost:{env.http_port}/data.json'
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
extra_args=[ extra_args=curl.get_proxy_args(proxys=False))
'--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/',
'--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1',
])
r.check_response(count=1, http_status=200) r.check_response(count=1, http_status=200)
# download via https: proxy (no tunnel) # download via https: proxy (no tunnel)
@pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'), @pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'),
reason='curl lacks HTTPS-proxy support') reason='curl lacks HTTPS-proxy support')
@pytest.mark.parametrize("proto", ['http/1.1', 'h2'])
@pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available") @pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available")
def test_10_02_proxy_https(self, env: Env, httpd, nghttpx_fwd, repeat): def test_10_02_proxys_down(self, env: Env, httpd, nghttpx_fwd, proto, repeat):
if proto == 'h2' and not env.curl_uses_lib('nghttp2'):
pytest.skip('only supported with nghttp2')
curl = CurlClient(env=env) curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json' url = f'http://localhost:{env.http_port}/data.json'
xargs = curl.get_proxy_args(proto=proto)
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
extra_args=[ extra_args=xargs)
'--proxy', f'https://{env.proxy_domain}:{env.proxys_port}/', r.check_response(count=1, http_status=200,
'--resolve', f'{env.proxy_domain}:{env.proxys_port}:127.0.0.1', protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1')
'--proxy-cacert', env.ca.cert_file,
]) # upload via https: with proto (no tunnel)
r.check_response(count=1, http_status=200) @pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL")
@pytest.mark.parametrize("proto", ['http/1.1', 'h2'])
@pytest.mark.parametrize("fname, fcount", [
['data.json', 5],
['data-100k', 5],
['data-1m', 2]
])
@pytest.mark.skipif(condition=not Env.have_nghttpx(),
reason="no nghttpx available")
def test_10_02_proxys_up(self, env: Env, httpd, nghttpx, proto,
fname, fcount, repeat):
if proto == 'h2' and not env.curl_uses_lib('nghttp2'):
pytest.skip('only supported with nghttp2')
count = fcount
srcfile = os.path.join(httpd.docs_dir, fname)
curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/curltest/echo?id=[0-{count-1}]'
xargs = curl.get_proxy_args(proto=proto)
r = curl.http_upload(urls=[url], data=f'@{srcfile}', alpn_proto=proto,
extra_args=xargs)
r.check_response(count=count, http_status=200,
protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1')
indata = open(srcfile).readlines()
for i in range(count):
respdata = open(curl.response_file(i)).readlines()
assert respdata == indata
# download http: via http: proxytunnel # download http: via http: proxytunnel
def test_10_03_proxytunnel_http(self, env: Env, httpd, repeat): def test_10_03_proxytunnel_http(self, env: Env, httpd, repeat):
curl = CurlClient(env=env) curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json' url = f'http://localhost:{env.http_port}/data.json'
xargs = curl.get_proxy_args(proxys=False, tunnel=True)
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
extra_args=[ extra_args=xargs)
'--proxytunnel',
'--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/',
'--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1',
])
r.check_response(count=1, http_status=200) r.check_response(count=1, http_status=200)
# download http: via https: proxytunnel # download http: via https: proxytunnel
@ -111,13 +126,9 @@ class TestProxy:
def test_10_04_proxy_https(self, env: Env, httpd, nghttpx_fwd, repeat): def test_10_04_proxy_https(self, env: Env, httpd, nghttpx_fwd, repeat):
curl = CurlClient(env=env) curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json' url = f'http://localhost:{env.http_port}/data.json'
xargs = curl.get_proxy_args(tunnel=True)
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
extra_args=[ extra_args=xargs)
'--proxytunnel',
'--proxy', f'https://{env.proxy_domain}:{env.pts_port()}/',
'--resolve', f'{env.proxy_domain}:{env.pts_port()}:127.0.0.1',
'--proxy-cacert', env.ca.cert_file,
])
r.check_response(count=1, http_status=200) r.check_response(count=1, http_status=200)
# download https: with proto via http: proxytunnel # download https: with proto via http: proxytunnel
@ -126,13 +137,10 @@ class TestProxy:
def test_10_05_proxytunnel_http(self, env: Env, httpd, proto, repeat): def test_10_05_proxytunnel_http(self, env: Env, httpd, proto, repeat):
curl = CurlClient(env=env) curl = CurlClient(env=env)
url = f'https://localhost:{env.https_port}/data.json' url = f'https://localhost:{env.https_port}/data.json'
xargs = curl.get_proxy_args(proxys=False, tunnel=True)
r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True, r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True,
with_headers=True, with_headers=True,
extra_args=[ extra_args=xargs)
'--proxytunnel',
'--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/',
'--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1',
])
r.check_response(count=1, http_status=200, r.check_response(count=1, http_status=200,
protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1') protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1')
@ -145,20 +153,15 @@ class TestProxy:
def test_10_06_proxytunnel_https(self, env: Env, httpd, nghttpx_fwd, proto, tunnel, repeat): def test_10_06_proxytunnel_https(self, env: Env, httpd, nghttpx_fwd, proto, tunnel, repeat):
if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'): if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'):
pytest.skip('only supported with nghttp2') pytest.skip('only supported with nghttp2')
exp_tunnel_proto = self.set_tunnel_proto(tunnel)
curl = CurlClient(env=env) curl = CurlClient(env=env)
url = f'https://localhost:{env.https_port}/data.json?[0-0]' url = f'https://localhost:{env.https_port}/data.json?[0-0]'
xargs = curl.get_proxy_args(tunnel=True, proto=tunnel)
r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True, r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True,
with_headers=True, with_headers=True, extra_args=xargs)
extra_args=[
'--proxytunnel',
'--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/',
'--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1',
'--proxy-cacert', env.ca.cert_file,
])
r.check_response(count=1, http_status=200, r.check_response(count=1, http_status=200,
protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1') protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1')
assert self.get_tunnel_proto_used(r) == exp_tunnel_proto assert self.get_tunnel_proto_used(r) == 'HTTP/2' \
if tunnel == 'h2' else 'HTTP/1.1'
srcfile = os.path.join(httpd.docs_dir, 'data.json') srcfile = os.path.join(httpd.docs_dir, 'data.json')
dfile = curl.download_file(0) dfile = curl.download_file(0)
assert filecmp.cmp(srcfile, dfile, shallow=False) assert filecmp.cmp(srcfile, dfile, shallow=False)
@ -178,20 +181,15 @@ class TestProxy:
if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'): if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'):
pytest.skip('only supported with nghttp2') pytest.skip('only supported with nghttp2')
count = fcount count = fcount
exp_tunnel_proto = self.set_tunnel_proto(tunnel)
curl = CurlClient(env=env) curl = CurlClient(env=env)
url = f'https://localhost:{env.https_port}/{fname}?[0-{count-1}]' url = f'https://localhost:{env.https_port}/{fname}?[0-{count-1}]'
xargs = curl.get_proxy_args(tunnel=True, proto=tunnel)
r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True, r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True,
with_headers=True, with_headers=True, extra_args=xargs)
extra_args=[
'--proxytunnel',
'--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/',
'--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1',
'--proxy-cacert', env.ca.cert_file,
])
r.check_response(count=count, http_status=200, r.check_response(count=count, http_status=200,
protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1') protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1')
assert self.get_tunnel_proto_used(r) == exp_tunnel_proto assert self.get_tunnel_proto_used(r) == 'HTTP/2' \
if tunnel == 'h2' else 'HTTP/1.1'
srcfile = os.path.join(httpd.docs_dir, fname) srcfile = os.path.join(httpd.docs_dir, fname)
for i in range(count): for i in range(count):
dfile = curl.download_file(i) dfile = curl.download_file(i)
@ -213,20 +211,15 @@ class TestProxy:
pytest.skip('only supported with nghttp2') pytest.skip('only supported with nghttp2')
count = fcount count = fcount
srcfile = os.path.join(httpd.docs_dir, fname) srcfile = os.path.join(httpd.docs_dir, fname)
exp_tunnel_proto = self.set_tunnel_proto(tunnel)
curl = CurlClient(env=env) curl = CurlClient(env=env)
url = f'https://localhost:{env.https_port}/curltest/echo?id=[0-{count-1}]' url = f'https://localhost:{env.https_port}/curltest/echo?id=[0-{count-1}]'
xargs = curl.get_proxy_args(tunnel=True, proto=tunnel)
r = curl.http_upload(urls=[url], data=f'@{srcfile}', alpn_proto=proto, r = curl.http_upload(urls=[url], data=f'@{srcfile}', alpn_proto=proto,
extra_args=[ extra_args=xargs)
'--proxytunnel', assert self.get_tunnel_proto_used(r) == 'HTTP/2' \
'--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/', if tunnel == 'h2' else 'HTTP/1.1'
'--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1',
'--proxy-cacert', env.ca.cert_file,
])
assert self.get_tunnel_proto_used(r) == exp_tunnel_proto
r.check_response(count=count, http_status=200) r.check_response(count=count, http_status=200)
indata = open(srcfile).readlines() indata = open(srcfile).readlines()
r.check_response(count=count, http_status=200)
for i in range(count): for i in range(count):
respdata = open(curl.response_file(i)).readlines() respdata = open(curl.response_file(i)).readlines()
assert respdata == indata assert respdata == indata
@ -237,20 +230,15 @@ class TestProxy:
def test_10_09_reuse_ser(self, env: Env, httpd, nghttpx_fwd, tunnel, repeat): def test_10_09_reuse_ser(self, env: Env, httpd, nghttpx_fwd, tunnel, repeat):
if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'): if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'):
pytest.skip('only supported with nghttp2') pytest.skip('only supported with nghttp2')
exp_tunnel_proto = self.set_tunnel_proto(tunnel)
curl = CurlClient(env=env) curl = CurlClient(env=env)
url1 = f'https://localhost:{env.https_port}/data.json' url1 = f'https://localhost:{env.https_port}/data.json'
url2 = f'http://localhost:{env.http_port}/data.json' url2 = f'http://localhost:{env.http_port}/data.json'
xargs = curl.get_proxy_args(tunnel=True, proto=tunnel)
r = curl.http_download(urls=[url1, url2], alpn_proto='http/1.1', with_stats=True, r = curl.http_download(urls=[url1, url2], alpn_proto='http/1.1', with_stats=True,
with_headers=True, with_headers=True, extra_args=xargs)
extra_args=[
'--proxytunnel',
'--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/',
'--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1',
'--proxy-cacert', env.ca.cert_file,
])
r.check_response(count=2, http_status=200) r.check_response(count=2, http_status=200)
assert self.get_tunnel_proto_used(r) == exp_tunnel_proto assert self.get_tunnel_proto_used(r) == 'HTTP/2' \
if tunnel == 'h2' else 'HTTP/1.1'
if tunnel == 'h2': if tunnel == 'h2':
# TODO: we would like to reuse the first connection for the # TODO: we would like to reuse the first connection for the
# second URL, but this is currently not possible # second URL, but this is currently not possible

View File

@ -31,7 +31,7 @@ import re
import time import time
import pytest import pytest
from testenv import Env, CurlClient from testenv import Env, CurlClient, ExecResult
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@ -52,20 +52,12 @@ class TestProxyAuth:
httpd.set_proxy_auth(False) httpd.set_proxy_auth(False)
httpd.reload() httpd.reload()
def set_tunnel_proto(self, proto): def get_tunnel_proto_used(self, r: ExecResult):
if proto == 'h2': for line in r.trace_lines:
os.environ['CURL_PROXY_TUNNEL_H2'] = '1' m = re.match(r'.* CONNECT tunnel: (\S+) negotiated$', line)
return 'HTTP/2'
else:
os.environ.pop('CURL_PROXY_TUNNEL_H2', None)
return 'HTTP/1.1'
def get_tunnel_proto_used(self, curl: CurlClient):
assert os.path.exists(curl.trace_file)
for l in open(curl.trace_file).readlines():
m = re.match(r'.* == Info: CONNECT tunnel: (\S+) negotiated', l)
if m: if m:
return m.group(1) return m.group(1)
assert False, f'tunnel protocol not found in:\n{"".join(r.trace_lines)}'
return None return None
# download via http: proxy (no tunnel), no auth # download via http: proxy (no tunnel), no auth
@ -73,22 +65,17 @@ class TestProxyAuth:
curl = CurlClient(env=env) curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json' url = f'http://localhost:{env.http_port}/data.json'
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
extra_args=[ extra_args=curl.get_proxy_args(proxys=False))
'--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/',
'--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1',
])
r.check_response(count=1, http_status=407) r.check_response(count=1, http_status=407)
# download via http: proxy (no tunnel), auth # download via http: proxy (no tunnel), auth
def test_13_02_proxy_auth(self, env: Env, httpd, repeat): def test_13_02_proxy_auth(self, env: Env, httpd, repeat):
curl = CurlClient(env=env) curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json' url = f'http://localhost:{env.http_port}/data.json'
xargs = curl.get_proxy_args(proxys=False)
xargs.extend(['--proxy-user', 'proxy:proxy'])
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
extra_args=[ extra_args=xargs)
'--proxy-user', 'proxy:proxy',
'--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/',
'--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1',
])
r.check_response(count=1, http_status=200) r.check_response(count=1, http_status=200)
@pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'), @pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'),
@ -97,12 +84,9 @@ class TestProxyAuth:
def test_13_03_proxys_no_auth(self, env: Env, httpd, nghttpx_fwd, repeat): def test_13_03_proxys_no_auth(self, env: Env, httpd, nghttpx_fwd, repeat):
curl = CurlClient(env=env) curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json' url = f'http://localhost:{env.http_port}/data.json'
xargs = curl.get_proxy_args(proxys=True)
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
extra_args=[ extra_args=xargs)
'--proxy', f'https://{env.proxy_domain}:{env.pts_port()}/',
'--resolve', f'{env.proxy_domain}:{env.pts_port()}:127.0.0.1',
'--proxy-cacert', env.ca.cert_file,
])
r.check_response(count=1, http_status=407) r.check_response(count=1, http_status=407)
@pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'), @pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'),
@ -111,37 +95,28 @@ class TestProxyAuth:
def test_13_04_proxys_auth(self, env: Env, httpd, nghttpx_fwd, repeat): def test_13_04_proxys_auth(self, env: Env, httpd, nghttpx_fwd, repeat):
curl = CurlClient(env=env) curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json' url = f'http://localhost:{env.http_port}/data.json'
xargs = curl.get_proxy_args(proxys=True)
xargs.extend(['--proxy-user', 'proxy:proxy'])
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
extra_args=[ extra_args=xargs)
'--proxy-user', 'proxy:proxy',
'--proxy', f'https://{env.proxy_domain}:{env.pts_port()}/',
'--resolve', f'{env.proxy_domain}:{env.pts_port()}:127.0.0.1',
'--proxy-cacert', env.ca.cert_file,
])
r.check_response(count=1, http_status=200) r.check_response(count=1, http_status=200)
def test_13_05_tunnel_http_no_auth(self, env: Env, httpd, repeat): def test_13_05_tunnel_http_no_auth(self, env: Env, httpd, repeat):
curl = CurlClient(env=env) curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json' url = f'http://localhost:{env.http_port}/data.json'
xargs = curl.get_proxy_args(proxys=False, tunnel=True)
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
extra_args=[ extra_args=xargs)
'--proxytunnel',
'--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/',
'--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1',
])
# expect "COULD_NOT_CONNECT" # expect "COULD_NOT_CONNECT"
r.check_response(exitcode=56, http_status=None) r.check_response(exitcode=56, http_status=None)
def test_13_06_tunnel_http_auth(self, env: Env, httpd, repeat): def test_13_06_tunnel_http_auth(self, env: Env, httpd, repeat):
curl = CurlClient(env=env) curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json' url = f'http://localhost:{env.http_port}/data.json'
xargs = curl.get_proxy_args(proxys=False, tunnel=True)
xargs.extend(['--proxy-user', 'proxy:proxy'])
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
extra_args=[ extra_args=xargs)
'--proxytunnel',
'--proxy-user', 'proxy:proxy',
'--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/',
'--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1',
])
r.check_response(count=1, http_status=200) r.check_response(count=1, http_status=200)
@pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available") @pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available")
@ -152,20 +127,16 @@ class TestProxyAuth:
def test_13_07_tunnels_no_auth(self, env: Env, httpd, proto, tunnel, repeat): def test_13_07_tunnels_no_auth(self, env: Env, httpd, proto, tunnel, repeat):
if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'): if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'):
pytest.skip('only supported with nghttp2') pytest.skip('only supported with nghttp2')
exp_tunnel_proto = self.set_tunnel_proto(tunnel)
curl = CurlClient(env=env) curl = CurlClient(env=env)
url = f'https://localhost:{env.https_port}/data.json' url = f'https://localhost:{env.https_port}/data.json'
xargs = curl.get_proxy_args(proxys=True, tunnel=True, proto=tunnel)
r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True, r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True,
with_headers=True, with_trace=True, with_headers=True, with_trace=True,
extra_args=[ extra_args=xargs)
'--proxytunnel',
'--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/',
'--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1',
'--proxy-cacert', env.ca.cert_file,
])
# expect "COULD_NOT_CONNECT" # expect "COULD_NOT_CONNECT"
r.check_response(exitcode=56, http_status=None) r.check_response(exitcode=56, http_status=None)
assert self.get_tunnel_proto_used(curl) == exp_tunnel_proto assert self.get_tunnel_proto_used(r) == 'HTTP/2' \
if tunnel == 'h2' else 'HTTP/1.1'
@pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available") @pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available")
@pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'), @pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'),
@ -175,19 +146,15 @@ class TestProxyAuth:
def test_13_08_tunnels_auth(self, env: Env, httpd, proto, tunnel, repeat): def test_13_08_tunnels_auth(self, env: Env, httpd, proto, tunnel, repeat):
if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'): if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'):
pytest.skip('only supported with nghttp2') pytest.skip('only supported with nghttp2')
exp_tunnel_proto = self.set_tunnel_proto(tunnel)
curl = CurlClient(env=env) curl = CurlClient(env=env)
url = f'https://localhost:{env.https_port}/data.json' url = f'https://localhost:{env.https_port}/data.json'
xargs = curl.get_proxy_args(proxys=True, tunnel=True, proto=tunnel)
xargs.extend(['--proxy-user', 'proxy:proxy'])
r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True, r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True,
with_headers=True, with_trace=True, with_headers=True, with_trace=True,
extra_args=[ extra_args=xargs)
'--proxytunnel',
'--proxy-user', 'proxy:proxy',
'--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/',
'--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1',
'--proxy-cacert', env.ca.cert_file,
])
r.check_response(count=1, http_status=200, r.check_response(count=1, http_status=200,
protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1') protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1')
assert self.get_tunnel_proto_used(curl) == exp_tunnel_proto assert self.get_tunnel_proto_used(r) == 'HTTP/2' \
if tunnel == 'h2' else 'HTTP/1.1'

View File

@ -317,6 +317,26 @@ class CurlClient:
if not os.path.exists(path): if not os.path.exists(path):
return os.makedirs(path) return os.makedirs(path)
def get_proxy_args(self, proto: str = 'http/1.1',
                   proxys: bool = True, tunnel: bool = False):
    """Build the common curl options for connecting through the test proxy.

    :param proto: ALPN protocol the proxy connection should negotiate
    :param proxys: when True use the HTTPS (TLS) proxy, else the plain one
    :param tunnel: when True add '--proxytunnel'; with TLS this also
        selects the tunneling proxy port for the given protocol
    :return: list of curl command line arguments
    """
    domain = self.env.proxy_domain
    if proxys:
        # TLS proxy: tunneling uses a protocol-specific port
        port = self.env.pts_port(proto) if tunnel else self.env.proxys_port
        args = [
            '--proxy', f'https://{domain}:{port}/',
            '--resolve', f'{domain}:{port}:127.0.0.1',
            '--proxy-cacert', self.env.ca.cert_file,
        ]
        if proto == 'h2':
            args.append('--proxy-http2')
    else:
        # plain HTTP forward proxy
        port = self.env.proxy_port
        args = [
            '--proxy', f'http://{domain}:{port}/',
            '--resolve', f'{domain}:{port}:127.0.0.1',
        ]
    if tunnel:
        args.append('--proxytunnel')
    return args
def http_get(self, url: str, extra_args: Optional[List[str]] = None): def http_get(self, url: str, extra_args: Optional[List[str]] = None):
return self._raw(url, options=extra_args, with_stats=False) return self._raw(url, options=extra_args, with_stats=False)

View File

@ -38,7 +38,7 @@ include_directories(
# or else they will fail to link. Some of the tests require the special libcurlu # or else they will fail to link. Some of the tests require the special libcurlu
# build, so filter those out until we get libcurlu. # build, so filter those out until we get libcurlu.
list(FILTER UNITPROGS EXCLUDE REGEX list(FILTER UNITPROGS EXCLUDE REGEX
"unit1394|unit1395|unit1604|unit1608|unit1621|unit1650|unit1653|unit1655|unit1660|unit2600|unit2601|unit2602") "unit1394|unit1395|unit1604|unit1608|unit1621|unit1650|unit1653|unit1655|unit1660|unit2600|unit2601|unit2602|unit2603")
if(NOT BUILD_SHARED_LIBS) if(NOT BUILD_SHARED_LIBS)
foreach(_testfile ${UNITPROGS}) foreach(_testfile ${UNITPROGS})
add_executable(${_testfile} EXCLUDE_FROM_ALL ${_testfile}.c ${UNITFILES}) add_executable(${_testfile} EXCLUDE_FROM_ALL ${_testfile}.c ${UNITFILES})

View File

@ -158,4 +158,6 @@ unit2601_SOURCES = unit2601.c $(UNITFILES)
unit2602_SOURCES = unit2602.c $(UNITFILES) unit2602_SOURCES = unit2602.c $(UNITFILES)
unit2603_SOURCES = unit2603.c $(UNITFILES)
unit3200_SOURCES = unit3200.c $(UNITFILES) unit3200_SOURCES = unit3200.c $(UNITFILES)

View File

@ -38,5 +38,5 @@ UNITPROGS = unit1300 unit1302 unit1303 unit1304 unit1305 unit1307 \
unit1620 unit1621 \ unit1620 unit1621 \
unit1650 unit1651 unit1652 unit1653 unit1654 unit1655 \ unit1650 unit1651 unit1652 unit1653 unit1654 unit1655 \
unit1660 unit1661 \ unit1660 unit1661 \
unit2600 unit2601 unit2602 \ unit2600 unit2601 unit2602 unit2603 \
unit3200 unit3200

View File

@ -112,7 +112,6 @@ UNITTEST_START
Curl_dyn_init(&dbuf, 32*1024); Curl_dyn_init(&dbuf, 32*1024);
fail_if(Curl_dynhds_h1_dprint(&hds, &dbuf), "h1 print failed"); fail_if(Curl_dynhds_h1_dprint(&hds, &dbuf), "h1 print failed");
if(Curl_dyn_ptr(&dbuf)) { if(Curl_dyn_ptr(&dbuf)) {
fprintf(stderr, "%s", Curl_dyn_ptr(&dbuf));
fail_if(strcmp(Curl_dyn_ptr(&dbuf), fail_if(strcmp(Curl_dyn_ptr(&dbuf),
"test1: 123\r\ntest1: 123\r\nBla-Bla: thingies\r\n"), "test1: 123\r\ntest1: 123\r\nBla-Bla: thingies\r\n"),
"h1 format differs"); "h1 format differs");
@ -121,5 +120,29 @@ UNITTEST_START
} }
Curl_dynhds_free(&hds); Curl_dynhds_free(&hds);
Curl_dynhds_init(&hds, 128, 4*1024);
/* continuation without previous header fails */
result = Curl_dynhds_h1_cadd_line(&hds, " indented value");
fail_unless(result, "add should have failed");
/* continuation with previous header must succeed */
fail_if(Curl_dynhds_h1_cadd_line(&hds, "ti1: val1"), "add");
fail_if(Curl_dynhds_h1_cadd_line(&hds, " val2"), "add indent");
fail_if(Curl_dynhds_h1_cadd_line(&hds, "ti2: val1"), "add");
fail_if(Curl_dynhds_h1_cadd_line(&hds, "\tval2"), "add indent");
fail_if(Curl_dynhds_h1_cadd_line(&hds, "ti3: val1"), "add");
fail_if(Curl_dynhds_h1_cadd_line(&hds, " val2"), "add indent");
Curl_dyn_init(&dbuf, 32*1024);
fail_if(Curl_dynhds_h1_dprint(&hds, &dbuf), "h1 print failed");
if(Curl_dyn_ptr(&dbuf)) {
fprintf(stderr, "indent concat: %s\n", Curl_dyn_ptr(&dbuf));
fail_if(strcmp(Curl_dyn_ptr(&dbuf),
"ti1: val1 val2\r\nti2: val1 val2\r\nti3: val1 val2\r\n"),
"wrong format");
}
Curl_dyn_free(&dbuf);
Curl_dynhds_free(&hds);
UNITTEST_STOP UNITTEST_STOP

190
tests/unit/unit2603.c Normal file
View File

@ -0,0 +1,190 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* SPDX-License-Identifier: curl
*
***************************************************************************/
#include "curlcheck.h"
#include "urldata.h"
#include "http.h"
#include "http1.h"
#include "curl_log.h"
/* No per-test setup is needed for the request parser tests. */
static CURLcode unit_setup(void)
{
  return CURLE_OK;
}
/* No per-test teardown is needed either. */
static void unit_stop(void)
{
}
/* One request-parser test case: the raw input chunks to feed to the
 * parser plus the expected outcome of a successful parse. */
struct tcase {
  const char **input;          /* chunks fed in order, NULL terminates */
  const char *default_scheme;  /* scheme assumed when the request has none */
  const char *method;          /* expected request method */
  const char *scheme;          /* expected scheme, or NULL */
  const char *authority;       /* expected authority, or NULL */
  const char *path;            /* expected path, or NULL */
  size_t header_count;         /* expected number of parsed headers */
  size_t input_remain;         /* input bytes the parser must leave over */
};
/* Compare an actual request component `s` against the expected value
 * `exp_s` (either may be NULL). On any mismatch, print a diagnostic
 * naming the component and fail the test. */
static void check_eq(const char *s, const char *exp_s, const char *name)
{
  if(s && exp_s) {
    if(!strcmp(s, exp_s))
      return;  /* both present and equal: fine */
    fprintf(stderr, "expected %s: '%s' but got '%s'\n", name, exp_s, s);
  }
  else if(exp_s)
    fprintf(stderr, "expected %s: '%s' but got NULL\n", name, exp_s);
  else if(s)
    fprintf(stderr, "expected %s: NULL but got '%s'\n", name, s);
  else
    return;  /* both NULL: fine */
  fail("unexpected req component");
}
/* Run one test case that is expected to parse successfully: feed each
 * input chunk of `t` into a fresh h1 request parser, then verify the
 * parsed method/scheme/authority/path, the header count and how many
 * trailing input bytes were left unconsumed. */
static void parse_success(struct tcase *t)
{
  struct h1_req_parser p;
  const char *buf;
  size_t buflen, i, in_len, in_consumed;
  CURLcode err;
  ssize_t nread;
  Curl_h1_req_parse_init(&p, 1024);
  in_len = in_consumed = 0;
  for(i = 0; t->input[i]; ++i) {
    buf = t->input[i];
    buflen = strlen(buf);
    in_len += buflen;
    nread = Curl_h1_req_parse_read(&p, buf, buflen, t->default_scheme,
                                   0, &err);
    if(nread < 0) {
      fprintf(stderr, "got err %d parsing: '%s'\n", err, buf);
      fail("error consuming");
    }
    in_consumed += (size_t)nread;
    /* a partial read is only acceptable once the parser saw the end
     * of the request head */
    if((size_t)nread != buflen) {
      if(!p.done) {
        fprintf(stderr, "only %zd/%zu consumed for: '%s'\n",
                nread, buflen, buf);
        fail("not all consumed");
      }
    }
  }
  fail_if(!p.done, "end not detected");
  fail_if(!p.req, "not request created");
  /* bytes after the request head must remain for the caller to read */
  if(t->input_remain != (in_len - in_consumed)) {
    fprintf(stderr, "expected %zu input bytes to remain, but got %zu\n",
            t->input_remain, in_len - in_consumed);
    fail("unexpected input consumption");
  }
  if(p.req) {
    check_eq(p.req->method, t->method, "method");
    check_eq(p.req->scheme, t->scheme, "scheme");
    check_eq(p.req->authority, t->authority, "authority");
    check_eq(p.req->path, t->path, "path");
    if(Curl_dynhds_count(&p.req->headers) != t->header_count) {
      fprintf(stderr, "expected %zu headers but got %zu\n", t->header_count,
              Curl_dynhds_count(&p.req->headers));
      fail("unexpected req header count");
    }
  }
  Curl_h1_req_parse_free(&p);
}
/* T1: a plain GET in a single chunk */
static const char *T1_INPUT[] = {
  "GET /path HTTP/1.1\r\nHost: test.curl.se\r\n\r\n",
  NULL,
};
/* same input, parsed without a default scheme */
static struct tcase TEST1a = {
  T1_INPUT, NULL, "GET", NULL, NULL, "/path", 1, 0
};
/* same input, with "https" as default scheme */
static struct tcase TEST1b = {
  T1_INPUT, "https", "GET", "https", NULL, "/path", 1, 0
};
/* T2: the same request split at arbitrary byte boundaries, with 8
 * extra bytes after the head that must remain unconsumed */
static const char *T2_INPUT[] = {
  "GET /path HTT",
  "P/1.1\r\nHost: te",
  "st.curl.se\r\n\r",
  "\n12345678",
  NULL,
};
static struct tcase TEST2 = {
  T2_INPUT, NULL, "GET", NULL, NULL, "/path", 1, 8
};
/* T3: absolute-form target carrying scheme, authority and query */
static const char *T3_INPUT[] = {
  "GET ftp://ftp.curl.se/xxx?a=2 HTTP/1.1\r\nContent-Length: 0\r",
  "\nUser-Agent: xxx\r\n\r\n",
  NULL,
};
static struct tcase TEST3a = {
  T3_INPUT, NULL, "GET", "ftp", "ftp.curl.se", "/xxx?a=2", 2, 0
};
/* T4: CONNECT with authority-form target; two trailing LFs remain */
static const char *T4_INPUT[] = {
  "CONNECT ftp.curl.se:123 HTTP/1.1\r\nContent-Length: 0\r\n",
  "User-Agent: xxx\r\n",
  "nothing: \r\n\r\n\n\n",
  NULL,
};
static struct tcase TEST4a = {
  T4_INPUT, NULL, "CONNECT", NULL, "ftp.curl.se:123", NULL, 3, 2
};
/* T5: OPTIONS with asterisk-form target and a folded (continuation)
 * header line; 3 bytes remain after the head */
static const char *T5_INPUT[] = {
  "OPTIONS * HTTP/1.1\r\nContent-Length: 0\r\nBlabla: xxx.yyy\r",
  "\n\tzzzzzz\r\n\r\n",
  "123",
  NULL,
};
static struct tcase TEST5a = {
  T5_INPUT, NULL, "OPTIONS", NULL, NULL, "*", 2, 3
};
/* T6: bare LF line endings instead of CRLF; 3 body bytes remain */
static const char *T6_INPUT[] = {
  "PUT /path HTTP/1.1\nHost: test.curl.se\n\n123",
  NULL,
};
static struct tcase TEST6a = {
  T6_INPUT, NULL, "PUT", NULL, NULL, "/path", 1, 3
};
UNITTEST_START
  /* exercise the HTTP/1 request parser on all fixture cases; each call
   * fails the unit test on any mismatch */
  parse_success(&TEST1a);
  parse_success(&TEST1b);
  parse_success(&TEST2);
  parse_success(&TEST3a);
  parse_success(&TEST4a);
  parse_success(&TEST5a);
  parse_success(&TEST6a);
UNITTEST_STOP