| field     | value                                            | date                      |
|-----------|--------------------------------------------------|---------------------------|
| author    | CoprDistGit <infra@openeuler.org>                | 2023-10-13 08:48:16 +0000 |
| committer | CoprDistGit <infra@openeuler.org>                | 2023-10-13 08:48:16 +0000 |
| commit    | 385fc7319d6d11da099be4071c13255361d2bbec (patch) |                           |
| tree      | 6f9cc55acc6399fc776646106d6c5ae0b6d7294b         |                           |
| parent    | 09db049a40e03afdbf0c43696a42c838d1f9de51 (diff)  |                           |
automatic import of curl (openeuler22.03_LTS)
| mode       | file                                                                 | lines added |
|------------|----------------------------------------------------------------------|-------------|
| -rw-r--r-- | .gitignore                                                           | 1           |
| -rw-r--r-- | backport-0101-curl-7.32.0-multilib.patch                             | 91          |
| -rw-r--r-- | backport-CVE-2023-32001.patch                                        | 37          |
| -rw-r--r-- | backport-CVE-2023-38039.patch                                        | 212         |
| -rw-r--r-- | backport-CVE-2023-38545.patch                                        | 134         |
| -rw-r--r-- | backport-CVE-2023-38546.patch                                        | 128         |
| -rw-r--r-- | backport-curl-7.84.0-test3026.patch                                  | 71          |
| -rw-r--r-- | backport-curl-7.88.0-tests-warnings.patch                            | 30          |
| -rw-r--r-- | backport-urlapi-make-sure-zoneid-is-also-duplicated-in-curl_u.patch  | 112         |
| -rw-r--r-- | backport-vtls-avoid-memory-leak-if-sha256-call-fails.patch           | 41          |
| -rw-r--r-- | curl.spec                                                            | 441         |
| -rw-r--r-- | sources                                                              | 1           |
12 files changed, 1299 insertions, 0 deletions
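
The curl.spec in this import applies the backport patches listed above during %prep with "%autosetup -n %{name}-%{version} -p1". The snippet below is a rough, unofficial equivalent outside of rpmbuild: a minimal sketch that assumes the tarball and the patch files sit together in the current directory and that follows the Patch1 through Patch10 order declared in curl.spec.

    # unpack the upstream source referenced by the spec
    tar xf curl-8.1.2.tar.xz
    cd curl-8.1.2
    # apply the backports at -p1 strip level, in the spec's declared order
    for p in \
        backport-0101-curl-7.32.0-multilib.patch \
        backport-curl-7.84.0-test3026.patch \
        backport-curl-7.88.0-tests-warnings.patch \
        backport-CVE-2023-32001.patch \
        backport-vtls-avoid-memory-leak-if-sha256-call-fails.patch \
        backport-urlapi-make-sure-zoneid-is-also-duplicated-in-curl_u.patch \
        backport-CVE-2023-38039.patch \
        backport-CVE-2023-38545.patch \
        backport-CVE-2023-38546.patch
    do
        patch -p1 < "../$p"
    done

The full diff of each imported file follows.
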
@@ -0,0 +1 @@ +/curl-8.1.2.tar.xz diff --git a/backport-0101-curl-7.32.0-multilib.patch b/backport-0101-curl-7.32.0-multilib.patch new file mode 100644 index 0000000..b4f8e2a --- /dev/null +++ b/backport-0101-curl-7.32.0-multilib.patch @@ -0,0 +1,91 @@ +From 2a4754a3a7cf60ecc36d83cbe50b8c337cb87632 Mon Sep 17 00:00:00 2001 +From: Kamil Dudka <kdudka@redhat.com> +Date: Fri, 12 Apr 2013 12:04:05 +0200 +Subject: [PATCH] prevent multilib conflicts on the curl-config script + +--- + curl-config.in | 23 +++++------------------ + docs/curl-config.1 | 4 +++- + libcurl.pc.in | 1 + + 3 files changed, 9 insertions(+), 19 deletions(-) + +diff --git a/curl-config.in b/curl-config.in +index 150004d..95d0759 100644 +--- a/curl-config.in ++++ b/curl-config.in +@@ -78,7 +78,7 @@ while test $# -gt 0; do + ;; + + --cc) +- echo "@CC@" ++ echo "gcc" + ;; + + --prefix) +@@ -157,32 +157,19 @@ while test $# -gt 0; do + ;; + + --libs) +- if test "X@libdir@" != "X/usr/lib" -a "X@libdir@" != "X/usr/lib64"; then +- CURLLIBDIR="-L@libdir@ " +- else +- CURLLIBDIR="" +- fi +- if test "X@ENABLE_SHARED@" = "Xno"; then +- echo ${CURLLIBDIR}-lcurl @LIBCURL_LIBS@ +- else +- echo ${CURLLIBDIR}-lcurl +- fi ++ echo -lcurl + ;; + --ssl-backends) + echo "@SSL_BACKENDS@" + ;; + + --static-libs) +- if test "X@ENABLE_STATIC@" != "Xno" ; then +- echo "@libdir@/libcurl.@libext@" @LDFLAGS@ @LIBCURL_LIBS@ +- else +- echo "curl was built with static libraries disabled" >&2 +- exit 1 +- fi ++ echo "curl was built with static libraries disabled" >&2 ++ exit 1 + ;; + + --configure) +- echo @CONFIGURE_OPTIONS@ ++ pkg-config libcurl --variable=configure_options | sed 's/^"//;s/"$//' + ;; + + *) +diff --git a/docs/curl-config.1 b/docs/curl-config.1 +index 14a9d2b..ffcc004 100644 +--- a/docs/curl-config.1 ++++ b/docs/curl-config.1 +@@ -72,7 +72,9 @@ no, one or several names. If more than one name, they will appear + comma-separated. (Added in 7.58.0) + .IP "--static-libs" + Shows the complete set of libs and other linker options you will need in order +-to link your application with libcurl statically. (Added in 7.17.1) ++to link your application with libcurl statically. Note that Fedora/RHEL libcurl ++packages do not provide any static libraries, thus cannot be linked statically. ++(Added in 7.17.1) + .IP "--version" + Outputs version information about the installed libcurl. 
+ .IP "--vernum" +diff --git a/libcurl.pc.in b/libcurl.pc.in +index 2ba9c39..f8f8b00 100644 +--- a/libcurl.pc.in ++++ b/libcurl.pc.in +@@ -31,6 +31,7 @@ libdir=@libdir@ + includedir=@includedir@ + supported_protocols="@SUPPORT_PROTOCOLS@" + supported_features="@SUPPORT_FEATURES@" ++configure_options=@CONFIGURE_OPTIONS@ + + Name: libcurl + URL: https://curl.se/ +-- +2.26.2 + diff --git a/backport-CVE-2023-32001.patch b/backport-CVE-2023-32001.patch new file mode 100644 index 0000000..8827596 --- /dev/null +++ b/backport-CVE-2023-32001.patch @@ -0,0 +1,37 @@ +From 0c667188e0c6cda615a036b8a2b4125f2c404dde Mon Sep 17 00:00:00 2001 +From: SaltyMilk <soufiane.elmelcaoui@gmail.com> +Date: Mon, 10 Jul 2023 21:43:28 +0200 +Subject: [PATCH] fopen: optimize + +Closes #11419 +--- + lib/fopen.c | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +diff --git a/lib/fopen.c b/lib/fopen.c +index c9c9e3d6e..b6e3caddd 100644 +--- a/lib/fopen.c ++++ b/lib/fopen.c +@@ -56,13 +56,13 @@ CURLcode Curl_fopen(struct Curl_easy *data, const char *filename, + int fd = -1; + *tempname = NULL; + +- if(stat(filename, &sb) == -1 || !S_ISREG(sb.st_mode)) { +- /* a non-regular file, fallback to direct fopen() */ +- *fh = fopen(filename, FOPEN_WRITETEXT); +- if(*fh) +- return CURLE_OK; ++ *fh = fopen(filename, FOPEN_WRITETEXT); ++ if(!*fh) + goto fail; +- } ++ if(fstat(fileno(*fh), &sb) == -1 || !S_ISREG(sb.st_mode)) ++ return CURLE_OK; ++ fclose(*fh); ++ *fh = NULL; + + result = Curl_rand_hex(data, randsuffix, sizeof(randsuffix)); + if(result) +-- +2.33.0 + diff --git a/backport-CVE-2023-38039.patch b/backport-CVE-2023-38039.patch new file mode 100644 index 0000000..03a879b --- /dev/null +++ b/backport-CVE-2023-38039.patch @@ -0,0 +1,212 @@ +From 3ee79c1674fd6f99e8efca52cd7510e08b766770 Mon Sep 17 00:00:00 2001 +From: Daniel Stenberg <daniel@haxx.se> +Date: Wed, 2 Aug 2023 23:34:48 +0200 +Subject: [PATCH] http: return error when receiving too large header set + +To avoid abuse. The limit is set to 300 KB for the accumulated size of +all received HTTP headers for a single response. Incomplete research +suggests that Chrome uses a 256-300 KB limit, while Firefox allows up to +1MB. 
+ +Closes #11582 +--- + lib/c-hyper.c | 12 +++++++----- + lib/cf-h1-proxy.c | 4 +++- + lib/http.c | 34 ++++++++++++++++++++++++++++++---- + lib/http.h | 9 +++++++++ + lib/pingpong.c | 4 +++- + lib/urldata.h | 17 ++++++++--------- + 6 files changed, 60 insertions(+), 20 deletions(-) + +diff --git a/lib/c-hyper.c b/lib/c-hyper.c +index c29983c0b24a6..0b9d9ab478e67 100644 +--- a/lib/c-hyper.c ++++ b/lib/c-hyper.c +@@ -182,8 +182,11 @@ static int hyper_each_header(void *userdata, + } + } + +- data->info.header_size += (curl_off_t)len; +- data->req.headerbytecount += (curl_off_t)len; ++ result = Curl_bump_headersize(data, len, FALSE); ++ if(result) { ++ data->state.hresult = result; ++ return HYPER_ITER_BREAK; ++ } + return HYPER_ITER_CONTINUE; + } + +@@ -313,9 +316,8 @@ static CURLcode status_line(struct Curl_easy *data, + if(result) + return result; + } +- data->info.header_size += (curl_off_t)len; +- data->req.headerbytecount += (curl_off_t)len; +- return CURLE_OK; ++ result = Curl_bump_headersize(data, len, FALSE); ++ return result; + } + + /* +diff --git a/lib/cf-h1-proxy.c b/lib/cf-h1-proxy.c +index c9b157c9bccc7..b1d8cb618b7d1 100644 +--- a/lib/cf-h1-proxy.c ++++ b/lib/cf-h1-proxy.c +@@ -587,7 +587,9 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf, + return result; + } + +- data->info.header_size += (long)perline; ++ result = Curl_bump_headersize(data, perline, TRUE); ++ if(result) ++ return result; + + /* Newlines are CRLF, so the CR is ignored as the line isn't + really terminated until the LF comes. Treat a following CR +diff --git a/lib/http.c b/lib/http.c +index f7c71afd7d847..bc78ff97435c4 100644 +--- a/lib/http.c ++++ b/lib/http.c +@@ -3920,6 +3920,29 @@ static CURLcode verify_header(struct Curl_easy *data) + return CURLE_OK; + } + ++CURLcode Curl_bump_headersize(struct Curl_easy *data, ++ size_t delta, ++ bool connect_only) ++{ ++ size_t bad = 0; ++ if(delta < MAX_HTTP_RESP_HEADER_SIZE) { ++ if(!connect_only) ++ data->req.headerbytecount += (unsigned int)delta; ++ data->info.header_size += (unsigned int)delta; ++ if(data->info.header_size > MAX_HTTP_RESP_HEADER_SIZE) ++ bad = data->info.header_size; ++ } ++ else ++ bad = data->info.header_size + delta; ++ if(bad) { ++ failf(data, "Too large response headers: %zu > %zu", ++ bad, MAX_HTTP_RESP_HEADER_SIZE); ++ return CURLE_RECV_ERROR; ++ } ++ return CURLE_OK; ++} ++ ++ + /* + * Read any HTTP header lines from the server and pass them to the client app. 
+ */ +@@ -4173,8 +4196,9 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data, + if(result) + return result; + +- data->info.header_size += (long)headerlen; +- data->req.headerbytecount += (long)headerlen; ++ result = Curl_bump_headersize(data, headerlen, FALSE); ++ if(result) ++ return result; + + /* + * When all the headers have been parsed, see if we should give +@@ -4496,8 +4520,10 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data, + if(result) + return result; + +- data->info.header_size += Curl_dyn_len(&data->state.headerb); +- data->req.headerbytecount += Curl_dyn_len(&data->state.headerb); ++ result = Curl_bump_headersize(data, Curl_dyn_len(&data->state.headerb), ++ FALSE); ++ if(result) ++ return result; + + Curl_dyn_reset(&data->state.headerb); + } +diff --git a/lib/http.h b/lib/http.h +index df3b4e38b8a88..4aeabc345938c 100644 +--- a/lib/http.h ++++ b/lib/http.h +@@ -64,6 +64,10 @@ extern const struct Curl_handler Curl_handler_wss; + + struct dynhds; + ++CURLcode Curl_bump_headersize(struct Curl_easy *data, ++ size_t delta, ++ bool connect_only); ++ + /* Header specific functions */ + bool Curl_compareheader(const char *headerline, /* line to check */ + const char *header, /* header keyword _with_ colon */ +@@ -183,6 +187,11 @@ CURLcode Curl_http_auth_act(struct Curl_easy *data); + #define EXPECT_100_THRESHOLD (1024*1024) + #endif + ++/* MAX_HTTP_RESP_HEADER_SIZE is the maximum size of all response headers ++ combined that libcurl allows for a single HTTP response, any HTTP ++ version. This count includes CONNECT response headers. */ ++#define MAX_HTTP_RESP_HEADER_SIZE (300*1024) ++ + #endif /* CURL_DISABLE_HTTP */ + + /**************************************************************************** +diff --git a/lib/pingpong.c b/lib/pingpong.c +index f3f7cb93cb9b7..523bbec189fe6 100644 +--- a/lib/pingpong.c ++++ b/lib/pingpong.c +@@ -341,7 +341,9 @@ CURLcode Curl_pp_readresp(struct Curl_easy *data, + ssize_t clipamount = 0; + bool restart = FALSE; + +- data->req.headerbytecount += (long)gotbytes; ++ result = Curl_bump_headersize(data, gotbytes, FALSE); ++ if(result) ++ return result; + + pp->nread_resp += gotbytes; + for(i = 0; i < gotbytes; ptr++, i++) { +diff --git a/lib/urldata.h b/lib/urldata.h +index e5446b6840f63..d21aa415dc94b 100644 +--- a/lib/urldata.h ++++ b/lib/urldata.h +@@ -629,17 +629,16 @@ struct SingleRequest { + curl_off_t bytecount; /* total number of bytes read */ + curl_off_t writebytecount; /* number of bytes written */ + +- curl_off_t headerbytecount; /* only count received headers */ +- curl_off_t deductheadercount; /* this amount of bytes doesn't count when we +- check if anything has been transferred at +- the end of a connection. We use this +- counter to make only a 100 reply (without a +- following second response code) result in a +- CURLE_GOT_NOTHING error code */ +- + curl_off_t pendingheader; /* this many bytes left to send is actually + header and not body */ + struct curltime start; /* transfer started at this time */ ++ unsigned int headerbytecount; /* only count received headers */ ++ unsigned int deductheadercount; /* this amount of bytes doesn't count when ++ we check if anything has been transferred ++ at the end of a connection. 
We use this ++ counter to make only a 100 reply (without ++ a following second response code) result ++ in a CURLE_GOT_NOTHING error code */ + enum { + HEADER_NORMAL, /* no bad header at all */ + HEADER_PARTHEADER, /* part of the chunk is a bad header, the rest +@@ -1089,7 +1088,6 @@ struct PureInfo { + int httpversion; /* the http version number X.Y = X*10+Y */ + time_t filetime; /* If requested, this is might get set. Set to -1 if the + time was unretrievable. */ +- curl_off_t header_size; /* size of read header(s) in bytes */ + curl_off_t request_size; /* the amount of bytes sent in the request(s) */ + unsigned long proxyauthavail; /* what proxy auth types were announced */ + unsigned long httpauthavail; /* what host auth types were announced */ +@@ -1097,6 +1095,7 @@ struct PureInfo { + char *contenttype; /* the content type of the object */ + char *wouldredirect; /* URL this would've been redirected to if asked to */ + curl_off_t retry_after; /* info from Retry-After: header */ ++ unsigned int header_size; /* size of read header(s) in bytes */ + + /* PureInfo members 'conn_primary_ip', 'conn_primary_port', 'conn_local_ip' + and, 'conn_local_port' are copied over from the connectdata struct in + diff --git a/backport-CVE-2023-38545.patch b/backport-CVE-2023-38545.patch new file mode 100644 index 0000000..c15c273 --- /dev/null +++ b/backport-CVE-2023-38545.patch @@ -0,0 +1,134 @@ +From 92fd36dd54de9ac845549944692eb33c5aee7343 Mon Sep 17 00:00:00 2001 +From: Jay Satiro <raysatiro@yahoo.com> +Date: Mon, 9 Oct 2023 17:15:44 -0400 +Subject: [PATCH] socks: return error if hostname too long for remote resolve + +Prior to this change the state machine attempted to change the remote +resolve to a local resolve if the hostname was longer than 255 +characters. Unfortunately that did not work as intended and caused a +security issue. + +This patch applies to curl versions 7.87.0 - 8.1.2. Other versions +that are affected take a different patch. Refer to the CVE advisory +for more information. 
+ +Bug: https://curl.se/docs/CVE-2023-38545.html +--- + lib/socks.c | 8 +++---- + tests/data/Makefile.inc | 2 +- + tests/data/test728 | 64 +++++++++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 69 insertions(+), 5 deletions(-) + create mode 100644 tests/data/test728 + +diff --git a/lib/socks.c b/lib/socks.c +index d491e08..e7da5b4 100644 +--- a/lib/socks.c ++++ b/lib/socks.c +@@ -539,9 +539,9 @@ static CURLproxycode do_SOCKS5(struct Curl_cfilter *cf, + + /* RFC1928 chapter 5 specifies max 255 chars for domain name in packet */ + if(!socks5_resolve_local && hostname_len > 255) { +- infof(data, "SOCKS5: server resolving disabled for hostnames of " +- "length > 255 [actual len=%zu]", hostname_len); +- socks5_resolve_local = TRUE; ++ failf(data, "SOCKS5: the destination hostname is too long to be " ++ "resolved remotely by the proxy."); ++ return CURLPX_LONG_HOSTNAME; + } + + if(auth & ~(CURLAUTH_BASIC | CURLAUTH_GSSAPI)) +@@ -882,7 +882,7 @@ static CURLproxycode do_SOCKS5(struct Curl_cfilter *cf, + } + else { + socksreq[len++] = 3; +- socksreq[len++] = (char) hostname_len; /* one byte address length */ ++ socksreq[len++] = (unsigned char) hostname_len; /* one byte length */ + memcpy(&socksreq[len], sx->hostname, hostname_len); /* w/o NULL */ + len += hostname_len; + } +diff --git a/tests/data/Makefile.inc b/tests/data/Makefile.inc +index 3e0221a..64b11de 100644 +--- a/tests/data/Makefile.inc ++++ b/tests/data/Makefile.inc +@@ -99,7 +99,7 @@ test679 test680 test681 test682 test683 test684 test685 \ + \ + test700 test701 test702 test703 test704 test705 test706 test707 test708 \ + test709 test710 test711 test712 test713 test714 test715 test716 test717 \ +-test718 test719 test720 test721 \ ++test718 test719 test720 test721 test728 \ + \ + test800 test801 test802 test803 test804 test805 test806 test807 test808 \ + test809 test810 test811 test812 test813 test814 test815 test816 test817 \ +diff --git a/tests/data/test728 b/tests/data/test728 +new file mode 100644 +index 0000000..05bcf28 +--- /dev/null ++++ b/tests/data/test728 +@@ -0,0 +1,64 @@ ++<testcase> ++<info> ++<keywords> ++HTTP ++HTTP GET ++SOCKS5 ++SOCKS5h ++followlocation ++</keywords> ++</info> ++ ++# ++# Server-side ++<reply> ++# The hostname in this redirect is 256 characters and too long (> 255) for ++# SOCKS5 remote resolve. curl must return error CURLE_PROXY in this case. ++<data> ++HTTP/1.1 301 Moved Permanently ++Location: http://AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/ ++Content-Length: 0 ++Connection: close ++ ++</data> ++</reply> ++ ++# ++# Client-side ++<client> ++<features> ++proxy ++</features> ++<server> ++http ++socks5 ++</server> ++ <name> ++SOCKS5h with HTTP redirect to hostname too long ++ </name> ++ <command> ++--no-progress-meter --location --proxy socks5h://%HOSTIP:%SOCKSPORT http://%HOSTIP:%HTTPPORT/%TESTNUMBER ++</command> ++</client> ++ ++# ++# Verify data after the test has been "shot" ++<verify> ++<protocol crlf="yes"> ++GET /%TESTNUMBER HTTP/1.1 ++Host: %HOSTIP:%HTTPPORT ++User-Agent: curl/%VERSION ++Accept: */* ++ ++</protocol> ++<errorcode> ++97 ++</errorcode> ++# the error message is verified because error code CURLE_PROXY (97) may be ++# returned for any number of reasons and we need to make sure it is ++# specifically for the reason below so that we know the check is working. 
++<stderr mode="text"> ++curl: (97) SOCKS5: the destination hostname is too long to be resolved remotely by the proxy. ++</stderr> ++</verify> ++</testcase> +-- +2.7.4 + diff --git a/backport-CVE-2023-38546.patch b/backport-CVE-2023-38546.patch new file mode 100644 index 0000000..8159462 --- /dev/null +++ b/backport-CVE-2023-38546.patch @@ -0,0 +1,128 @@ +From 61275672b46d9abb3285740467b882e22ed75da8 Mon Sep 17 00:00:00 2001 +From: Daniel Stenberg <daniel@haxx.se> +Date: Thu, 14 Sep 2023 23:28:32 +0200 +Subject: [PATCH] cookie: remove unnecessary struct fields + +Plus: reduce the hash table size from 256 to 63. It seems unlikely to +make much of a speed difference for most use cases but saves 1.5KB of +data per instance. + +Closes #11862 +--- + lib/cookie.c | 13 +------------ + lib/cookie.h | 13 ++++--------- + lib/easy.c | 4 +--- + 3 files changed, 6 insertions(+), 24 deletions(-) + +diff --git a/lib/cookie.c b/lib/cookie.c +index 4345a84c6fd9d2..e39c89a94a960d 100644 +--- a/lib/cookie.c ++++ b/lib/cookie.c +@@ -119,7 +119,6 @@ static void freecookie(struct Cookie *co) + free(co->name); + free(co->value); + free(co->maxage); +- free(co->version); + free(co); + } + +@@ -718,11 +717,7 @@ Curl_cookie_add(struct Curl_easy *data, + } + } + else if((nlen == 7) && strncasecompare("version", namep, 7)) { +- strstore(&co->version, valuep, vlen); +- if(!co->version) { +- badcookie = TRUE; +- break; +- } ++ /* just ignore */ + } + else if((nlen == 7) && strncasecompare("max-age", namep, 7)) { + /* +@@ -1160,7 +1155,6 @@ Curl_cookie_add(struct Curl_easy *data, + free(clist->path); + free(clist->spath); + free(clist->expirestr); +- free(clist->version); + free(clist->maxage); + + *clist = *co; /* then store all the new data */ +@@ -1224,9 +1218,6 @@ struct CookieInfo *Curl_cookie_init(struct Curl_easy *data, + c = calloc(1, sizeof(struct CookieInfo)); + if(!c) + return NULL; /* failed to get memory */ +- c->filename = strdup(file?file:"none"); /* copy the name just in case */ +- if(!c->filename) +- goto fail; /* failed to get memory */ + /* + * Initialize the next_expiration time to signal that we don't have enough + * information yet. +@@ -1378,7 +1369,6 @@ static struct Cookie *dup_cookie(struct Cookie *src) + CLONE(name); + CLONE(value); + CLONE(maxage); +- CLONE(version); + d->expires = src->expires; + d->tailmatch = src->tailmatch; + d->secure = src->secure; +@@ -1595,7 +1585,6 @@ void Curl_cookie_cleanup(struct CookieInfo *c) + { + if(c) { + unsigned int i; +- free(c->filename); + for(i = 0; i < COOKIE_HASH_SIZE; i++) + Curl_cookie_freelist(c->cookies[i]); + free(c); /* free the base struct as well */ +diff --git a/lib/cookie.h b/lib/cookie.h +index b3c0063b2cfb25..41e9e7a6914e0a 100644 +--- a/lib/cookie.h ++++ b/lib/cookie.h +@@ -36,11 +36,7 @@ struct Cookie { + char *domain; /* domain = <this> */ + curl_off_t expires; /* expires = <this> */ + char *expirestr; /* the plain text version */ +- +- /* RFC 2109 keywords. 
Version=1 means 2109-compliant cookie sending */ +- char *version; /* Version = <value> */ + char *maxage; /* Max-Age = <value> */ +- + bool tailmatch; /* whether we do tail-matching of the domain name */ + bool secure; /* whether the 'secure' keyword was used */ + bool livecookie; /* updated from a server, not a stored file */ +@@ -56,17 +52,16 @@ struct Cookie { + #define COOKIE_PREFIX__SECURE (1<<0) + #define COOKIE_PREFIX__HOST (1<<1) + +-#define COOKIE_HASH_SIZE 256 ++#define COOKIE_HASH_SIZE 63 + + struct CookieInfo { + /* linked list of cookies we know of */ + struct Cookie *cookies[COOKIE_HASH_SIZE]; +- char *filename; /* file we read from/write to */ +- long numcookies; /* number of cookies in the "jar" */ ++ curl_off_t next_expiration; /* the next time at which expiration happens */ ++ int numcookies; /* number of cookies in the "jar" */ ++ int lastct; /* last creation-time used in the jar */ + bool running; /* state info, for cookie adding information */ + bool newsession; /* new session, discard session cookies on load */ +- int lastct; /* last creation-time used in the jar */ +- curl_off_t next_expiration; /* the next time at which expiration happens */ + }; + + /* The maximum sizes we accept for cookies. RFC 6265 section 6.1 says +diff --git a/lib/easy.c b/lib/easy.c +index 16bbd35251d408..03195481f9780a 100644 +--- a/lib/easy.c ++++ b/lib/easy.c +@@ -925,9 +925,7 @@ struct Curl_easy *curl_easy_duphandle(struct Curl_easy *data) + if(data->cookies) { + /* If cookies are enabled in the parent handle, we enable them + in the clone as well! */ +- outcurl->cookies = Curl_cookie_init(data, +- data->cookies->filename, +- outcurl->cookies, ++ outcurl->cookies = Curl_cookie_init(data, NULL, outcurl->cookies, + data->set.cookiesession); + if(!outcurl->cookies) + goto fail; diff --git a/backport-curl-7.84.0-test3026.patch b/backport-curl-7.84.0-test3026.patch new file mode 100644 index 0000000..1098583 --- /dev/null +++ b/backport-curl-7.84.0-test3026.patch @@ -0,0 +1,71 @@ +From 279b990727a1fd3e2828fbbd80581777e4200b67 Mon Sep 17 00:00:00 2001 +From: Kamil Dudka <kdudka@redhat.com> +Date: Mon, 27 Jun 2022 16:50:57 +0200 +Subject: [PATCH] test3026: disable valgrind + +It fails on x86_64 with: +``` + Use --max-threads=INT to specify a larger number of threads + and rerun valgrind + valgrind: the 'impossible' happened: + Max number of threads is too low + host stacktrace: + ==174357== at 0x58042F5A: ??? (in /usr/libexec/valgrind/memcheck-amd64-linux) + ==174357== by 0x58043087: ??? (in /usr/libexec/valgrind/memcheck-amd64-linux) + ==174357== by 0x580432EF: ??? (in /usr/libexec/valgrind/memcheck-amd64-linux) + ==174357== by 0x58043310: ??? (in /usr/libexec/valgrind/memcheck-amd64-linux) + ==174357== by 0x58099E77: ??? (in /usr/libexec/valgrind/memcheck-amd64-linux) + ==174357== by 0x580E67E9: ??? (in /usr/libexec/valgrind/memcheck-amd64-linux) + ==174357== by 0x5809D59D: ??? (in /usr/libexec/valgrind/memcheck-amd64-linux) + ==174357== by 0x5809901A: ??? (in /usr/libexec/valgrind/memcheck-amd64-linux) + ==174357== by 0x5809B0B6: ??? (in /usr/libexec/valgrind/memcheck-amd64-linux) + ==174357== by 0x580E4050: ??? 
(in /usr/libexec/valgrind/memcheck-amd64-linux) + sched status: + running_tid=1 + Thread 1: status = VgTs_Runnable syscall 56 (lwpid 174357) + ==174357== at 0x4A07816: clone (in /usr/lib64/libc.so.6) + ==174357== by 0x4A08720: __clone_internal (in /usr/lib64/libc.so.6) + ==174357== by 0x4987ACF: create_thread (in /usr/lib64/libc.so.6) + ==174357== by 0x49885F6: pthread_create@@GLIBC_2.34 (in /usr/lib64/libc.so.6) + ==174357== by 0x1093B5: test.part.0 (lib3026.c:64) + ==174357== by 0x492454F: (below main) (in /usr/lib64/libc.so.6) + client stack range: [0x1FFEFFC000 0x1FFF000FFF] client SP: 0x1FFEFFC998 + valgrind stack range: [0x1002BAA000 0x1002CA9FFF] top usage: 11728 of 1048576 +[...] +``` +--- + tests/data/test3026 | 3 +++ + tests/libtest/lib3026.c | 4 ++-- + 2 files changed, 5 insertions(+), 2 deletions(-) + +diff --git a/tests/data/test3026 b/tests/data/test3026 +index fb80cc8..01f2ba5 100644 +--- a/tests/data/test3026 ++++ b/tests/data/test3026 +@@ -41,5 +41,8 @@ none + <errorcode> + 0 + </errorcode> ++<valgrind> ++disable ++</valgrind> + </verify> + </testcase> +diff --git a/tests/libtest/lib3026.c b/tests/libtest/lib3026.c +index 43fe335..70cd7a4 100644 +--- a/tests/libtest/lib3026.c ++++ b/tests/libtest/lib3026.c +@@ -147,8 +147,8 @@ int test(char *URL) + results[i] = CURL_LAST; /* initialize with invalid value */ + res = pthread_create(&tids[i], NULL, run_thread, &results[i]); + if(res) { +- fprintf(stderr, "%s:%d Couldn't create thread, errno %d\n", +- __FILE__, __LINE__, res); ++ fprintf(stderr, "%s:%d Couldn't create thread, i=%u, errno %d\n", ++ __FILE__, __LINE__, i, res); + tid_count = i; + test_failure = -1; + goto cleanup; +-- +2.37.1 + diff --git a/backport-curl-7.88.0-tests-warnings.patch b/backport-curl-7.88.0-tests-warnings.patch new file mode 100644 index 0000000..04b2ba2 --- /dev/null +++ b/backport-curl-7.88.0-tests-warnings.patch @@ -0,0 +1,30 @@ +From d506d885aa16b4a87acbac082eea41dccdc7b69f Mon Sep 17 00:00:00 2001 +From: Kamil Dudka <kdudka@redhat.com> +Date: Wed, 15 Feb 2023 10:42:38 +0100 +Subject: [PATCH] Revert "runtests: consider warnings fatal and error on them" + +While it might be useful for upstream developers, it is not so useful +for downstream consumers. + +This reverts upstream commit 22f795c834cfdbacbb1b55426028a581e3cf67a8. +--- + tests/runtests.pl | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/tests/runtests.pl b/tests/runtests.pl +index 71644ad18..0cf85c3fe 100755 +--- a/tests/runtests.pl ++++ b/tests/runtests.pl +@@ -55,8 +55,7 @@ + # given, this won't be a problem. + + use strict; +-# Promote all warnings to fatal +-use warnings FATAL => 'all'; ++use warnings; + use 5.006; + + # These should be the only variables that might be needed to get edited: +-- +2.39.1 + diff --git a/backport-urlapi-make-sure-zoneid-is-also-duplicated-in-curl_u.patch b/backport-urlapi-make-sure-zoneid-is-also-duplicated-in-curl_u.patch new file mode 100644 index 0000000..129e9ce --- /dev/null +++ b/backport-urlapi-make-sure-zoneid-is-also-duplicated-in-curl_u.patch @@ -0,0 +1,112 @@ +From 49e244318672c688097c1bf601a110005cd9a6a8 Mon Sep 17 00:00:00 2001 +From: Daniel Stenberg <daniel@haxx.se> +Date: Mon, 31 Jul 2023 10:07:35 +0200 +Subject: [PATCH] urlapi: make sure zoneid is also duplicated in curl_url_dup + +Add several curl_url_dup() tests to the general lib1560 test. 
+ +Reported-by: Rutger Broekhoff +Bug: https://curl.se/mail/lib-2023-07/0047.html +Closes #11549 + +Conflict: tests/libtest/lib1560.c for context adapt +Reference: https://github.com/curl/curl/commit/49e244318672c688097c1bf601a110005cd9a6a8 +--- + lib/urlapi.c | 1 + + tests/libtest/lib1560.c | 67 +++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 68 insertions(+) + +diff --git a/lib/urlapi.c b/lib/urlapi.c +index cd423c335d88f..b1a126d548213 100644 +--- a/lib/urlapi.c ++++ b/lib/urlapi.c +@@ -1385,6 +1385,7 @@ CURLU *curl_url_dup(const CURLU *in) + DUP(u, in, path); + DUP(u, in, query); + DUP(u, in, fragment); ++ DUP(u, in, zoneid); + u->portnum = in->portnum; + } + return u; +diff --git a/tests/libtest/lib1560.c b/tests/libtest/lib1560.c +index 0eca0fda72d0b..ff03bec9391a4 100644 +--- a/tests/libtest/lib1560.c ++++ b/tests/libtest/lib1560.c +@@ -1672,10 +1672,77 @@ static int huge(void) + return error; + } + ++static int urldup(void) ++{ ++ const char *url[] = { ++ "http://" ++ "user:pwd@" ++ "[2a04:4e42:e00::347%25eth0]" ++ ":80" ++ "/path" ++ "?query" ++ "#fraggie", ++ "https://example.com", ++ "https://user@example.com", ++ "https://user.pwd@example.com", ++ "https://user.pwd@example.com:1234", ++ "https://example.com:1234", ++ "example.com:1234", ++ "https://user.pwd@example.com:1234/path?query#frag", ++ NULL ++ }; ++ CURLU *copy = NULL; ++ char *h_str = NULL, *copy_str = NULL; ++ CURLU *h = curl_url(); ++ int i; ++ ++ if(!h) ++ goto err; ++ ++ for(i = 0; url[i]; i++) { ++ CURLUcode rc = curl_url_set(h, CURLUPART_URL, url[i], ++ CURLU_GUESS_SCHEME); ++ if(rc) ++ goto err; ++ copy = curl_url_dup(h); ++ ++ rc = curl_url_get(h, CURLUPART_URL, &h_str, 0); ++ if(rc) ++ goto err; ++ ++ rc = curl_url_get(copy, CURLUPART_URL, ©_str, 0); ++ if(rc) ++ goto err; ++ ++ if(strcmp(h_str, copy_str)) { ++ printf("Original: %s\nParsed: %s\nCopy: %s\n", ++ url[i], h_str, copy_str); ++ goto err; ++ } ++ curl_free(copy_str); ++ curl_free(h_str); ++ curl_url_cleanup(copy); ++ copy_str = NULL; ++ h_str = NULL; ++ copy = NULL; ++ } ++ curl_url_cleanup(h); ++ return 0; ++err: ++ curl_free(copy_str); ++ curl_free(h_str); ++ curl_url_cleanup(copy); ++ curl_url_cleanup(h); ++ return 1; ++} ++ + int test(char *URL) + { + (void)URL; /* not used */ + ++ if(urldup()) ++ return 11; ++ + if(get_url()) + return 3; + diff --git a/backport-vtls-avoid-memory-leak-if-sha256-call-fails.patch b/backport-vtls-avoid-memory-leak-if-sha256-call-fails.patch new file mode 100644 index 0000000..bf475bc --- /dev/null +++ b/backport-vtls-avoid-memory-leak-if-sha256-call-fails.patch @@ -0,0 +1,41 @@ +From a4a5e438ae533c9af5e97457ae424c9189545105 Mon Sep 17 00:00:00 2001 +From: Daniel Stenberg <daniel@haxx.se> +Date: Mon, 12 Jun 2023 14:10:37 +0200 +Subject: [PATCH] vtls: avoid memory leak if sha256 call fails + +... in the pinned public key handling function. 
+ +Reported-by: lizhuang0630 on github +Fixes #11306 +Closes #11307 + +Conflict: NA +Reference: https://github.com/curl/curl/commit/a4a5e438ae533c9af5e97457ae424c9189545105 +--- + lib/vtls/vtls.c | 12 +++++------- + 1 file changed, 5 insertions(+), 7 deletions(-) + +diff --git a/lib/vtls/vtls.c b/lib/vtls/vtls.c +index a4ff7d61a6193..cdd3a4fdc1c14 100644 +--- a/lib/vtls/vtls.c ++++ b/lib/vtls/vtls.c +@@ -907,14 +907,12 @@ CURLcode Curl_pin_peer_pubkey(struct Curl_easy *data, + if(!sha256sumdigest) + return CURLE_OUT_OF_MEMORY; + encode = Curl_ssl->sha256sum(pubkey, pubkeylen, +- sha256sumdigest, CURL_SHA256_DIGEST_LENGTH); ++ sha256sumdigest, CURL_SHA256_DIGEST_LENGTH); + +- if(encode != CURLE_OK) +- return encode; +- +- encode = Curl_base64_encode((char *)sha256sumdigest, +- CURL_SHA256_DIGEST_LENGTH, &encoded, +- &encodedlen); ++ if(!encode) ++ encode = Curl_base64_encode((char *)sha256sumdigest, ++ CURL_SHA256_DIGEST_LENGTH, &encoded, ++ &encodedlen); + Curl_safefree(sha256sumdigest); + + if(encode) diff --git a/curl.spec b/curl.spec new file mode 100644 index 0000000..a69f433 --- /dev/null +++ b/curl.spec @@ -0,0 +1,441 @@ +#Global macro or variable +%global libpsl_version %(pkg-config --modversion libpsl 2>/dev/null || echo 0) +%global libssh_version %(pkg-config --modversion libssh 2>/dev/null || echo 0) +%global openssl_version %({ pkg-config --modversion openssl 2>/dev/null || echo 0;} | sed 's|-|-0.|') +%global _configure ../configure + +Name: curl +Version: 8.1.2 +Release: 5 +Summary: Curl is used in command lines or scripts to transfer data +License: curl +URL: https://curl.se/ +Source: https://curl.se/download/curl-%{version}.tar.xz + +Patch1: backport-0101-curl-7.32.0-multilib.patch +Patch2: backport-curl-7.84.0-test3026.patch +Patch4: backport-curl-7.88.0-tests-warnings.patch +Patch5: backport-CVE-2023-32001.patch +Patch6: backport-vtls-avoid-memory-leak-if-sha256-call-fails.patch +Patch7: backport-urlapi-make-sure-zoneid-is-also-duplicated-in-curl_u.patch +Patch8: backport-CVE-2023-38039.patch +Patch9: backport-CVE-2023-38545.patch +Patch10: backport-CVE-2023-38546.patch + +BuildRequires: automake brotli-devel coreutils gcc groff krb5-devel +BuildRequires: libidn2-devel libnghttp2-devel libpsl-devel +BuildRequires: libssh-devel make openldap-devel openssh-clients openssh-server +BuildRequires: openssl-devel perl-interpreter pkgconfig python3-devel sed +BuildRequires: zlib-devel gnutls-utils nghttp2 perl(IO::Compress::Gzip) +BuildRequires: perl(Getopt::Long) perl(Pod::Usage) perl(strict) perl(warnings) +BuildRequires: perl(Cwd) perl(Digest::MD5) perl(Exporter) perl(File::Basename) +BuildRequires: perl(File::Copy) perl(File::Spec) perl(IPC::Open2) perl(MIME::Base64) +BuildRequires: perl(Time::Local) perl(Time::HiRes) perl(vars) perl(Digest::SHA) + +%ifnarch aarch64 +BuildRequires: stunnel +%endif + +Requires: libcurl = %{version}-%{release} +Provides: curl-full = %{version}-%{release} webclient + +%description +cURL is a computer software project providing a library (libcurl) and +command-line tool (curl) for transferring data using various protocols. + +%package -n libcurl +Summary: A library for getting files from web servers +Requires: libssh >= %{libssh_version} libpsl >= %{libpsl_version} +Requires: openssl-libs >= 1:%{openssl_version} +Provides: libcurl-full = %{version}-%{release} +Conflicts: curl < 7.66.0-3 + +%description -n libcurl +A library for getting files from web servers. 
+ +%package -n libcurl-devel +Summary: Header files for libcurl +Requires: libcurl = %{version}-%{release} +Provides: curl-devel = %{version}-%{release} +Obsoletes: curl-devel < %{version}-%{release} + +%description -n libcurl-devel +Header files for libcurl. + +%package_help + +%prep +%autosetup -n %{name}-%{version} -p1 + +echo "1801" >> tests/data/DISABLED + +# adapt test 323 for updated OpenSSL +sed -e 's/^35$/35,52/' -i tests/data/test323 +# use localhost6 instead of ip6-localhost in the curl test-suite +( + # avoid glob expansion in the trace output of `bash -x` + { set +x; } 2>/dev/null + cmd="sed -e 's|ip6-localhost|localhost6|' -i tests/data/test[0-9]*" + printf "+ %s\n" "$cmd" >&2 + eval "$cmd" +) + +%build +# regenerate Makefile.in files +aclocal -I m4 +automake + +install -d build-full +export common_configure_opts="--cache-file=../config.cache \ + --enable-hsts --enable-ipv6 --enable-symbol-hiding --enable-threaded-resolver \ + --without-zstd --with-gssapi --with-libidn2 --with-nghttp2 --with-ssl \ + --with-ca-bundle=%{_sysconfdir}/pki/tls/certs/ca-bundle.crt" + +%global _configure ../configure + +# configure full build +( + cd build-full + %configure $common_configure_opts \ + --enable-dict \ + --enable-gopher \ + --enable-imap \ + --enable-ldap \ + --enable-ldaps \ + --enable-manual \ + --enable-mqtt \ + --enable-ntlm \ + --enable-ntlm-wb \ + --enable-pop3 \ + --enable-rtsp \ + --enable-smb \ + --enable-smtp \ + --enable-telnet \ + --enable-tftp \ + --enable-tls-srp \ + --with-brotli \ + --with-libpsl \ + --with-libssh +) + +sed -e 's/^runpath_var=.*/runpath_var=/' \ + -e 's/^hardcode_libdir_flag_spec=".*"$/hardcode_libdir_flag_spec=""/' \ + -i build-full/libtool + +%make_build V=1 -C build-full + +%check +# compile upstream test-cases +%make_build V=1 -C build-full/tests + +# relax crypto policy for the test-suite to make it pass again (#1610888) +export OPENSSL_SYSTEM_CIPHERS_OVERRIDE=XXX +export OPENSSL_CONF= + +# make runtests.pl work for out-of-tree builds +export srcdir=../../tests + +# prevent valgrind from being extremely slow (#1662656) +unset DEBUGINFOD_URLS + +# run the upstream test-suite for curl-full +for size in full; do ( + cd build-${size} + + # we have to override LD_LIBRARY_PATH because we eliminated rpath + export LD_LIBRARY_PATH="${PWD}/lib/.libs" + + cd tests + perl -I../../tests ../../tests/runtests.pl -a -n -p -v '!flaky' +) +done + +%install +rm -f ${RPM_BUILD_ROOT}%{_libdir}/libcurl.{la,so} + +# install libcurl.m4 for devel +install -D -m 644 docs/libcurl/libcurl.m4 $RPM_BUILD_ROOT%{_datadir}/aclocal/libcurl.m4 + +# curl file install +cd build-full +%make_install + +# install zsh completion for curl +LD_LIBRARY_PATH="$RPM_BUILD_ROOT%{_libdir}:$LD_LIBRARY_PATH" %make_install -C scripts + +# do not install /usr/share/fish/completions/curl.fish which is also installed +# by fish-3.0.2-1.module_f31+3716+57207597 and would trigger a conflict +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/fish + +rm -f ${RPM_BUILD_ROOT}%{_libdir}/libcurl.a +rm -rf ${RPM_BUILD_ROOT}%{_libdir}/libcurl.la + +%ldconfig_scriptlets + +%ldconfig_scriptlets -n libcurl + +%files +%defattr(-,root,root) +%license COPYING +%{_bindir}/curl +%{_datadir}/zsh + +%files -n libcurl +%defattr(-,root,root) +%{_libdir}/libcurl.so.4 +%{_libdir}/libcurl.so.4.[0-9].[0-9] + +%files -n libcurl-devel +%defattr(-,root,root) +%doc docs/examples/*.c docs/examples/Makefile.example docs/INTERNALS.md +%doc docs/CONTRIBUTE.md docs/libcurl/ABI.md +%{_bindir}/curl-config* +%{_includedir}/curl +%{_libdir}/*.so 
+%{_libdir}/pkgconfig/*.pc +%{_datadir}/aclocal/libcurl.m4 + +%files help +%defattr(-,root,root) +%doc CHANGES README* +%doc docs/BUGS.md docs/FAQ docs/FEATURES.md +%doc docs/TheArtOfHttpScripting.md docs/TODO +%{_mandir}/man1/curl.1* +%{_mandir}/man1/curl-config.1* +%{_mandir}/man3/* + +%changelog +* Thu Oct 12 2023 zhouyihang <zhouyihang3@h-partners.com> - 8.1.2-5 +- Type:CVE +- CVE:CVE-2023-38545 CVE-2023-38546 +- SUG:NA +- DESC:fix CVE-2023-38545 CVE-2023-38546 + +* Thu Sep 14 2023 gaihuiying <eaglegai@163.com> - 8.1.2-4 +- Type:CVE +- CVE:CVE-2023-38039 +- SUG:NA +- DESC:fix CVE-2023-38039 + +* Wed Sep 06 2023 yanglu <yanglu72@h-partners.com> - 8.1.2-3 +- Type:bugfix +- CVE:NA +- SUG:NA +- DESC:vtls:avoid memory leak if sha256 call fails + urlapi:make sure zoneid is also duplicated in curl_url_dup + +* Thu Jul 20 2023 zhouyihang <zhouyihang3@h-partners.com> - 8.1.2-2 +- Type:CVE +- CVE:CVE-2023-32001 +- SUG:NA +- DESC:fix CVE-2023-32001 + +* Sat Jul 15 2023 gaihuiying <eaglegai@163.com> - 8.1.2-1 +- Type:requirement +- CVE:NA +- SUG:NA +- DESC:update to curl 8.1.2 + +* Sat Jun 10 2023 zhouyihang <zhouyihang3@h-partners.com> - 7.88.1-4 +- Type:bugfix +- CVE:NA +- SUG:NA +- DESC:disable valgrind in tests + +* Thu Jun 08 2023 xingwei <xingwei14@h-partners.com> - 7.88.1-3 +- Type:CVE +- CVE:CVE-2023-28320,CVE-2023-28321,CVE-2023-28322 +- SUG:NA +- DESC:fix CVE-2023-28320,CVE-2023-28321,CVE-2023-28322 + +* Wed Mar 22 2023 zengwefeng <zwfeng@huawei.com> - 7.88.1-2 +- Type:cves +- ID:CVE-2023-27533 CVE-2023-27534 CVE-2023-27535 CVE-2023-27536 CVE-2023-27537 CVE-2023-27538 +- SUG:NA +- DESC:fix CVE-2023-27533 CVE-2023-27534 CVE-2023-27535 CVE-2023-27536 CVE-2023-27537 CVE-2023-27538 + + +* Thu Mar 02 2023 xinghe <xinghe2@h-partners.com> - 7.88.1-1 +- Type:requirements +- ID:NA +- SUG:NA +- DESC:upgrade to 7.88.1 + +* Sat Feb 18 2023 xinghe <xinghe2@h-partners.com> - 7.86.0-3 +- Type:cves +- ID:CVE-2023-23914 CVE-2023-23915 CVE-2023-23916 +- SUG:NA +- DESC:fix CVE-2023-23914 CVE-2023-23915 CVE-2023-23916 + +* Thu Dec 22 2022 zhouyihang <zhouyihang3@h-partners.com> - 7.86.0-2 +- Type:cves +- ID:CVE-2022-43551 CVE-2022-43552 +- SUG:NA +- DESC:fix CVE-2022-43551 CVE-2022-43552 + +* Wed Nov 16 2022 xinghe <xinghe2@h-partners.com> - 7.86.0-1 +- Type:requirements +- ID:NA +- SUG:NA +- DESC:upgrade to 7.86.0 + +* Thu Oct 27 2022 yanglu <yanglu72@h-partners.com> - 7.79.1-12 +- Type:cves +- CVE:CVE-2022-32221 CVE-2022-42915 CVE-2022-42916 +- SUG:NA +- DESC:fix CVE-2022-32221 CVE-2022-42915 CVE-2022-42916 + +* Tue Oct 11 2022 huangduirong <huangduirong@huawei.com> - 7.79.1-11 +- Type:bugfix +- ID:NA +- SUG:NA +- DESC:Move autoreconf to build + +* Thu Sep 01 2022 zhouyihang <zhouyihang@h-partners.com> - 7.79.1-10 +- Type:cves +- CVE:CVE-2022-35252 +- SUG:NA +- DESC:fix CVE-2022-35252 + +* Thu Jul 28 2022 gaihuiying <eaglegai@163.com> - 7.79.1-9 +- Type:bugfix +- CVE:NA +- SUG:NA +- DESC:just rebuild release to 7.79.1-9 + +* Mon Jul 25 2022 gaihuiying <eaglegai@163.com> - 7.79.1-8 +- Type:bugfix +- CVE:NA +- SUG:NA +- DESC:fix build error when add --disable-http-auth configure option + +* Tue Jul 05 2022 gaihuiying <eaglegai@163.com> - 7.79.1-7 +- Type:cves +- CVE:CVE-2022-32207 +- SUG:NA +- DESC:fix CVE-2022-32207 better + +* Wed Jun 29 2022 gaihuiying <eaglegai@163.com> - 7.79.1-6 +- Type:cves +- CVE:CVE-2022-32205 CVE-2022-32206 CVE-2022-32207 CVE-2022-32208 +- SUG:NA +- DESC:fix CVE-2022-32205 CVE-2022-32206 CVE-2022-32207 CVE-2022-32208 + +* Tue May 17 2022 gaihuiying <eaglegai@163.com> - 7.79.1-5 +- 
Type:cves +- CVE:CVE-2022-27781 CVE-2022-27782 +- SUG:NA +- DESC:fix CVE-2022-27781 CVE-2022-27782 + +* Sat May 14 2022 gaoxingwang <gaoxingwang1@huawei.com> - 7.79.1-4 +- Type:bugfix +- CVE:NA +- SUG:NA +- DESC:fix dict and neg telnet server start fail in upstream testcase + +* Fri May 06 2022 gaihuiying <eaglegai@163.com> - 7.79.1-3 +- Type:cves +- CVE:CVE-2022-22576 CVE-2022-27774 CVE-2022-27775 CVE-2022-27776 +- SUG:NA +- DESC:fix CVE-2022-22576 CVE-2022-27774 CVE-2022-27775 CVE-2022-27776 + +* Mon Apr 25 2022 gaoxingwang <gaoxingwang1@huawei.com> - 7.79.1-2 +- Type:bugfix +- CVE:NA +- SUG:NA +- DESC:enable check in spec + +* Thu Jan 20 2022 gaoxingwang <gaoxingwang@huawei.com> - 7.79.1-1 +- Type:bugfix +- CVE:NA +- SUG:NA +- DESC:update curl to 7.79.1 +* Wed Sep 29 2021 yanglu <yanglu72@huawei.com> - 7.77.0-3 +- Type:CVE +- CVE:CVE-2021-22945 CVE-2021-22946 CVE-2021-22947 +- SUG:NA +- DESC:fix CVE-2021-22945 CVE-2021-22946CVE-2021-22947 + +* Fri Aug 13 2021 gaihuiying <gaihuiying1@huawei.com> - 7.77.0-2 +- Type:CVE +- CVE:CVE-2021-22925 CVE-2021-22926 +- SUG:NA +- DESC:fix CVE-2021-22925 CVE-2021-22926 + +* Thu Jul 8 2021 gaihuiying <gaihuiying1@huawei.com> - 7.77.0-1 +- Type:requirement +- CVE:NA +- SUG:NA +- DESC:update curl to 7.77.0 + +* Tue Jun 8 2021 gaihuiying <gaihuiying1@huawei.com> - 7.71.1-9 +- Type:CVE +- CVE:CVE-2021-22897 CVE-2021-22898 +- SUG:NA +- DESC:fix CVE-2021-22897 CVE-2021-22898 + +* Tue Apr 20 2021 gaihuiying <gaihuiying1@huawei.com> - 7.71.1-8 +- Type:CVE +- CVE:CVE-2021-22890 +- SUG:NA +- DESC:fix CVE-2021-22890 + +* Thu Apr 8 2021 xieliuhua <xieliuhua@huawei.com> - 7.71.1-7 +- Type:CVE +- CVE:CVE-2021-22876 +- SUG:NA +- DESC:fix CVE-2021-22876 + +* Tue Jan 26 2021 wangxiaopeng <wangxiaopeng7@huawei.com> - 7.71.1-6 +- Type:CVE +- CVE:CVE-2020-8285 +- SUG:NA +- DESC:fix CVE-2020-8285 + +* Tue Jan 19 2021 xielh2000 <xielh2000@163.com> - 7.71.1-5 +- Type:CVE +- CVE:CVE-2020-8286 +- SUG:NA +- DESC:fix CVE-2020-8286 + +* Mon Jan 18 2021 xihaochen <xihaochen@huawei.com> - 7.71.1-4 +- Type:CVE +- CVE:CVE-2020-8284 +- SUG:NA +- DESC:fix CVE-2020-8284 + +* Tue Jan 5 2021 gaihuiying <gaihuiying1@huawei.com> - 7.71.1-3 +- Type:bugfix +- ID:NA +- SUG:NA +- DESC:fix downgrade error + +* Mon Dec 28 2020 liuxin <liuxin264@huawei.com> - 7.71.1-2 +- Type:cves +- ID:CVE-2020-8231 +- SUG:NA +- DESC:fix CVE-2020-8231 + +* Fri Jul 24 2020 zhujunhao <zhujunhao8@huawei.com> - 7.71.1-1 +- Update to 7.71.1 + +* Thu Apr 9 2020 songnannan <songnannan2@huawei.com> - 7.66.0-3 +- split out the libcurl and libcurl-devel package + +* Tue Mar 17 2020 chenzhen <chenzhen44@huawei.com> - 7.66.0-2 +- Type:cves +- ID:CVE-2019-15601 +- SUG:NA +- DESC:fix CVE-2019-15601 + +* Sat Jan 11 2020 openEuler Buildteam <buildteam@openeuler.org> - 7.66.0-1 +- update to 7.66.0 + +* Sat Dec 21 2019 openEuler Buildteam <buildteam@openeuler.org> - 7.61.1-4 +- Type:cves +- ID:CVE-2019-5481 CVE-2019-5482 +- SUG:NA +- DESC:fix CVE-2019-5481 CVE-2019-5482 + +* Wed Sep 18 2019 guanyanjie <guanyanjie@huawei.com> - 7.61.1-3 +- Init for openEuler @@ -0,0 +1 @@ +1f7f6678b1342ad78f30e1dedd015fe2 curl-8.1.2.tar.xz |
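
The sources file above records an MD5 checksum for the upstream tarball in the "checksum filename" dist-git format. The commands below are a minimal sketch for checking a local copy of the tarball against that value and rebuilding the package from this import; they assume rpmbuild and the BuildRequires listed in curl.spec are installed, and that every file from this commit sits in the current directory.

    # the md5sum output should match the value recorded in 'sources':
    # 1f7f6678b1342ad78f30e1dedd015fe2
    md5sum curl-8.1.2.tar.xz
    # build source and binary RPMs, pointing rpmbuild at the current
    # directory for the Source and Patch files
    rpmbuild -ba curl.spec --define "_sourcedir $PWD"
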