!24 Upgrade to version 2.7.18 and fix CVE-2019-9674 in LTS

Merge pull request !24 from LeoFang/openEuler-20.03-LTS
This commit is contained in:
openeuler-ci-bot 2020-08-07 09:14:56 +08:00 committed by Gitee
commit f92569ff64
25 changed files with 156 additions and 1279 deletions

View File

@ -1,13 +0,0 @@
--- a/Lib/test/test_gdb.py.old 2012-04-11 21:04:01.367073855 -0400
+++ b/Lib/test/test_gdb.py 2012-04-12 08:52:58.320288761 -0400
@@ -211,6 +211,10 @@
# ignore all warnings
'warning: ',
)
+ ignore_patterns += ('warning: Unable to open',
+ 'Missing separate debuginfo for',
+ 'Try: yum --disablerepo=',
+ 'Undefined set print command')
for line in errlines:
if not line:
continue

View File

@ -1,49 +0,0 @@
diff -up Python-2.7.3/Lib/test/test_os.py.uid-gid-overflows Python-2.7.3/Lib/test/test_os.py
--- Python-2.7.3/Lib/test/test_os.py.uid-gid-overflows 2012-04-09 19:07:32.000000000 -0400
+++ Python-2.7.3/Lib/test/test_os.py 2012-06-26 14:51:36.000817929 -0400
@@ -677,30 +677,36 @@ if sys.platform != 'win32':
def test_setuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setuid, 0)
+ self.assertRaises(TypeError, os.setuid, 'not an int')
self.assertRaises(OverflowError, os.setuid, 1<<32)
@unittest.skipUnless(hasattr(os, 'setgid'), 'test needs os.setgid()')
def test_setgid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setgid, 0)
+ self.assertRaises(TypeError, os.setgid, 'not an int')
self.assertRaises(OverflowError, os.setgid, 1<<32)
@unittest.skipUnless(hasattr(os, 'seteuid'), 'test needs os.seteuid()')
def test_seteuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.seteuid, 0)
+ self.assertRaises(TypeError, os.seteuid, 'not an int')
self.assertRaises(OverflowError, os.seteuid, 1<<32)
@unittest.skipUnless(hasattr(os, 'setegid'), 'test needs os.setegid()')
def test_setegid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setegid, 0)
+ self.assertRaises(TypeError, os.setegid, 'not an int')
self.assertRaises(OverflowError, os.setegid, 1<<32)
@unittest.skipUnless(hasattr(os, 'setreuid'), 'test needs os.setreuid()')
def test_setreuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setreuid, 0, 0)
+ self.assertRaises(TypeError, os.setreuid, 'not an int', 0)
+ self.assertRaises(TypeError, os.setreuid, 0, 'not an int')
self.assertRaises(OverflowError, os.setreuid, 1<<32, 0)
self.assertRaises(OverflowError, os.setreuid, 0, 1<<32)
@@ -715,6 +721,8 @@ if sys.platform != 'win32':
def test_setregid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setregid, 0, 0)
+ self.assertRaises(TypeError, os.setregid, 'not an int', 0)
+ self.assertRaises(TypeError, os.setregid, 0, 'not an int')
self.assertRaises(OverflowError, os.setregid, 1<<32, 0)
self.assertRaises(OverflowError, os.setregid, 0, 1<<32)

View File

@ -1,12 +0,0 @@
diff -up Python-2.6.6/Lib/distutils/sysconfig.py.distutils-cflags Python-2.6.6/Lib/distutils/sysconfig.py
--- Python-2.6.6/Lib/distutils/sysconfig.py.distutils-cflags 2011-08-12 17:18:17.833091153 -0400
+++ Python-2.6.6/Lib/distutils/sysconfig.py 2011-08-12 17:18:27.449106938 -0400
@@ -187,7 +187,7 @@ def customize_compiler(compiler):
if 'LDFLAGS' in os.environ:
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if 'CFLAGS' in os.environ:
- cflags = opt + ' ' + os.environ['CFLAGS']
+ cflags = cflags + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if 'CPPFLAGS' in os.environ:
cpp = cpp + ' ' + os.environ['CPPFLAGS']

View File

@ -18,10 +18,10 @@ index 5021ebf..1903cc0 100644
__all__ = ["version", "bootstrap"]
-_SETUPTOOLS_VERSION = "40.6.2"
-_SETUPTOOLS_VERSION = "41.2.0"
+_WHEEL_DIR = "/usr/share/python-wheels/"
-_PIP_VERSION = "18.1"
-_PIP_VERSION = "19.2.3"
+def _get_most_recent_wheel_version(pkg):
+ prefix = os.path.join(_WHEEL_DIR, "{}-".format(pkg))
+ suffix = "-py2.py3-none-any.whl"

View File

@ -1,25 +0,0 @@
From 69b4a17f342146d6b7a73975a37678db9916aa75 Mon Sep 17 00:00:00 2001
From: "Gao, Xiang" <qasdfgtyuiop@gmail.com>
Date: Thu, 28 Feb 2019 08:18:48 -0500
Subject: [PATCH 342/362] bpo-36126: Fix ref count leakage in structseq_repr.
(GH-12035)
---
Objects/structseq.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/Objects/structseq.c b/Objects/structseq.c
index 3e45840..aee9528 100644
--- a/Objects/structseq.c
+++ b/Objects/structseq.c
@@ -266,6 +266,7 @@ structseq_repr(PyStructSequence *obj)
val = PyTuple_GetItem(tup, i);
if (cname == NULL || val == NULL) {
+ Py_DECREF(tup);
return NULL;
}
repr = PyObject_Repr(val);
--
1.8.3.1

View File

@ -1,34 +0,0 @@
From 710dcfd2f4bee034894a39026388f9c21ea976f1 Mon Sep 17 00:00:00 2001
From: stratakis <cstratak@redhat.com>
Date: Mon, 4 Mar 2019 16:40:25 +0100
Subject: [PATCH 349/362] [2.7] bpo-13096: Fix memory leak in ctypes POINTER
handling of large values (GH-12100)
---
Misc/NEWS.d/next/Library/2019-03-04-16-13-01.bpo-13096.SGPt_n.rst | 1 +
Modules/_ctypes/callproc.c | 1 +
2 files changed, 2 insertions(+)
create mode 100644 Misc/NEWS.d/next/Library/2019-03-04-16-13-01.bpo-13096.SGPt_n.rst
diff --git a/Misc/NEWS.d/next/Library/2019-03-04-16-13-01.bpo-13096.SGPt_n.rst b/Misc/NEWS.d/next/Library/2019-03-04-16-13-01.bpo-13096.SGPt_n.rst
new file mode 100644
index 0000000..2bf49c8
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2019-03-04-16-13-01.bpo-13096.SGPt_n.rst
@@ -0,0 +1 @@
+Fix memory leak in ctypes POINTER handling of large values.
diff --git a/Modules/_ctypes/callproc.c b/Modules/_ctypes/callproc.c
index 2097342..defcde1 100644
--- a/Modules/_ctypes/callproc.c
+++ b/Modules/_ctypes/callproc.c
@@ -1831,6 +1831,7 @@ POINTER(PyObject *self, PyObject *cls)
"s(O){}",
buf,
&PyCPointer_Type);
+ PyMem_Free(buf);
if (result == NULL)
return result;
key = PyLong_FromVoidPtr(result);
--
1.8.3.1

View File

@ -1,75 +0,0 @@
From 84b5ac9ba6fd71ba9d0ef98e2a166a35189b263f Mon Sep 17 00:00:00 2001
From: Christian Heimes <christian@python.org>
Date: Mon, 4 Mar 2019 18:10:45 +0100
Subject: [PATCH 350/362] [2.7] bpo-36179: Fix ref leaks in _hashopenssl
(GH-12158) (GH-12166)
Fix two unlikely reference leaks in _hashopenssl. The leaks only occur in
out-of-memory cases. Thanks to Charalampos Stratakis.
Signed-off-by: Christian Heimes <christian@python.org>
https://bugs.python.org/issue36179.
(cherry picked from commit b7bc283ab6a23ee98784400ebffe7fe410232a2e)
Co-authored-by: Christian Heimes <christian@python.org>
https://bugs.python.org/issue36179
---
.../next/Library/2019-03-04-10-42-46.bpo-36179.jEyuI-.rst | 2 ++
Modules/_hashopenssl.c | 14 ++++++++------
2 files changed, 10 insertions(+), 6 deletions(-)
create mode 100644 Misc/NEWS.d/next/Library/2019-03-04-10-42-46.bpo-36179.jEyuI-.rst
diff --git a/Misc/NEWS.d/next/Library/2019-03-04-10-42-46.bpo-36179.jEyuI-.rst b/Misc/NEWS.d/next/Library/2019-03-04-10-42-46.bpo-36179.jEyuI-.rst
new file mode 100644
index 0000000..61a9877
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2019-03-04-10-42-46.bpo-36179.jEyuI-.rst
@@ -0,0 +1,2 @@
+Fix two unlikely reference leaks in _hashopenssl. The leaks only occur in
+out-of-memory cases.
diff --git a/Modules/_hashopenssl.c b/Modules/_hashopenssl.c
index de69f6f..78445eb 100644
--- a/Modules/_hashopenssl.c
+++ b/Modules/_hashopenssl.c
@@ -133,12 +133,6 @@ newEVPobject(PyObject *name)
if (retval == NULL)
return NULL;
- retval->ctx = EVP_MD_CTX_new();
- if (retval->ctx == NULL) {
- PyErr_NoMemory();
- return NULL;
- }
-
/* save the name for .name to return */
Py_INCREF(name);
retval->name = name;
@@ -146,6 +140,13 @@ newEVPobject(PyObject *name)
retval->lock = NULL;
#endif
+ retval->ctx = EVP_MD_CTX_new();
+ if (retval->ctx == NULL) {
+ Py_DECREF(retval);
+ PyErr_NoMemory();
+ return NULL;
+ }
+
return retval;
}
@@ -205,6 +206,7 @@ EVP_copy(EVPobject *self, PyObject *unused)
return NULL;
if (!locked_EVP_MD_CTX_copy(newobj->ctx, self)) {
+ Py_DECREF(newobj);
return _setException(PyExc_ValueError);
}
return (PyObject *)newobj;
--
1.8.3.1

View File

@ -1,49 +0,0 @@
From d9bf7f4198871132714cfe7d702baaa02206e9f1 Mon Sep 17 00:00:00 2001
From: "T. Wouters" <thomas@python.org>
Date: Mon, 4 Mar 2019 10:52:07 -0800
Subject: [PATCH 351/362] [2.7] bpo-36149 Fix potential use of uninitialized
memory in cPickle (#12105)
Fix off-by-one bug in cPickle that caused it to use uninitialised memory on truncated pickles read from FILE*s.
---
.../2019-02-28-13-52-18.bpo-36149.GJdnh4.rst | 2 ++
Modules/cPickle.c | 13 ++++++++-----
2 files changed, 10 insertions(+), 5 deletions(-)
create mode 100644 Misc/NEWS.d/next/Core and Builtins/2019-02-28-13-52-18.bpo-36149.GJdnh4.rst
diff --git a/Misc/NEWS.d/next/Core and Builtins/2019-02-28-13-52-18.bpo-36149.GJdnh4.rst b/Misc/NEWS.d/next/Core and Builtins/2019-02-28-13-52-18.bpo-36149.GJdnh4.rst
new file mode 100644
index 0000000..672db6c
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2019-02-28-13-52-18.bpo-36149.GJdnh4.rst
@@ -0,0 +1,2 @@
+Fix use of uninitialized memory in cPickle when reading a truncated pickle
+from a file object.
diff --git a/Modules/cPickle.c b/Modules/cPickle.c
index 914ebb3..f7c6fec 100644
--- a/Modules/cPickle.c
+++ b/Modules/cPickle.c
@@ -586,12 +586,15 @@ readline_file(Unpicklerobject *self, char **s)
while (1) {
Py_ssize_t bigger;
char *newbuf;
- for (; i < (self->buf_size - 1); i++) {
- if (feof(self->fp) ||
- (self->buf[i] = getc(self->fp)) == '\n') {
- self->buf[i + 1] = '\0';
+ while (i < (self->buf_size - 1)) {
+ int newchar = getc(self->fp);
+ if (newchar != EOF) {
+ self->buf[i++] = newchar;
+ }
+ if (newchar == EOF || newchar == '\n') {
+ self->buf[i] = '\0';
*s = self->buf;
- return i + 1;
+ return i;
}
}
if (self->buf_size > (PY_SSIZE_T_MAX >> 1)) {
--
1.8.3.1

View File

@ -1,41 +0,0 @@
From b2aefd77e1da438aed649d018d6aa504ec35eac8 Mon Sep 17 00:00:00 2001
From: stratakis <cstratak@redhat.com>
Date: Wed, 6 Mar 2019 15:11:56 +0100
Subject: [PATCH 353/362] [2.7] bpo-36186: Fix
linuxaudiodev.linux_audio_device() error handling (GH-12163)
Fix linuxaudiodev.linux_audio_device() error handling:
close the internal file descriptor if it fails to open the device.
---
Misc/NEWS.d/next/Library/2019-03-04-16-39-16.bpo-36186.Hqw1A_.rst | 1 +
Modules/linuxaudiodev.c | 2 ++
2 files changed, 3 insertions(+)
create mode 100644 Misc/NEWS.d/next/Library/2019-03-04-16-39-16.bpo-36186.Hqw1A_.rst
diff --git a/Misc/NEWS.d/next/Library/2019-03-04-16-39-16.bpo-36186.Hqw1A_.rst b/Misc/NEWS.d/next/Library/2019-03-04-16-39-16.bpo-36186.Hqw1A_.rst
new file mode 100644
index 0000000..a14d155
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2019-03-04-16-39-16.bpo-36186.Hqw1A_.rst
@@ -0,0 +1 @@
+Fix linuxaudiodev.linux_audio_device() error handling: close the internal file descriptor if it fails to open the device.
diff --git a/Modules/linuxaudiodev.c b/Modules/linuxaudiodev.c
index 7fe20ae..f5135d9 100644
--- a/Modules/linuxaudiodev.c
+++ b/Modules/linuxaudiodev.c
@@ -126,10 +126,12 @@ newladobject(PyObject *arg)
}
if (imode == O_WRONLY && ioctl(fd, SNDCTL_DSP_NONBLOCK, NULL) == -1) {
PyErr_SetFromErrnoWithFilename(LinuxAudioError, basedev);
+ close(fd);
return NULL;
}
if (ioctl(fd, SNDCTL_DSP_GETFMTS, &afmts) == -1) {
PyErr_SetFromErrnoWithFilename(LinuxAudioError, basedev);
+ close(fd);
return NULL;
}
/* Create and initialize the object */
--
1.8.3.1

View File

@ -1,54 +0,0 @@
From 098b139816f379271b8d4de2561b5805dd47d229 Mon Sep 17 00:00:00 2001
From: stratakis <cstratak@redhat.com>
Date: Wed, 6 Mar 2019 15:14:06 +0100
Subject: [PATCH 354/362] bpo-36147: Fix a memory leak in ctypes s_get()
(GH-12102)
The s_get() function leaks the result variable on low memory.
Partially backport commit 19b52545df898ec911c44e29f75badb902924c0
to fix it.
---
Modules/_ctypes/cfield.c | 26 +++++++++-----------------
1 file changed, 9 insertions(+), 17 deletions(-)
diff --git a/Modules/_ctypes/cfield.c b/Modules/_ctypes/cfield.c
index 46f041b..1b495fc 100644
--- a/Modules/_ctypes/cfield.c
+++ b/Modules/_ctypes/cfield.c
@@ -1291,24 +1291,16 @@ U_set(void *ptr, PyObject *value, Py_ssize_t length)
static PyObject *
s_get(void *ptr, Py_ssize_t size)
{
- PyObject *result;
- size_t slen;
+ Py_ssize_t i;
+ char *p;
- result = PyString_FromString((char *)ptr);
- if (!result)
- return NULL;
- /* chop off at the first NUL character, if any.
- * On error, result will be deallocated and set to NULL.
- */
- slen = strlen(PyString_AS_STRING(result));
- size = min(size, (Py_ssize_t)slen);
- if (result->ob_refcnt == 1) {
- /* shorten the result */
- _PyString_Resize(&result, size);
- return result;
- } else
- /* cannot shorten the result */
- return PyString_FromStringAndSize(ptr, size);
+ p = (char *)ptr;
+ for (i = 0; i < size; ++i) {
+ if (*p++ == '\0')
+ break;
+ }
+
+ return PyBytes_FromStringAndSize((char *)ptr, (Py_ssize_t)i);
}
static PyObject *
--
1.8.3.1

View File

@ -1,30 +0,0 @@
From b19943ec97b80db97dd93ed714615f757cc12ad3 Mon Sep 17 00:00:00 2001
From: "Miss Islington (bot)"
<31488909+miss-islington@users.noreply.github.com>
Date: Thu, 7 Mar 2019 10:49:15 -0800
Subject: [PATCH 357/362] bpo-36140: Fix an incorrect check in
msidb_getsummaryinformation() (GH-12074)
(cherry picked from commit bf94cc7b496a379e1f604aa2e4080bb70ca4020e)
Co-authored-by: Zackery Spytz <zspytz@gmail.com>
---
PC/_msi.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/PC/_msi.c b/PC/_msi.c
index 4000f00..3c46d83 100644
--- a/PC/_msi.c
+++ b/PC/_msi.c
@@ -894,7 +894,7 @@ msidb_getsummaryinformation(msiobj *db, PyObject *args)
return msierror(status);
oresult = PyObject_NEW(struct msiobj, &summary_Type);
- if (!result) {
+ if (!oresult) {
MsiCloseHandle(result);
return NULL;
}
--
1.8.3.1

View File

@ -1,27 +0,0 @@
From 498468d9c3f53d9cfdd79cf1dc83251316d6d3df Mon Sep 17 00:00:00 2001
From: Terry Jan Reedy <tjreedy@udel.edu>
Date: Thu, 7 Mar 2019 22:16:07 -0500
Subject: [PATCH 358/362] [2.7] IDLE: Fix typo in keybindingDialog.py (GH-2322)
(GH-12231)
Cherry picked by hand from a0e911b
---
Lib/idlelib/keybindingDialog.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Lib/idlelib/keybindingDialog.py b/Lib/idlelib/keybindingDialog.py
index 755f1af..9713c79 100644
--- a/Lib/idlelib/keybindingDialog.py
+++ b/Lib/idlelib/keybindingDialog.py
@@ -182,7 +182,7 @@ class GetKeysDialog(Toplevel):
def LoadFinalKeyList(self):
#these tuples are also available for use in validity checks
- self.functionKeys=('F1','F2','F2','F4','F5','F6','F7','F8','F9',
+ self.functionKeys=('F1','F2','F3','F4','F5','F6','F7','F8','F9',
'F10','F11','F12')
self.alphanumKeys=tuple(string.ascii_lowercase+string.digits)
self.punctuationKeys=tuple('~!@#%^&*()_-+={}[]|;:,.<>/?')
--
1.8.3.1

View File

@ -1,101 +0,0 @@
diff -uNrp a/Lib/cookielib.py b/Lib/cookielib.py
--- a/Lib/cookielib.py 2019-12-21 16:06:12.476000000 +0800
+++ b/Lib/cookielib.py 2019-12-21 16:09:31.556000000 +0800
@@ -1139,6 +1139,12 @@ class DefaultCookiePolicy(CookiePolicy):
req_host, erhn = eff_request_host(request)
domain = cookie.domain
+ if domain and not domain.startswith("."):
+ dotdomain = "." + domain
+ else:
+ dotdomain = domain
+
+
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
if (cookie.version == 0 and
(self.strict_ns_domain & self.DomainStrictNonDomain) and
@@ -1151,7 +1157,7 @@ class DefaultCookiePolicy(CookiePolicy):
_debug(" effective request-host name %s does not domain-match "
"RFC 2965 cookie domain %s", erhn, domain)
return False
- if cookie.version == 0 and not ("."+erhn).endswith(domain):
+ if cookie.version == 0 and not ("."+erhn).endswith(dotdomain):
_debug(" request-host %s does not match Netscape cookie domain "
"%s", req_host, domain)
return False
@@ -1165,7 +1171,11 @@ class DefaultCookiePolicy(CookiePolicy):
req_host = "."+req_host
if not erhn.startswith("."):
erhn = "."+erhn
- if not (req_host.endswith(domain) or erhn.endswith(domain)):
+ if domain and not domain.startswith("."):
+ dotdomain = "." + domain
+ else:
+ dotdomain = domain
+ if not (req_host.endswith(dotdomain) or erhn.endswith(dotdomain)):
#_debug(" request domain %s does not match cookie domain %s",
# req_host, domain)
return False
diff -uNrp a/Lib/test/test_cookielib.py b/Lib/test/test_cookielib.py
--- a/Lib/test/test_cookielib.py 2019-12-21 16:06:12.640000000 +0800
+++ b/Lib/test/test_cookielib.py 2019-12-21 16:11:53.888000000 +0800
@@ -368,6 +368,7 @@ class CookieTests(TestCase):
("http://foo.bar.com/", ".foo.bar.com", True),
("http://foo.bar.com/", "foo.bar.com", True),
("http://foo.bar.com/", ".bar.com", True),
+ ("http://foo.bar.com/", "bar.com", True),
("http://foo.bar.com/", "com", True),
("http://foo.com/", "rhubarb.foo.com", False),
("http://foo.com/", ".foo.com", True),
@@ -378,6 +379,8 @@ class CookieTests(TestCase):
("http://foo/", "foo", True),
("http://foo/", "foo.local", True),
("http://foo/", ".local", True),
+ ("http://barfoo.com", ".foo.com", False),
+ ("http://barfoo.com", "foo.com", False),
]:
request = urllib2.Request(url)
r = pol.domain_return_ok(domain, request)
@@ -938,6 +941,34 @@ class CookieTests(TestCase):
c.add_cookie_header(req)
self.assertFalse(req.has_header("Cookie"))
+ c.clear()
+
+ pol.set_blocked_domains([])
+ req = Request("http://acme.com/")
+ res = FakeResponse(headers, "http://acme.com/")
+ cookies = c.make_cookies(res, req)
+ c.extract_cookies(res, req)
+ self.assertEqual(len(c), 1)
+
+ req = Request("http://acme.com/")
+ c.add_cookie_header(req)
+ self.assertTrue(req.has_header("Cookie"))
+
+ req = Request("http://badacme.com/")
+ c.add_cookie_header(req)
+ self.assertFalse(pol.return_ok(cookies[0], req))
+ self.assertFalse(req.has_header("Cookie"))
+
+ p = pol.set_blocked_domains(["acme.com"])
+ req = Request("http://acme.com/")
+ c.add_cookie_header(req)
+ self.assertFalse(req.has_header("Cookie"))
+
+ req = Request("http://badacme.com/")
+ c.add_cookie_header(req)
+ self.assertFalse(req.has_header("Cookie"))
+
+
def test_secure(self):
from cookielib import CookieJar, DefaultCookiePolicy
diff -uNrp a/Misc/NEWS.d/next/Security/2019-05-20-00-35-12.bpo-35121.RRi-HU.rst b/Misc/NEWS.d/next/Security/2019-05-20-00-35-12.bpo-35121.RRi-HU.rst
--- a/Misc/NEWS.d/next/Security/2019-05-20-00-35-12.bpo-35121.RRi-HU.rst 1970-01-01 08:00:00.000000000 +0800
+++ b/Misc/NEWS.d/next/Security/2019-05-20-00-35-12.bpo-35121.RRi-HU.rst 2019-12-21 16:12:17.416000000 +0800
@@ -0,0 +1,4 @@
+Don't send cookies of domain A without Domain attribute to domain B when
+domain A is a suffix match of domain B while using a cookiejar with
+:class:`cookielib.DefaultCookiePolicy` policy. Patch by Karthikeyan
+Singaravelan.

View File

@ -1,45 +0,0 @@
diff -uNrp a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
--- a/Lib/test/test_urlparse.py 2019-12-21 15:41:32.172000000 +0800
+++ b/Lib/test/test_urlparse.py 2019-12-21 15:44:28.316000000 +0800
@@ -641,6 +641,12 @@ class UrlParseTestCase(unittest.TestCase
self.assertIn(u'\u2100', denorm_chars)
self.assertIn(u'\uFF03', denorm_chars)
+ # bpo-36742: Verify port separators are ignored when they
+ # existed prior to decomposition
+ urlparse.urlsplit(u'http://\u30d5\u309a:80')
+ with self.assertRaises(ValueError):
+ urlparse.urlsplit(u'http://\u30d5\u309a\ufe1380')
+
for scheme in [u"http", u"https", u"ftp"]:
for c in denorm_chars:
url = u"{}://netloc{}false.netloc/path".format(scheme, c)
diff -uNrp a/Lib/urlparse.py b/Lib/urlparse.py
--- a/Lib/urlparse.py 2019-12-21 15:41:32.080000000 +0800
+++ b/Lib/urlparse.py 2019-12-21 15:46:11.480000000 +0800
@@ -171,13 +171,17 @@ def _checknetloc(netloc):
# looking for characters like \u2100 that expand to 'a/c'
# IDNA uses NFKC equivalence, so normalize for this check
import unicodedata
- netloc2 = unicodedata.normalize('NFKC', netloc)
- if netloc == netloc2:
+ n = netloc.rpartition('@')[2] # ignore anything to the left of '@'
+ n = n.replace(':', '') # ignore characters already included
+ n = n.replace('#', '') # but not the surrounding text
+ n = n.replace('?', '')
+ netloc2 = unicodedata.normalize('NFKC', n)
+ if n == netloc2:
return
_, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
for c in '/?#@:':
if c in netloc2:
- raise ValueError("netloc '" + netloc2 + "' contains invalid " +
+ raise ValueError("netloc '" + netloc + "' contains invalid " +
"characters under NFKC normalization")
def urlsplit(url, scheme='', allow_fragments=True):
diff -uNrp a/Misc/NEWS.d/next/Security/2019-04-29-15-34-59.bpo-36742.QCUY0i.rst b/Misc/NEWS.d/next/Security/2019-04-29-15-34-59.bpo-36742.QCUY0i.rst
--- a/Misc/NEWS.d/next/Security/2019-04-29-15-34-59.bpo-36742.QCUY0i.rst 1970-01-01 08:00:00.000000000 +0800
+++ b/Misc/NEWS.d/next/Security/2019-04-29-15-34-59.bpo-36742.QCUY0i.rst 2019-12-21 15:53:31.188000000 +0800
@@ -0,0 +1 @@
+Fixes mishandling of pre-normalization characters in urlsplit().

View File

@ -1,50 +0,0 @@
diff -uNrp a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
--- a/Lib/test/test_urlparse.py 2019-12-21 15:54:46.576000000 +0800
+++ b/Lib/test/test_urlparse.py 2019-12-21 15:56:22.440000000 +0800
@@ -648,11 +648,13 @@ class UrlParseTestCase(unittest.TestCase
urlparse.urlsplit(u'http://\u30d5\u309a\ufe1380')
for scheme in [u"http", u"https", u"ftp"]:
- for c in denorm_chars:
- url = u"{}://netloc{}false.netloc/path".format(scheme, c)
- print "Checking %r" % url
- with self.assertRaises(ValueError):
- urlparse.urlsplit(url)
+ for netloc in [u"netloc{}false.netloc", u"n{}user@netloc"]:
+ for c in denorm_chars:
+ url = u"{}://{}/path".format(scheme, netloc.format(c))
+ if test_support.verbose:
+ print "Checking %r" % url
+ with self.assertRaises(ValueError):
+ urlparse.urlsplit(url)
def test_main():
test_support.run_unittest(UrlParseTestCase)
diff -uNrp a/Lib/urlparse.py b/Lib/urlparse.py
--- a/Lib/urlparse.py 2019-12-21 15:54:46.344000000 +0800
+++ b/Lib/urlparse.py 2019-12-21 15:57:41.260000000 +0800
@@ -171,18 +171,18 @@ def _checknetloc(netloc):
# looking for characters like \u2100 that expand to 'a/c'
# IDNA uses NFKC equivalence, so normalize for this check
import unicodedata
- n = netloc.rpartition('@')[2] # ignore anything to the left of '@'
- n = n.replace(':', '') # ignore characters already included
- n = n.replace('#', '') # but not the surrounding text
- n = n.replace('?', '')
+ n = netloc.replace(u'@', u'') # ignore characters already included
+ n = n.replace(u':', u'') # but not the surrounding text
+ n = n.replace(u'#', u'')
+ n = n.replace(u'?', u'')
netloc2 = unicodedata.normalize('NFKC', n)
if n == netloc2:
return
_, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
for c in '/?#@:':
if c in netloc2:
- raise ValueError("netloc '" + netloc + "' contains invalid " +
- "characters under NFKC normalization")
+ raise ValueError(u"netloc '" + netloc + u"' contains invalid " +
+ u"characters under NFKC normalization")
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:

View File

@ -1,41 +0,0 @@
diff -uNrp a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
--- a/Lib/test/test_urlparse.py 2019-12-21 15:58:00.556000000 +0800
+++ b/Lib/test/test_urlparse.py 2019-12-21 15:59:11.456000000 +0800
@@ -656,6 +656,15 @@ class UrlParseTestCase(unittest.TestCase
with self.assertRaises(ValueError):
urlparse.urlsplit(url)
+ # check error message: invalid netloc must be formatted with repr()
+ # to get an ASCII error message
+ with self.assertRaises(ValueError) as cm:
+ urlparse.urlsplit(u'http://example.com\uFF03@bing.com')
+ self.assertEqual(str(cm.exception),
+ "netloc u'example.com\\uff03@bing.com' contains invalid characters "
+ "under NFKC normalization")
+ self.assertIsInstance(cm.exception.args[0], str)
+
def test_main():
test_support.run_unittest(UrlParseTestCase)
diff -uNrp a/Lib/urlparse.py b/Lib/urlparse.py
--- a/Lib/urlparse.py 2019-12-21 15:58:00.480000000 +0800
+++ b/Lib/urlparse.py 2019-12-21 15:59:55.128000000 +0800
@@ -181,8 +181,9 @@ def _checknetloc(netloc):
_, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
for c in '/?#@:':
if c in netloc2:
- raise ValueError(u"netloc '" + netloc + u"' contains invalid " +
- u"characters under NFKC normalization")
+ raise ValueError("netloc %r contains invalid characters "
+ "under NFKC normalization"
+ % netloc)
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
diff -uNrp a/Misc/NEWS.d/next/Library/2019-06-10-12-02-45.bpo-36742.UEdHXJ.rst b/Misc/NEWS.d/next/Library/2019-06-10-12-02-45.bpo-36742.UEdHXJ.rst
--- a/Misc/NEWS.d/next/Library/2019-06-10-12-02-45.bpo-36742.UEdHXJ.rst 1970-01-01 08:00:00.000000000 +0800
+++ b/Misc/NEWS.d/next/Library/2019-06-10-12-02-45.bpo-36742.UEdHXJ.rst 2019-12-21 16:00:40.480000000 +0800
@@ -0,0 +1,3 @@
+:func:`urlparse.urlsplit` error message for invalid ``netloc`` according to
+NFKC normalization is now a :class:`str` string, rather than a
+:class:`unicode` string, to prevent error when displaying the error.

View File

@ -1,57 +0,0 @@
diff -uNrp a/Lib/email/_parseaddr.py b/Lib/email/_parseaddr.py
--- a/Lib/email/_parseaddr.py 2019-12-21 16:01:21.340000000 +0800
+++ b/Lib/email/_parseaddr.py 2019-12-21 16:03:22.108000000 +0800
@@ -336,7 +336,12 @@ class AddrlistClass:
aslist.append('@')
self.pos += 1
self.gotonext()
- return EMPTYSTRING.join(aslist) + self.getdomain()
+ domain = self.getdomain()
+ if not domain:
+ # Invalid domain, return an empty address instead of returning a
+ # local part to denote failed parsing.
+ return EMPTYSTRING
+ return EMPTYSTRING.join(aslist) + domain
def getdomain(self):
"""Get the complete domain name from an address."""
@@ -351,6 +356,10 @@ class AddrlistClass:
elif self.field[self.pos] == '.':
self.pos += 1
sdlist.append('.')
+ elif self.field[self.pos] == '@':
+ # bpo-34155: Don't parse domains with two `@` like
+ # `a@malicious.org@important.com`.
+ return EMPTYSTRING
elif self.field[self.pos] in self.atomends:
break
else:
diff -uNrp a/Lib/email/test/test_email.py b/Lib/email/test/test_email.py
--- a/Lib/email/test/test_email.py 2019-12-21 16:01:21.344000000 +0800
+++ b/Lib/email/test/test_email.py 2019-12-21 16:04:40.564000000 +0800
@@ -2306,6 +2306,20 @@ class TestMiscellaneous(TestEmailBase):
self.assertEqual(Utils.parseaddr('<>'), ('', ''))
self.assertEqual(Utils.formataddr(Utils.parseaddr('<>')), '')
+ def test_parseaddr_multiple_domains(self):
+ self.assertEqual(
+ Utils.parseaddr('a@b@c'),
+ ('', '')
+ )
+ self.assertEqual(
+ Utils.parseaddr('a@b.c@c'),
+ ('', '')
+ )
+ self.assertEqual(
+ Utils.parseaddr('a@172.17.0.1@c'),
+ ('', '')
+ )
+
def test_noquote_dump(self):
self.assertEqual(
Utils.formataddr(('A Silly Person', 'person@dom.ain')),
diff -uNrp a/Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst b/Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst
--- a/Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst 1970-01-01 08:00:00.000000000 +0800
+++ b/Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst 2019-12-21 16:05:05.292000000 +0800
@@ -0,0 +1 @@
+Fix parsing of invalid email addresses with more than one ``@`` (e.g. a@b@c.com.) to not return the part before 2nd ``@`` as valid email address. Patch by maxking & jpic.

View File

@ -1,74 +0,0 @@
diff -uNrp a/Lib/DocXMLRPCServer.py b/Lib/DocXMLRPCServer.py
--- a/Lib/DocXMLRPCServer.py 2019-12-21 16:13:25.240000000 +0800
+++ b/Lib/DocXMLRPCServer.py 2019-12-21 16:15:24.076000000 +0800
@@ -20,6 +20,15 @@ from SimpleXMLRPCServer import (SimpleXM
CGIXMLRPCRequestHandler,
resolve_dotted_attribute)
+def _html_escape_quote(s):
+ s = s.replace("&", "&amp;") # Must be done first!
+ s = s.replace("<", "&lt;")
+ s = s.replace(">", "&gt;")
+ s = s.replace('"', "&quot;")
+ s = s.replace('\'', "&#x27;")
+ return s
+
+
class ServerHTMLDoc(pydoc.HTMLDoc):
"""Class used to generate pydoc HTML document for a server"""
@@ -210,7 +219,9 @@ class XMLRPCDocGenerator:
methods
)
- return documenter.page(self.server_title, documentation)
+ title = _html_escape_quote(self.server_title)
+ return documenter.page(title, documentation)
+
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
"""XML-RPC and documentation request handler class.
diff -uNrp a/Lib/test/test_docxmlrpc.py b/Lib/test/test_docxmlrpc.py
--- a/Lib/test/test_docxmlrpc.py 2019-12-21 16:13:25.340000000 +0800
+++ b/Lib/test/test_docxmlrpc.py 2019-12-21 16:16:49.828000000 +0800
@@ -1,5 +1,6 @@
from DocXMLRPCServer import DocXMLRPCServer
import httplib
+import re
import sys
from test import test_support
threading = test_support.import_module('threading')
@@ -176,6 +177,26 @@ class DocXMLRPCHTTPGETServer(unittest.Te
self.assertIn("""Try&nbsp;self.<strong>add</strong>,&nbsp;too.""",
response.read())
+ def test_server_title_escape(self):
+ """Test that the server title and documentation
+ are escaped for HTML.
+ """
+ self.serv.set_server_title('test_title<script>')
+ self.serv.set_server_documentation('test_documentation<script>')
+ self.assertEqual('test_title<script>', self.serv.server_title)
+ self.assertEqual('test_documentation<script>',
+ self.serv.server_documentation)
+
+ generated = self.serv.generate_html_documentation()
+ title = re.search(r'<title>(.+?)</title>', generated).group()
+ documentation = re.search(r'<p><tt>(.+?)</tt></p>', generated).group()
+ self.assertEqual('<title>Python: test_title&lt;script&gt;</title>',
+ title)
+ self.assertEqual('<p><tt>test_documentation&lt;script&gt;</tt></p>',
+ documentation)
+
+
+
def test_main():
test_support.run_unittest(DocXMLRPCHTTPGETServer)
diff -uNrp a/Misc/NEWS.d/next/Security/2019-09-25-13-21-09.bpo-38243.1pfz24.rst b/Misc/NEWS.d/next/Security/2019-09-25-13-21-09.bpo-38243.1pfz24.rst
--- a/Misc/NEWS.d/next/Security/2019-09-25-13-21-09.bpo-38243.1pfz24.rst 1970-01-01 08:00:00.000000000 +0800
+++ b/Misc/NEWS.d/next/Security/2019-09-25-13-21-09.bpo-38243.1pfz24.rst 2019-12-21 16:17:13.368000000 +0800
@@ -0,0 +1,3 @@
+Escape the server title of :class:`DocXMLRPCServer.DocXMLRPCServer`
+when rendering the document page as HTML.
+(Contributed by Dong-hee Na in :issue:`38243`.)

View File

@ -1,128 +0,0 @@
diff -uNrp a/Doc/library/urlparse.rst b/Doc/library/urlparse.rst
--- a/Doc/library/urlparse.rst 2019-12-21 15:21:09.836000000 +0800
+++ b/Doc/library/urlparse.rst 2019-12-21 15:34:37.812000000 +0800
@@ -119,12 +119,22 @@ The :mod:`urlparse` module defines the f
See section :ref:`urlparse-result-object` for more information on the result
object.
+ Characters in the :attr:`netloc` attribute that decompose under NFKC
+ normalization (as used by the IDNA encoding) into any of ``/``, ``?``,
+ ``#``, ``@``, or ``:`` will raise a :exc:`ValueError`. If the URL is
+ decomposed before parsing, or is not a Unicode string, no error will be
+ raised.
+
.. versionchanged:: 2.5
Added attributes to return value.
.. versionchanged:: 2.7
Added IPv6 URL parsing capabilities.
+ .. versionchanged:: 2.7.17
+ Characters that affect netloc parsing under NFKC normalization will
+ now raise :exc:`ValueError`.
+
.. function:: parse_qs(qs[, keep_blank_values[, strict_parsing[, max_num_fields]]])
@@ -232,11 +242,21 @@ The :mod:`urlparse` module defines the f
See section :ref:`urlparse-result-object` for more information on the result
object.
+ Characters in the :attr:`netloc` attribute that decompose under NFKC
+ normalization (as used by the IDNA encoding) into any of ``/``, ``?``,
+ ``#``, ``@``, or ``:`` will raise a :exc:`ValueError`. If the URL is
+ decomposed before parsing, or is not a Unicode string, no error will be
+ raised.
+
.. versionadded:: 2.2
.. versionchanged:: 2.5
Added attributes to return value.
+ .. versionchanged:: 2.7.17
+ Characters that affect netloc parsing under NFKC normalization will
+ now raise :exc:`ValueError`.
+
.. function:: urlunsplit(parts)
diff -uNrp a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
--- a/Lib/test/test_urlparse.py 2019-12-21 15:21:09.456000000 +0800
+++ b/Lib/test/test_urlparse.py 2019-12-21 15:37:03.840000000 +0800
@@ -1,4 +1,6 @@
from test import test_support
+import sys
+import unicodedata
import unittest
import urlparse
@@ -623,6 +625,28 @@ class UrlParseTestCase(unittest.TestCase
self.assertEqual(urlparse.urlparse("https:"),('https','','','','',''))
self.assertEqual(urlparse.urlparse("http://www.python.org:80"),
('http','www.python.org:80','','','',''))
+
+ def test_urlsplit_normalization(self):
+ # Certain characters should never occur in the netloc,
+ # including under normalization.
+ # Ensure that ALL of them are detected and cause an error
+ illegal_chars = u'/:#?@'
+ hex_chars = {'{:04X}'.format(ord(c)) for c in illegal_chars}
+ denorm_chars = [
+ c for c in map(unichr, range(128, sys.maxunicode))
+ if (hex_chars & set(unicodedata.decomposition(c).split()))
+ and c not in illegal_chars
+ ]
+ # Sanity check that we found at least one such character
+ self.assertIn(u'\u2100', denorm_chars)
+ self.assertIn(u'\uFF03', denorm_chars)
+
+ for scheme in [u"http", u"https", u"ftp"]:
+ for c in denorm_chars:
+ url = u"{}://netloc{}false.netloc/path".format(scheme, c)
+ print "Checking %r" % url
+ with self.assertRaises(ValueError):
+ urlparse.urlsplit(url)
def test_main():
test_support.run_unittest(UrlParseTestCase)
diff -uNrp a/Lib/urlparse.py b/Lib/urlparse.py
--- a/Lib/urlparse.py 2019-12-21 15:21:09.304000000 +0800
+++ b/Lib/urlparse.py 2019-12-21 15:39:24.324000000 +0800
@@ -165,6 +165,21 @@ def _splitnetloc(url, start=0):
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
+def _checknetloc(netloc):
+ if not netloc or not isinstance(netloc, unicode):
+ return
+ # looking for characters like \u2100 that expand to 'a/c'
+ # IDNA uses NFKC equivalence, so normalize for this check
+ import unicodedata
+ netloc2 = unicodedata.normalize('NFKC', netloc)
+ if netloc == netloc2:
+ return
+ _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
+ for c in '/?#@:':
+ if c in netloc2:
+ raise ValueError("netloc '" + netloc2 + "' contains invalid " +
+ "characters under NFKC normalization")
+
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
@@ -193,6 +208,7 @@ def urlsplit(url, scheme='', allow_fragm
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
+ _checknetloc(netloc)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
@@ -216,6 +232,7 @@ def urlsplit(url, scheme='', allow_fragm
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
+ _checknetloc(netloc)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v

92
CVE-2019-9674.patch Normal file
View File

@ -0,0 +1,92 @@
From 2591fad87cefdf217fd77bf7bb9d485ced15fd8c Mon Sep 17 00:00:00 2001
From: JunWei Song <sungboss2004@gmail.com>
Date: Wed, 11 Sep 2019 23:04:12 +0800
Subject: [PATCH] bpo-36260: Add pitfalls to zipfile module documentation
(#13378)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* bpo-36260: Add pitfalls to zipfile module documentation
We saw vulnerability warning description (including zip bomb) in Doc/library/xml.rst file.
This gave us the idea of documentation improvement.
So, we moved a little bit forward :P
And the doc patch can be found (pr).
* fix trailing whitespace
* 📜🤖 Added by blurb_it.
* Reformat text for consistency.
(cherry picked from commit 3ba51d587f6897a45301ce9126300c14fcd4eba2)
Signed-off-by: Yufa Fang <fangyufa1@huawei.com>
---
Doc/library/zipfile.rst | 40 +++++++++++++++++++
.../2019-06-04-09-29-00.bpo-36260.WrGuc-.rst | 1 +
2 files changed, 41 insertions(+)
create mode 100644 Misc/NEWS.d/next/Documentation/2019-06-04-09-29-00.bpo-36260.WrGuc-.rst
diff --git a/Doc/library/zipfile.rst b/Doc/library/zipfile.rst
index ba613b3f8e..77a29fbccd 100644
--- a/Doc/library/zipfile.rst
+++ b/Doc/library/zipfile.rst
@@ -553,5 +553,45 @@ Command-line options
Test whether the zipfile is valid or not.
+Decompression pitfalls
+----------------------
+
+The extraction in zipfile module might fail due to some pitfalls listed below.
+
+From file itself
+~~~~~~~~~~~~~~~~
+
+Decompression may fail due to incorrect password / CRC checksum / ZIP format or
+unsupported compression method / decryption.
+
+File System limitations
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Exceeding limitations on different file systems can cause decompression to fail.
+Such limitations include allowable characters in the directory entries, length of the file name,
+length of the pathname, size of a single file, and number of files, etc.
+
+Resources limitations
+~~~~~~~~~~~~~~~~~~~~~
+
+The lack of memory or disk volume would lead to decompression
+failure. For example, decompression bombs (aka `ZIP bomb`_)
+can apply to the zipfile library and cause disk volume exhaustion.
+
+Interruption
+~~~~~~~~~~~~
+
+Interruption during the decompression, such as pressing control-C or killing the
+decompression process may result in incomplete decompression of the archive.
+
+Default behaviors of extraction
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Not knowing the default extraction behaviors
+can cause unexpected decompression results.
+For example, when extracting the same archive twice,
+it overwrites files without asking.
+
+.. _ZIP bomb: https://en.wikipedia.org/wiki/Zip_bomb
.. _PKZIP Application Note: https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT
diff --git a/Misc/NEWS.d/next/Documentation/2019-06-04-09-29-00.bpo-36260.WrGuc-.rst b/Misc/NEWS.d/next/Documentation/2019-06-04-09-29-00.bpo-36260.WrGuc-.rst
new file mode 100644
index 0000000000..9276516a88
--- /dev/null
+++ b/Misc/NEWS.d/next/Documentation/2019-06-04-09-29-00.bpo-36260.WrGuc-.rst
@@ -0,0 +1 @@
+Add decompression pitfalls to zipfile module documentation.
\ No newline at end of file
--
2.19.1

View File

@ -1,221 +0,0 @@
diff --git a/Lib/httplib.py b/Lib/httplib.py
index 60a8fb4..1b41c34 100644
--- a/Lib/httplib.py
+++ b/Lib/httplib.py
@@ -247,6 +247,16 @@ _MAXHEADERS = 100
_is_legal_header_name = re.compile(r'\A[^:\s][^:\r\n]*\Z').match
_is_illegal_header_value = re.compile(r'\n(?![ \t])|\r(?![ \t\n])').search
+# These characters are not allowed within HTTP URL paths.
+# See https://tools.ietf.org/html/rfc3986#section-3.3 and the
+# https://tools.ietf.org/html/rfc3986#appendix-A pchar definition.
+# Prevents CVE-2019-9740. Includes control characters such as \r\n.
+# Restrict non-ASCII characters above \x7f (0x80-0xff).
+_contains_disallowed_url_pchar_re = re.compile('[\x00-\x20\x7f-\xff]')
+# Arguably only these _should_ be allowed:
+# _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$")
+# We are more lenient for assumed real world compatibility purposes.
+
# We always set the Content-Length header for these methods because some
# servers will otherwise respond with a 411
_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
@@ -927,6 +937,12 @@ class HTTPConnection:
self._method = method
if not url:
url = '/'
+ # Prevent CVE-2019-9740.
+ match = _contains_disallowed_url_pchar_re.search(url)
+ if match:
+ raise InvalidURL("URL can't contain control characters. %r "
+ "(found at least %r)"
+ % (url, match.group()))
hdr = '%s %s %s' % (method, url, self._http_vsn_str)
self._output(hdr)
diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
index 1ce9201..bdc6e78 100644
--- a/Lib/test/test_urllib.py
+++ b/Lib/test/test_urllib.py
@@ -9,6 +9,10 @@ import os
import sys
import mimetools
import tempfile
+try:
+ import ssl
+except ImportError:
+ ssl = None
from test import test_support
from base64 import b64encode
@@ -257,6 +261,33 @@ class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin):
finally:
self.unfakehttp()
+ @unittest.skipUnless(ssl, "ssl module required")
+ def test_url_with_control_char_rejected(self):
+ for char_no in range(0, 0x21) + range(0x7f, 0x100):
+ char = chr(char_no)
+ schemeless_url = "//localhost:7777/test%s/" % char
+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
+ try:
+ # urllib quotes the URL so there is no injection.
+ resp = urllib.urlopen("http:" + schemeless_url)
+ self.assertNotIn(char, resp.geturl())
+ finally:
+ self.unfakehttp()
+
+ @unittest.skipUnless(ssl, "ssl module required")
+ def test_url_with_newline_header_injection_rejected(self):
+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
+ host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
+ schemeless_url = "//" + host + ":8080/test/?test=a"
+ try:
+ # urllib quotes the URL so there is no injection.
+ resp = urllib.urlopen("http:" + schemeless_url)
+ self.assertNotIn(' ', resp.geturl())
+ self.assertNotIn('\r', resp.geturl())
+ self.assertNotIn('\n', resp.geturl())
+ finally:
+ self.unfakehttp()
+
def test_read_bogus(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp('''HTTP/1.1 401 Authentication Required
diff --git a/Lib/test/test_urllib2.py b/Lib/test/test_urllib2.py
index 6d24d5d..d13f86f 100644
--- a/Lib/test/test_urllib2.py
+++ b/Lib/test/test_urllib2.py
@@ -1,5 +1,5 @@
import unittest
-from test import test_support
+from test import support
from test import test_urllib
import os
@@ -15,6 +15,9 @@ try:
except ImportError:
ssl = None
+from test.test_urllib import FakeHTTPMixin
+
+
# XXX
# Request
# CacheFTPHandler (hard to write)
@@ -683,7 +686,7 @@ class HandlerTests(unittest.TestCase):
h = urllib2.FileHandler()
o = h.parent = MockOpener()
- TESTFN = test_support.TESTFN
+ TESTFN = support.TESTFN
urlpath = sanepathname2url(os.path.abspath(TESTFN))
towrite = "hello, world\n"
urls = [
@@ -1154,7 +1157,7 @@ class HandlerTests(unittest.TestCase):
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
msg = "Basic Auth Realm was unquoted"
- with test_support.check_warnings((msg, UserWarning)):
+ with support.check_warnings((msg, UserWarning)):
self._test_basic_auth(opener, auth_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
@@ -1262,7 +1265,7 @@ class HandlerTests(unittest.TestCase):
self.assertEqual(len(http_handler.requests), 1)
self.assertFalse(http_handler.requests[0].has_header(auth_header))
-class MiscTests(unittest.TestCase):
+class MiscTests(unittest.TestCase, FakeHTTPMixin):
def test_build_opener(self):
class MyHTTPHandler(urllib2.HTTPHandler): pass
@@ -1317,6 +1320,52 @@ class MiscTests(unittest.TestCase):
"Unsupported digest authentication algorithm 'invalid'"
)
+ @unittest.skipUnless(ssl, "ssl module required")
+ def test_url_with_control_char_rejected(self):
+ for char_no in range(0, 0x21) + range(0x7f, 0x100):
+ char = chr(char_no)
+ schemeless_url = "//localhost:7777/test%s/" % char
+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
+ try:
+ # We explicitly test urllib.request.urlopen() instead of the top
+ # level 'def urlopen()' function defined in this... (quite ugly)
+ # test suite. They use different url opening codepaths. Plain
+ # urlopen uses FancyURLOpener which goes via a codepath that
+ # calls urllib.parse.quote() on the URL which makes all of the
+ # above attempts at injection within the url _path_ safe.
+ escaped_char_repr = repr(char).replace('\\', r'\\')
+ InvalidURL = httplib.InvalidURL
+ with self.assertRaisesRegexp(
+ InvalidURL, "contain control.*" + escaped_char_repr):
+ urllib2.urlopen("http:" + schemeless_url)
+ with self.assertRaisesRegexp(
+ InvalidURL, "contain control.*" + escaped_char_repr):
+ urllib2.urlopen("https:" + schemeless_url)
+ finally:
+ self.unfakehttp()
+
+ @unittest.skipUnless(ssl, "ssl module required")
+ def test_url_with_newline_header_injection_rejected(self):
+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
+ host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
+ schemeless_url = "//" + host + ":8080/test/?test=a"
+ try:
+ # We explicitly test urllib.request.urlopen() instead of the top
+ # level 'def urlopen()' function defined in this... (quite ugly)
+ # test suite. They use different url opening codepaths. Plain
+ # urlopen uses FancyURLOpener which goes via a codepath that
+ # calls urllib.parse.quote() on the URL which makes all of the
+ # above attempts at injection within the url _path_ safe.
+ InvalidURL = httplib.InvalidURL
+ with self.assertRaisesRegexp(
+ InvalidURL, r"contain control.*\\r.*(found at least . .)"):
+ urllib2.urlopen("http:" + schemeless_url)
+ with self.assertRaisesRegexp(InvalidURL, r"contain control.*\\n"):
+ urllib2.urlopen("https:" + schemeless_url)
+ finally:
+ self.unfakehttp()
+
+
class RequestTests(unittest.TestCase):
@@ -1412,14 +1461,14 @@ class RequestTests(unittest.TestCase):
def test_main(verbose=None):
from test import test_urllib2
- test_support.run_doctest(test_urllib2, verbose)
- test_support.run_doctest(urllib2, verbose)
+ support.run_doctest(test_urllib2, verbose)
+ support.run_doctest(urllib2, verbose)
tests = (TrivialTests,
OpenerDirectorTests,
HandlerTests,
MiscTests,
RequestTests)
- test_support.run_unittest(*tests)
+ support.run_unittest(*tests)
if __name__ == "__main__":
test_main(verbose=True)
diff --git a/Lib/test/test_xmlrpc.py b/Lib/test/test_xmlrpc.py
index 36b3be6..90ccb30 100644
--- a/Lib/test/test_xmlrpc.py
+++ b/Lib/test/test_xmlrpc.py
@@ -659,7 +659,13 @@ class SimpleServerTestCase(BaseServerTestCase):
def test_partial_post(self):
# Check that a partial POST doesn't make the server loop: issue #14001.
conn = httplib.HTTPConnection(ADDR, PORT)
- conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye')
+ conn.send('POST /RPC2 HTTP/1.0\r\n'
+ 'Content-Length: 100\r\n\r\n'
+ 'bye HTTP/1.1\r\n'
+ 'Host: %s:%s\r\n'
+ 'Accept-Encoding: identity\r\n'
+ 'Content-Length: 0\r\n\r\n'
+ % (ADDR, PORT))
conn.close()
class SimpleServerEncodingTestCase(BaseServerTestCase):

View File

@ -1,50 +0,0 @@
From 8f99cc799e4393bf1112b9395b2342f81b3f45ef Mon Sep 17 00:00:00 2001
From: push0ebp <push0ebp@shl-MacBook-Pro.local>
Date: Thu, 14 Feb 2019 02:05:46 +0900
Subject: [PATCH] bpo-35907: Avoid file reading as disallowing the unnecessary
URL scheme in urllib
---
Lib/test/test_urllib.py | 12 ++++++++++++
Lib/urllib.py | 5 ++++-
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
index 1ce9201c0693..e5f210e62a18 100644
--- a/Lib/test/test_urllib.py
+++ b/Lib/test/test_urllib.py
@@ -1023,6 +1023,18 @@ def open_spam(self, url):
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
+ def test_local_file_open(self):
+ class DummyURLopener(urllib.URLopener):
+ def open_local_file(self, url):
+ return url
+ self.assertEqual(DummyURLopener().open(
+ 'local-file://example'), '//example')
+ self.assertEqual(DummyURLopener().open(
+ 'local_file://example'), '//example')
+ self.assertRaises(IOError, urllib.urlopen,
+ 'local-file://example')
+ self.assertRaises(IOError, urllib.urlopen,
+ 'local_file://example')
# Just commented them out.
# Can't really tell why keep failing in windows and sparc.
diff --git a/Lib/urllib.py b/Lib/urllib.py
index d85504a5cb7e..a24e9a5c68fb 100644
--- a/Lib/urllib.py
+++ b/Lib/urllib.py
@@ -203,7 +203,10 @@ def open(self, fullurl, data=None):
name = 'open_' + urltype
self.type = urltype
name = name.replace('-', '_')
- if not hasattr(self, name):
+
+ # bpo-35907: disallow file reading when the URL type is not allowed
+ if not hasattr(self, name) or \
+ (self == _urlopener and name == 'open_local_file'):
if proxy:
return self.open_unknown_proxy(proxy, fullurl, data)
else:

View File

@ -1,22 +0,0 @@
From b86392511acd4cd30dc68711fa22f9f93228715a Mon Sep 17 00:00:00 2001
From: "blurb-it[bot]" <blurb-it[bot]@users.noreply.github.com>
Date: Wed, 13 Feb 2019 17:21:11 +0000
Subject: [PATCH] =?UTF-8?q?=F0=9F=93=9C=F0=9F=A4=96=20Added=20by=20blurb?=
=?UTF-8?q?=5Fit.?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../NEWS.d/next/Library/2019-02-13-17-21-10.bpo-35907.ckk2zg.rst | 1 +
1 file changed, 1 insertion(+)
create mode 100644 Misc/NEWS.d/next/Library/2019-02-13-17-21-10.bpo-35907.ckk2zg.rst
diff --git a/Misc/NEWS.d/next/Library/2019-02-13-17-21-10.bpo-35907.ckk2zg.rst b/Misc/NEWS.d/next/Library/2019-02-13-17-21-10.bpo-35907.ckk2zg.rst
new file mode 100644
index 000000000000..8118a5f40583
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2019-02-13-17-21-10.bpo-35907.ckk2zg.rst
@@ -0,0 +1 @@
+Avoid file reading as disallowing the unnecessary URL scheme in urllib.urlopen
\ No newline at end of file

View File

@ -14,8 +14,8 @@
%undefine py_auto_byte_compile
%undefine _debuginfo_subpackages
Name: python2
Version: 2.7.16
Release: 16
Version: 2.7.18
Release: 1
Summary: Python is an interpreted, interactive object-oriented programming language suitable
License: Python
URL: https://www.python.org/
@ -23,83 +23,63 @@ Source0: https://www.python.org/ftp/python/%{version}/Python-%{version}.tar.xz
#with systemtap
Source1: libpython.stp
#custom modifications
Patch0: python-2.7.1-config.patch
Patch1: 00001-pydocnogui.patch
Patch4: python-2.5-cflags.patch
Patch6: python-2.5.1-plural-fix.patch
Patch7: python-2.5.1-sqlite-encoding.patch
Patch10: 00010-2.7.13-binutils-no-dep.patch
Patch13: python-2.7rc1-socketmodule-constants.patch
Patch14: python-2.7rc1-socketmodule-constants2.patch
Patch16: python-2.6-rpath.patch
Patch17: python-2.6.4-distutils-rpath.patch
Patch55: 00055-systemtap.patch
Patch102: 00102-2.7.13-lib64.patch
Patch103: python-2.7-lib64-sysconfig.patch
Patch104: 00104-lib64-fix-for-test_install.patch
Patch111: 00111-no-static-lib.patch
Patch112: 00112-2.7.13-debug-build.patch
Patch113: 00113-more-configuration-flags.patch
Patch114: 00114-statvfs-f_flag-constants.patch
Patch121: 00121-add-Modules-to-build-path.patch
Patch128: python-2.7.1-fix_test_abc_with_COUNT_ALLOCS.patch
Patch130: python-2.7.2-add-extension-suffix-to-python-config.patch
Patch131: 00131-disable-tests-in-test_io.patch
Patch132: 00132-add-rpmbuild-hooks-to-unittest.patch
Patch133: 00133-skip-test_dl.patch
Patch136: 00136-skip-tests-of-seeking-stdin-in-rpmbuild.patch
Patch137: 00137-skip-distutils-tests-that-fail-in-rpmbuild.patch
Patch138: 00138-fix-distutils-tests-in-debug-build.patch
Patch139: 00139-skip-test_float-known-failure-on-arm.patch
Patch140: 00140-skip-test_ctypes-known-failure-on-sparc.patch
Patch142: 00142-skip-failing-pty-tests-in-rpmbuild.patch
Patch143: 00143-tsc-on-ppc.patch
Patch147: 00147-add-debug-malloc-stats.patch
Patch153: 00153-fix-test_gdb-noise.patch
Patch155: 00155-avoid-ctypes-thunks.patch
Patch156: 00156-gdb-autoload-safepath.patch
Patch157: 00157-uid-gid-overflows.patch
Patch165: 00165-crypt-module-salt-backport.patch
Patch167: 00167-disable-stack-navigation-tests-when-optimized-in-test_gdb.patch
Patch168: 00168-distutils-cflags.patch
Patch169: 00169-avoid-implicit-usage-of-md5-in-multiprocessing.patch
Patch170: 00170-gc-assertions.patch
Patch174: 00174-fix-for-usr-move.patch
Patch180: 00180-python-add-support-for-ppc64p7.patch
Patch181: 00181-allow-arbitrary-timeout-in-condition-wait.patch
Patch185: 00185-urllib2-honors-noproxy-for-ftp.patch
Patch187: 00187-add-RPATH-to-pyexpat.patch
Patch189: 00189-use-rpm-wheels.patch
Patch191: 00191-disable-NOOP.patch
Patch193: 00193-enable-loading-sqlite-extensions.patch
Patch289: 00289-disable-nis-detection.patch
Patch290: 04000-modularity-disable-tk.patch
Patch291: 05000-autotool-intermediates.patch
Patch342: 0342-bpo-36126-Fix-ref-count-leakage-in-structseq_repr.-G.patch
Patch349: 0349-2.7-bpo-13096-Fix-memory-leak-in-ctypes-POINTER-hand.patch
Patch350: 0350-2.7-bpo-36179-Fix-ref-leaks-in-_hashopenssl-GH-12158.patch
Patch351: 0351-2.7-bpo-36149-Fix-potential-use-of-uninitialized-mem.patch
Patch353: 0353-2.7-bpo-36186-Fix-linuxaudiodev.linux_audio_device-e.patch
Patch354: 0354-bpo-36147-Fix-a-memory-leak-in-ctypes-s_get-GH-12102.patch
Patch357: 0357-bpo-36140-Fix-an-incorrect-check-in-msidb_getsummary.patch
Patch358: 0358-2.7-IDLE-Fix-typo-in-keybindingDialog.py-GH-2322-GH-.patch
Patch359: python2-CVE-2019-9948-1.patch
Patch360: python2-CVE-2019-9948-2.patch
Patch361: CVE-2019-9740.patch
Patch362: python2-add-generic-os-supportr.patch
Patch363: bugfix-linux_distribution-skip-link-file.patch
Patch364: bugfix-test_locale-and-test_codecs.patch
Patch365: CVE-2019-9636-bpo-36216-Add-check-for-characters.patch
Patch366: CVE-2019-10160-1.patch
Patch367: CVE-2019-10160-2.patch
Patch368: CVE-2019-10160-3.patch
Patch369: CVE-2019-16056.patch
Patch370: CVE-2018-20852.patch
Patch371: CVE-2019-16935.patch
Patch372: CVE-2019-17514.patch
Patch373: CVE-2017-18207.patch
Patch374: bugfix-excessive-memory-usage-when-using-regular-expressions.patch
Patch375: CVE-2020-8492.patch
Patch0: python-2.7.1-config.patch
Patch1: 00001-pydocnogui.patch
Patch2: python-2.5-cflags.patch
Patch3: python-2.5.1-plural-fix.patch
Patch4: python-2.5.1-sqlite-encoding.patch
Patch5: 00010-2.7.13-binutils-no-dep.patch
Patch6: python-2.7rc1-socketmodule-constants.patch
Patch7: python-2.7rc1-socketmodule-constants2.patch
Patch8: python-2.6-rpath.patch
Patch9: python-2.6.4-distutils-rpath.patch
Patch10: 00055-systemtap.patch
Patch11: 00102-2.7.13-lib64.patch
Patch12: python-2.7-lib64-sysconfig.patch
Patch13: 00104-lib64-fix-for-test_install.patch
Patch14: 00111-no-static-lib.patch
Patch15: 00112-2.7.13-debug-build.patch
Patch16: 00113-more-configuration-flags.patch
Patch17: 00114-statvfs-f_flag-constants.patch
Patch18: 00121-add-Modules-to-build-path.patch
Patch19: python-2.7.1-fix_test_abc_with_COUNT_ALLOCS.patch
Patch20: python-2.7.2-add-extension-suffix-to-python-config.patch
Patch21: 00131-disable-tests-in-test_io.patch
Patch22: 00132-add-rpmbuild-hooks-to-unittest.patch
Patch23: 00133-skip-test_dl.patch
Patch24: 00136-skip-tests-of-seeking-stdin-in-rpmbuild.patch
Patch25: 00137-skip-distutils-tests-that-fail-in-rpmbuild.patch
Patch26: 00138-fix-distutils-tests-in-debug-build.patch
Patch27: 00139-skip-test_float-known-failure-on-arm.patch
Patch28: 00140-skip-test_ctypes-known-failure-on-sparc.patch
Patch29: 00142-skip-failing-pty-tests-in-rpmbuild.patch
Patch30: 00143-tsc-on-ppc.patch
Patch31: 00147-add-debug-malloc-stats.patch
Patch32: 00155-avoid-ctypes-thunks.patch
Patch33: 00156-gdb-autoload-safepath.patch
Patch34: 00165-crypt-module-salt-backport.patch
Patch35: 00167-disable-stack-navigation-tests-when-optimized-in-test_gdb.patch
Patch36: 00169-avoid-implicit-usage-of-md5-in-multiprocessing.patch
Patch37: 00170-gc-assertions.patch
Patch38: 00174-fix-for-usr-move.patch
Patch39: 00180-python-add-support-for-ppc64p7.patch
Patch40: 00181-allow-arbitrary-timeout-in-condition-wait.patch
Patch41: 00185-urllib2-honors-noproxy-for-ftp.patch
Patch42: 00187-add-RPATH-to-pyexpat.patch
Patch43: 00189-use-rpm-wheels.patch
Patch44: 00191-disable-NOOP.patch
Patch45: 00193-enable-loading-sqlite-extensions.patch
Patch46: 00289-disable-nis-detection.patch
Patch47: 04000-modularity-disable-tk.patch
Patch48: 05000-autotool-intermediates.patch
Patch49: python2-add-generic-os-supportr.patch
Patch50: bugfix-linux_distribution-skip-link-file.patch
Patch51: bugfix-test_locale-and-test_codecs.patch
Patch52: CVE-2019-17514.patch
Patch53: CVE-2017-18207.patch
Patch54: bugfix-excessive-memory-usage-when-using-regular-expressions.patch
Patch55: CVE-2020-8492.patch
Patch56: CVE-2019-9674.patch
BuildRequires: libdb-devel libffi-devel valgrind-devel ncurses-devel expat-devel readline-devel
BuildRequires: openssl-devel libtirpc-devel tcl-devel tk-devel glibc-devel libnsl2-devel
@ -634,6 +614,9 @@ sed -e "s|LIBRARY_PATH|%{_libdir}/%{py_INSTSONAME_debug}|" %{SOURCE1} \
%{dynload_dir}/_testcapimodule_d.so
%changelog
* Thu Aug 06 2020 Leo Fang <leofang_94@163.com> - 2.7.18-1
- Upgrade to version 2.7.18 and fix CVE-2019-9674
* Thu Jul 9 2020 shixuantong<shixuantong@huawei.com> - 2.7.16-16
- Type:bugfix
- ID:NA