From 3e3669c9c41a27e1466e2c28b3906e3dd0ce3e7e Mon Sep 17 00:00:00 2001
From: Steve Dower <steve.dower@python.org>
Date: Thu, 7 Mar 2019 08:25:22 -0800
Subject: [PATCH] bpo-36216: Add check for characters in netloc that normalize
 to separators (GH-12201)

CVE: CVE-2019-9636

Upstream-Status: Backport https://github.com/python/cpython/pull/12216/commits/3e3669c9c41a27e1466e2c28b3906e3dd0ce3e7e

Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
---
 Doc/library/urlparse.rst                      | 20 ++++++++++++++++
 Lib/test/test_urlparse.py                     | 24 +++++++++++++++++++
 Lib/urlparse.py                               | 17 +++++++++++++
 .../2019-03-06-09-38-40.bpo-36216.6q1m4a.rst  |  3 +++
 4 files changed, 64 insertions(+)
 create mode 100644 Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst

diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index 4e1ded73c266..73b0228ea8e3 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -1,4 +1,6 @@
 from test import test_support
+import sys
+import unicodedata
 import unittest
 import urlparse
 
@@ -624,6 +626,28 @@ def test_portseparator(self):
         self.assertEqual(urlparse.urlparse("http://www.python.org:80"),
                 ('http','www.python.org:80','','','',''))
 
+    def test_urlsplit_normalization(self):
+        # Certain characters should never occur in the netloc,
+        # including under normalization.
+        # Ensure that ALL of them are detected and cause an error
+        illegal_chars = u'/:#?@'
+        hex_chars = {'{:04X}'.format(ord(c)) for c in illegal_chars}
+        denorm_chars = [
+            c for c in map(unichr, range(128, sys.maxunicode))
+            if (hex_chars & set(unicodedata.decomposition(c).split()))
+            and c not in illegal_chars
+        ]
+        # Sanity check that we found at least one such character
+        self.assertIn(u'\u2100', denorm_chars)
+        self.assertIn(u'\uFF03', denorm_chars)
+
+        for scheme in [u"http", u"https", u"ftp"]:
+            for c in denorm_chars:
+                url = u"{}://netloc{}false.netloc/path".format(scheme, c)
+                print "Checking %r" % url
+                with self.assertRaises(ValueError):
+                    urlparse.urlsplit(url)
+
 def test_main():
     test_support.run_unittest(UrlParseTestCase)
 
diff --git a/Lib/urlparse.py b/Lib/urlparse.py
index f7c2b032b097..54eda08651ab 100644
--- a/Lib/urlparse.py
+++ b/Lib/urlparse.py
@@ -165,6 +165,21 @@ def _splitnetloc(url, start=0):
         delim = min(delim, wdelim)     # use earliest delim position
     return url[start:delim], url[delim:]   # return (domain, rest)
 
+def _checknetloc(netloc):
+    if not netloc or not isinstance(netloc, unicode):
+        return
+    # looking for characters like \u2100 that expand to 'a/c'
+    # IDNA uses NFKC equivalence, so normalize for this check
+    import unicodedata
+    netloc2 = unicodedata.normalize('NFKC', netloc)
+    if netloc == netloc2:
+        return
+    _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
+    for c in '/?#@:':
+        if c in netloc2:
+            raise ValueError("netloc '" + netloc2 + "' contains invalid " +
+                             "characters under NFKC normalization")
+
 def urlsplit(url, scheme='', allow_fragments=True):
     """Parse a URL into 5 components:
     <scheme>://<netloc>/<path>?<query>#<fragment>
@@ -193,6 +208,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
                 url, fragment = url.split('#', 1)
             if '?' in url:
                 url, query = url.split('?', 1)
+            _checknetloc(netloc)
             v = SplitResult(scheme, netloc, url, query, fragment)
             _parse_cache[key] = v
             return v
@@ -216,6 +232,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
         url, fragment = url.split('#', 1)
     if '?' in url:
         url, query = url.split('?', 1)
+    _checknetloc(netloc)
     v = SplitResult(scheme, netloc, url, query, fragment)
     _parse_cache[key] = v
     return v
diff --git a/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst b/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst
new file mode 100644
index 000000000000..1e1ad92c6feb
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst
@@ -0,0 +1,3 @@
+Changes urlsplit() to raise ValueError when the URL contains characters that
+decompose under IDNA encoding (NFKC-normalization) into characters that
+affect how the URL is parsed.
\ No newline at end of file
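For reference, and not part of the patch above: a minimal sketch of the behaviour the backported _checknetloc() check introduces, assuming a Python 2.7 interpreter whose Lib/urlparse.py carries this patch; the URL and hostnames are made-up examples. U+2100 (ACCOUNT OF) NFKC-normalizes to 'a/c', so a netloc containing it now raises ValueError instead of being accepted.

# Demonstration only (hypothetical example, not from the patch): with the
# _checknetloc() check applied, urlsplit() rejects a unicode netloc whose
# characters normalize to separators; an unpatched interpreter accepts the
# URL even though IDNA/NFKC handling of the netloc would later introduce
# an extra '/' separator.
import urlparse

url = u"https://example.com\u2100evil.example/path"
try:
    result = urlparse.urlsplit(url)
    print "accepted (unpatched): netloc=%r" % (result.netloc,)
except ValueError as exc:
    print "rejected (patched): %s" % exc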