Mirror of https://git.yoctoproject.org/poky
Synced 2026-01-29 21:08:42 +01:00

Compare commits: yocto-5.2. ... zeus-22.0. (133 commits)
Commits in this range (SHA1):
12a4c177bb cf0cefd53c 06c6dc9301 7719a7af93 a972597652 43710d4805 823a79d873 6c5493908c
8222c38b52 9dc42a094a 3277ee2c16 adde04efac d482c10335 3a4712e834 8e363fb137 baf9b40fb2
69da36a0a6 e571c88c44 a8f6e31beb 5655adda72 a629b6ca52 e8e3b2ce49 4c8a760115 c1cbb6fd15
1f4750c47f 728f969be0 7eaa8b0c36 f307a225ad 1dbea71db4 5dd9ef97d3 9971e87cad 7d43cbd293
1f29596ca7 f1098122e1 c84b0dbcd8 67e155c209 605f4d4c32 aa72758866 fbd00df78b 1c0a93e1be
26f62a423d f5efafffbc f86baae14d 456f5e0d23 8e2bb3baf9 3e8e6700cc 49bc773cd0 0d6c922af9
59edce3af5 9350b76f27 4010e6a25d 0f408d8a2e 0b55d6c27e 2d6a3655e9 0275e68b05 b990f0c7dc
c4bd636938 e97c2d769f 348778f89c 85e3e6dfd6 724eb2e369 1fc208bd48 01850c786a 409b656f74
4ad64a5cf5 1da39077e5 006b110cdb 8d90432a73 8775cdf59a a8580a49b0 3023bc10ce 00c723ea5b
90769125ee c1fbd56f01 6c50928df6 faf0ebf337 9c38686e17 2a6773b376 db8e206970 cc9836f601
495005ae3c cf1117859e 3f9476d8dd f3ba167c21 6949ba6d41 d75c717c7f d95f3a8277 25fb823284
f2c963a930 5c73cd2707 6c5b31e7a6 2d43b1e2f1 f9d4afbb06 17c1e5fc07 35a556a732 2404633259
72e9458dba b3d7793ccd 4094715887 811c957332 645d114f72 db4b5fd686 149ad2c3e2 6b86b7fd57
bfd2ab3d67 26d04d5b11 70ccceb33a 56ce08a9ed fbb38be2c1 0b10203499 6fb5afb65d 13a3b14503
ad9606f877 c07d45d20c cd24be7c47 78e173090b ed29b7291d 5fb336e957 527456a57a 895a9d0ef7
93a2a3c64a 6da9997056 4f5919d899 4cecfe6e9d 728ad848a8 5d4879def3 5789bb4985 eb6c765b24
8221ea3602 b22c6f673f ea55d874e8 e398bd4cd0 a3ece7d5d8
@@ -195,9 +195,6 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskha
global worker_pipe_lock
pipein.close()

signal.signal(signal.SIGTERM, sigterm_handler)
# Let SIGHUP exit as SIGTERM
signal.signal(signal.SIGHUP, sigterm_handler)
bb.utils.signal_on_parent_exit("SIGTERM")

# Save out the PID so that the event can include it the
@@ -212,6 +209,11 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskha
# This ensures signals sent to the controlling terminal like Ctrl+C
# don't stop the child processes.
os.setsid()

signal.signal(signal.SIGTERM, sigterm_handler)
# Let SIGHUP exit as SIGTERM
signal.signal(signal.SIGHUP, sigterm_handler)

# No stdin
newsi = os.open(os.devnull, os.O_RDWR)
os.dup2(newsi, sys.stdin.fileno())

@@ -371,10 +371,6 @@ class BBCooker:

self.data.setVar('BB_CMDLINE', self.ui_cmdline)

#
# Copy of the data store which has been expanded.
# Used for firing events and accessing variables where expansion needs to be accounted for
#
if CookerFeatures.BASEDATASTORE_TRACKING in self.featureset:
self.disableDataTracking()
@@ -1593,7 +1593,7 @@ class Fetch(object):
fn = d.getVar('FILE')
mc = d.getVar('__BBMULTICONFIG') or ""
if cache and fn and mc + fn in urldata_cache:
self.ud = urldata_cache[mc + fn]
self.ud = urldata_cache[mc + fn + str(id(d))]

for url in urls:
if url not in self.ud:
@@ -1605,7 +1605,7 @@ class Fetch(object):
pass

if fn and cache:
urldata_cache[mc + fn] = self.ud
urldata_cache[mc + fn + str(id(d))] = self.ud

def localpath(self, url):
if url not in self.urls:

@@ -145,18 +145,18 @@ class ClearCase(FetchMethod):

basecmd = "%s %s" % (ud.basecmd, command)

if command is 'mkview':
if command == 'mkview':
if not "rcleartool" in ud.basecmd:
# Cleartool needs a -snapshot view
options.append("-snapshot")
options.append("-tag %s" % ud.viewname)
options.append(ud.viewdir)

elif command is 'rmview':
elif command == 'rmview':
options.append("-force")
options.append("%s" % ud.viewdir)

elif command is 'setcs':
elif command == 'setcs':
options.append("-overwrite")
options.append(ud.configspecfile)
@@ -292,11 +292,21 @@ class Git(FetchMethod):
def clonedir_need_update(self, ud, d):
if not os.path.exists(ud.clonedir):
return True
if ud.shallow and ud.write_shallow_tarballs and self.clonedir_need_shallow_revs(ud, d):
return True
for name in ud.names:
if not self._contains_ref(ud, d, name, ud.clonedir):
return True
return False

def clonedir_need_shallow_revs(self, ud, d):
for rev in ud.shallow_revs:
try:
runfetchcmd('%s rev-parse -q --verify %s' % (ud.basecmd, rev), d, quiet=True, workdir=ud.clonedir)
except bb.fetch2.FetchError:
return rev
return None

def shallow_tarball_need_update(self, ud):
return ud.shallow and ud.write_shallow_tarballs and not os.path.exists(ud.fullshallow)

@@ -339,13 +349,7 @@ class Git(FetchMethod):
runfetchcmd(clone_cmd, d, log=progresshandler)

# Update the checkout if needed
needupdate = False
for name in ud.names:
if not self._contains_ref(ud, d, name, ud.clonedir):
needupdate = True
break

if needupdate:
if self.clonedir_need_update(ud, d):
output = runfetchcmd("%s remote" % ud.basecmd, d, quiet=True, workdir=ud.clonedir)
if "origin" in output:
runfetchcmd("%s remote rm origin" % ud.basecmd, d, workdir=ud.clonedir)
@@ -369,6 +373,11 @@ class Git(FetchMethod):
if not self._contains_ref(ud, d, name, ud.clonedir):
raise bb.fetch2.FetchError("Unable to find revision %s in branch %s even from upstream" % (ud.revisions[name], ud.branches[name]))

if ud.shallow and ud.write_shallow_tarballs:
missing_rev = self.clonedir_need_shallow_revs(ud, d)
if missing_rev:
raise bb.fetch2.FetchError("Unable to find revision %s even from upstream" % missing_rev)

def build_mirror_data(self, ud, d):
if ud.shallow and ud.write_shallow_tarballs:
if not os.path.exists(ud.fullshallow):
@@ -54,13 +54,6 @@ class Hg(FetchMethod):
else:
ud.proto = "hg"

ud.setup_revisions(d)

if 'rev' in ud.parm:
ud.revision = ud.parm['rev']
elif not ud.revision:
ud.revision = self.latest_revision(ud, d)

# Create paths to mercurial checkouts
hgsrcname = '%s_%s_%s' % (ud.module.replace('/', '.'), \
ud.host, ud.path.replace('/', '.'))
@@ -74,6 +67,13 @@ class Hg(FetchMethod):
ud.localfile = ud.moddir
ud.basecmd = d.getVar("FETCHCMD_hg") or "/usr/bin/env hg"

ud.setup_revisions(d)

if 'rev' in ud.parm:
ud.revision = ud.parm['rev']
elif not ud.revision:
ud.revision = self.latest_revision(ud, d)

ud.write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS")

def need_update(self, ud, d):
@@ -139,7 +139,7 @@ class Hg(FetchMethod):
cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" pull" % (ud.basecmd, ud.user, ud.pswd, proto)
else:
cmd = "%s pull" % (ud.basecmd)
elif command == "update":
elif command == "update" or command == "up":
if ud.user and ud.pswd:
cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" update -C %s" % (ud.basecmd, ud.user, ud.pswd, proto, " ".join(options))
else:
@@ -247,12 +247,19 @@ class Hg(FetchMethod):

scmdata = ud.parm.get("scmdata", "")
if scmdata != "nokeep":
proto = ud.parm.get('protocol', 'http')
if not os.access(os.path.join(codir, '.hg'), os.R_OK):
logger.debug(2, "Unpack: creating new hg repository in '" + codir + "'")
runfetchcmd("%s init %s" % (ud.basecmd, codir), d)
logger.debug(2, "Unpack: updating source in '" + codir + "'")
runfetchcmd("%s pull %s" % (ud.basecmd, ud.moddir), d, workdir=codir)
runfetchcmd("%s up -C %s" % (ud.basecmd, revflag), d, workdir=codir)
if ud.user and ud.pswd:
runfetchcmd("%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" pull %s" % (ud.basecmd, ud.user, ud.pswd, proto, ud.moddir), d, workdir=codir)
else:
runfetchcmd("%s pull %s" % (ud.basecmd, ud.moddir), d, workdir=codir)
if ud.user and ud.pswd:
runfetchcmd("%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" up -C %s" % (ud.basecmd, ud.user, ud.pswd, proto, revflag), d, workdir=codir)
else:
runfetchcmd("%s up -C %s" % (ud.basecmd, revflag), d, workdir=codir)
else:
logger.debug(2, "Unpack: extracting source to '" + codir + "'")
runfetchcmd("%s archive -t files %s %s" % (ud.basecmd, revflag, codir), d, workdir=ud.moddir)
@@ -1397,7 +1397,7 @@ class RunQueue:
cache[tid] = iscurrent
return iscurrent

def validate_hashes(self, tocheck, data, currentcount=0, siginfo=False):
def validate_hashes(self, tocheck, data, currentcount=0, siginfo=False, summary=True):
valid = set()
if self.hashvalidate:
sq_data = {}
@@ -1410,15 +1410,15 @@ class RunQueue:
sq_data['hashfn'][tid] = self.rqdata.dataCaches[mc].hashfn[taskfn]
sq_data['unihash'][tid] = self.rqdata.runtaskentries[tid].unihash

valid = self.validate_hash(sq_data, data, siginfo, currentcount)
valid = self.validate_hash(sq_data, data, siginfo, currentcount, summary)

return valid

def validate_hash(self, sq_data, d, siginfo, currentcount):
locs = {"sq_data" : sq_data, "d" : d, "siginfo" : siginfo, "currentcount" : currentcount}
def validate_hash(self, sq_data, d, siginfo, currentcount, summary):
locs = {"sq_data" : sq_data, "d" : d, "siginfo" : siginfo, "currentcount" : currentcount, "summary" : summary}

# Metadata has **kwargs so args can be added, sq_data can also gain new fields
call = self.hashvalidate + "(sq_data, d, siginfo=siginfo, currentcount=currentcount)"
call = self.hashvalidate + "(sq_data, d, siginfo=siginfo, currentcount=currentcount, summary=summary)"

return bb.utils.better_eval(call, locs)

@@ -1605,7 +1605,7 @@ class RunQueue:

tocheck.add(tid)

valid_new = self.validate_hashes(tocheck, self.cooker.data, 0, True)
valid_new = self.validate_hashes(tocheck, self.cooker.data, 0, True, summary=False)

# Tasks which are both setscene and noexec never care about dependencies
# We therefore find tasks which are setscene and noexec and mark their
@@ -1986,7 +1986,7 @@ class RunQueueExecute:
continue
logger.debug(1, "Task %s no longer deferred" % nexttask)
del self.sq_deferred[nexttask]
valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False)
valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False, summary=False)
if not valid:
logger.debug(1, "%s didn't become valid, skipping setscene" % nexttask)
self.sq_task_failoutright(nexttask)
@@ -2303,16 +2303,22 @@ class RunQueueExecute:
for tid in changed:
if tid not in self.rqdata.runq_setscene_tids:
continue
if tid in self.runq_running:
continue
if tid in self.scenequeue_covered:
# Potentially risky, should we report this hash as a match?
logger.info("Already covered setscene for %s so ignoring rehash" % (tid))
continue
if tid not in self.pending_migrations:
self.pending_migrations.add(tid)

for tid in self.pending_migrations.copy():
if tid in self.runq_running:
# Too late, task already running, not much we can do now
self.pending_migrations.remove(tid)
continue

if tid in self.scenequeue_covered or tid in self.sq_live:
# Already ran this setscene task or it running
# Potentially risky, should we report this hash as a match?
logger.info("Already covered setscene for %s so ignoring rehash" % (tid))
self.pending_migrations.remove(tid)
continue

valid = True
# Check no tasks this covers are running
for dep in self.sqdata.sq_covered_tasks[tid]:
@@ -2337,7 +2343,12 @@ class RunQueueExecute:
self.sq_buildable.remove(tid)
if tid in self.sq_running:
self.sq_running.remove(tid)
if self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
harddepfail = False
for t in self.sqdata.sq_harddeps:
if tid in self.sqdata.sq_harddeps[t] and t in self.scenequeue_notcovered:
harddepfail = True
break
if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
if tid not in self.sq_buildable:
self.sq_buildable.add(tid)
if len(self.sqdata.sq_revdeps[tid]) == 0:
@@ -2361,9 +2372,15 @@ class RunQueueExecute:
if tid in self.build_stamps:
del self.build_stamps[tid]

logger.info("Setscene task %s now valid and being rerun" % tid)
origvalid = False
if tid in self.sqdata.valid:
origvalid = True
self.sqdone = False
update_scenequeue_data([tid], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
update_scenequeue_data([tid], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
if tid in self.sqdata.valid and not origvalid:
logger.info("Setscene task %s became valid" % tid)
if harddepfail:
self.sq_task_failoutright(tid)

if changed:
self.holdoff_need_update = True
@@ -2692,9 +2709,9 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
sqdata.stamppresent = set()
sqdata.valid = set()

update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq)
update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True)

def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq):
def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True):

tocheck = set()

@@ -2728,7 +2745,7 @@ def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq):

tocheck.add(tid)

sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False)
sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False, summary=summary)

sqdata.hashes = {}
for mc in sorted(sqdata.multiconfigs):
@@ -508,6 +508,7 @@ class SignatureGeneratorUniHashMixIn(object):
if new_unihash != unihash:
bb.debug(1, 'Task %s unihash changed %s -> %s by server %s' % (taskhash, unihash, new_unihash, self.server))
bb.event.fire(bb.runqueue.taskUniHashUpdate(fn + ':do_' + task, new_unihash), d)
self.set_unihash(tid, new_unihash)
else:
bb.debug(1, 'Reported task %s as unihash %s to %s' % (taskhash, unihash, self.server))
except hashserv.client.HashConnectionError as e:
@@ -1863,6 +1863,26 @@ class GitShallowTest(FetcherTest):
with self.assertRaises(bb.fetch2.FetchError):
self.fetch()

def test_shallow_fetch_missing_revs(self):
self.add_empty_file('a')
self.add_empty_file('b')
fetcher, ud = self.fetch(self.d.getVar('SRC_URI'))
self.git('tag v0.0 master', cwd=self.srcdir)
self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
self.d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0')
self.fetch_shallow()

def test_shallow_fetch_missing_revs_fails(self):
self.add_empty_file('a')
self.add_empty_file('b')
fetcher, ud = self.fetch(self.d.getVar('SRC_URI'))
self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
self.d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0')

with self.assertRaises(bb.fetch2.FetchError), self.assertLogs("BitBake.Fetcher", level="ERROR") as cm:
self.fetch_shallow()
self.assertIn("Unable to find revision v0.0 even from upstream", cm.output[0])

@skipIfNoNetwork()
def test_bitbake(self):
self.git('remote add --mirror=fetch origin git://github.com/openembedded/bitbake', cwd=self.srcdir)
@@ -255,7 +255,7 @@ class RunQueueTests(unittest.TestCase):
cmd = ["bitbake", "a1", "b1"]
tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
expected = ['a1:populate_sysroot', 'a1:package', 'a1:package_write_rpm_setscene', 'a1:packagedata_setscene',
'a1:package_write_ipk_setscene', 'a1:package_qa_setscene']
'a1:package_write_ipk_setscene', 'a1:package_qa_setscene', 'a1:build']
self.assertEqual(set(tasks), set(expected))

self.shutdown(tempdir)
@@ -103,6 +103,32 @@ class Path(unittest.TestCase):
result = bb.utils._check_unsafe_delete_path(arg1)
self.assertEqual(result, correctresult, '_check_unsafe_delete_path("%s") != %s' % (arg1, correctresult))

class Checksum(unittest.TestCase):
filler = b"Shiver me timbers square-rigged spike Gold Road galleon bilge water boatswain wherry jack pirate. Mizzenmast rum lad Privateer jack salmagundi hang the jib piracy Pieces of Eight Corsair. Parrel marooned black spot yawl provost quarterdeck cable no prey, no pay spirits lateen sail."

def test_md5(self):
import hashlib
with tempfile.NamedTemporaryFile() as f:
f.write(self.filler)
f.flush()
checksum = bb.utils.md5_file(f.name)
self.assertEqual(checksum, "bd572cd5de30a785f4efcb6eaf5089e3")

def test_sha1(self):
import hashlib
with tempfile.NamedTemporaryFile() as f:
f.write(self.filler)
f.flush()
checksum = bb.utils.sha1_file(f.name)
self.assertEqual(checksum, "249eb8fd654732ea836d5e702d7aa567898eca71")

def test_sha256(self):
import hashlib
with tempfile.NamedTemporaryFile() as f:
f.write(self.filler)
f.flush()
checksum = bb.utils.sha256_file(f.name)
self.assertEqual(checksum, "fcfbae8bf6b721dbb9d2dc6a9334a58f2031a9a9b302999243f99da4d7f12d0f")

class EditMetadataFile(unittest.TestCase):
_origfile = """
@@ -520,22 +520,26 @@ def unlockfile(lf):
fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
lf.close()

def _hasher(method, filename):
import mmap

with open(filename, "rb") as f:
try:
with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
for chunk in iter(lambda: mm.read(8192), b''):
method.update(chunk)
except ValueError:
# You can't mmap() an empty file so silence this exception
pass
return method.hexdigest()


def md5_file(filename):
"""
Return the hex string representation of the MD5 checksum of filename.
"""
import hashlib, mmap

with open(filename, "rb") as f:
m = hashlib.md5()
try:
with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
for chunk in iter(lambda: mm.read(8192), b''):
m.update(chunk)
except ValueError:
# You can't mmap() an empty file so silence this exception
pass
return m.hexdigest()
import hashlib
return _hasher(hashlib.md5(), filename)

def sha256_file(filename):
"""
@@ -543,24 +547,14 @@ def sha256_file(filename):
filename.
"""
import hashlib

s = hashlib.sha256()
with open(filename, "rb") as f:
for line in f:
s.update(line)
return s.hexdigest()
return _hasher(hashlib.sha256(), filename)

def sha1_file(filename):
"""
Return the hex string representation of the SHA1 checksum of the filename
"""
import hashlib

s = hashlib.sha1()
with open(filename, "rb") as f:
for line in f:
s.update(line)
return s.hexdigest()
return _hasher(hashlib.sha1(), filename)

def preserved_envvars_exported():
"""Variables which are taken from the environment and placed in and exported
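The refactor above only changes the internals: md5_file(), sha1_file() and sha256_file() keep their public signatures and now all delegate to the shared _hasher() helper. A minimal usage sketch (the file path is made up for illustration):

```
import bb.utils

# Each helper takes a filename and returns the hex digest as a string,
# exactly as before the refactor.
path = "/tmp/example.bin"   # hypothetical file
print(bb.utils.md5_file(path))
print(bb.utils.sha1_file(path))
print(bb.utils.sha256_file(path))
```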
@@ -243,6 +243,7 @@ class PRServer(SimpleXMLRPCServer):
try:
pid = os.fork()
if pid > 0:
self.socket.close() # avoid ResourceWarning in parent
return pid
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
@@ -449,29 +450,35 @@ class PRServiceConfigError(Exception):
def auto_start(d):
global singleton

# Shutdown any existing PR Server
auto_shutdown()

host_params = list(filter(None, (d.getVar('PRSERV_HOST') or '').split(':')))
if not host_params:
# Shutdown any existing PR Server
auto_shutdown()
return None

if len(host_params) != 2:
# Shutdown any existing PR Server
auto_shutdown()
logger.critical('\n'.join(['PRSERV_HOST: incorrect format',
'Usage: PRSERV_HOST = "<hostname>:<port>"']))
raise PRServiceConfigError

if is_local_special(host_params[0], int(host_params[1])) and not singleton:
if is_local_special(host_params[0], int(host_params[1])):
import bb.utils
cachedir = (d.getVar("PERSISTENT_DIR") or d.getVar("CACHE"))
if not cachedir:
logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
raise PRServiceConfigError
bb.utils.mkdirhier(cachedir)
dbfile = os.path.join(cachedir, "prserv.sqlite3")
logfile = os.path.join(cachedir, "prserv.log")
singleton = PRServSingleton(os.path.abspath(dbfile), os.path.abspath(logfile), ("localhost",0))
singleton.start()
if singleton:
if singleton.dbfile != dbfile:
# Shutdown any existing PR Server as doesn't match config
auto_shutdown()
if not singleton:
bb.utils.mkdirhier(cachedir)
singleton = PRServSingleton(os.path.abspath(dbfile), os.path.abspath(logfile), ("localhost",0))
singleton.start()
if singleton:
host, port = singleton.getinfo()
else:
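The auto_start() path above is driven entirely by PRSERV_HOST. A minimal local.conf sketch for the local-server case that is_local_special() handles (port 0 simply lets BitBake pick a free port; this is the conventional setting rather than anything introduced by this change):

```
# Start a local PR server automatically alongside the build.
PRSERV_HOST = "localhost:0"
```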
@@ -132,9 +132,9 @@
<revremark>Released with the Yocto Project 2.7 Release.</revremark>
</revision>
<revision>
<revnumber>2.8</revnumber>
<revnumber>3.0</revnumber>
<date>&REL_MONTH_YEAR;</date>
<revremark>Released with the Yocto Project 2.8 Release.</revremark>
<revremark>Released with the Yocto Project 3.0 Release.</revremark>
</revision>
</revhistory>
@@ -2349,7 +2349,7 @@
Most software provides some means of setting build-time
configuration options before compilation.
Typically, setting these options is accomplished by running a
configure script with some options, or by modifying a build
configure script with options, or by modifying a build
configuration file.
<note>
As of Yocto Project Release 1.7, some of the core recipes
@@ -2389,6 +2389,7 @@
software is built using Autotools.
If this is the case, you just need to worry about
modifying the configuration.</para>

<para>When using Autotools, your recipe needs to inherit
the
<ulink url='&YOCTO_DOCS_REF_URL;#ref-classes-autotools'><filename>autotools</filename></ulink>
@@ -2401,13 +2402,15 @@
or
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGECONFIG_CONFARGS'><filename>PACKAGECONFIG_CONFARGS</filename></ulink>
to pass any needed configure options that are specific
to the recipe.</para></listitem>
to the recipe.
</para></listitem>
<listitem><para><emphasis>CMake:</emphasis>
If your source files have a
<filename>CMakeLists.txt</filename> file, then your
software is built using CMake.
If this is the case, you just need to worry about
modifying the configuration.</para>

<para>When you use CMake, your recipe needs to inherit
the
<ulink url='&YOCTO_DOCS_REF_URL;#ref-classes-cmake'><filename>cmake</filename></ulink>
@@ -2417,7 +2420,16 @@
You can make some adjustments by setting
<ulink url='&YOCTO_DOCS_REF_URL;#var-EXTRA_OECMAKE'><filename>EXTRA_OECMAKE</filename></ulink>
to pass any needed configure options that are specific
to the recipe.</para></listitem>
to the recipe.
<note>
If you need to install one or more custom CMake
toolchain files that are supplied by the
application you are building, install the files to
<filename>${D}${datadir}/cmake/</filename> Modules
during
<ulink url='&YOCTO_DOCS_REF_URL;#ref-tasks-install'><filename>do_install</filename></ulink>.
</note>
</para></listitem>
<listitem><para><emphasis>Other:</emphasis>
If your source files do not have a
<filename>configure.ac</filename> or
@@ -2780,6 +2792,14 @@
<ulink url='&YOCTO_DOCS_REF_URL;#var-PARALLEL_MAKEINST'><filename>PARALLEL_MAKEINST</filename></ulink>
for additional information.
</para></listitem>
<listitem><para>
If you need to install one or more custom CMake
toolchain files that are supplied by the
application you are building, install the files to
<filename>${D}${datadir}/cmake/</filename> Modules
during
<ulink url='&YOCTO_DOCS_REF_URL;#ref-tasks-install'><filename>do_install</filename></ulink>.
</para></listitem>
</itemizedlist>
</note>
</section>
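As a concrete illustration of the Autotools and CMake cases described above, a recipe typically just inherits the matching class and adds its own configure switches; the specific flags below are hypothetical:

```
inherit autotools

# Recipe-specific configure options; the exact switches depend on the software.
EXTRA_OECONF += "--disable-static --without-examples"

# For a CMake-based project the equivalent would be, e.g.:
# inherit cmake
# EXTRA_OECMAKE += "-DENABLE_EXAMPLES=OFF"
```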
@@ -5420,12 +5440,16 @@
<literallayout class='monospaced'>
BBMULTICONFIG = "x86 arm"
</literallayout>
</para>

<para>Please note, that a "default" configuration already exists by definition,
this configuration is named: "" (empty string) and is defined by the variables
coming from your local.conf file. So, the previous example actually adds two
additional configurations to your build "arm" and "x86" along with "".
<note>
A "default" configuration already exists by
definition.
This configuration is named: "" (i.e. empty
string) and is defined by the variables coming
from your <filename>local.conf</filename> file.
Consequently, the previous example actually
adds two additional configurations to your
build: "arm" and "x86" along with "".
</note>
</para></listitem>
<listitem><para>
<emphasis>Launch BitBake</emphasis>:
@@ -5445,9 +5469,10 @@
<filename>x86.conf</filename> configuration file,
a <filename>core-image-sato</filename>
image that is configured through the
<filename>arm.conf</filename> configuration file and a
<filename>core-image-base</filename> that is configured
through your <filename>local.conf</filename> configuration file.
<filename>arm.conf</filename> configuration file
and a <filename>core-image-base</filename> that is
configured through your
<filename>local.conf</filename> configuration file.
</para></listitem>
</itemizedlist>
<note>
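For instance, with BBMULTICONFIG = "x86 arm" set as above, the three images described in that step can be requested in a single invocation roughly like this (using the shortened "mc" prefix introduced in this release; target names follow the surrounding text):

```
bitbake mc:x86:core-image-minimal mc:arm:core-image-sato core-image-base
```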
@@ -10819,6 +10844,47 @@
features that are used by many distributions.
</para>

<para>
By default, the Yocto Project uses SysVinit as the initialization
manager.
However, support also exists for systemd,
which is a full replacement for init with
parallel starting of services, reduced shell overhead and other
features that are used by many distributions.
</para>

<para>
Within the system, SysVinit treats system components as services.
These services are maintained as shell scripts stored in the
<filename>/etc/init.d/</filename> directory.
Services organize into different run levels.
This organization is maintained by putting links to the services
in the <filename>/etc/rcN.d/</filename> directories, where
<replaceable>N/</replaceable> is one of the following options:
"S", "0", "1", "2", "3", "4", "5", or "6".
<note>
Each runlevel has a dependency on the previous runlevel.
This dependency allows the services to work properly.
</note>
</para>

<para>
In comparison, systemd treats components as units.
Using units is a broader concept as compared to using a service.
A unit includes several different types of entities.
Service is one of the types of entities.
The runlevel concept in SysVinit corresponds to the concept of a
target in systemd, where target is also a type of supported unit.
</para>

<para>
In a SysVinit-based system, services load sequentially (i.e. one
by one) during and parallelization is not supported.
With systemd, services start in parallel.
Needless to say, the method can have an impact on system startup
performance.
</para>

<para>
If you want to use SysVinit, you do
not have to do anything.
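To make the runlevel linkage described above concrete: enabling a SysVinit service for a given runlevel is just a symlink from the rcN.d directory back to the script in /etc/init.d (the service name here is hypothetical):

```
# /etc/init.d/myservice is the actual init script; the "S20" prefix
# controls the start order within runlevel 5.
ln -s ../init.d/myservice /etc/rc5.d/S20myservice
```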
@@ -117,9 +117,9 @@
<revremark>Released with the Yocto Project 2.7 Release.</revremark>
</revision>
<revision>
<revnumber>2.8</revnumber>
<revnumber>3.0</revnumber>
<date>&REL_MONTH_YEAR;</date>
<revremark>Released with the Yocto Project 2.8 Release.</revremark>
<revremark>Released with the Yocto Project 3.0 Release.</revremark>
</revision>
</revhistory>

@@ -549,9 +549,9 @@
<literallayout class='monospaced'>
FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"

SRC_URI_append += "file://<replaceable>patch-file-one</replaceable>"
SRC_URI_append += "file://<replaceable>patch-file-two</replaceable>"
SRC_URI_append += "file://<replaceable>patch-file-three</replaceable>"
SRC_URI_append = " file://<replaceable>patch-file-one</replaceable>"
SRC_URI_append = " file://<replaceable>patch-file-two</replaceable>"
SRC_URI_append = " file://<replaceable>patch-file-three</replaceable>"
</literallayout>
The
<ulink url='&YOCTO_DOCS_REF_URL;#var-FILESEXTRAPATHS'><filename>FILESEXTRAPATHS</filename></ulink>

@@ -102,9 +102,9 @@
<revremark>Released with the Yocto Project 2.7 Release.</revremark>
</revision>
<revision>
<revnumber>2.8</revnumber>
<revnumber>3.0</revnumber>
<date>&REL_MONTH_YEAR;</date>
<revremark>Released with the Yocto Project 2.8 Release.</revremark>
<revremark>Released with the Yocto Project 3.0 Release.</revremark>
</revision>
</revhistory>

@@ -88,9 +88,9 @@
<revremark>Released with the Yocto Project 2.7 Release.</revremark>
</revision>
<revision>
<revnumber>2.8</revnumber>
<revnumber>3.0</revnumber>
<date>&REL_MONTH_YEAR;</date>
<revremark>Released with the Yocto Project 2.8 Release.</revremark>
<revremark>Released with the Yocto Project 3.0 Release.</revremark>
</revision>
</revhistory>
@@ -332,22 +332,22 @@
</para>

<xi:include
xmlns:xi="http://www.w3.org/2003/XInclude" href="../../../bitbake/doc/bitbake-user-manual/bitbake-user-manual-intro.xml"/>
xmlns:xi="http://www.w3.org/2003/XInclude" href="../../bitbake/doc/bitbake-user-manual/bitbake-user-manual-intro.xml"/>

<xi:include
xmlns:xi="http://www.w3.org/2003/XInclude" href="../../../bitbake/doc/bitbake-user-manual/bitbake-user-manual-execution.xml"/>
xmlns:xi="http://www.w3.org/2003/XInclude" href="../../bitbake/doc/bitbake-user-manual/bitbake-user-manual-execution.xml"/>

<xi:include
xmlns:xi="http://www.w3.org/2003/XInclude" href="../../../bitbake/doc/bitbake-user-manual/bitbake-user-manual-metadata.xml"/>
xmlns:xi="http://www.w3.org/2003/XInclude" href="../../bitbake/doc/bitbake-user-manual/bitbake-user-manual-metadata.xml"/>

<xi:include
xmlns:xi="http://www.w3.org/2003/XInclude" href="../../../bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.xml"/>
xmlns:xi="http://www.w3.org/2003/XInclude" href="../../bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.xml"/>

<xi:include
xmlns:xi="http://www.w3.org/2003/XInclude" href="../../../bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables.xml"/>
xmlns:xi="http://www.w3.org/2003/XInclude" href="../../bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables.xml"/>

<xi:include
xmlns:xi="http://www.w3.org/2003/XInclude" href="../../../bitbake/doc/bitbake-user-manual/bitbake-user-manual-hello.xml"/>
xmlns:xi="http://www.w3.org/2003/XInclude" href="../../bitbake/doc/bitbake-user-manual/bitbake-user-manual-hello.xml"/>

</book>

@@ -47,9 +47,9 @@
<revremark>Released with the Yocto Project 2.7 Release.</revremark>
</revision>
<revision>
<revnumber>2.8</revnumber>
<revnumber>3.0</revnumber>
<date>&REL_MONTH_YEAR;</date>
<revremark>Released with the Yocto Project 2.8 Release.</revremark>
<revremark>Released with the Yocto Project 3.0 Release.</revremark>
</revision>
</revhistory>

@@ -1,14 +1,14 @@
<!ENTITY DISTRO "2.8">
<!ENTITY DISTRO_COMPRESSED "28">
<!ENTITY DISTRO "3.0">
<!ENTITY DISTRO_COMPRESSED "30">
<!ENTITY DISTRO_NAME_NO_CAP "zeus">
<!ENTITY DISTRO_NAME "Zeus">
<!ENTITY DISTRO_NAME_NO_CAP_MINUS_ONE "warrior">
<!ENTITY DISTRO_NAME_MINUS_ONE "Warrior">
<!ENTITY YOCTO_DOC_VERSION "2.8">
<!ENTITY YOCTO_DOC_VERSION "3.0">
<!ENTITY YOCTO_DOC_VERSION_MINUS_ONE "2.7">
<!ENTITY DISTRO_REL_TAG "yocto-2.8">
<!ENTITY DISTRO_REL_TAG "yocto-3.0">
<!ENTITY METAINTELVERSION "9.0">
<!ENTITY REL_MONTH_YEAR "TBD">
<!ENTITY REL_MONTH_YEAR "October 2019">
<!ENTITY META_INTEL_REL_TAG "&METAINTELVERSION;-&DISTRO_NAME_NO_CAP;-&YOCTO_DOC_VERSION;">
<!ENTITY POKYVERSION "22.0.0">
<!ENTITY POKYVERSION_COMPRESSED "2200">

@@ -102,9 +102,9 @@
<revremark>Released with the Yocto Project 2.7 Release.</revremark>
</revision>
<revision>
<revnumber>2.8</revnumber>
<revnumber>3.0</revnumber>
<date>&REL_MONTH_YEAR;</date>
<revremark>Released with the Yocto Project 2.8 Release.</revremark>
<revremark>Released with the Yocto Project 3.0 Release.</revremark>
</revision>
</revhistory>
@@ -2103,10 +2103,9 @@
</para>

<para>
Additionally, a
<link linkend='ref-classes-bluetooth'><filename>bluetooth</filename></link>
class has been added to make selection of the appropriate bluetooth
support within a recipe a little easier.
Additionally, a <filename>bluetooth</filename> class has been added
to make selection of the appropriate bluetooth support within a
recipe a little easier.
If you wish to make use of this class in a recipe, add something
such as the following:
<literallayout class='monospaced'>
@@ -4740,7 +4739,7 @@ id=f4d4f99cfbc2396e49c1613a7d237b9e57f06f81'>commit message</ulink>.

<para>
This section provides information about packaging changes that have
ocurred:
occurred:
<itemizedlist>
<listitem><para>
<emphasis><filename>python3</filename> Changes:</emphasis>
@@ -6597,6 +6596,505 @@ id=f4d4f99cfbc2396e49c1613a7d237b9e57f06f81'>commit message</ulink>.
</para>
</section>
</section>

<section id='moving-to-the-yocto-project-3.0-release'>
<title>Moving to the Yocto Project 3.0 Release</title>

<para>
This section provides migration information for moving to the
Yocto Project 3.0 Release from the prior release.
</para>

<section id='migration-3.0-init-system-selection'>
<title>Init System Selection</title>

<para>
Changing the init system manager previously required setting a
number of different variables.
You can now change the manager by setting the
<filename>INIT_MANAGER</filename> variable and the corresponding
include files
(i.e. <filename>conf/distro/include/init-manager-*.conf</filename>).
Include files are provided for four values: "none", "sysvinit",
"systemd", and "mdev-busybox".
The default value, "none", for <filename>INIT_MANAGER</filename>
should allow your current settings to continue working.
However, it is advisable to explicitly set
<filename>INIT_MANAGER</filename>.
</para>
</section>
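A minimal local.conf sketch of the mechanism just described (any of the four documented values may be substituted for "systemd"):

```
# Pulls in conf/distro/include/init-manager-systemd.conf and the
# related init settings in one step.
INIT_MANAGER = "systemd"
```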
<section id='migration-3.0-lsb-support-removed'>
<title>LSB Support Removed</title>

<para>
Linux Standard Base (LSB) as a standard is not current, and
is not well suited for embedded applications.
Support can be continued in a separate layer if needed.
However, presently LSB support has been removed from the core.
</para>

<para>
As a result of this change, the <filename>poky-lsb</filename>
derivative distribution configuration that was also used for
testing alternative configurations has been replaced with a
<filename>poky-altcfg</filename> distribution that has LSB
parts removed.
</para>
</section>

<section id='migration-3.0-removed-recipes'>
<title>Removed Recipes</title>

<para>
The following recipes have been removed.
<itemizedlist>
<listitem><para>
<filename>core-image-lsb-dev</filename>: Part of removed
LSB support.
</para></listitem>
<listitem><para>
<filename>core-image-lsb</filename>: Part of removed
LSB support.
</para></listitem>
<listitem><para>
<filename>core-image-lsb-sdk</filename>: Part of removed
LSB support.
</para></listitem>
<listitem><para>
<filename>cve-check-tool</filename>: Functionally replaced
by the <filename>cve-update-db</filename> recipe and
<filename>cve-check</filename> class.
</para></listitem>
<listitem><para>
<filename>eglinfo</filename>: No longer maintained.
<filename>eglinfo</filename> from
<filename>mesa-demos</filename> is an adequate and
maintained alternative.
</para></listitem>
<listitem><para>
<filename>gcc-8.3</filename>: Version 8.3 removed.
Replaced by 9.2.
</para></listitem>
<listitem><para>
<filename>gnome-themes-standard</filename>: Only needed
by gtk+ 2.x, which has been removed.
</para></listitem>
<listitem><para>
<filename>gtk+</filename>: GTK+ 2 is obsolete and has been
replaced by gtk+3.
</para></listitem>
<listitem><para>
<filename>irda-utils</filename>: Has become obsolete.
IrDA support has been removed from the Linux kernel in
version 4.17 and later.
</para></listitem>
<listitem><para>
<filename>libnewt-python</filename>:
<filename>libnewt</filename> Python support merged into
main <filename>libnewt</filename> recipe.
</para></listitem>
<listitem><para>
<filename>libsdl</filename>: Replaced by newer
<filename>libsdl2</filename>.
</para></listitem>
<listitem><para>
<filename>libx11-diet</filename>: Became obsolete.
</para></listitem>
<listitem><para>
<filename>libxx86dga</filename>: Removed obsolete client
library.
</para></listitem>
<listitem><para>
<filename>libxx86misc</filename>: Removed. Library is
redundant.
</para></listitem>
<listitem><para>
<filename>linux-yocto</filename>: Version 5.0 removed,
which is now redundant (5.2 / 4.19 present).
</para></listitem>
<listitem><para>
<filename>lsbinitscripts</filename>: Part of removed LSB
support.
</para></listitem>
<listitem><para>
<filename>lsb</filename>: Part of removed LSB support.
</para></listitem>
<listitem><para>
<filename>lsbtest</filename>: Part of removed LSB support.
</para></listitem>
<listitem><para>
<filename>openssl10</filename>: Replaced by newer
<filename>openssl</filename> version 1.1.
</para></listitem>
<listitem><para>
<filename>packagegroup-core-lsb</filename>: Part of removed
LSB support.
</para></listitem>
<listitem><para>
<filename>python-nose</filename>: Removed the Python 2.x
version of the recipe.
</para></listitem>
<listitem><para>
<filename>python-numpy</filename>: Removed the Python 2.x
version of the recipe.
</para></listitem>
<listitem><para>
<filename>python-scons</filename>: Removed the Python 2.x
version of the recipe.
</para></listitem>
<listitem><para>
<filename>source-highlight</filename>: No longer needed.
</para></listitem>
<listitem><para>
<filename>stress</filename>: Replaced by
<filename>stress-ng</filename>.
</para></listitem>
<listitem><para>
<filename>vulkan</filename>: Split into
<filename>vulkan-loader</filename>,
<filename>vulkan-headers</filename>, and
<filename>vulkan-tools</filename>.
</para></listitem>
<listitem><para>
<filename>weston-conf</filename>: Functionality moved to
<filename>weston-init</filename>.
</para></listitem>
</itemizedlist>
</para>
</section>

<section id='migration-3.0-packaging-changes'>
<title>Packaging Changes</title>

<para>
The following packaging changes have occurred.
<itemizedlist>
<listitem><para>
The
<ulink url='https://en.wikipedia.org/wiki/GNOME_Web'>Epiphany</ulink>
browser has been dropped from
<filename>packagegroup-self-hosted</filename> as it has
not been needed inside
<filename>build-appliance-image</filename> for
quite some time and was causing resource problems.
</para></listitem>
<listitem><para>
<filename>libcap-ng</filename> Python support has been
moved to a separate <filename>libcap-ng-python</filename>
recipe to streamline the build process when the Python
bindings are not needed.
</para></listitem>
<listitem><para>
<filename>libdrm</filename> now packages the file
<filename>amdgpu.ids</filename> into a separate
<filename>libdrm-amdgpu</filename> package.
</para></listitem>
<listitem><para>
<filename>python3</filename>: The
<filename>runpy</filename> module is now in the
<filename>python3-core</filename> package as it is
required to support the common "python3 -m" command usage.
</para></listitem>
<listitem><para>
<filename>distcc</filename> now provides separate
<filename>distcc-client</filename> and
<filename>distcc-server</filename> packages as typically
one or the other are needed, rather than both.
</para></listitem>
<listitem><para>
<filename>python*-setuptools</filename> recipes now
separately package the <filename>pkg_resources</filename>
module in a <filename>python-pkg-resources</filename> /
<filename>python3-pkg-resources</filename> package as
the module is useful independent of the rest of the
setuptools package.
The main <filename>python-setuptools</filename> /
<filename>python3-setuptools</filename> package depends
on this new package so you should only need to update
dependencies unless you want to take advantage of the
increased granularity.
</para></listitem>
</itemizedlist>
</para>
</section>

<section id='migration-3.0-cve-checking'>
<title>CVE Checking</title>

<para>
<filename>cve-check-tool</filename> has been functionally replaced
by a new <filename>cve-update-db</filename> recipe and
functionality built into the <filename>cve-check</filename> class.
The result uses NVD JSON data feeds rather than the deprecated
XML feeds that <filename>cve-check-tool</filename> was using,
supports CVSSv3 scoring, and makes other improvements.
</para>

<para>
Additionally, the <filename>CVE_CHECK_CVE_WHITELIST</filename>
variable has been replaced by
<filename>CVE_CHECK_WHITELIST</filename>.
</para>
</section>

<section id='migration-3.0-bitbake-changes'>
<title>Bitbake Changes</title>

<para>
The following BitBake changes have occurred.
<itemizedlist>
<listitem><para>
<filename>addtask</filename> statements now properly
validate dependent tasks.
Previously, an invalid task was silently ignored.
With this change, the invalid task generates a warning.
</para></listitem>
<listitem><para>
Other invalid <filename>addtask</filename> and
<filename>deltask</filename> usages now trigger these
warnings: "multiple target tasks arguments with
addtask / deltask", and "multiple before/after clauses".
</para></listitem>
<listitem><para>
The "multiconfig" prefix is now shortened to "mc".
"multiconfig" will continue to work, however it may be
removed in a future release.
</para></listitem>
<listitem><para>
The <filename>bitbake -g</filename> command no longer
generates a <filename>recipe-depends.dot</filename> file
as the contents (i.e. a reprocessed version of
<filename>task-depends.dot</filename>) were confusing.
</para></listitem>
<listitem><para>
The <filename>bb.build.FuncFailed</filename> exception,
previously raised by
<filename>bb.build.exec_func()</filename> when certain
other exceptions have occurred, has been removed.
The real underlying exceptions will be raised instead.
If you have calls to
<filename>bb.build.exec_func()</filename> in custom classes
or <filename>tinfoil-using</filename> scripts, any
references to <filename>bb.build.FuncFailed</filename>
should be cleaned up.
</para></listitem>
<listitem><para>
Additionally, the
<filename>bb.build.exec_func()</filename> no longer accepts
the "pythonexception" parameter.
The function now always raises exceptions.
Remove this argument in any calls to
<filename>bb.build.exec_func()</filename> in custom classes
or scripts.
</para></listitem>
<listitem><para>
The
<ulink url='&YOCTO_DOCS_BB_URL;#var-bb-BB_SETSCENE_VERIFY_FUNCTION2'><filename>BB_SETSCENE_VERIFY_FUNCTION2</filename></ulink>
is no longer used.
In the unlikely event that you have any references to it,
they should be removed.
</para></listitem>
<listitem><para>
The <filename>RunQueueExecuteScenequeue</filename> and
<filename>RunQueueExecuteTasks</filename> events have been
removed since setscene tasks are now executed as part of
the normal runqueue.
Any event handling code in custom classes or scripts that
handles these two events need to be updated.
</para></listitem>
<listitem><para>
The arguments passed to functions used with
<ulink url='&YOCTO_DOCS_BB_URL;#var-bb-BB_HASHCHECK_FUNCTION'><filename>BB_HASHCHECK_FUNCTION</filename></ulink>
have changed.
If you are using your own custom hash check function, see
<ulink url='http://git.yoctoproject.org/cgit/cgit.cgi/poky/commit/?id=40a5e193c4ba45c928fccd899415ea56b5417725'></ulink>
for details.
</para></listitem>
<listitem><para>
Task specifications in <filename>BB_TASKDEPDATA</filename>
and class implementations used in signature generator
classes now use "<fn>:<task>" everywhere rather than
the "." delimiter that was being used in some places.
This change makes it consistent with all areas in the code.
Custom signature generator classes and code that reads
<filename>BB_TASKDEPDATA</filename> need to be updated to
use ':' as a separator rather than '.'.
</para></listitem>
</itemizedlist>
</para>
</section>
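Tying the BB_HASHCHECK_FUNCTION item above back to the runqueue change earlier in this diff: validate_hash() now calls the hook with sq_data plus siginfo, currentcount and summary keyword arguments. A custom hash check function therefore needs a signature roughly like the sketch below; the body is purely illustrative (sstate_checkhashes in OpenEmbedded-Core is the real reference implementation):

```
def my_hash_check(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
    # sq_data is a dict of per-task dicts keyed by task id ("<fn>:<task>"),
    # e.g. sq_data['hash'], sq_data['hashfn'] and sq_data['unihash'].
    found = set()
    for tid in sq_data['hash']:
        # Decide here whether a matching setscene/sstate artefact exists.
        found.add(tid)
    # Return the task ids whose outputs can be reused; point
    # BB_HASHCHECK_FUNCTION = "my_hash_check" at this function.
    return found
```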
<section id='migration-3.0-sanity-checks'>
|
||||
<title>Sanity Checks</title>
|
||||
|
||||
<para>
|
||||
The following sanity check changes occurred.
|
||||
<itemizedlist>
|
||||
<listitem><para>
|
||||
<link linkend='var-SRC_URI'><filename>SRC_URI</filename></link>
|
||||
is now checked for usage of two problematic items:
|
||||
<itemizedlist>
|
||||
<listitem><para>
|
||||
"${PN}" prefix/suffix use - Warnings always appear
|
||||
if ${PN} is used.
|
||||
You must fix the issue regardless of whether
|
||||
multiconfig or anything else that would cause
|
||||
prefixing/suffixing to happen.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
Github archive tarballs - these are not guaranteed
|
||||
to be stable.
|
||||
Consequently, it is likely that the tarballs will
|
||||
be refreshed and thus the SRC_URI checksums
|
||||
will fail to apply.
|
||||
It is recommended that you fetch either an official
|
||||
release tarball or a specific revision from the
|
||||
actual Git repository instead.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
Either one of these items now trigger a warning by default.
|
||||
If you wish to disable this check, remove
|
||||
<filename>src-uri-bad</filename> from
|
||||
<link linkend='var-WARN_QA'><filename>WARN_QA</filename></link>.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
The <filename>file-rdeps</filename> runtime dependency
|
||||
check no longer expands
|
||||
<link linkend='var-RDEPENDS'><filename>RDEPENDS</filename></link>
|
||||
recursively as there is no mechanism to ensure they can be
|
||||
fully computed, and thus races sometimes result in errors
|
||||
either showing up or not.
|
||||
Thus, you might now see errors for missing runtime
|
||||
dependencies that were previously satisfied recursively.
|
||||
Here is an example: package A contains a shell script
|
||||
starting with <filename>#!/bin/bash</filename> but has no
|
||||
dependency on bash.
|
||||
However, package A depends on package B, which does depend
|
||||
on bash.
|
||||
You need to add the missing dependency or dependencies to
|
||||
resolve the warning.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
Setting <filename>DEPENDS_${PN}</filename> anywhere
|
||||
(i.e. typically in a recipe) now triggers an error.
|
||||
The error is triggered because
|
||||
<link linkend='var-DEPENDS'><filename>DEPENDS</filename></link>
|
||||
is not a package-specific variable unlike RDEPENDS.
|
||||
You should set <filename>DEPENDS</filename> instead.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
systemd currently does not work well with the musl C
|
||||
library because only upstream officially supports linking
|
||||
the library with glibc.
|
||||
Thus, a warning is shown when building systemd in
|
||||
conjunction with musl.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='migration-3.0-miscellaneous-changes'>
|
||||
<title>Miscellaneous Changes</title>
|
||||
|
||||
<para>
|
||||
The following miscellaneous changes have occurred.
|
||||
<itemizedlist>
|
||||
<listitem><para>
|
||||
The <filename>gnome</filename>
|
||||
class has been removed because it now does very little.
|
||||
You should update recipes that previously inherited this
|
||||
class to do the following:
|
||||
<literallayout class='monospaced'>
|
||||
inherit gnomebase gtk-icon-cache gconf mime
|
||||
</literallayout>
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
The
|
||||
<filename>meta/recipes-kernel/linux/linux-dtb.inc</filename>
|
||||
file has been removed.
|
||||
This file was previously deprecated in favor of setting
|
||||
<link linkend='var-KERNEL_DEVICETREE'><filename>KERNEL_DEVICETREE</filename></link>
|
||||
in any kernel recipe and only produced a warning.
|
||||
Remove any <filename>include</filename> or
|
||||
<filename>require</filename> statements pointing to this
|
||||
file.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<link linkend='var-TARGET_CFLAGS'><filename>TARGET_CFLAGS</filename></link>,
|
||||
<link linkend='var-TARGET_CPPFLAGS'><filename>TARGET_CPPFLAGS</filename></link>,
|
||||
<link linkend='var-TARGET_CXXFLAGS'><filename>TARGET_CXXFLAGS</filename></link>,
|
||||
and
|
||||
<link linkend='var-TARGET_LDFLAGS'><filename>TARGET_LDFLAGS</filename></link>
|
||||
are no longer exported to the external environment.
|
||||
This change did not require any changes to core recipes,
|
||||
which is a good indicator that no changes will be
|
||||
required.
|
||||
However, if for some reason the software being built by one
|
||||
of your recipes is expecting these variables to be set,
|
||||
then building the recipe will fail.
|
||||
In such cases, you must either export the variable or
|
||||
variables in the recipe or change the scripts so that
|
||||
exporting is not necessary.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
You must change the host distro identifier used in
|
||||
<link linkend='var-NATIVELSBSTRING'><filename>NATIVELSBSTRING</filename></link>
|
||||
to use all lowercase characters even if it does not contain
|
||||
a version number.
|
||||
This change is necessary only if you are not using
|
||||
<filename>uninative</filename> and
|
||||
<link linkend='var-SANITY_TESTED_DISTROS'><filename>SANITY_TESTED_DISTROS</filename></link>.
</para></listitem>
<listitem><para>
In the <filename>base-files</filename> recipe, writing the
hostname into <filename>/etc/hosts</filename> and
<filename>/etc/hostname</filename> is now done within the
main
<link linkend='ref-tasks-install'><filename>do_install</filename></link>
function rather than in the
<filename>do_install_basefilesissue</filename> function.
The change was made so that
<filename>do_install_basefilesissue</filename> is more
easily overridden without having to duplicate the hostname
functionality.
If you have done the latter (e.g. in a
<filename>base-files</filename> bbappend), then you should
remove it from your customized
<filename>do_install_basefilesissue</filename> function.
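For instance, a bbappend that re-implemented
<filename>do_install_basefilesissue</filename> and duplicated the
hostname handling with a line such as the following (illustrative)
no longer needs it:
<literallayout class='monospaced'>
     echo ${hostname} > ${D}${sysconfdir}/hostname
</literallayout>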
</para></listitem>
<listitem><para>
The <filename>wic --expand</filename> command now uses
commas to separate "key:value" pairs rather than hyphens.
<note>
The wic command-line help has not been updated to reflect
this change.
</note>
You must update any scripts or commands where you use
<filename>wic --expand</filename> with multiple
"key:value" pairs.
</para></listitem>
<listitem><para>
UEFI image variable settings have been moved from various
places to a central
<filename>conf/image-uefi.conf</filename>.
This change should not influence any existing configuration
as the <filename>meta/conf/image-uefi.conf</filename>
in the core metadata sets defaults that can be overridden
in the same manner as before.
</para></listitem>
<listitem><para>
<filename>conf/distro/include/world-broken.inc</filename>
has been removed.
For cases where certain recipes need to be disabled when
using the musl C library, these recipes now have
<filename>COMPATIBLE_HOST_libc-musl</filename> set with a
comment that explains why.
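A typical setting looks like the following sketch (the comment
text is illustrative):
<literallayout class='monospaced'>
     # Does not build against the musl C library.
     COMPATIBLE_HOST_libc-musl = "null"
</literallayout>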
</para></listitem>
</itemizedlist>
</para>
</section>
</section>

</chapter>
<!--
vim: expandtab tw=80 ts=4

@@ -428,6 +428,14 @@
|
||||
variable to specify additional configuration options to be passed
|
||||
using the <filename>cmake</filename> command line.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
On the occasion that you would be installing custom CMake toolchain
|
||||
files supplied by the application being built, you should install them
|
||||
to the preferred CMake Module directory:
|
||||
<filename>${D}${datadir}/cmake/</filename> Modules during
|
||||
<ulink url='&YOCTO_DOCS_REF_URL;#ref-tasks-install'><filename>do_install</filename></ulink>.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='ref-classes-cml1'>
|
||||
|
||||
@@ -133,9 +133,9 @@
|
||||
<revremark>Released with the Yocto Project 2.7 Release.</revremark>
|
||||
</revision>
|
||||
<revision>
|
||||
<revnumber>2.8</revnumber>
|
||||
<revnumber>3.0</revnumber>
|
||||
<date>&REL_MONTH_YEAR;</date>
|
||||
<revremark>Released with the Yocto Project 2.8 Release.</revremark>
|
||||
<revremark>Released with the Yocto Project 3.0 Release.</revremark>
|
||||
</revision>
|
||||
</revhistory>
|
||||
|
||||
|
||||
@@ -1349,8 +1349,8 @@
|
||||
<glossdef>
|
||||
<para role="glossdeffirst">
|
||||
<!-- <para role="glossdeffirst"><imagedata fileref="figures/define-generic.png" /> -->
|
||||
Specifies each additional separate configuration when you are
|
||||
building targets with multiple configurations.
|
||||
Specifies each additional separate configuration when you
|
||||
are building targets with multiple configurations.
|
||||
Use this variable in your
|
||||
<filename>conf/local.conf</filename> configuration file.
|
||||
Specify a <replaceable>multiconfigname</replaceable> for
|
||||
@@ -2182,9 +2182,10 @@
|
||||
|
||||
<para>
|
||||
The <filename>BUSYBOX_SPLIT_SUID</filename> variable
|
||||
defaults to "1", which results in a single output
|
||||
defaults to "1", which results in splitting the output
|
||||
executable file.
|
||||
Set the variable to "0" to split the output file.
|
||||
Set the variable to "0" to get a single output executable
|
||||
file.
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
@@ -67,9 +67,9 @@
|
||||
<revremark>Released with the Yocto Project 2.7 Release.</revremark>
|
||||
</revision>
|
||||
<revision>
|
||||
<revnumber>2.8</revnumber>
|
||||
<revnumber>3.0</revnumber>
|
||||
<date>&REL_MONTH_YEAR;</date>
|
||||
<revremark>Released with the Yocto Project 2.8 Release.</revremark>
|
||||
<revremark>Released with the Yocto Project 3.0 Release.</revremark>
|
||||
</revision>
|
||||
</revhistory>
|
||||
|
||||
|
||||
@@ -77,9 +77,9 @@
|
||||
<revremark>Released with the Yocto Project 2.7 Release.</revremark>
|
||||
</revision>
|
||||
<revision>
|
||||
<revnumber>2.8</revnumber>
|
||||
<revnumber>3.0</revnumber>
|
||||
<date>&REL_MONTH_YEAR;</date>
|
||||
<revremark>Released with the Yocto Project 2.8 Release.</revremark>
|
||||
<revremark>Released with the Yocto Project 3.0 Release.</revremark>
|
||||
</revision>
|
||||
</revhistory>
|
||||
|
||||
|
||||
@@ -1,36 +1,36 @@
|
||||
# Processes bitbake-user-manual (<word>-<word>-<word> style).
|
||||
# This style is for manual three-word folders, which currently is only the BitBake User Manual.
|
||||
# We used to have the "yocto-project-qs" and "poky-ref-manual" folders but no longer do.
|
||||
# s@"ulink" href="http://www.yoctoproject.org/docs/2.8/[a-z]*-[a-z]*-[a-z]*/[a-z]*-[a-z]*-[a-z]*.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/2.8/bitbake-user-manual/bitbake-user-manual.html#@"link" href="#@g
|
||||
# s@"ulink" href="http://www.yoctoproject.org/docs/3.0/[a-z]*-[a-z]*-[a-z]*/[a-z]*-[a-z]*-[a-z]*.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/3.0/bitbake-user-manual/bitbake-user-manual.html#@"link" href="#@g
|
||||
|
||||
# Processes all other manuals (<word>-<word> style).
|
||||
# This style is for manual folders that use two words, which is the standard now (e.g. "ref-manual").
|
||||
# Here is the one-liner:
|
||||
# s@"ulink" href="http://www.yoctoproject.org/docs/2.8/[a-z]*-[a-z]*/[a-z]*-[a-z]*.html#@"link" href="#@g
|
||||
# s@"ulink" href="http://www.yoctoproject.org/docs/3.0/[a-z]*-[a-z]*/[a-z]*-[a-z]*.html#@"link" href="#@g
|
||||
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/2.8/sdk-manual/sdk-manual.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/2.8/bsp-guide/bsp-guide.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/2.8/dev-manual/dev-manual.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/2.8/overview-manual/overview-manual.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/2.8/brief-yoctoprojectqs/brief-yoctoprojectqs.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/2.8/kernel-dev/kernel-dev.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/2.8/profile-manual/profile-manual.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/2.8/ref-manual/ref-manual.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/2.8/toaster-manual/toaster-manual.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/3.0/sdk-manual/sdk-manual.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/3.0/bsp-guide/bsp-guide.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/3.0/dev-manual/dev-manual.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/3.0/overview-manual/overview-manual.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/3.0/brief-yoctoprojectqs/brief-yoctoprojectqs.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/3.0/kernel-dev/kernel-dev.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/3.0/profile-manual/profile-manual.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/3.0/ref-manual/ref-manual.html#@"link" href="#@g
|
||||
s@"ulink" href="http://www.yoctoproject.org/docs/3.0/toaster-manual/toaster-manual.html#@"link" href="#@g
|
||||
|
||||
# Process cases where just an external manual is referenced without an id anchor
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/2.8/brief-yoctoprojectqs/brief-yoctoprojectqs.html" target="_top">Yocto Project Quick Build</a>@Yocto Project Quick Build@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/2.8/bitbake-user-manual/bitbake-user-manual.html" target="_top">BitBake User Manual</a>@BitBake User Manual@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/2.8/dev-manual/dev-manual.html" target="_top">Yocto Project Development Tasks Manual</a>@Yocto Project Development Tasks Manual@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/2.8/overview-manual/overview-manual.html" target="_top">Yocto Project Overview and Concepts Manual</a>@Yocto project Overview and Concepts Manual@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/2.8/sdk-manual/sdk-manual.html" target="_top">Yocto Project Application Development and the Extensible Software Development Kit (eSDK)</a>@Yocto Project Application Development and the Extensible Software Development Kit (eSDK)@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/2.8/bsp-guide/bsp-guide.html" target="_top">Yocto Project Board Support Package (BSP) Developer's Guide</a>@Yocto Project Board Support Package (BSP) Developer's Guide@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/2.8/profile-manual/profile-manual.html" target="_top">Yocto Project Profiling and Tracing Manual</a>@Yocto Project Profiling and Tracing Manual@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/2.8/kernel-dev/kernel-dev.html" target="_top">Yocto Project Linux Kernel Development Manual</a>@Yocto Project Linux Kernel Development Manual@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/2.8/ref-manual/ref-manual.html" target="_top">Yocto Project Reference Manual</a>@Yocto Project Reference Manual@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/2.8/toaster-manual/toaster-manual.html" target="_top">Toaster User Manual</a>@Toaster User Manual@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/3.0/brief-yoctoprojectqs/brief-yoctoprojectqs.html" target="_top">Yocto Project Quick Build</a>@Yocto Project Quick Build@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/3.0/bitbake-user-manual/bitbake-user-manual.html" target="_top">BitBake User Manual</a>@BitBake User Manual@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/3.0/dev-manual/dev-manual.html" target="_top">Yocto Project Development Tasks Manual</a>@Yocto Project Development Tasks Manual@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/3.0/overview-manual/overview-manual.html" target="_top">Yocto Project Overview and Concepts Manual</a>@Yocto project Overview and Concepts Manual@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/3.0/sdk-manual/sdk-manual.html" target="_top">Yocto Project Application Development and the Extensible Software Development Kit (eSDK)</a>@Yocto Project Application Development and the Extensible Software Development Kit (eSDK)@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/3.0/bsp-guide/bsp-guide.html" target="_top">Yocto Project Board Support Package (BSP) Developer's Guide</a>@Yocto Project Board Support Package (BSP) Developer's Guide@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/3.0/profile-manual/profile-manual.html" target="_top">Yocto Project Profiling and Tracing Manual</a>@Yocto Project Profiling and Tracing Manual@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/3.0/kernel-dev/kernel-dev.html" target="_top">Yocto Project Linux Kernel Development Manual</a>@Yocto Project Linux Kernel Development Manual@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/3.0/ref-manual/ref-manual.html" target="_top">Yocto Project Reference Manual</a>@Yocto Project Reference Manual@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/3.0/toaster-manual/toaster-manual.html" target="_top">Toaster User Manual</a>@Toaster User Manual@g
|
||||
|
||||
# Process a single, rogue occurrence of a linked reference to the Mega-Manual.
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/2.8/mega-manual/mega-manual.html" target="_top">Yocto Project Mega-Manual</a>@Yocto Project Mega-Manual@g
|
||||
s@<a class="ulink" href="http://www.yoctoproject.org/docs/3.0/mega-manual/mega-manual.html" target="_top">Yocto Project Mega-Manual</a>@Yocto Project Mega-Manual@g
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
DISTRO = "poky"
|
||||
DISTRO_NAME = "Poky (Yocto Project Reference Distro)"
|
||||
DISTRO_VERSION = "3.0"
|
||||
DISTRO_VERSION = "3.0.1"
|
||||
DISTRO_CODENAME = "zeus"
|
||||
SDK_VENDOR = "-pokysdk"
|
||||
SDK_VERSION = "${@d.getVar('DISTRO_VERSION').replace('snapshot-${DATE}', 'snapshot')}"
|
||||
|
||||
@@ -13,11 +13,6 @@ class VirglTest(OERuntimeTestCase):
|
||||
|
||||
@OETestDepends(['virgl.VirglTest.test_kernel_driver'])
|
||||
def test_kmscube(self):
|
||||
|
||||
distro = oe.lsb.distro_identifier()
|
||||
if distro and distro == 'centos-7':
|
||||
self.skipTest('kmscube is not working when centos 7 is the host OS')
|
||||
|
||||
status, output = self.target.run('kmscube', timeout=30)
|
||||
self.assertEqual(status, 0, "kmscube exited with non-zero status %d and output:\n%s" %(status, output))
|
||||
self.assertIn('renderer: "virgl"', output, "kmscube does not seem to use virgl:\n%s" %(output))
|
||||
|
||||
@@ -0,0 +1,37 @@
|
||||
commit ced2ec32b657a7f52604b2e16e5d5881041c517a
|
||||
Author: OpenEmbedded <oe.patch@oe>
|
||||
Date: Mon Nov 18 18:43:15 2019 +0100
|
||||
|
||||
Backport of the NEWS file from version 1.6.0
|
||||
|
||||
diff --git a/doc/NEWS b/doc/NEWS
|
||||
index 69793fc..fd49b1c 100644
|
||||
--- a/doc/NEWS
|
||||
+++ b/doc/NEWS
|
||||
@@ -1,3 +1,26 @@
|
||||
+1.6.0 - 15 March 2015
|
||||
+ - fix lstat64 support when unavailable - separate patches supplied by
|
||||
+ Ganael Laplanche and Peter Korsgaard
|
||||
+ - (#1506) new option "-D" / "--delay-start" to only show bar after N
|
||||
+ seconds (Damon Harper)
|
||||
+ - new option "--fineta" / "-I" to show ETA as time of day rather than time
|
||||
+ remaining - patch supplied by Erkki Seppälä (r147)
|
||||
+ - (#1509) change ETA (--eta / -e) so that days are given if the hours
|
||||
+ remaining are 24 or more (Jacek Wielemborek)
|
||||
+ - (#1499) repeat read and write attempts on partial buffer fill/empty to
|
||||
+ work around post-signal transfer rate drop reported by Ralf Ramsauer
|
||||
+ - (#1507) do not try to calculate total size in line mode, due to bug
|
||||
+ reported by Jacek Wielemborek and Michiel Van Herwegen
|
||||
+ - cleanup: removed defunct RATS comments and unnecessary copyright notices
|
||||
+ - clean up displayed lines when using --watchfd PID, when PID exits
|
||||
+ - output errors on a new line to avoid overwriting transfer bar
|
||||
+
|
||||
+1.5.7 - 26 August 2014
|
||||
+ - show KiB instead of incorrect kiB (Debian bug #706175)
|
||||
+ - (#1284) do not gzip man page, for non-Linux OSes (Bob Friesenhahn)
|
||||
+ - work around "awk" bug in tests/016-numeric-timer in decimal "," locales
|
||||
+ - fix "make rpm" and "make srpm", extend "make release" to sign releases
|
||||
+
|
||||
1.5.3 - 4 May 2014
|
||||
- remove SPLICE_F_NONBLOCK to fix problem with slow splice() (Jan Seda)
|
||||
|
||||
@@ -3,7 +3,9 @@ LICENSE = "Artistic-2.0"
|
||||
LIC_FILES_CHKSUM = "file://doc/COPYING;md5=9c50db2589ee3ef10a9b7b2e50ce1d02"
|
||||
|
||||
SRC_URI = "http://www.ivarch.com/programs/sources/pv-${PV}.tar.gz \
|
||||
file://0001-Add-a-note-line-to-the-quick-reference.patch"
|
||||
file://0001-Add-a-note-line-to-the-quick-reference.patch \
|
||||
file://backported.patch \
|
||||
"
|
||||
UPSTREAM_CHECK_URI = "http://www.ivarch.com/programs/pv.shtml"
|
||||
RECIPE_NO_UPDATE_REASON = "This recipe is used to test devtool upgrade feature"
|
||||
|
||||
|
||||
@@ -3,7 +3,9 @@ LICENSE = "Artistic-2.0"
|
||||
LIC_FILES_CHKSUM = "file://doc/COPYING;md5=9c50db2589ee3ef10a9b7b2e50ce1d02"
|
||||
|
||||
SRC_URI = "http://www.ivarch.com/programs/sources/pv-${PV}.tar.gz \
|
||||
file://0001-Add-a-note-line-to-the-quick-reference.patch"
|
||||
file://0001-Add-a-note-line-to-the-quick-reference.patch \
|
||||
file://backported.patch \
|
||||
"
|
||||
UPSTREAM_CHECK_URI = "http://www.ivarch.com/programs/pv.shtml"
|
||||
RECIPE_NO_UPDATE_REASON = "This recipe is used to test devtool upgrade feature"
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ require recipeutils-test.inc
|
||||
|
||||
LICENSE = "Proprietary"
|
||||
LIC_FILES_CHKSUM = "file://${WORKDIR}/somefile;md5=d41d8cd98f00b204e9800998ecf8427e"
|
||||
DEPENDS += "virtual/libx11"
|
||||
DEPENDS += "zlib"
|
||||
|
||||
BBCLASSEXTEND = "native nativesdk"
|
||||
|
||||
|
||||
@@ -62,7 +62,7 @@ python do_cve_check () {
|
||||
|
||||
}
|
||||
|
||||
addtask cve_check after do_unpack before do_build
|
||||
addtask cve_check before do_build
|
||||
do_cve_check[depends] = "cve-update-db-native:do_populate_cve_db"
|
||||
do_cve_check[nostamp] = "1"
|
||||
|
||||
@@ -70,7 +70,6 @@ python cve_check_cleanup () {
|
||||
"""
|
||||
Delete the file used to gather all the CVE information.
|
||||
"""
|
||||
|
||||
bb.utils.remove(e.data.getVar("CVE_CHECK_TMP_FILE"))
|
||||
}
|
||||
|
||||
@@ -166,7 +165,6 @@ def check_cves(d, patched_cves):
|
||||
"""
|
||||
Connect to the NVD database and find unpatched cves.
|
||||
"""
|
||||
import ast, csv, tempfile, subprocess, io
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
cves_unpatched = []
|
||||
@@ -188,63 +186,74 @@ def check_cves(d, patched_cves):
|
||||
cve_whitelist = d.getVar("CVE_CHECK_WHITELIST").split()
|
||||
|
||||
import sqlite3
|
||||
db_file = d.getVar("CVE_CHECK_DB_FILE")
|
||||
conn = sqlite3.connect(db_file)
|
||||
db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
|
||||
conn = sqlite3.connect(db_file, uri=True)
|
||||
|
||||
# For each of the known product names (e.g. curl has CPEs using curl and libcurl)...
|
||||
for product in products:
|
||||
c = conn.cursor()
|
||||
if ":" in product:
|
||||
vendor, product = product.split(":", 1)
|
||||
c.execute("SELECT * FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR IS ?", (product, vendor))
|
||||
else:
|
||||
c.execute("SELECT * FROM PRODUCTS WHERE PRODUCT IS ?", (product,))
|
||||
vendor = "%"
|
||||
|
||||
for row in c:
|
||||
cve = row[0]
|
||||
version_start = row[3]
|
||||
operator_start = row[4]
|
||||
version_end = row[5]
|
||||
operator_end = row[6]
|
||||
# Find all relevant CVE IDs.
|
||||
for cverow in conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor)):
|
||||
cve = cverow[0]
|
||||
|
||||
if cve in cve_whitelist:
|
||||
bb.note("%s-%s has been whitelisted for %s" % (product, pv, cve))
|
||||
# TODO: this should be in the report as 'whitelisted'
|
||||
patched_cves.add(cve)
|
||||
continue
|
||||
elif cve in patched_cves:
|
||||
bb.note("%s has been patched" % (cve))
|
||||
else:
|
||||
to_append = False
|
||||
continue
|
||||
|
||||
vulnerable = False
|
||||
for row in conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor)):
|
||||
(_, _, _, version_start, operator_start, version_end, operator_end) = row
|
||||
#bb.debug(2, "Evaluating row " + str(row))
|
||||
|
||||
if (operator_start == '=' and pv == version_start):
|
||||
cves_unpatched.append(cve)
|
||||
vulnerable = True
|
||||
else:
|
||||
if operator_start:
|
||||
try:
|
||||
to_append_start = (operator_start == '>=' and LooseVersion(pv) >= LooseVersion(version_start))
|
||||
to_append_start |= (operator_start == '>' and LooseVersion(pv) > LooseVersion(version_start))
|
||||
vulnerable_start = (operator_start == '>=' and LooseVersion(pv) >= LooseVersion(version_start))
|
||||
vulnerable_start |= (operator_start == '>' and LooseVersion(pv) > LooseVersion(version_start))
|
||||
except:
|
||||
bb.note("%s: Failed to compare %s %s %s for %s" %
|
||||
bb.warn("%s: Failed to compare %s %s %s for %s" %
|
||||
(product, pv, operator_start, version_start, cve))
|
||||
to_append_start = False
|
||||
vulnerable_start = False
|
||||
else:
|
||||
to_append_start = False
|
||||
vulnerable_start = False
|
||||
|
||||
if operator_end:
|
||||
try:
|
||||
to_append_end = (operator_end == '<=' and LooseVersion(pv) <= LooseVersion(version_end))
|
||||
to_append_end |= (operator_end == '<' and LooseVersion(pv) < LooseVersion(version_end))
|
||||
vulnerable_end = (operator_end == '<=' and LooseVersion(pv) <= LooseVersion(version_end))
|
||||
vulnerable_end |= (operator_end == '<' and LooseVersion(pv) < LooseVersion(version_end))
|
||||
except:
|
||||
bb.note("%s: Failed to compare %s %s %s for %s" %
|
||||
bb.warn("%s: Failed to compare %s %s %s for %s" %
|
||||
(product, pv, operator_end, version_end, cve))
|
||||
to_append_end = False
|
||||
vulnerable_end = False
|
||||
else:
|
||||
to_append_end = False
|
||||
vulnerable_end = False
|
||||
|
||||
if operator_start and operator_end:
|
||||
to_append = to_append_start and to_append_end
|
||||
vulnerable = vulnerable_start and vulnerable_end
|
||||
else:
|
||||
to_append = to_append_start or to_append_end
|
||||
vulnerable = vulnerable_start or vulnerable_end
|
||||
|
||||
if to_append:
|
||||
if vulnerable:
|
||||
bb.note("%s-%s is vulnerable to %s" % (product, pv, cve))
|
||||
cves_unpatched.append(cve)
|
||||
bb.debug(2, "%s-%s is not patched for %s" % (product, pv, cve))
|
||||
break
|
||||
|
||||
if not vulnerable:
|
||||
bb.note("%s-%s is not vulnerable to %s" % (product, pv, cve))
|
||||
# TODO: not patched but not vulnerable
|
||||
patched_cves.add(cve)
|
||||
|
||||
conn.close()
|
||||
|
||||
return (list(patched_cves), cves_unpatched)
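As an aside, not part of the diff above: the start/end operator handling in check_cves() reduces to the following standalone sketch (the function name and the sample range are illustrative only):

    # Sketch of the version-range test used above: a CVE entry may carry
    # a lower bound, an upper bound, or both, each with its own operator.
    from distutils.version import LooseVersion

    def in_cve_range(pv, version_start, operator_start, version_end, operator_end):
        # An exact-match entry ("=") flags the version directly.
        if operator_start == '=':
            return pv == version_start
        start_ok = False
        if operator_start:
            start_ok = (operator_start == '>=' and LooseVersion(pv) >= LooseVersion(version_start)) \
                    or (operator_start == '>' and LooseVersion(pv) > LooseVersion(version_start))
        end_ok = False
        if operator_end:
            end_ok = (operator_end == '<=' and LooseVersion(pv) <= LooseVersion(version_end)) \
                  or (operator_end == '<' and LooseVersion(pv) < LooseVersion(version_end))
        # Both bounds present: both must hold; otherwise either side is enough.
        if operator_start and operator_end:
            return start_ok and end_ok
        return start_ok or end_ok

    print(in_cve_range("1.5.3", "1.0", ">=", "1.6.0", "<"))  # True for this made-up range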
|
||||
@@ -252,31 +261,23 @@ def check_cves(d, patched_cves):
|
||||
def get_cve_info(d, cves):
|
||||
"""
|
||||
Get CVE information from the database.
|
||||
|
||||
Unfortunately the only way to get CVE info is set the output to
|
||||
html (hard to parse) or query directly the database.
|
||||
"""
|
||||
|
||||
try:
|
||||
import sqlite3
|
||||
except ImportError:
|
||||
from pysqlite2 import dbapi2 as sqlite3
|
||||
import sqlite3
|
||||
|
||||
cve_data = {}
|
||||
db_file = d.getVar("CVE_CHECK_DB_FILE")
|
||||
placeholder = ",".join("?" * len(cves))
|
||||
query = "SELECT * FROM NVD WHERE id IN (%s)" % placeholder
|
||||
conn = sqlite3.connect(db_file)
|
||||
cur = conn.cursor()
|
||||
for row in cur.execute(query, tuple(cves)):
|
||||
cve_data[row[0]] = {}
|
||||
cve_data[row[0]]["summary"] = row[1]
|
||||
cve_data[row[0]]["scorev2"] = row[2]
|
||||
cve_data[row[0]]["scorev3"] = row[3]
|
||||
cve_data[row[0]]["modified"] = row[4]
|
||||
cve_data[row[0]]["vector"] = row[5]
|
||||
conn.close()
|
||||
conn = sqlite3.connect(d.getVar("CVE_CHECK_DB_FILE"))
|
||||
|
||||
for cve in cves:
|
||||
for row in conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,)):
|
||||
cve_data[row[0]] = {}
|
||||
cve_data[row[0]]["summary"] = row[1]
|
||||
cve_data[row[0]]["scorev2"] = row[2]
|
||||
cve_data[row[0]]["scorev3"] = row[3]
|
||||
cve_data[row[0]]["modified"] = row[4]
|
||||
cve_data[row[0]]["vector"] = row[5]
|
||||
|
||||
conn.close()
|
||||
return cve_data
|
||||
|
||||
def cve_write_data(d, patched, unpatched, cve_data):
|
||||
|
||||
@@ -356,17 +356,6 @@ set_icecc_env() {
|
||||
return
|
||||
fi
|
||||
|
||||
# Create symlinks to icecc in the recipe-sysroot directory
|
||||
mkdir -p ${ICE_PATH}
|
||||
if [ -n "${KERNEL_CC}" ]; then
|
||||
compilers="${@get_cross_kernel_cc(bb,d)}"
|
||||
else
|
||||
compilers="${HOST_PREFIX}gcc ${HOST_PREFIX}g++"
|
||||
fi
|
||||
for compiler in $compilers; do
|
||||
ln -sf ${ICECC_BIN} ${ICE_PATH}/$compiler
|
||||
done
|
||||
|
||||
ICECC_CC="${@icecc_get_and_check_tool(bb, d, "gcc")}"
|
||||
ICECC_CXX="${@icecc_get_and_check_tool(bb, d, "g++")}"
|
||||
# cannot use icecc_get_and_check_tool here because it assumes as without target_sys prefix
|
||||
@@ -385,6 +374,26 @@ set_icecc_env() {
|
||||
return
|
||||
fi
|
||||
|
||||
# Create symlinks to icecc and wrapper-scripts in the recipe-sysroot directory
|
||||
mkdir -p $ICE_PATH/symlinks
|
||||
if [ -n "${KERNEL_CC}" ]; then
|
||||
compilers="${@get_cross_kernel_cc(bb,d)}"
|
||||
else
|
||||
compilers="${HOST_PREFIX}gcc ${HOST_PREFIX}g++"
|
||||
fi
|
||||
for compiler in $compilers; do
|
||||
ln -sf $ICECC_BIN $ICE_PATH/symlinks/$compiler
|
||||
rm -f $ICE_PATH/$compiler
|
||||
cat <<-__EOF__ > $ICE_PATH/$compiler
|
||||
#!/bin/sh -e
|
||||
export ICECC_VERSION=$ICECC_VERSION
|
||||
export ICECC_CC=$ICECC_CC
|
||||
export ICECC_CXX=$ICECC_CXX
|
||||
$ICE_PATH/symlinks/$compiler "\$@"
|
||||
__EOF__
|
||||
chmod 775 $ICE_PATH/$compiler
|
||||
done
|
||||
|
||||
ICECC_AS="`${ICECC_CC} -print-prog-name=as`"
|
||||
# for target recipes should return something like:
|
||||
# /OE/tmp-eglibc/sysroots/x86_64-linux/usr/libexec/arm920tt-oe-linux-gnueabi/gcc/arm-oe-linux-gnueabi/4.8.2/as
|
||||
@@ -417,7 +426,6 @@ set_icecc_env() {
|
||||
export CCACHE_PATH="$PATH"
|
||||
export CCACHE_DISABLE="1"
|
||||
|
||||
export ICECC_VERSION ICECC_CC ICECC_CXX
|
||||
export PATH="$ICE_PATH:$PATH"
|
||||
|
||||
bbnote "Using icecc path: $ICE_PATH"
|
||||
|
||||
@@ -64,6 +64,15 @@ do_rm_work () {
|
||||
mv $i `echo $i | sed -e "s#do_image_complete#do_image_complete_setscene#"`
|
||||
i=dummy
|
||||
;;
|
||||
*do_image_qa_setscene*)
|
||||
# Ensure we don't 'stack' setscene extensions to this stamp with the section below
|
||||
i=dummy
|
||||
;;
|
||||
*do_image_qa*)
|
||||
# Promote do_image_qa stamps to setscene versions (ahead of *do_image* below)
|
||||
mv $i `echo $i | sed -e "s#do_image_qa#do_image_qa_setscene#"`
|
||||
i=dummy
|
||||
;;
|
||||
*do_package_write*|*do_rootfs*|*do_image*|*do_bootimg*|*do_write_qemuboot_conf*|*do_build*)
|
||||
i=dummy
|
||||
;;
|
||||
|
||||
@@ -523,6 +523,7 @@ def check_wsl(d):
|
||||
|
||||
# Tar version 1.24 and onwards handle overwriting symlinks correctly
|
||||
# but earlier versions do not; this needs to work properly for sstate
|
||||
# Version 1.28 is needed so opkg-build works correctly when reproducible builds are enabled
|
||||
def check_tar_version(sanity_data):
|
||||
from distutils.version import LooseVersion
|
||||
import subprocess
|
||||
@@ -532,7 +533,9 @@ def check_tar_version(sanity_data):
|
||||
return "Unable to execute tar --version, exit code %d\n%s\n" % (e.returncode, e.output)
|
||||
version = result.split()[3]
|
||||
if LooseVersion(version) < LooseVersion("1.24"):
|
||||
return "Your version of tar is older than 1.24 and has bugs which will break builds. Please install a newer version of tar.\n"
|
||||
return "Your version of tar is older than 1.24 and has bugs which will break builds. Please install a newer version of tar (1.28+).\n"
|
||||
if LooseVersion(version) < LooseVersion("1.28"):
|
||||
return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the projects buildtools-tarball from our last release).\n"
|
||||
return None
|
||||
|
||||
# We use git parameters and functionality only found in 1.7.8 or later
|
||||
@@ -798,6 +801,11 @@ def check_sanity_everybuild(status, d):
|
||||
elif d.getVar('SDK_ARCH', False) == "${BUILD_ARCH}":
|
||||
status.addresult('SDKMACHINE is set, but SDK_ARCH has not been changed as a result - SDKMACHINE may have been set too late (e.g. in the distro configuration)\n')
|
||||
|
||||
# If SDK_VENDOR looks like "-my-sdk" then the triples are badly formed so fail early
|
||||
sdkvendor = d.getVar("SDK_VENDOR")
|
||||
if not (sdkvendor.startswith("-") and sdkvendor.count("-") == 1):
|
||||
status.addresult("SDK_VENDOR should be of the form '-foosdk' with a single dash\n")
|
||||
|
||||
check_supported_distro(d)
|
||||
|
||||
omask = os.umask(0o022)
|
||||
|
||||
@@ -45,7 +45,7 @@ python uninative_event_fetchloader() {
|
||||
tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR"), chksum)
|
||||
tarballpath = os.path.join(tarballdir, tarball)
|
||||
|
||||
if not os.path.exists(tarballpath):
|
||||
if not os.path.exists(tarballpath + ".done"):
|
||||
bb.utils.mkdirhier(tarballdir)
|
||||
if d.getVar("UNINATIVE_URL") == "unset":
|
||||
bb.fatal("Uninative selected but not configured, please set UNINATIVE_URL")
|
||||
|
||||
@@ -15,10 +15,6 @@ DISABLE_STATIC_pn-nativesdk-libcap = ""
|
||||
DISABLE_STATIC_pn-libpcap = ""
|
||||
# needed by gdb
|
||||
DISABLE_STATIC_pn-readline = ""
|
||||
# needed by pseudo
|
||||
DISABLE_STATIC_pn-sqlite3 = ""
|
||||
DISABLE_STATIC_pn-sqlite3-native = ""
|
||||
DISABLE_STATIC_pn-nativesdk-sqlite3 = ""
|
||||
# openjade/sgml-common have build issues without static libs
|
||||
DISABLE_STATIC_pn-sgml-common-native = ""
|
||||
DISABLE_STATIC_pn-openjade-native = ""
|
||||
|
||||
@@ -1496,11 +1496,13 @@ class DevtoolUpgradeTests(DevtoolBase):
|
||||
recipedir = os.path.dirname(oldrecipefile)
|
||||
olddir = os.path.join(recipedir, recipe + '-' + oldversion)
|
||||
patchfn = '0001-Add-a-note-line-to-the-quick-reference.patch'
|
||||
backportedpatchfn = 'backported.patch'
|
||||
self.assertExists(os.path.join(olddir, patchfn), 'Original patch file does not exist')
|
||||
return recipe, oldrecipefile, recipedir, olddir, newversion, patchfn
|
||||
self.assertExists(os.path.join(olddir, backportedpatchfn), 'Backported patch file does not exist')
|
||||
return recipe, oldrecipefile, recipedir, olddir, newversion, patchfn, backportedpatchfn
|
||||
|
||||
def test_devtool_finish_upgrade_origlayer(self):
|
||||
recipe, oldrecipefile, recipedir, olddir, newversion, patchfn = self._setup_test_devtool_finish_upgrade()
|
||||
recipe, oldrecipefile, recipedir, olddir, newversion, patchfn, backportedpatchfn = self._setup_test_devtool_finish_upgrade()
|
||||
# Ensure the recipe is where we think it should be (so that cleanup doesn't trash things)
|
||||
self.assertIn('/meta-selftest/', recipedir)
|
||||
# Try finish to the original layer
|
||||
@@ -1511,14 +1513,23 @@ class DevtoolUpgradeTests(DevtoolBase):
|
||||
self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipe), 'Recipe directory should not exist after finish')
|
||||
self.assertNotExists(oldrecipefile, 'Old recipe file should have been deleted but wasn\'t')
|
||||
self.assertNotExists(os.path.join(olddir, patchfn), 'Old patch file should have been deleted but wasn\'t')
|
||||
self.assertNotExists(os.path.join(olddir, backportedpatchfn), 'Old backported patch file should have been deleted but wasn\'t')
|
||||
newrecipefile = os.path.join(recipedir, '%s_%s.bb' % (recipe, newversion))
|
||||
newdir = os.path.join(recipedir, recipe + '-' + newversion)
|
||||
self.assertExists(newrecipefile, 'New recipe file should have been copied into existing layer but wasn\'t')
|
||||
self.assertExists(os.path.join(newdir, patchfn), 'Patch file should have been copied into new directory but wasn\'t')
|
||||
self.assertNotExists(os.path.join(newdir, backportedpatchfn), 'Backported patch file should not have been copied into new directory but was')
|
||||
self.assertExists(os.path.join(newdir, '0002-Add-a-comment-to-the-code.patch'), 'New patch file should have been created but wasn\'t')
|
||||
with open(newrecipefile, 'r') as f:
|
||||
newcontent = f.read()
|
||||
self.assertNotIn(backportedpatchfn, newcontent, "Backported patch should have been removed from the recipe but wasn't")
|
||||
self.assertIn(patchfn, newcontent, "Old patch should have not been removed from the recipe but was")
|
||||
self.assertIn("0002-Add-a-comment-to-the-code.patch", newcontent, "New patch should have been added to the recipe but wasn't")
|
||||
self.assertIn("http://www.ivarch.com/programs/sources/pv-${PV}.tar.gz", newcontent, "New recipe no longer has upstream source in SRC_URI")
|
||||
|
||||
|
||||
def test_devtool_finish_upgrade_otherlayer(self):
|
||||
recipe, oldrecipefile, recipedir, olddir, newversion, patchfn = self._setup_test_devtool_finish_upgrade()
|
||||
recipe, oldrecipefile, recipedir, olddir, newversion, patchfn, backportedpatchfn = self._setup_test_devtool_finish_upgrade()
|
||||
# Ensure the recipe is where we think it should be (so that cleanup doesn't trash things)
|
||||
self.assertIn('/meta-selftest/', recipedir)
|
||||
# Try finish to a different layer - should create a bbappend
|
||||
@@ -1534,10 +1545,18 @@ class DevtoolUpgradeTests(DevtoolBase):
|
||||
self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipe), 'Recipe directory should not exist after finish')
|
||||
self.assertExists(oldrecipefile, 'Old recipe file should not have been deleted')
|
||||
self.assertExists(os.path.join(olddir, patchfn), 'Old patch file should not have been deleted')
|
||||
self.assertExists(os.path.join(olddir, backportedpatchfn), 'Old backported patch file should not have been deleted')
|
||||
newdir = os.path.join(newrecipedir, recipe + '-' + newversion)
|
||||
self.assertExists(newrecipefile, 'New recipe file should have been copied into existing layer but wasn\'t')
|
||||
self.assertExists(os.path.join(newdir, patchfn), 'Patch file should have been copied into new directory but wasn\'t')
|
||||
self.assertNotExists(os.path.join(newdir, backportedpatchfn), 'Backported patch file should not have been copied into new directory but was')
|
||||
self.assertExists(os.path.join(newdir, '0002-Add-a-comment-to-the-code.patch'), 'New patch file should have been created but wasn\'t')
|
||||
with open(newrecipefile, 'r') as f:
|
||||
newcontent = f.read()
|
||||
self.assertNotIn(backportedpatchfn, newcontent, "Backported patch should have been removed from the recipe but wasn't")
|
||||
self.assertIn(patchfn, newcontent, "Old patch should have not been removed from the recipe but was")
|
||||
self.assertIn("0002-Add-a-comment-to-the-code.patch", newcontent, "New patch should have been added to the recipe but wasn't")
|
||||
self.assertIn("http://www.ivarch.com/programs/sources/pv-${PV}.tar.gz", newcontent, "New recipe no longer has upstream source in SRC_URI")
|
||||
|
||||
def _setup_test_devtool_finish_modify(self):
|
||||
# Check preconditions
|
||||
|
||||
@@ -5,11 +5,16 @@
|
||||
|
||||
from oeqa.selftest.case import OESelftestTestCase
|
||||
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
|
||||
import bb.utils
|
||||
import functools
|
||||
import multiprocessing
|
||||
import textwrap
|
||||
import json
|
||||
import unittest
|
||||
import tempfile
|
||||
import shutil
|
||||
import stat
|
||||
import os
|
||||
|
||||
MISSING = 'MISSING'
|
||||
DIFFERENT = 'DIFFERENT'
|
||||
@@ -74,6 +79,7 @@ def compare_file(reference, test, diffutils_sysroot):
|
||||
class ReproducibleTests(OESelftestTestCase):
|
||||
package_classes = ['deb', 'ipk']
|
||||
images = ['core-image-minimal']
|
||||
save_results = False
|
||||
|
||||
def setUpLocal(self):
|
||||
super().setUpLocal()
|
||||
@@ -117,9 +123,18 @@ class ReproducibleTests(OESelftestTestCase):
|
||||
self.extrasresults['reproducible']['files'].setdefault(package_class, {})[name] = [
|
||||
{'reference': p.reference, 'test': p.test} for p in packages]
|
||||
|
||||
def copy_file(self, source, dest):
|
||||
bb.utils.mkdirhier(os.path.dirname(dest))
|
||||
shutil.copyfile(source, dest)
|
||||
|
||||
def test_reproducible_builds(self):
|
||||
capture_vars = ['DEPLOY_DIR_' + c.upper() for c in self.package_classes]
|
||||
|
||||
if self.save_results:
|
||||
save_dir = tempfile.mkdtemp(prefix='oe-reproducible-')
|
||||
os.chmod(save_dir, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
|
||||
self.logger.info('Non-reproducible packages will be copied to %s', save_dir)
|
||||
|
||||
# Build native utilities
|
||||
self.write_config('')
|
||||
bitbake("diffutils-native -c addto_recipe_sysroot")
|
||||
@@ -176,6 +191,11 @@ class ReproducibleTests(OESelftestTestCase):
|
||||
self.write_package_list(package_class, 'different', result.different)
|
||||
self.write_package_list(package_class, 'same', result.same)
|
||||
|
||||
if self.save_results:
|
||||
for d in result.different:
|
||||
self.copy_file(d.reference, '/'.join([save_dir, d.reference]))
|
||||
self.copy_file(d.test, '/'.join([save_dir, d.test]))
|
||||
|
||||
if result.missing or result.different:
|
||||
self.fail("The following %s packages are missing or different: %s" %
|
||||
(c, ' '.join(r.test for r in (result.missing + result.different))))
|
||||
|
||||
@@ -179,6 +179,8 @@ class TestImage(OESelftestTestCase):
|
||||
distro = oe.lsb.distro_identifier()
|
||||
if distro and distro == 'debian-8':
|
||||
self.skipTest('virgl isn\'t working with Debian 8')
|
||||
if distro and distro == 'centos-7':
|
||||
self.skipTest('virgl isn\'t working with Centos 7')
|
||||
|
||||
qemu_packageconfig = get_bb_var('PACKAGECONFIG', 'qemu-system-native')
|
||||
features = 'INHERIT += "testimage"\n'
|
||||
|
||||
@@ -255,6 +255,7 @@ BUILD_ARCH = "x86_64"
|
||||
BUILD_OS = "linux"
|
||||
SDKMACHINE = "x86_64"
|
||||
PACKAGE_CLASSES = "package_rpm package_ipk package_deb"
|
||||
BB_SIGNATURE_HANDLER = "OEBasicHash"
|
||||
""")
|
||||
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
|
||||
bitbake("core-image-sato -S none")
|
||||
@@ -266,6 +267,7 @@ BUILD_ARCH = "i686"
|
||||
BUILD_OS = "linux"
|
||||
SDKMACHINE = "i686"
|
||||
PACKAGE_CLASSES = "package_rpm package_ipk package_deb"
|
||||
BB_SIGNATURE_HANDLER = "OEBasicHash"
|
||||
""")
|
||||
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
|
||||
bitbake("core-image-sato -S none")
|
||||
@@ -298,6 +300,7 @@ PACKAGE_CLASSES = "package_rpm package_ipk package_deb"
|
||||
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
|
||||
TCLIBCAPPEND = \"\"
|
||||
NATIVELSBSTRING = \"DistroA\"
|
||||
BB_SIGNATURE_HANDLER = "OEBasicHash"
|
||||
""")
|
||||
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
|
||||
bitbake("core-image-sato -S none")
|
||||
@@ -305,6 +308,7 @@ NATIVELSBSTRING = \"DistroA\"
|
||||
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
|
||||
TCLIBCAPPEND = \"\"
|
||||
NATIVELSBSTRING = \"DistroB\"
|
||||
BB_SIGNATURE_HANDLER = "OEBasicHash"
|
||||
""")
|
||||
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
|
||||
bitbake("core-image-sato -S none")
|
||||
@@ -332,11 +336,13 @@ NATIVELSBSTRING = \"DistroB\"
|
||||
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
|
||||
TCLIBCAPPEND = \"\"
|
||||
MACHINE = \"qemux86-64\"
|
||||
BB_SIGNATURE_HANDLER = "OEBasicHash"
|
||||
"""
|
||||
configB = """
|
||||
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
|
||||
TCLIBCAPPEND = \"\"
|
||||
MACHINE = \"qemuarm\"
|
||||
BB_SIGNATURE_HANDLER = "OEBasicHash"
|
||||
"""
|
||||
self.sstate_allarch_samesigs(configA, configB)
|
||||
|
||||
@@ -352,6 +358,7 @@ MACHINE = \"qemux86-64\"
|
||||
require conf/multilib.conf
|
||||
MULTILIBS = \"multilib:lib32\"
|
||||
DEFAULTTUNE_virtclass-multilib-lib32 = \"x86\"
|
||||
BB_SIGNATURE_HANDLER = "OEBasicHash"
|
||||
"""
|
||||
configB = """
|
||||
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
|
||||
@@ -359,6 +366,7 @@ TCLIBCAPPEND = \"\"
|
||||
MACHINE = \"qemuarm\"
|
||||
require conf/multilib.conf
|
||||
MULTILIBS = \"\"
|
||||
BB_SIGNATURE_HANDLER = "OEBasicHash"
|
||||
"""
|
||||
self.sstate_allarch_samesigs(configA, configB)
|
||||
|
||||
@@ -404,6 +412,7 @@ MACHINE = \"qemux86\"
|
||||
require conf/multilib.conf
|
||||
MULTILIBS = "multilib:lib32"
|
||||
DEFAULTTUNE_virtclass-multilib-lib32 = "x86"
|
||||
BB_SIGNATURE_HANDLER = "OEBasicHash"
|
||||
""")
|
||||
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
|
||||
bitbake("world meta-toolchain -S none")
|
||||
@@ -414,6 +423,7 @@ MACHINE = \"qemux86copy\"
|
||||
require conf/multilib.conf
|
||||
MULTILIBS = "multilib:lib32"
|
||||
DEFAULTTUNE_virtclass-multilib-lib32 = "x86"
|
||||
BB_SIGNATURE_HANDLER = "OEBasicHash"
|
||||
""")
|
||||
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
|
||||
bitbake("world meta-toolchain -S none")
|
||||
@@ -452,6 +462,7 @@ TIME = "111111"
|
||||
DATE = "20161111"
|
||||
INHERIT_remove = "buildstats-summary buildhistory uninative"
|
||||
http_proxy = ""
|
||||
BB_SIGNATURE_HANDLER = "OEBasicHash"
|
||||
""")
|
||||
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
|
||||
self.track_for_cleanup(self.topdir + "/download1")
|
||||
@@ -468,6 +479,7 @@ DATE = "20161212"
|
||||
INHERIT_remove = "uninative"
|
||||
INHERIT += "buildstats-summary buildhistory"
|
||||
http_proxy = "http://example.com/"
|
||||
BB_SIGNATURE_HANDLER = "OEBasicHash"
|
||||
""")
|
||||
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
|
||||
self.track_for_cleanup(self.topdir + "/download2")
|
||||
|
||||
@@ -0,0 +1,64 @@
|
||||
Backport patch to fix CVE-2019-6471.
|
||||
|
||||
Ref:
|
||||
https://security-tracker.debian.org/tracker/CVE-2019-6471
|
||||
|
||||
CVE: CVE-2019-6471
|
||||
Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/3a9c7bb]
|
||||
|
||||
Signed-off-by: Kai Kang <kai.kang@windriver.com>
|
||||
|
||||
From 3a9c7bb80d4a609b86427406d9dd783199920b5b Mon Sep 17 00:00:00 2001
|
||||
From: Mark Andrews <marka@isc.org>
|
||||
Date: Tue, 19 Mar 2019 14:14:21 +1100
|
||||
Subject: [PATCH] move item_out test inside lock in dns_dispatch_getnext()
|
||||
|
||||
(cherry picked from commit 60c42f849d520564ed42e5ed0ba46b4b69c07712)
|
||||
---
|
||||
lib/dns/dispatch.c | 12 ++++++++----
|
||||
1 file changed, 8 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/lib/dns/dispatch.c b/lib/dns/dispatch.c
|
||||
index 408beda367..3278db4a07 100644
|
||||
--- a/lib/dns/dispatch.c
|
||||
+++ b/lib/dns/dispatch.c
|
||||
@@ -134,7 +134,7 @@ struct dns_dispentry {
|
||||
isc_task_t *task;
|
||||
isc_taskaction_t action;
|
||||
void *arg;
|
||||
- bool item_out;
|
||||
+ bool item_out;
|
||||
dispsocket_t *dispsocket;
|
||||
ISC_LIST(dns_dispatchevent_t) items;
|
||||
ISC_LINK(dns_dispentry_t) link;
|
||||
@@ -3422,13 +3422,14 @@ dns_dispatch_getnext(dns_dispentry_t *resp, dns_dispatchevent_t **sockevent) {
|
||||
disp = resp->disp;
|
||||
REQUIRE(VALID_DISPATCH(disp));
|
||||
|
||||
- REQUIRE(resp->item_out == true);
|
||||
- resp->item_out = false;
|
||||
-
|
||||
ev = *sockevent;
|
||||
*sockevent = NULL;
|
||||
|
||||
LOCK(&disp->lock);
|
||||
+
|
||||
+ REQUIRE(resp->item_out == true);
|
||||
+ resp->item_out = false;
|
||||
+
|
||||
if (ev->buffer.base != NULL)
|
||||
free_buffer(disp, ev->buffer.base, ev->buffer.length);
|
||||
free_devent(disp, ev);
|
||||
@@ -3573,6 +3574,9 @@ dns_dispatch_removeresponse(dns_dispentry_t **resp,
|
||||
isc_task_send(disp->task[0], &disp->ctlevent);
|
||||
}
|
||||
|
||||
+/*
|
||||
+ * disp must be locked.
|
||||
+ */
|
||||
static void
|
||||
do_cancel(dns_dispatch_t *disp) {
|
||||
dns_dispatchevent_t *ev;
|
||||
--
|
||||
2.20.1
|
||||
|
||||
@@ -0,0 +1,60 @@
|
||||
Backport patch to fix CVE-2018-5743.
|
||||
|
||||
Ref:
|
||||
https://security-tracker.debian.org/tracker/CVE-2018-5743
|
||||
|
||||
CVE: CVE-2018-5743
|
||||
Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/ec2d50d]
|
||||
|
||||
Signed-off-by: Kai Kang <kai.kang@windriver.com>
|
||||
|
||||
From ec2d50da8d81814640e28593d912f4b96c7efece Mon Sep 17 00:00:00 2001
|
||||
From: =?UTF-8?q?Witold=20Kr=C4=99cicki?= <wpk@isc.org>
|
||||
Date: Thu, 3 Jan 2019 14:17:43 +0100
|
||||
Subject: [PATCH 1/6] fix enforcement of tcp-clients (v1)
|
||||
|
||||
tcp-clients settings could be exceeded in some cases by
|
||||
creating more and more active TCP clients that are over
|
||||
the set quota limit, which in the end could lead to a
|
||||
DoS attack by e.g. exhaustion of file descriptors.
|
||||
|
||||
If TCP client we're closing went over the quota (so it's
|
||||
not attached to a quota) mark it as mortal - so that it
|
||||
will be destroyed and not set up to listen for new
|
||||
connections - unless it's the last client for a specific
|
||||
interface.
|
||||
|
||||
(cherry picked from commit f97131d21b97381cef72b971b157345c1f9b4115)
|
||||
(cherry picked from commit 9689ffc485df8f971f0ad81ab8ab1f5389493776)
|
||||
---
|
||||
bin/named/client.c | 13 ++++++++++++-
|
||||
1 file changed, 12 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/bin/named/client.c b/bin/named/client.c
|
||||
index d482da7121..0739dd48af 100644
|
||||
--- a/bin/named/client.c
|
||||
+++ b/bin/named/client.c
|
||||
@@ -421,8 +421,19 @@ exit_check(ns_client_t *client) {
|
||||
isc_socket_detach(&client->tcpsocket);
|
||||
}
|
||||
|
||||
- if (client->tcpquota != NULL)
|
||||
+ if (client->tcpquota != NULL) {
|
||||
isc_quota_detach(&client->tcpquota);
|
||||
+ } else {
|
||||
+ /*
|
||||
+ * We went over quota with this client, we don't
|
||||
+ * want to restart listening unless this is the
|
||||
+ * last client on this interface, which is
|
||||
+ * checked later.
|
||||
+ */
|
||||
+ if (TCP_CLIENT(client)) {
|
||||
+ client->mortal = true;
|
||||
+ }
|
||||
+ }
|
||||
|
||||
if (client->timerset) {
|
||||
(void)isc_timer_reset(client->timer,
|
||||
--
|
||||
2.20.1
|
||||
|
||||
@@ -0,0 +1,670 @@
|
||||
Backport patch to fix CVE-2018-5743.
|
||||
|
||||
Ref:
|
||||
https://security-tracker.debian.org/tracker/CVE-2018-5743
|
||||
|
||||
CVE: CVE-2018-5743
|
||||
Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/719f604]
|
||||
|
||||
Signed-off-by: Kai Kang <kai.kang@windriver.com>
|
||||
|
||||
From 719f604e3fad5b7479bd14e2fa0ef4413f0a8fdc Mon Sep 17 00:00:00 2001
|
||||
From: =?UTF-8?q?Witold=20Kr=C4=99cicki?= <wpk@isc.org>
|
||||
Date: Fri, 4 Jan 2019 12:50:51 +0100
|
||||
Subject: [PATCH 2/6] tcp-clients could still be exceeded (v2)
|
||||
|
||||
the TCP client quota could still be ineffective under some
|
||||
circumstances. this change:
|
||||
|
||||
- improves quota accounting to ensure that TCP clients are
|
||||
properly limited, while still guaranteeing that at least one client
|
||||
is always available to serve TCP connections on each interface.
|
||||
- uses more descriptive names and removes one (ntcptarget) that
|
||||
was no longer needed
|
||||
- adds comments
|
||||
|
||||
(cherry picked from commit 924651f1d5e605cd186d03f4f7340bcc54d77cc2)
|
||||
(cherry picked from commit 55a7a458e30e47874d34bdf1079eb863a0512396)
|
||||
---
|
||||
bin/named/client.c | 311 ++++++++++++++++++++-----
|
||||
bin/named/include/named/client.h | 14 +-
|
||||
bin/named/include/named/interfacemgr.h | 11 +-
|
||||
bin/named/interfacemgr.c | 8 +-
|
||||
4 files changed, 267 insertions(+), 77 deletions(-)
|
||||
|
||||
diff --git a/bin/named/client.c b/bin/named/client.c
|
||||
index 0739dd48af..a7b49a0f71 100644
|
||||
--- a/bin/named/client.c
|
||||
+++ b/bin/named/client.c
|
||||
@@ -246,10 +246,11 @@ static void ns_client_dumpmessage(ns_client_t *client, const char *reason);
|
||||
static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
|
||||
dns_dispatch_t *disp, bool tcp);
|
||||
static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp,
|
||||
- isc_socket_t *sock);
|
||||
+ isc_socket_t *sock, ns_client_t *oldclient);
|
||||
static inline bool
|
||||
-allowed(isc_netaddr_t *addr, dns_name_t *signer, isc_netaddr_t *ecs_addr,
|
||||
- uint8_t ecs_addrlen, uint8_t *ecs_scope, dns_acl_t *acl);
|
||||
+allowed(isc_netaddr_t *addr, dns_name_t *signer,
|
||||
+ isc_netaddr_t *ecs_addr, uint8_t ecs_addrlen,
|
||||
+ uint8_t *ecs_scope, dns_acl_t *acl)
|
||||
static void compute_cookie(ns_client_t *client, uint32_t when,
|
||||
uint32_t nonce, const unsigned char *secret,
|
||||
isc_buffer_t *buf);
|
||||
@@ -405,8 +406,11 @@ exit_check(ns_client_t *client) {
|
||||
*/
|
||||
INSIST(client->recursionquota == NULL);
|
||||
INSIST(client->newstate <= NS_CLIENTSTATE_READY);
|
||||
- if (client->nreads > 0)
|
||||
+
|
||||
+ if (client->nreads > 0) {
|
||||
dns_tcpmsg_cancelread(&client->tcpmsg);
|
||||
+ }
|
||||
+
|
||||
if (client->nreads != 0) {
|
||||
/* Still waiting for read cancel completion. */
|
||||
return (true);
|
||||
@@ -416,25 +420,58 @@ exit_check(ns_client_t *client) {
|
||||
dns_tcpmsg_invalidate(&client->tcpmsg);
|
||||
client->tcpmsg_valid = false;
|
||||
}
|
||||
+
|
||||
if (client->tcpsocket != NULL) {
|
||||
CTRACE("closetcp");
|
||||
isc_socket_detach(&client->tcpsocket);
|
||||
+
|
||||
+ if (client->tcpactive) {
|
||||
+ LOCK(&client->interface->lock);
|
||||
+ INSIST(client->interface->ntcpactive > 0);
|
||||
+ client->interface->ntcpactive--;
|
||||
+ UNLOCK(&client->interface->lock);
|
||||
+ client->tcpactive = false;
|
||||
+ }
|
||||
}
|
||||
|
||||
if (client->tcpquota != NULL) {
|
||||
- isc_quota_detach(&client->tcpquota);
|
||||
- } else {
|
||||
/*
|
||||
- * We went over quota with this client, we don't
|
||||
- * want to restart listening unless this is the
|
||||
- * last client on this interface, which is
|
||||
- * checked later.
|
||||
+ * If we are not in a pipeline group, or
|
||||
+ * we are the last client in the group, detach from
|
||||
+ * tcpquota; otherwise, transfer the quota to
|
||||
+ * another client in the same group.
|
||||
*/
|
||||
- if (TCP_CLIENT(client)) {
|
||||
- client->mortal = true;
|
||||
+ if (!ISC_LINK_LINKED(client, glink) ||
|
||||
+ (client->glink.next == NULL &&
|
||||
+ client->glink.prev == NULL))
|
||||
+ {
|
||||
+ isc_quota_detach(&client->tcpquota);
|
||||
+ } else if (client->glink.next != NULL) {
|
||||
+ INSIST(client->glink.next->tcpquota == NULL);
|
||||
+ client->glink.next->tcpquota = client->tcpquota;
|
||||
+ client->tcpquota = NULL;
|
||||
+ } else {
|
||||
+ INSIST(client->glink.prev->tcpquota == NULL);
|
||||
+ client->glink.prev->tcpquota = client->tcpquota;
|
||||
+ client->tcpquota = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
+ /*
|
||||
+ * Unlink from pipeline group.
|
||||
+ */
|
||||
+ if (ISC_LINK_LINKED(client, glink)) {
|
||||
+ if (client->glink.next != NULL) {
|
||||
+ client->glink.next->glink.prev =
|
||||
+ client->glink.prev;
|
||||
+ }
|
||||
+ if (client->glink.prev != NULL) {
|
||||
+ client->glink.prev->glink.next =
|
||||
+ client->glink.next;
|
||||
+ }
|
||||
+ ISC_LINK_INIT(client, glink);
|
||||
+ }
|
||||
+
|
||||
if (client->timerset) {
|
||||
(void)isc_timer_reset(client->timer,
|
||||
isc_timertype_inactive,
|
||||
@@ -455,15 +492,16 @@ exit_check(ns_client_t *client) {
|
||||
* that already. Check whether this client needs to remain
|
||||
* active and force it to go inactive if not.
|
||||
*
|
||||
- * UDP clients go inactive at this point, but TCP clients
|
||||
- * may remain active if we have fewer active TCP client
|
||||
- * objects than desired due to an earlier quota exhaustion.
|
||||
+ * UDP clients go inactive at this point, but a TCP client
|
||||
+ * will needs to remain active if no other clients are
|
||||
+ * listening for TCP requests on this interface, to
|
||||
+ * prevent this interface from going nonresponsive.
|
||||
*/
|
||||
if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) {
|
||||
LOCK(&client->interface->lock);
|
||||
- if (client->interface->ntcpcurrent <
|
||||
- client->interface->ntcptarget)
|
||||
+ if (client->interface->ntcpaccepting == 0) {
|
||||
client->mortal = false;
|
||||
+ }
|
||||
UNLOCK(&client->interface->lock);
|
||||
}
|
||||
|
||||
@@ -472,15 +510,17 @@ exit_check(ns_client_t *client) {
|
||||
* queue for recycling.
|
||||
*/
|
||||
if (client->mortal) {
|
||||
- if (client->newstate > NS_CLIENTSTATE_INACTIVE)
|
||||
+ if (client->newstate > NS_CLIENTSTATE_INACTIVE) {
|
||||
client->newstate = NS_CLIENTSTATE_INACTIVE;
|
||||
+ }
|
||||
}
|
||||
|
||||
if (NS_CLIENTSTATE_READY == client->newstate) {
|
||||
if (TCP_CLIENT(client)) {
|
||||
client_accept(client);
|
||||
- } else
|
||||
+ } else {
|
||||
client_udprecv(client);
|
||||
+ }
|
||||
client->newstate = NS_CLIENTSTATE_MAX;
|
||||
return (true);
|
||||
}
|
||||
@@ -492,41 +532,57 @@ exit_check(ns_client_t *client) {
|
||||
/*
|
||||
* We are trying to enter the inactive state.
|
||||
*/
|
||||
- if (client->naccepts > 0)
|
||||
+ if (client->naccepts > 0) {
|
||||
isc_socket_cancel(client->tcplistener, client->task,
|
||||
ISC_SOCKCANCEL_ACCEPT);
|
||||
+ }
|
||||
|
||||
/* Still waiting for accept cancel completion. */
|
||||
- if (! (client->naccepts == 0))
|
||||
+ if (! (client->naccepts == 0)) {
|
||||
return (true);
|
||||
+ }
|
||||
|
||||
/* Accept cancel is complete. */
|
||||
- if (client->nrecvs > 0)
|
||||
+ if (client->nrecvs > 0) {
|
||||
isc_socket_cancel(client->udpsocket, client->task,
|
||||
ISC_SOCKCANCEL_RECV);
|
||||
+ }
|
||||
|
||||
/* Still waiting for recv cancel completion. */
|
||||
- if (! (client->nrecvs == 0))
|
||||
+ if (! (client->nrecvs == 0)) {
|
||||
return (true);
|
||||
+ }
|
||||
|
||||
/* Still waiting for control event to be delivered */
|
||||
- if (client->nctls > 0)
|
||||
+ if (client->nctls > 0) {
|
||||
return (true);
|
||||
-
|
||||
- /* Deactivate the client. */
|
||||
- if (client->interface)
|
||||
- ns_interface_detach(&client->interface);
|
||||
+ }
|
||||
|
||||
INSIST(client->naccepts == 0);
|
||||
INSIST(client->recursionquota == NULL);
|
||||
- if (client->tcplistener != NULL)
|
||||
+ if (client->tcplistener != NULL) {
|
||||
isc_socket_detach(&client->tcplistener);
|
||||
|
||||
- if (client->udpsocket != NULL)
|
||||
+ if (client->tcpactive) {
|
||||
+ LOCK(&client->interface->lock);
|
||||
+ INSIST(client->interface->ntcpactive > 0);
|
||||
+ client->interface->ntcpactive--;
|
||||
+ UNLOCK(&client->interface->lock);
|
||||
+ client->tcpactive = false;
|
||||
+ }
|
||||
+ }
|
||||
+ if (client->udpsocket != NULL) {
|
||||
isc_socket_detach(&client->udpsocket);
|
||||
+ }
|
||||
|
||||
- if (client->dispatch != NULL)
|
||||
+ /* Deactivate the client. */
|
||||
+ if (client->interface != NULL) {
|
||||
+ ns_interface_detach(&client->interface);
|
||||
+ }
|
||||
+
|
||||
+ if (client->dispatch != NULL) {
|
||||
dns_dispatch_detach(&client->dispatch);
|
||||
+ }
|
||||
|
||||
client->attributes = 0;
|
||||
client->mortal = false;
|
||||
@@ -551,10 +607,13 @@ exit_check(ns_client_t *client) {
|
||||
client->newstate = NS_CLIENTSTATE_MAX;
|
||||
if (!ns_g_clienttest && manager != NULL &&
|
||||
!manager->exiting)
|
||||
+ {
|
||||
ISC_QUEUE_PUSH(manager->inactive, client,
|
||||
ilink);
|
||||
- if (client->needshutdown)
|
||||
+ }
|
||||
+ if (client->needshutdown) {
|
||||
isc_task_shutdown(client->task);
|
||||
+ }
|
||||
return (true);
|
||||
}
|
||||
}
|
||||
@@ -675,7 +734,6 @@ client_start(isc_task_t *task, isc_event_t *event) {
|
||||
}
|
||||
}
|
||||
|
||||
-
|
||||
/*%
|
||||
* The client's task has received a shutdown event.
|
||||
*/
|
||||
@@ -2507,17 +2565,12 @@ client_request(isc_task_t *task, isc_event_t *event) {
|
||||
/*
|
||||
* Pipeline TCP query processing.
|
||||
*/
|
||||
- if (client->message->opcode != dns_opcode_query)
|
||||
+ if (client->message->opcode != dns_opcode_query) {
|
||||
client->pipelined = false;
|
||||
+ }
|
||||
if (TCP_CLIENT(client) && client->pipelined) {
|
||||
- result = isc_quota_reserve(&ns_g_server->tcpquota);
|
||||
- if (result == ISC_R_SUCCESS)
|
||||
- result = ns_client_replace(client);
|
||||
+ result = ns_client_replace(client);
|
||||
if (result != ISC_R_SUCCESS) {
|
||||
- ns_client_log(client, NS_LOGCATEGORY_CLIENT,
|
||||
- NS_LOGMODULE_CLIENT, ISC_LOG_WARNING,
|
||||
- "no more TCP clients(read): %s",
|
||||
- isc_result_totext(result));
|
||||
client->pipelined = false;
|
||||
}
|
||||
}
|
||||
@@ -3087,6 +3140,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
|
||||
client->filter_aaaa = dns_aaaa_ok;
|
||||
#endif
|
||||
client->needshutdown = ns_g_clienttest;
|
||||
+ client->tcpactive = false;
|
||||
|
||||
ISC_EVENT_INIT(&client->ctlevent, sizeof(client->ctlevent), 0, NULL,
|
||||
NS_EVENT_CLIENTCONTROL, client_start, client, client,
|
||||
@@ -3100,6 +3154,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
|
||||
client->formerrcache.id = 0;
|
||||
ISC_LINK_INIT(client, link);
|
||||
ISC_LINK_INIT(client, rlink);
|
||||
+ ISC_LINK_INIT(client, glink);
|
||||
ISC_QLINK_INIT(client, ilink);
|
||||
client->keytag = NULL;
|
||||
client->keytag_len = 0;
|
||||
@@ -3193,12 +3248,19 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
|
||||
|
||||
INSIST(client->state == NS_CLIENTSTATE_READY);
|
||||
|
||||
+ /*
|
||||
+ * The accept() was successful and we're now establishing a new
|
||||
+ * connection. We need to make note of it in the client and
|
||||
+ * interface objects so client objects can do the right thing
|
||||
+ * when going inactive in exit_check() (see comments in
|
||||
+ * client_accept() for details).
|
||||
+ */
|
||||
INSIST(client->naccepts == 1);
|
||||
client->naccepts--;
|
||||
|
||||
LOCK(&client->interface->lock);
|
||||
- INSIST(client->interface->ntcpcurrent > 0);
|
||||
- client->interface->ntcpcurrent--;
|
||||
+ INSIST(client->interface->ntcpaccepting > 0);
|
||||
+ client->interface->ntcpaccepting--;
|
||||
UNLOCK(&client->interface->lock);
|
||||
|
||||
/*
|
||||
@@ -3232,6 +3294,9 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
|
||||
NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3),
|
||||
"accept failed: %s",
|
||||
isc_result_totext(nevent->result));
|
||||
+ if (client->tcpquota != NULL) {
|
||||
+ isc_quota_detach(&client->tcpquota);
|
||||
+ }
|
||||
}
|
||||
|
||||
if (exit_check(client))
|
||||
@@ -3270,18 +3335,12 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
|
||||
* deny service to legitimate TCP clients.
|
||||
*/
|
||||
client->pipelined = false;
|
||||
- result = isc_quota_attach(&ns_g_server->tcpquota,
|
||||
- &client->tcpquota);
|
||||
- if (result == ISC_R_SUCCESS)
|
||||
- result = ns_client_replace(client);
|
||||
- if (result != ISC_R_SUCCESS) {
|
||||
- ns_client_log(client, NS_LOGCATEGORY_CLIENT,
|
||||
- NS_LOGMODULE_CLIENT, ISC_LOG_WARNING,
|
||||
- "no more TCP clients(accept): %s",
|
||||
- isc_result_totext(result));
|
||||
- } else if (ns_g_server->keepresporder == NULL ||
|
||||
- !allowed(&netaddr, NULL, NULL, 0, NULL,
|
||||
- ns_g_server->keepresporder)) {
|
||||
+ result = ns_client_replace(client);
|
||||
+ if (result == ISC_R_SUCCESS &&
|
||||
+ (client->sctx->keepresporder == NULL ||
|
||||
+ !allowed(&netaddr, NULL, NULL, 0, NULL,
|
||||
+ ns_g_server->keepresporder)))
|
||||
+ {
|
||||
client->pipelined = true;
|
||||
}
|
||||
|
||||
@@ -3298,12 +3357,80 @@ client_accept(ns_client_t *client) {
|
||||
|
||||
CTRACE("accept");
|
||||
|
||||
+ /*
|
||||
+ * The tcpquota object can only be simultaneously referenced a
|
||||
+ * pre-defined number of times; this is configured by 'tcp-clients'
|
||||
+ * in named.conf. If we can't attach to it here, that means the TCP
|
||||
+ * client quota has been exceeded.
|
||||
+ */
|
||||
+ result = isc_quota_attach(&client->sctx->tcpquota,
|
||||
+ &client->tcpquota);
|
||||
+ if (result != ISC_R_SUCCESS) {
|
||||
+ bool exit;
|
||||
+
|
||||
+ ns_client_log(client, NS_LOGCATEGORY_CLIENT,
|
||||
+ NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(1),
|
||||
+ "no more TCP clients: %s",
|
||||
+ isc_result_totext(result));
|
||||
+
|
||||
+ /*
|
||||
+ * We have exceeded the system-wide TCP client
|
||||
+ * quota. But, we can't just block this accept
|
||||
+ * in all cases, because if we did, a heavy TCP
|
||||
+ * load on other interfaces might cause this
|
||||
+ * interface to be starved, with no clients able
|
||||
+ * to accept new connections.
|
||||
+ *
|
||||
+ * So, we check here to see if any other client
|
||||
+ * is already servicing TCP queries on this
|
||||
+ * interface (whether accepting, reading, or
|
||||
+ * processing).
|
||||
+ *
|
||||
+ * If so, then it's okay *not* to call
|
||||
+ * accept - we can let this client to go inactive
|
||||
+ * and the other one handle the next connection
|
||||
+ * when it's ready.
|
||||
+ *
|
||||
+ * But if not, then we need to be a little bit
|
||||
+ * flexible about the quota. We allow *one* extra
|
||||
+ * TCP client through, to ensure we're listening on
|
||||
+ * every interface.
|
||||
+ *
|
||||
+ * (Note: In practice this means that the *real*
|
||||
+ * TCP client quota is tcp-clients plus the number
|
||||
+ * of interfaces.)
|
||||
+ */
|
||||
+ LOCK(&client->interface->lock);
|
||||
+ exit = (client->interface->ntcpactive > 0);
|
||||
+ UNLOCK(&client->interface->lock);
|
||||
+
|
||||
+ if (exit) {
|
||||
+ client->newstate = NS_CLIENTSTATE_INACTIVE;
|
||||
+ (void)exit_check(client);
|
||||
+ return;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ /*
|
||||
+ * By incrementing the interface's ntcpactive counter we signal
|
||||
+ * that there is at least one client servicing TCP queries for the
|
||||
+ * interface.
|
||||
+ *
|
||||
+ * We also make note of the fact in the client itself with the
|
||||
+ * tcpactive flag. This ensures proper accounting by preventing
|
||||
+ * us from accidentally incrementing or decrementing ntcpactive
|
||||
+ * more than once per client object.
|
||||
+ */
|
||||
+ if (!client->tcpactive) {
|
||||
+ LOCK(&client->interface->lock);
|
||||
+ client->interface->ntcpactive++;
|
||||
+ UNLOCK(&client->interface->lock);
|
||||
+ client->tcpactive = true;
|
||||
+ }
|
||||
+
|
||||
result = isc_socket_accept(client->tcplistener, client->task,
|
||||
client_newconn, client);
|
||||
if (result != ISC_R_SUCCESS) {
|
||||
- UNEXPECTED_ERROR(__FILE__, __LINE__,
|
||||
- "isc_socket_accept() failed: %s",
|
||||
- isc_result_totext(result));
|
||||
/*
|
||||
* XXXRTH What should we do? We're trying to accept but
|
||||
* it didn't work. If we just give up, then TCP
|
||||
@@ -3311,12 +3438,39 @@ client_accept(ns_client_t *client) {
|
||||
*
|
||||
* For now, we just go idle.
|
||||
*/
|
||||
+ UNEXPECTED_ERROR(__FILE__, __LINE__,
|
||||
+ "isc_socket_accept() failed: %s",
|
||||
+ isc_result_totext(result));
|
||||
+ if (client->tcpquota != NULL) {
|
||||
+ isc_quota_detach(&client->tcpquota);
|
||||
+ }
|
||||
return;
|
||||
}
|
||||
+
|
||||
+ /*
|
||||
+ * The client's 'naccepts' counter indicates that this client has
|
||||
+ * called accept() and is waiting for a new connection. It should
|
||||
+ * never exceed 1.
|
||||
+ */
|
||||
INSIST(client->naccepts == 0);
|
||||
client->naccepts++;
|
||||
+
|
||||
+ /*
|
||||
+ * The interface's 'ntcpaccepting' counter is incremented when
|
||||
+ * any client calls accept(), and decremented in client_newconn()
|
||||
+ * once the connection is established.
|
||||
+ *
|
||||
+ * When the client object is shutting down after handling a TCP
|
||||
+ * request (see exit_check()), it looks to see whether this value is
|
||||
+ * non-zero. If so, that means another client has already called
|
||||
+ * accept() and is waiting to establish the next connection, which
|
||||
+ * means the first client is free to go inactive. Otherwise,
|
||||
+ * the first client must come back and call accept() again; this
|
||||
+ * guarantees there will always be at least one client listening
|
||||
+ * for new TCP connections on each interface.
|
||||
+ */
|
||||
LOCK(&client->interface->lock);
|
||||
- client->interface->ntcpcurrent++;
|
||||
+ client->interface->ntcpaccepting++;
|
||||
UNLOCK(&client->interface->lock);
|
||||
}
|
||||
|
||||
@@ -3390,13 +3544,14 @@ ns_client_replace(ns_client_t *client) {
|
||||
tcp = TCP_CLIENT(client);
|
||||
if (tcp && client->pipelined) {
|
||||
result = get_worker(client->manager, client->interface,
|
||||
- client->tcpsocket);
|
||||
+ client->tcpsocket, client);
|
||||
} else {
|
||||
result = get_client(client->manager, client->interface,
|
||||
client->dispatch, tcp);
|
||||
}
|
||||
- if (result != ISC_R_SUCCESS)
|
||||
+ if (result != ISC_R_SUCCESS) {
|
||||
return (result);
|
||||
+ }
|
||||
|
||||
/*
|
||||
* The responsibility for listening for new requests is hereby
|
||||
@@ -3585,6 +3740,7 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
|
||||
client->attributes |= NS_CLIENTATTR_TCP;
|
||||
isc_socket_attach(ifp->tcpsocket,
|
||||
&client->tcplistener);
|
||||
+
|
||||
} else {
|
||||
isc_socket_t *sock;
|
||||
|
||||
@@ -3602,7 +3758,8 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
|
||||
}
|
||||
|
||||
static isc_result_t
|
||||
-get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock)
|
||||
+get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
|
||||
+ ns_client_t *oldclient)
|
||||
{
|
||||
isc_result_t result = ISC_R_SUCCESS;
|
||||
isc_event_t *ev;
|
||||
@@ -3610,6 +3767,7 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock)
|
||||
MTRACE("get worker");
|
||||
|
||||
REQUIRE(manager != NULL);
|
||||
+ REQUIRE(oldclient != NULL);
|
||||
|
||||
if (manager->exiting)
|
||||
return (ISC_R_SHUTTINGDOWN);
|
||||
@@ -3642,7 +3800,28 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock)
|
||||
ns_interface_attach(ifp, &client->interface);
|
||||
client->newstate = client->state = NS_CLIENTSTATE_WORKING;
|
||||
INSIST(client->recursionquota == NULL);
|
||||
- client->tcpquota = &ns_g_server->tcpquota;
|
||||
+
|
||||
+ /*
|
||||
+ * Transfer TCP quota to the new client.
|
||||
+ */
|
||||
+ INSIST(client->tcpquota == NULL);
|
||||
+ INSIST(oldclient->tcpquota != NULL);
|
||||
+ client->tcpquota = oldclient->tcpquota;
|
||||
+ oldclient->tcpquota = NULL;
|
||||
+
|
||||
+ /*
|
||||
+ * Link to a pipeline group, creating it if needed.
|
||||
+ */
|
||||
+ if (!ISC_LINK_LINKED(oldclient, glink)) {
|
||||
+ oldclient->glink.next = NULL;
|
||||
+ oldclient->glink.prev = NULL;
|
||||
+ }
|
||||
+ client->glink.next = oldclient->glink.next;
|
||||
+ client->glink.prev = oldclient;
|
||||
+ if (oldclient->glink.next != NULL) {
|
||||
+ oldclient->glink.next->glink.prev = client;
|
||||
+ }
|
||||
+ oldclient->glink.next = client;
|
||||
|
||||
client->dscp = ifp->dscp;
|
||||
|
||||
@@ -3656,6 +3835,12 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock)
|
||||
(void)isc_socket_getpeername(client->tcpsocket, &client->peeraddr);
|
||||
client->peeraddr_valid = true;
|
||||
|
||||
+ LOCK(&client->interface->lock);
|
||||
+ client->interface->ntcpactive++;
|
||||
+ UNLOCK(&client->interface->lock);
|
||||
+
|
||||
+ client->tcpactive = true;
|
||||
+
|
||||
INSIST(client->tcpmsg_valid == false);
|
||||
dns_tcpmsg_init(client->mctx, client->tcpsocket, &client->tcpmsg);
|
||||
client->tcpmsg_valid = true;
|
||||
diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
|
||||
index b23a7b191d..1f7973f9c5 100644
|
||||
--- a/bin/named/include/named/client.h
|
||||
+++ b/bin/named/include/named/client.h
|
||||
@@ -94,7 +94,8 @@ struct ns_client {
|
||||
int nupdates;
|
||||
int nctls;
|
||||
int references;
|
||||
- bool needshutdown; /*
|
||||
+ bool tcpactive;
|
||||
+ bool needshutdown; /*
|
||||
* Used by clienttest to get
|
||||
* the client to go from
|
||||
* inactive to free state
|
||||
@@ -130,9 +131,9 @@ struct ns_client {
|
||||
isc_stdtime_t now;
|
||||
isc_time_t tnow;
|
||||
dns_name_t signername; /*%< [T]SIG key name */
|
||||
- dns_name_t * signer; /*%< NULL if not valid sig */
|
||||
- bool mortal; /*%< Die after handling request */
|
||||
- bool pipelined; /*%< TCP queries not in sequence */
|
||||
+ dns_name_t *signer; /*%< NULL if not valid sig */
|
||||
+ bool mortal; /*%< Die after handling request */
|
||||
+ bool pipelined; /*%< TCP queries not in sequence */
|
||||
isc_quota_t *tcpquota;
|
||||
isc_quota_t *recursionquota;
|
||||
ns_interface_t *interface;
|
||||
@@ -143,8 +144,8 @@ struct ns_client {
|
||||
isc_sockaddr_t destsockaddr;
|
||||
|
||||
isc_netaddr_t ecs_addr; /*%< EDNS client subnet */
|
||||
- uint8_t ecs_addrlen;
|
||||
- uint8_t ecs_scope;
|
||||
+ uint8_t ecs_addrlen;
|
||||
+ uint8_t ecs_scope;
|
||||
|
||||
struct in6_pktinfo pktinfo;
|
||||
isc_dscp_t dscp;
|
||||
@@ -166,6 +167,7 @@ struct ns_client {
|
||||
|
||||
ISC_LINK(ns_client_t) link;
|
||||
ISC_LINK(ns_client_t) rlink;
|
||||
+ ISC_LINK(ns_client_t) glink;
|
||||
ISC_QLINK(ns_client_t) ilink;
|
||||
unsigned char cookie[8];
|
||||
uint32_t expire;
|
||||
diff --git a/bin/named/include/named/interfacemgr.h b/bin/named/include/named/interfacemgr.h
|
||||
index 7d1883e1e8..61b08826a6 100644
|
||||
--- a/bin/named/include/named/interfacemgr.h
|
||||
+++ b/bin/named/include/named/interfacemgr.h
|
||||
@@ -77,9 +77,14 @@ struct ns_interface {
|
||||
/*%< UDP dispatchers. */
|
||||
isc_socket_t * tcpsocket; /*%< TCP socket. */
|
||||
isc_dscp_t dscp; /*%< "listen-on" DSCP value */
|
||||
- int ntcptarget; /*%< Desired number of concurrent
|
||||
- TCP accepts */
|
||||
- int ntcpcurrent; /*%< Current ditto, locked */
|
||||
+ int ntcpaccepting; /*%< Number of clients
|
||||
+ ready to accept new
|
||||
+ TCP connections on this
|
||||
+ interface */
|
||||
+ int ntcpactive; /*%< Number of clients
|
||||
+ servicing TCP queries
|
||||
+ (whether accepting or
|
||||
+ connected) */
|
||||
int nudpdispatch; /*%< Number of UDP dispatches */
|
||||
ns_clientmgr_t * clientmgr; /*%< Client manager. */
|
||||
ISC_LINK(ns_interface_t) link;
|
||||
diff --git a/bin/named/interfacemgr.c b/bin/named/interfacemgr.c
|
||||
index 419927bf54..955096ef47 100644
|
||||
--- a/bin/named/interfacemgr.c
|
||||
+++ b/bin/named/interfacemgr.c
|
||||
@@ -386,8 +386,8 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr,
|
||||
* connections will be handled in parallel even though there is
|
||||
* only one client initially.
|
||||
*/
|
||||
- ifp->ntcptarget = 1;
|
||||
- ifp->ntcpcurrent = 0;
|
||||
+ ifp->ntcpaccepting = 0;
|
||||
+ ifp->ntcpactive = 0;
|
||||
ifp->nudpdispatch = 0;
|
||||
|
||||
ifp->dscp = -1;
|
||||
@@ -522,9 +522,7 @@ ns_interface_accepttcp(ns_interface_t *ifp) {
*/
(void)isc_socket_filter(ifp->tcpsocket, "dataready");

- result = ns_clientmgr_createclients(ifp->clientmgr,
- ifp->ntcptarget, ifp,
- true);
+ result = ns_clientmgr_createclients(ifp->clientmgr, 1, ifp, true);
if (result != ISC_R_SUCCESS) {
UNEXPECTED_ERROR(__FILE__, __LINE__,
"TCP ns_clientmgr_createclients(): %s",
--
2.20.1

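The hunks above replace the old ntcptarget/ntcpcurrent counters with ntcpaccepting/ntcpactive and let one client keep accepting even when the tcp-clients quota is exhausted, so an interface never loses its last TCP listener. Below is a minimal, single-threaded sketch of that decision, not part of the patch: the struct and function names are hypothetical stand-ins, the real code guards the counters with the interface lock and uses isc_quota_t for the global limit.

/*
 * Illustrative sketch only: model of the per-interface accounting
 * introduced by the patch above.  Hypothetical names; no locking.
 */
#include <stdbool.h>
#include <stdio.h>

struct quota { int used, max; };          /* stands in for isc_quota_t  */
struct iface { int ntcpactive; };         /* clients servicing TCP here */

/* Try to take a slot from the global tcp-clients quota. */
static bool quota_attach(struct quota *q) {
	if (q->used >= q->max)
		return false;
	q->used++;
	return true;
}

/*
 * Decide whether this client may keep accepting on 'ifp'.  When the
 * global quota is full, an overrun is allowed only if no other client
 * is already servicing TCP queries on this interface, so the
 * interface never goes deaf.
 */
static bool may_accept(struct quota *q, struct iface *ifp) {
	if (quota_attach(q))
		return true;
	return ifp->ntcpactive == 0;      /* allow one extra listener */
}

int main(void) {
	struct quota q = { .used = 0, .max = 1 };
	struct iface a = { 0 }, b = { 0 };

	printf("iface A accepts: %d\n", may_accept(&q, &a)); /* 1: quota slot  */
	a.ntcpactive++;
	printf("iface B accepts: %d\n", may_accept(&q, &b)); /* 1: overrun, B idle */
	b.ntcpactive++;
	printf("iface A again:   %d\n", may_accept(&q, &a)); /* 0: full, A busy */
	return 0;
}
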
@@ -0,0 +1,278 @@
Backport patch to fix CVE-2018-5743.

Ref:
https://security-tracker.debian.org/tracker/CVE-2018-5743

CVE: CVE-2018-5743
Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/366b4e1]

Signed-off-by: Kai Kang <kai.kang@windriver.com>

From 366b4e1ede8aed690e981e07137cb1cb77879c36 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20K=C4=99pie=C5=84?= <michal@isc.org>
Date: Thu, 17 Jan 2019 15:53:38 +0100
Subject: [PATCH 3/6] use reference counter for pipeline groups (v3)

Track pipeline groups using a shared reference counter
instead of a linked list.

(cherry picked from commit 513afd33eb17d5dc41a3f0d2d38204ef8c5f6f91)
(cherry picked from commit 9446629b730c59c4215f08d37fbaf810282fbccb)
---
 bin/named/client.c               | 171 ++++++++++++++++++++-----------
 bin/named/include/named/client.h |   2 +-
 2 files changed, 110 insertions(+), 63 deletions(-)

diff --git a/bin/named/client.c b/bin/named/client.c
|
||||
index a7b49a0f71..277656cef0 100644
|
||||
--- a/bin/named/client.c
|
||||
+++ b/bin/named/client.c
|
||||
@@ -299,6 +299,75 @@ ns_client_settimeout(ns_client_t *client, unsigned int seconds) {
|
||||
}
|
||||
}
|
||||
|
||||
+/*%
|
||||
+ * Allocate a reference counter that will track the number of client structures
|
||||
+ * using the TCP connection that 'client' called accept() for. This counter
|
||||
+ * will be shared between all client structures associated with this TCP
|
||||
+ * connection.
|
||||
+ */
|
||||
+static void
|
||||
+pipeline_init(ns_client_t *client) {
|
||||
+ isc_refcount_t *refs;
|
||||
+
|
||||
+ REQUIRE(client->pipeline_refs == NULL);
|
||||
+
|
||||
+ /*
|
||||
+ * A global memory context is used for the allocation as different
|
||||
+ * client structures may have different memory contexts assigned and a
|
||||
+ * reference counter allocated here might need to be freed by a
|
||||
+ * different client. The performance impact caused by memory context
|
||||
+ * contention here is expected to be negligible, given that this code
|
||||
+ * is only executed for TCP connections.
|
||||
+ */
|
||||
+ refs = isc_mem_allocate(client->sctx->mctx, sizeof(*refs));
|
||||
+ isc_refcount_init(refs, 1);
|
||||
+ client->pipeline_refs = refs;
|
||||
+}
|
||||
+
|
||||
+/*%
|
||||
+ * Increase the count of client structures using the TCP connection that
|
||||
+ * 'source' is associated with and put a pointer to that count in 'target',
|
||||
+ * thus associating it with the same TCP connection.
|
||||
+ */
|
||||
+static void
|
||||
+pipeline_attach(ns_client_t *source, ns_client_t *target) {
|
||||
+ int old_refs;
|
||||
+
|
||||
+ REQUIRE(source->pipeline_refs != NULL);
|
||||
+ REQUIRE(target->pipeline_refs == NULL);
|
||||
+
|
||||
+ old_refs = isc_refcount_increment(source->pipeline_refs);
|
||||
+ INSIST(old_refs > 0);
|
||||
+ target->pipeline_refs = source->pipeline_refs;
|
||||
+}
|
||||
+
|
||||
+/*%
|
||||
+ * Decrease the count of client structures using the TCP connection that
|
||||
+ * 'client' is associated with. If this is the last client using this TCP
|
||||
+ * connection, free the reference counter and return true; otherwise, return
|
||||
+ * false.
|
||||
+ */
|
||||
+static bool
|
||||
+pipeline_detach(ns_client_t *client) {
|
||||
+ isc_refcount_t *refs;
|
||||
+ int old_refs;
|
||||
+
|
||||
+ REQUIRE(client->pipeline_refs != NULL);
|
||||
+
|
||||
+ refs = client->pipeline_refs;
|
||||
+ client->pipeline_refs = NULL;
|
||||
+
|
||||
+ old_refs = isc_refcount_decrement(refs);
|
||||
+ INSIST(old_refs > 0);
|
||||
+
|
||||
+ if (old_refs == 1) {
|
||||
+ isc_mem_free(client->sctx->mctx, refs);
|
||||
+ return (true);
|
||||
+ }
|
||||
+
|
||||
+ return (false);
|
||||
+}
|
||||
+
|
||||
/*%
|
||||
* Check for a deactivation or shutdown request and take appropriate
|
||||
* action. Returns true if either is in progress; in this case
|
||||
@@ -421,6 +490,40 @@ exit_check(ns_client_t *client) {
|
||||
client->tcpmsg_valid = false;
|
||||
}
|
||||
|
||||
+ if (client->tcpquota != NULL) {
|
||||
+ if (client->pipeline_refs == NULL ||
|
||||
+ pipeline_detach(client))
|
||||
+ {
|
||||
+ /*
|
||||
+ * Only detach from the TCP client quota if
|
||||
+ * there are no more client structures using
|
||||
+ * this TCP connection.
|
||||
+ *
|
||||
+ * Note that we check 'pipeline_refs' and not
|
||||
+ * 'pipelined' because in some cases (e.g.
|
||||
+ * after receiving a request with an opcode
|
||||
+ * different than QUERY) 'pipelined' is set to
|
||||
+ * false after the reference counter gets
|
||||
+ * allocated in pipeline_init() and we must
|
||||
+ * still drop our reference as failing to do so
|
||||
+ * would prevent the reference counter itself
|
||||
+ * from being freed.
|
||||
+ */
|
||||
+ isc_quota_detach(&client->tcpquota);
|
||||
+ } else {
|
||||
+ /*
|
||||
+ * There are other client structures using this
|
||||
+ * TCP connection, so we cannot detach from the
|
||||
+ * TCP client quota to prevent excess TCP
|
||||
+ * connections from being accepted. However,
|
||||
+ * this client structure might later be reused
|
||||
+ * for accepting new connections and thus must
|
||||
+ * have its 'tcpquota' field set to NULL.
|
||||
+ */
|
||||
+ client->tcpquota = NULL;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
if (client->tcpsocket != NULL) {
|
||||
CTRACE("closetcp");
|
||||
isc_socket_detach(&client->tcpsocket);
|
||||
@@ -434,44 +537,6 @@ exit_check(ns_client_t *client) {
|
||||
}
|
||||
}
|
||||
|
||||
- if (client->tcpquota != NULL) {
|
||||
- /*
|
||||
- * If we are not in a pipeline group, or
|
||||
- * we are the last client in the group, detach from
|
||||
- * tcpquota; otherwise, transfer the quota to
|
||||
- * another client in the same group.
|
||||
- */
|
||||
- if (!ISC_LINK_LINKED(client, glink) ||
|
||||
- (client->glink.next == NULL &&
|
||||
- client->glink.prev == NULL))
|
||||
- {
|
||||
- isc_quota_detach(&client->tcpquota);
|
||||
- } else if (client->glink.next != NULL) {
|
||||
- INSIST(client->glink.next->tcpquota == NULL);
|
||||
- client->glink.next->tcpquota = client->tcpquota;
|
||||
- client->tcpquota = NULL;
|
||||
- } else {
|
||||
- INSIST(client->glink.prev->tcpquota == NULL);
|
||||
- client->glink.prev->tcpquota = client->tcpquota;
|
||||
- client->tcpquota = NULL;
|
||||
- }
|
||||
- }
|
||||
-
|
||||
- /*
|
||||
- * Unlink from pipeline group.
|
||||
- */
|
||||
- if (ISC_LINK_LINKED(client, glink)) {
|
||||
- if (client->glink.next != NULL) {
|
||||
- client->glink.next->glink.prev =
|
||||
- client->glink.prev;
|
||||
- }
|
||||
- if (client->glink.prev != NULL) {
|
||||
- client->glink.prev->glink.next =
|
||||
- client->glink.next;
|
||||
- }
|
||||
- ISC_LINK_INIT(client, glink);
|
||||
- }
|
||||
-
|
||||
if (client->timerset) {
|
||||
(void)isc_timer_reset(client->timer,
|
||||
isc_timertype_inactive,
|
||||
@@ -3130,6 +3195,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
|
||||
dns_name_init(&client->signername, NULL);
|
||||
client->mortal = false;
|
||||
client->pipelined = false;
|
||||
+ client->pipeline_refs = NULL;
|
||||
client->tcpquota = NULL;
|
||||
client->recursionquota = NULL;
|
||||
client->interface = NULL;
|
||||
@@ -3154,7 +3220,6 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
|
||||
client->formerrcache.id = 0;
|
||||
ISC_LINK_INIT(client, link);
|
||||
ISC_LINK_INIT(client, rlink);
|
||||
- ISC_LINK_INIT(client, glink);
|
||||
ISC_QLINK_INIT(client, ilink);
|
||||
client->keytag = NULL;
|
||||
client->keytag_len = 0;
|
||||
@@ -3341,6 +3406,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
|
||||
!allowed(&netaddr, NULL, NULL, 0, NULL,
|
||||
ns_g_server->keepresporder)))
|
||||
{
|
||||
+ pipeline_init(client);
|
||||
client->pipelined = true;
|
||||
}
|
||||
|
||||
@@ -3800,35 +3866,16 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
|
||||
ns_interface_attach(ifp, &client->interface);
|
||||
client->newstate = client->state = NS_CLIENTSTATE_WORKING;
|
||||
INSIST(client->recursionquota == NULL);
|
||||
-
|
||||
- /*
|
||||
- * Transfer TCP quota to the new client.
|
||||
- */
|
||||
- INSIST(client->tcpquota == NULL);
|
||||
- INSIST(oldclient->tcpquota != NULL);
|
||||
- client->tcpquota = oldclient->tcpquota;
|
||||
- oldclient->tcpquota = NULL;
|
||||
-
|
||||
- /*
|
||||
- * Link to a pipeline group, creating it if needed.
|
||||
- */
|
||||
- if (!ISC_LINK_LINKED(oldclient, glink)) {
|
||||
- oldclient->glink.next = NULL;
|
||||
- oldclient->glink.prev = NULL;
|
||||
- }
|
||||
- client->glink.next = oldclient->glink.next;
|
||||
- client->glink.prev = oldclient;
|
||||
- if (oldclient->glink.next != NULL) {
|
||||
- oldclient->glink.next->glink.prev = client;
|
||||
- }
|
||||
- oldclient->glink.next = client;
|
||||
+ client->tcpquota = &client->sctx->tcpquota;
|
||||
|
||||
client->dscp = ifp->dscp;
|
||||
|
||||
client->attributes |= NS_CLIENTATTR_TCP;
|
||||
- client->pipelined = true;
|
||||
client->mortal = true;
|
||||
|
||||
+ pipeline_attach(oldclient, client);
|
||||
+ client->pipelined = true;
|
||||
+
|
||||
isc_socket_attach(ifp->tcpsocket, &client->tcplistener);
|
||||
isc_socket_attach(sock, &client->tcpsocket);
|
||||
isc_socket_setname(client->tcpsocket, "worker-tcp", NULL);
|
||||
diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
|
||||
index 1f7973f9c5..aeed9ccdda 100644
|
||||
--- a/bin/named/include/named/client.h
|
||||
+++ b/bin/named/include/named/client.h
|
||||
@@ -134,6 +134,7 @@ struct ns_client {
|
||||
dns_name_t *signer; /*%< NULL if not valid sig */
|
||||
bool mortal; /*%< Die after handling request */
|
||||
bool pipelined; /*%< TCP queries not in sequence */
|
||||
+ isc_refcount_t *pipeline_refs;
|
||||
isc_quota_t *tcpquota;
|
||||
isc_quota_t *recursionquota;
|
||||
ns_interface_t *interface;
|
||||
@@ -167,7 +168,6 @@ struct ns_client {
|
||||
|
||||
ISC_LINK(ns_client_t) link;
|
||||
ISC_LINK(ns_client_t) rlink;
|
||||
- ISC_LINK(ns_client_t) glink;
|
||||
ISC_QLINK(ns_client_t) ilink;
|
||||
unsigned char cookie[8];
|
||||
uint32_t expire;
|
||||
--
2.20.1

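The patch above tracks the clients of a pipelined TCP connection with a shared reference counter instead of the glink list. The sketch below reduces that pattern to plain C, assuming nothing from the BIND sources: the counter is created by the client that accepted the connection, every worker attaches to it, and whichever client drops the last reference frees it (and, in named, would also detach the tcp-clients quota). Names are illustrative and locking is omitted.

/*
 * Illustrative sketch only: shared reference counter for a pipeline
 * group, hypothetical names, single-threaded.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct { int refs; } pipeline_t;

static pipeline_t *pipeline_init(void) {
	pipeline_t *p = malloc(sizeof(*p));
	assert(p != NULL);
	p->refs = 1;                    /* the accepting client */
	return p;
}

static pipeline_t *pipeline_attach(pipeline_t *p) {
	p->refs++;                      /* a worker joins the group */
	return p;
}

/* Returns true when the caller held the last reference. */
static bool pipeline_detach(pipeline_t *p) {
	assert(p->refs > 0);
	if (--p->refs == 0) {
		free(p);                /* here named also releases the quota */
		return true;
	}
	return false;
}

int main(void) {
	pipeline_t *group = pipeline_init();
	pipeline_t *worker = pipeline_attach(group);

	assert(!pipeline_detach(worker)); /* group still alive */
	assert(pipeline_detach(group));   /* last ref: freed   */
	return 0;
}
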
@@ -0,0 +1,512 @@
Backport patch to fix CVE-2018-5743.

Ref:
https://security-tracker.debian.org/tracker/CVE-2018-5743

CVE: CVE-2018-5743
Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/2ab8a08]

Signed-off-by: Kai Kang <kai.kang@windriver.com>

From 2ab8a085b3c666f28f1f9229bd6ecb59915b26c3 Mon Sep 17 00:00:00 2001
From: Evan Hunt <each@isc.org>
Date: Fri, 5 Apr 2019 16:12:18 -0700
Subject: [PATCH 4/6] better tcpquota accounting and client mortality checks

- ensure that tcpactive is cleaned up correctly when accept() fails.
- set 'client->tcpattached' when the client is attached to the tcpquota.
  carry this value on to new clients sharing the same pipeline group.
  don't call isc_quota_detach() on the tcpquota unless tcpattached is
  set. this way clients that were allowed to accept TCP connections
  despite being over quota (and therefore, were never attached to the
  quota) will not inadvertently detach from it and mess up the
  accounting.
- simplify the code for tcpquota disconnection by using a new function
  tcpquota_disconnect().
- before deciding whether to reject a new connection due to quota
  exhaustion, check to see whether there are at least two active
  clients. previously, this was "at least one", but that could be
  insufficient if there was one other client in READING state (waiting
  for messages on an open connection) but none in READY (listening
  for new connections).
- before deciding whether a TCP client object can go inactive, we
  must ensure there are enough other clients to maintain service
  afterward -- both accepting new connections and reading/processing new
  queries. A TCP client can't shut down unless at least one
  client is accepting new connections and (in the case of pipelined
  clients) at least one additional client is waiting to read.

(cherry picked from commit c7394738b2445c16f728a88394864dd61baad900)
(cherry picked from commit e965d5f11d3d0f6d59704e614fceca2093cb1856)
(cherry picked from commit 87d431161450777ea093821212abfb52d51b36e3)
---
 bin/named/client.c               | 244 +++++++++++++++++++------------
 bin/named/include/named/client.h |   3 +-
 2 files changed, 152 insertions(+), 95 deletions(-)

diff --git a/bin/named/client.c b/bin/named/client.c
|
||||
index 277656cef0..61e96dd28c 100644
|
||||
--- a/bin/named/client.c
|
||||
+++ b/bin/named/client.c
|
||||
@@ -244,13 +244,14 @@ static void client_start(isc_task_t *task, isc_event_t *event);
|
||||
static void client_request(isc_task_t *task, isc_event_t *event);
|
||||
static void ns_client_dumpmessage(ns_client_t *client, const char *reason);
|
||||
static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
|
||||
- dns_dispatch_t *disp, bool tcp);
|
||||
+ dns_dispatch_t *disp, ns_client_t *oldclient,
|
||||
+ bool tcp);
|
||||
static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp,
|
||||
isc_socket_t *sock, ns_client_t *oldclient);
|
||||
static inline bool
|
||||
allowed(isc_netaddr_t *addr, dns_name_t *signer,
|
||||
isc_netaddr_t *ecs_addr, uint8_t ecs_addrlen,
|
||||
- uint8_t *ecs_scope, dns_acl_t *acl)
|
||||
+ uint8_t *ecs_scope, dns_acl_t *acl);
|
||||
static void compute_cookie(ns_client_t *client, uint32_t when,
|
||||
uint32_t nonce, const unsigned char *secret,
|
||||
isc_buffer_t *buf);
|
||||
@@ -319,7 +320,7 @@ pipeline_init(ns_client_t *client) {
|
||||
* contention here is expected to be negligible, given that this code
|
||||
* is only executed for TCP connections.
|
||||
*/
|
||||
- refs = isc_mem_allocate(client->sctx->mctx, sizeof(*refs));
|
||||
+ refs = isc_mem_allocate(ns_g_mctx, sizeof(*refs));
|
||||
isc_refcount_init(refs, 1);
|
||||
client->pipeline_refs = refs;
|
||||
}
|
||||
@@ -331,13 +332,13 @@ pipeline_init(ns_client_t *client) {
|
||||
*/
|
||||
static void
|
||||
pipeline_attach(ns_client_t *source, ns_client_t *target) {
|
||||
- int old_refs;
|
||||
+ int refs;
|
||||
|
||||
REQUIRE(source->pipeline_refs != NULL);
|
||||
REQUIRE(target->pipeline_refs == NULL);
|
||||
|
||||
- old_refs = isc_refcount_increment(source->pipeline_refs);
|
||||
- INSIST(old_refs > 0);
|
||||
+ isc_refcount_increment(source->pipeline_refs, &refs);
|
||||
+ INSIST(refs > 1);
|
||||
target->pipeline_refs = source->pipeline_refs;
|
||||
}
|
||||
|
||||
@@ -349,25 +350,51 @@ pipeline_attach(ns_client_t *source, ns_client_t *target) {
|
||||
*/
|
||||
static bool
|
||||
pipeline_detach(ns_client_t *client) {
|
||||
- isc_refcount_t *refs;
|
||||
- int old_refs;
|
||||
+ isc_refcount_t *refcount;
|
||||
+ int refs;
|
||||
|
||||
REQUIRE(client->pipeline_refs != NULL);
|
||||
|
||||
- refs = client->pipeline_refs;
|
||||
+ refcount = client->pipeline_refs;
|
||||
client->pipeline_refs = NULL;
|
||||
|
||||
- old_refs = isc_refcount_decrement(refs);
|
||||
- INSIST(old_refs > 0);
|
||||
+ isc_refcount_decrement(refcount, refs);
|
||||
|
||||
- if (old_refs == 1) {
|
||||
- isc_mem_free(client->sctx->mctx, refs);
|
||||
+ if (refs == 0) {
|
||||
+ isc_mem_free(ns_g_mctx, refs);
|
||||
return (true);
|
||||
}
|
||||
|
||||
return (false);
|
||||
}
|
||||
|
||||
+/*
|
||||
+ * Detach a client from the TCP client quota if appropriate, and set
|
||||
+ * the quota pointer to NULL.
|
||||
+ *
|
||||
+ * Sometimes when the TCP client quota is exhausted but there are no other
|
||||
+ * clients servicing the interface, a client will be allowed to continue
|
||||
+ * running despite not having been attached to the quota. In this event,
|
||||
+ * the TCP quota was never attached to the client, so when the client (or
|
||||
+ * associated pipeline group) shuts down, the quota must NOT be detached.
|
||||
+ *
|
||||
+ * Otherwise, if the quota pointer is set, it should be detached. If not
|
||||
+ * set at all, we just return without doing anything.
|
||||
+ */
|
||||
+static void
|
||||
+tcpquota_disconnect(ns_client_t *client) {
|
||||
+ if (client->tcpquota == NULL) {
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
+ if (client->tcpattached) {
|
||||
+ isc_quota_detach(&client->tcpquota);
|
||||
+ client->tcpattached = false;
|
||||
+ } else {
|
||||
+ client->tcpquota = NULL;
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
/*%
|
||||
* Check for a deactivation or shutdown request and take appropriate
|
||||
* action. Returns true if either is in progress; in this case
|
||||
@@ -490,38 +517,31 @@ exit_check(ns_client_t *client) {
|
||||
client->tcpmsg_valid = false;
|
||||
}
|
||||
|
||||
- if (client->tcpquota != NULL) {
|
||||
- if (client->pipeline_refs == NULL ||
|
||||
- pipeline_detach(client))
|
||||
- {
|
||||
- /*
|
||||
- * Only detach from the TCP client quota if
|
||||
- * there are no more client structures using
|
||||
- * this TCP connection.
|
||||
- *
|
||||
- * Note that we check 'pipeline_refs' and not
|
||||
- * 'pipelined' because in some cases (e.g.
|
||||
- * after receiving a request with an opcode
|
||||
- * different than QUERY) 'pipelined' is set to
|
||||
- * false after the reference counter gets
|
||||
- * allocated in pipeline_init() and we must
|
||||
- * still drop our reference as failing to do so
|
||||
- * would prevent the reference counter itself
|
||||
- * from being freed.
|
||||
- */
|
||||
- isc_quota_detach(&client->tcpquota);
|
||||
- } else {
|
||||
- /*
|
||||
- * There are other client structures using this
|
||||
- * TCP connection, so we cannot detach from the
|
||||
- * TCP client quota to prevent excess TCP
|
||||
- * connections from being accepted. However,
|
||||
- * this client structure might later be reused
|
||||
- * for accepting new connections and thus must
|
||||
- * have its 'tcpquota' field set to NULL.
|
||||
- */
|
||||
- client->tcpquota = NULL;
|
||||
- }
|
||||
+ /*
|
||||
+ * Detach from pipeline group and from TCP client quota,
|
||||
+ * if appropriate.
|
||||
+ *
|
||||
+ * - If no pipeline group is active, attempt to
|
||||
+ * detach from the TCP client quota.
|
||||
+ *
|
||||
+ * - If a pipeline group is active, detach from it;
|
||||
+ * if the return code indicates that there no more
|
||||
+ * clients left if this pipeline group, we also detach
|
||||
+ * from the TCP client quota.
|
||||
+ *
|
||||
+ * - Otherwise we don't try to detach, we just set the
|
||||
+ * TCP quota pointer to NULL if it wasn't NULL already.
|
||||
+ *
|
||||
+ * tcpquota_disconnect() will set tcpquota to NULL, either
|
||||
+ * by detaching it or by assignment, depending on the
|
||||
+ * needs of the client. See the comments on that function
|
||||
+ * for further information.
|
||||
+ */
|
||||
+ if (client->pipeline_refs == NULL || pipeline_detach(client)) {
|
||||
+ tcpquota_disconnect(client);
|
||||
+ } else {
|
||||
+ client->tcpquota = NULL;
|
||||
+ client->tcpattached = false;
|
||||
}
|
||||
|
||||
if (client->tcpsocket != NULL) {
|
||||
@@ -544,8 +564,6 @@ exit_check(ns_client_t *client) {
|
||||
client->timerset = false;
|
||||
}
|
||||
|
||||
- client->pipelined = false;
|
||||
-
|
||||
client->peeraddr_valid = false;
|
||||
|
||||
client->state = NS_CLIENTSTATE_READY;
|
||||
@@ -558,18 +576,27 @@ exit_check(ns_client_t *client) {
|
||||
* active and force it to go inactive if not.
|
||||
*
|
||||
* UDP clients go inactive at this point, but a TCP client
|
||||
- * will needs to remain active if no other clients are
|
||||
- * listening for TCP requests on this interface, to
|
||||
- * prevent this interface from going nonresponsive.
|
||||
+ * may need to remain active and go into ready state if
|
||||
+ * no other clients are available to listen for TCP
|
||||
+ * requests on this interface or (in the case of pipelined
|
||||
+ * clients) to read for additional messages on the current
|
||||
+ * connection.
|
||||
*/
|
||||
if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) {
|
||||
LOCK(&client->interface->lock);
|
||||
- if (client->interface->ntcpaccepting == 0) {
|
||||
+ if ((client->interface->ntcpaccepting == 0 ||
|
||||
+ (client->pipelined &&
|
||||
+ client->interface->ntcpactive < 2)) &&
|
||||
+ client->newstate != NS_CLIENTSTATE_FREED)
|
||||
+ {
|
||||
client->mortal = false;
|
||||
+ client->newstate = NS_CLIENTSTATE_READY;
|
||||
}
|
||||
UNLOCK(&client->interface->lock);
|
||||
}
|
||||
|
||||
+ client->pipelined = false;
|
||||
+
|
||||
/*
|
||||
* We don't need the client; send it to the inactive
|
||||
* queue for recycling.
|
||||
@@ -2634,6 +2661,18 @@ client_request(isc_task_t *task, isc_event_t *event) {
|
||||
client->pipelined = false;
|
||||
}
|
||||
if (TCP_CLIENT(client) && client->pipelined) {
|
||||
+ /*
|
||||
+ * We're pipelining. Replace the client; the
|
||||
+ * the replacement can read the TCP socket looking
|
||||
+ * for new messages and this client can process the
|
||||
+ * current message asynchronously.
|
||||
+ *
|
||||
+ * There are now at least three clients using this
|
||||
+ * TCP socket - one accepting new connections,
|
||||
+ * one reading an existing connection to get new
|
||||
+ * messages, and one answering the message already
|
||||
+ * received.
|
||||
+ */
|
||||
result = ns_client_replace(client);
|
||||
if (result != ISC_R_SUCCESS) {
|
||||
client->pipelined = false;
|
||||
@@ -3197,6 +3236,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
|
||||
client->pipelined = false;
|
||||
client->pipeline_refs = NULL;
|
||||
client->tcpquota = NULL;
|
||||
+ client->tcpattached = false;
|
||||
client->recursionquota = NULL;
|
||||
client->interface = NULL;
|
||||
client->peeraddr_valid = false;
|
||||
@@ -3359,9 +3399,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
|
||||
NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3),
|
||||
"accept failed: %s",
|
||||
isc_result_totext(nevent->result));
|
||||
- if (client->tcpquota != NULL) {
|
||||
- isc_quota_detach(&client->tcpquota);
|
||||
- }
|
||||
+ tcpquota_disconnect(client);
|
||||
}
|
||||
|
||||
if (exit_check(client))
|
||||
@@ -3402,7 +3440,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
|
||||
client->pipelined = false;
|
||||
result = ns_client_replace(client);
|
||||
if (result == ISC_R_SUCCESS &&
|
||||
- (client->sctx->keepresporder == NULL ||
|
||||
+ (ns_g_server->keepresporder == NULL ||
|
||||
!allowed(&netaddr, NULL, NULL, 0, NULL,
|
||||
ns_g_server->keepresporder)))
|
||||
{
|
||||
@@ -3429,7 +3467,7 @@ client_accept(ns_client_t *client) {
|
||||
* in named.conf. If we can't attach to it here, that means the TCP
|
||||
* client quota has been exceeded.
|
||||
*/
|
||||
- result = isc_quota_attach(&client->sctx->tcpquota,
|
||||
+ result = isc_quota_attach(&ns_g_server->tcpquota,
|
||||
&client->tcpquota);
|
||||
if (result != ISC_R_SUCCESS) {
|
||||
bool exit;
|
||||
@@ -3447,27 +3485,27 @@ client_accept(ns_client_t *client) {
|
||||
* interface to be starved, with no clients able
|
||||
* to accept new connections.
|
||||
*
|
||||
- * So, we check here to see if any other client
|
||||
- * is already servicing TCP queries on this
|
||||
+ * So, we check here to see if any other clients
|
||||
+ * are already servicing TCP queries on this
|
||||
* interface (whether accepting, reading, or
|
||||
- * processing).
|
||||
- *
|
||||
- * If so, then it's okay *not* to call
|
||||
- * accept - we can let this client to go inactive
|
||||
- * and the other one handle the next connection
|
||||
- * when it's ready.
|
||||
+ * processing). If there are at least two
|
||||
+ * (one reading and one processing a request)
|
||||
+ * then it's okay *not* to call accept - we
|
||||
+ * can let this client go inactive and another
|
||||
+ * one will resume accepting when it's done.
|
||||
*
|
||||
- * But if not, then we need to be a little bit
|
||||
- * flexible about the quota. We allow *one* extra
|
||||
- * TCP client through, to ensure we're listening on
|
||||
- * every interface.
|
||||
+ * If there aren't enough active clients on the
|
||||
+ * interface, then we can be a little bit
|
||||
+ * flexible about the quota. We'll allow *one*
|
||||
+ * extra client through to ensure we're listening
|
||||
+ * on every interface.
|
||||
*
|
||||
- * (Note: In practice this means that the *real*
|
||||
- * TCP client quota is tcp-clients plus the number
|
||||
- * of interfaces.)
|
||||
+ * (Note: In practice this means that the real
|
||||
+ * TCP client quota is tcp-clients plus the
|
||||
+ * number of listening interfaces plus 2.)
|
||||
*/
|
||||
LOCK(&client->interface->lock);
|
||||
- exit = (client->interface->ntcpactive > 0);
|
||||
+ exit = (client->interface->ntcpactive > 1);
|
||||
UNLOCK(&client->interface->lock);
|
||||
|
||||
if (exit) {
|
||||
@@ -3475,6 +3513,9 @@ client_accept(ns_client_t *client) {
|
||||
(void)exit_check(client);
|
||||
return;
|
||||
}
|
||||
+
|
||||
+ } else {
|
||||
+ client->tcpattached = true;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -3507,9 +3548,16 @@ client_accept(ns_client_t *client) {
|
||||
UNEXPECTED_ERROR(__FILE__, __LINE__,
|
||||
"isc_socket_accept() failed: %s",
|
||||
isc_result_totext(result));
|
||||
- if (client->tcpquota != NULL) {
|
||||
- isc_quota_detach(&client->tcpquota);
|
||||
+
|
||||
+ tcpquota_disconnect(client);
|
||||
+
|
||||
+ if (client->tcpactive) {
|
||||
+ LOCK(&client->interface->lock);
|
||||
+ client->interface->ntcpactive--;
|
||||
+ UNLOCK(&client->interface->lock);
|
||||
+ client->tcpactive = false;
|
||||
}
|
||||
+
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -3527,13 +3575,12 @@ client_accept(ns_client_t *client) {
|
||||
* once the connection is established.
|
||||
*
|
||||
* When the client object is shutting down after handling a TCP
|
||||
- * request (see exit_check()), it looks to see whether this value is
|
||||
- * non-zero. If so, that means another client has already called
|
||||
- * accept() and is waiting to establish the next connection, which
|
||||
- * means the first client is free to go inactive. Otherwise,
|
||||
- * the first client must come back and call accept() again; this
|
||||
- * guarantees there will always be at least one client listening
|
||||
- * for new TCP connections on each interface.
|
||||
+ * request (see exit_check()), if this value is at least one, that
|
||||
+ * means another client has called accept() and is waiting to
|
||||
+ * establish the next connection. That means the client may be
|
||||
+ * be free to become inactive; otherwise it may need to start
|
||||
+ * listening for connections itself to prevent the interface
|
||||
+ * going dead.
|
||||
*/
|
||||
LOCK(&client->interface->lock);
|
||||
client->interface->ntcpaccepting++;
|
||||
@@ -3613,19 +3660,19 @@ ns_client_replace(ns_client_t *client) {
|
||||
client->tcpsocket, client);
|
||||
} else {
|
||||
result = get_client(client->manager, client->interface,
|
||||
- client->dispatch, tcp);
|
||||
+ client->dispatch, client, tcp);
|
||||
+
|
||||
+ /*
|
||||
+ * The responsibility for listening for new requests is hereby
|
||||
+ * transferred to the new client. Therefore, the old client
|
||||
+ * should refrain from listening for any more requests.
|
||||
+ */
|
||||
+ client->mortal = true;
|
||||
}
|
||||
if (result != ISC_R_SUCCESS) {
|
||||
return (result);
|
||||
}
|
||||
|
||||
- /*
|
||||
- * The responsibility for listening for new requests is hereby
|
||||
- * transferred to the new client. Therefore, the old client
|
||||
- * should refrain from listening for any more requests.
|
||||
- */
|
||||
- client->mortal = true;
|
||||
-
|
||||
return (ISC_R_SUCCESS);
|
||||
}
|
||||
|
||||
@@ -3759,7 +3806,7 @@ ns_clientmgr_destroy(ns_clientmgr_t **managerp) {
|
||||
|
||||
static isc_result_t
|
||||
get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
|
||||
- dns_dispatch_t *disp, bool tcp)
|
||||
+ dns_dispatch_t *disp, ns_client_t *oldclient, bool tcp)
|
||||
{
|
||||
isc_result_t result = ISC_R_SUCCESS;
|
||||
isc_event_t *ev;
|
||||
@@ -3803,6 +3850,16 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
|
||||
client->dscp = ifp->dscp;
|
||||
|
||||
if (tcp) {
|
||||
+ client->tcpattached = false;
|
||||
+ if (oldclient != NULL) {
|
||||
+ client->tcpattached = oldclient->tcpattached;
|
||||
+ }
|
||||
+
|
||||
+ LOCK(&client->interface->lock);
|
||||
+ client->interface->ntcpactive++;
|
||||
+ UNLOCK(&client->interface->lock);
|
||||
+ client->tcpactive = true;
|
||||
+
|
||||
client->attributes |= NS_CLIENTATTR_TCP;
|
||||
isc_socket_attach(ifp->tcpsocket,
|
||||
&client->tcplistener);
|
||||
@@ -3866,7 +3923,8 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
|
||||
ns_interface_attach(ifp, &client->interface);
|
||||
client->newstate = client->state = NS_CLIENTSTATE_WORKING;
|
||||
INSIST(client->recursionquota == NULL);
|
||||
- client->tcpquota = &client->sctx->tcpquota;
|
||||
+ client->tcpquota = &ns_g_server->tcpquota;
|
||||
+ client->tcpattached = oldclient->tcpattached;
|
||||
|
||||
client->dscp = ifp->dscp;
|
||||
|
||||
@@ -3885,7 +3943,6 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
|
||||
LOCK(&client->interface->lock);
|
||||
client->interface->ntcpactive++;
|
||||
UNLOCK(&client->interface->lock);
|
||||
-
|
||||
client->tcpactive = true;
|
||||
|
||||
INSIST(client->tcpmsg_valid == false);
|
||||
@@ -3913,7 +3970,8 @@ ns_clientmgr_createclients(ns_clientmgr_t *manager, unsigned int n,
|
||||
MTRACE("createclients");
|
||||
|
||||
for (disp = 0; disp < n; disp++) {
|
||||
- result = get_client(manager, ifp, ifp->udpdispatch[disp], tcp);
|
||||
+ result = get_client(manager, ifp, ifp->udpdispatch[disp],
|
||||
+ NULL, tcp);
|
||||
if (result != ISC_R_SUCCESS)
|
||||
break;
|
||||
}
|
||||
diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
|
||||
index aeed9ccdda..e2c40acd28 100644
|
||||
--- a/bin/named/include/named/client.h
|
||||
+++ b/bin/named/include/named/client.h
|
||||
@@ -9,8 +9,6 @@
|
||||
* information regarding copyright ownership.
|
||||
*/
|
||||
|
||||
-/* $Id: client.h,v 1.96 2012/01/31 23:47:31 tbox Exp $ */
|
||||
-
|
||||
#ifndef NAMED_CLIENT_H
|
||||
#define NAMED_CLIENT_H 1
|
||||
|
||||
@@ -136,6 +134,7 @@ struct ns_client {
|
||||
bool pipelined; /*%< TCP queries not in sequence */
|
||||
isc_refcount_t *pipeline_refs;
|
||||
isc_quota_t *tcpquota;
|
||||
+ bool tcpattached;
|
||||
isc_quota_t *recursionquota;
|
||||
ns_interface_t *interface;
|
||||
|
||||
--
2.20.1

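The patch above adds the 'tcpattached' flag so a client only returns a tcp-clients quota slot it actually took; overrun clients admitted while the quota was full never detach from it. A small self-contained model of that invariant follows, with hypothetical types standing in for isc_quota_t and ns_client_t; it is a sketch of the idea, not the named code.

/*
 * Illustrative sketch only: 'tcpattached' bookkeeping, hypothetical
 * names, single-threaded.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct quota { int used, max; };
struct client {
	struct quota *tcpquota;
	bool tcpattached;
};

static bool quota_attach(struct quota *q, struct client *c) {
	c->tcpquota = q;
	if (q->used < q->max) {
		q->used++;
		c->tcpattached = true;  /* we own a slot */
		return true;
	}
	c->tcpattached = false;         /* overrun: no slot taken */
	return false;
}

static void tcpquota_disconnect(struct client *c) {
	if (c->tcpquota == NULL)
		return;
	if (c->tcpattached) {
		c->tcpquota->used--;    /* return the slot we took */
		c->tcpattached = false;
	}
	c->tcpquota = NULL;
}

int main(void) {
	struct quota q = { 0, 1 };
	struct client a = { 0 }, b = { 0 };

	quota_attach(&q, &a);           /* takes the only slot      */
	quota_attach(&q, &b);           /* overrun, not attached    */
	tcpquota_disconnect(&b);        /* must not touch q.used    */
	assert(q.used == 1);
	tcpquota_disconnect(&a);
	assert(q.used == 0);
	return 0;
}
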
@@ -0,0 +1,911 @@
Backport patch to fix CVE-2018-5743.

Ref:
https://security-tracker.debian.org/tracker/CVE-2018-5743

CVE: CVE-2018-5743
Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/c47ccf6]

Signed-off-by: Kai Kang <kai.kang@windriver.com>

From c47ccf630f147378568b33e8fdb7b754f228c346 Mon Sep 17 00:00:00 2001
From: Evan Hunt <each@isc.org>
Date: Fri, 5 Apr 2019 16:26:05 -0700
Subject: [PATCH 5/6] refactor tcpquota and pipeline refs; allow special-case
 overrun in isc_quota

- if the TCP quota has been exceeded but there are no clients listening
  for new connections on the interface, we can now force attachment to the
  quota using isc_quota_force(), instead of carrying on with the quota not
  attached.
- the TCP client quota is now referenced via a reference-counted
  'ns_tcpconn' object, one of which is created whenever a client begins
  listening for new connections, and attached to by members of that
  client's pipeline group. when the last reference to the tcpconn
  object is detached, it is freed and the TCP quota slot is released.
- reduce code duplication by adding mark_tcp_active() function.
- convert counters to atomic.

(cherry picked from commit 7e8222378ca24f1302a0c1c638565050ab04681b)
(cherry picked from commit 4939451275722bfda490ea86ca13e84f6bc71e46)
(cherry picked from commit 13f7c918b8720d890408f678bd73c20e634539d9)
---
 bin/named/client.c                     | 444 +++++++++++--------------
 bin/named/include/named/client.h       |  12 +-
 bin/named/include/named/interfacemgr.h |   6 +-
 bin/named/interfacemgr.c               |   1 +
 lib/isc/include/isc/quota.h            |   7 +
 lib/isc/quota.c                        |  33 +-
 lib/isc/win32/libisc.def.in            |   1 +
 7 files changed, 236 insertions(+), 268 deletions(-)

diff --git a/bin/named/client.c b/bin/named/client.c
|
||||
index 61e96dd28c..d826ab32bf 100644
|
||||
--- a/bin/named/client.c
|
||||
+++ b/bin/named/client.c
|
||||
@@ -244,8 +244,7 @@ static void client_start(isc_task_t *task, isc_event_t *event);
|
||||
static void client_request(isc_task_t *task, isc_event_t *event);
|
||||
static void ns_client_dumpmessage(ns_client_t *client, const char *reason);
|
||||
static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
|
||||
- dns_dispatch_t *disp, ns_client_t *oldclient,
|
||||
- bool tcp);
|
||||
+ dns_dispatch_t *disp, bool tcp);
|
||||
static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp,
|
||||
isc_socket_t *sock, ns_client_t *oldclient);
|
||||
static inline bool
|
||||
@@ -301,16 +300,32 @@ ns_client_settimeout(ns_client_t *client, unsigned int seconds) {
|
||||
}
|
||||
|
||||
/*%
|
||||
- * Allocate a reference counter that will track the number of client structures
|
||||
- * using the TCP connection that 'client' called accept() for. This counter
|
||||
- * will be shared between all client structures associated with this TCP
|
||||
- * connection.
|
||||
+ * Allocate a reference-counted object that will maintain a single pointer to
|
||||
+ * the (also reference-counted) TCP client quota, shared between all the
|
||||
+ * clients processing queries on a single TCP connection, so that all
|
||||
+ * clients sharing the one socket will together consume only one slot in
|
||||
+ * the 'tcp-clients' quota.
|
||||
*/
|
||||
-static void
|
||||
-pipeline_init(ns_client_t *client) {
|
||||
- isc_refcount_t *refs;
|
||||
+static isc_result_t
|
||||
+tcpconn_init(ns_client_t *client, bool force) {
|
||||
+ isc_result_t result;
|
||||
+ isc_quota_t *quota = NULL;
|
||||
+ ns_tcpconn_t *tconn = NULL;
|
||||
|
||||
- REQUIRE(client->pipeline_refs == NULL);
|
||||
+ REQUIRE(client->tcpconn == NULL);
|
||||
+
|
||||
+ /*
|
||||
+ * Try to attach to the quota first, so we won't pointlessly
|
||||
+ * allocate memory for a tcpconn object if we can't get one.
|
||||
+ */
|
||||
+ if (force) {
|
||||
+ result = isc_quota_force(&ns_g_server->tcpquota, "a);
|
||||
+ } else {
|
||||
+ result = isc_quota_attach(&ns_g_server->tcpquota, "a);
|
||||
+ }
|
||||
+ if (result != ISC_R_SUCCESS) {
|
||||
+ return (result);
|
||||
+ }
|
||||
|
||||
/*
|
||||
* A global memory context is used for the allocation as different
|
||||
@@ -320,78 +335,80 @@ pipeline_init(ns_client_t *client) {
|
||||
* contention here is expected to be negligible, given that this code
|
||||
* is only executed for TCP connections.
|
||||
*/
|
||||
- refs = isc_mem_allocate(ns_g_mctx, sizeof(*refs));
|
||||
- isc_refcount_init(refs, 1);
|
||||
- client->pipeline_refs = refs;
|
||||
+ tconn = isc_mem_allocate(ns_g_mctx, sizeof(*tconn));
|
||||
+
|
||||
+ isc_refcount_init(&tconn->refs, 1);
|
||||
+ tconn->tcpquota = quota;
|
||||
+ quota = NULL;
|
||||
+ tconn->pipelined = false;
|
||||
+
|
||||
+ client->tcpconn = tconn;
|
||||
+
|
||||
+ return (ISC_R_SUCCESS);
|
||||
}
|
||||
|
||||
/*%
|
||||
- * Increase the count of client structures using the TCP connection that
|
||||
- * 'source' is associated with and put a pointer to that count in 'target',
|
||||
- * thus associating it with the same TCP connection.
|
||||
+ * Increase the count of client structures sharing the TCP connection
|
||||
+ * that 'source' is associated with; add a pointer to the same tcpconn
|
||||
+ * to 'target', thus associating it with the same TCP connection.
|
||||
*/
|
||||
static void
|
||||
-pipeline_attach(ns_client_t *source, ns_client_t *target) {
|
||||
+tcpconn_attach(ns_client_t *source, ns_client_t *target) {
|
||||
int refs;
|
||||
|
||||
- REQUIRE(source->pipeline_refs != NULL);
|
||||
- REQUIRE(target->pipeline_refs == NULL);
|
||||
+ REQUIRE(source->tcpconn != NULL);
|
||||
+ REQUIRE(target->tcpconn == NULL);
|
||||
+ REQUIRE(source->tcpconn->pipelined);
|
||||
|
||||
- isc_refcount_increment(source->pipeline_refs, &refs);
|
||||
+ isc_refcount_increment(&source->tcpconn->refs, &refs);
|
||||
INSIST(refs > 1);
|
||||
- target->pipeline_refs = source->pipeline_refs;
|
||||
+ target->tcpconn = source->tcpconn;
|
||||
}
|
||||
|
||||
/*%
|
||||
- * Decrease the count of client structures using the TCP connection that
|
||||
+ * Decrease the count of client structures sharing the TCP connection that
|
||||
* 'client' is associated with. If this is the last client using this TCP
|
||||
- * connection, free the reference counter and return true; otherwise, return
|
||||
- * false.
|
||||
+ * connection, we detach from the TCP quota and free the tcpconn
|
||||
+ * object. Either way, client->tcpconn is set to NULL.
|
||||
*/
|
||||
-static bool
|
||||
-pipeline_detach(ns_client_t *client) {
|
||||
- isc_refcount_t *refcount;
|
||||
+static void
|
||||
+tcpconn_detach(ns_client_t *client) {
|
||||
+ ns_tcpconn_t *tconn = NULL;
|
||||
int refs;
|
||||
|
||||
- REQUIRE(client->pipeline_refs != NULL);
|
||||
-
|
||||
- refcount = client->pipeline_refs;
|
||||
- client->pipeline_refs = NULL;
|
||||
+ REQUIRE(client->tcpconn != NULL);
|
||||
|
||||
- isc_refcount_decrement(refcount, refs);
|
||||
+ tconn = client->tcpconn;
|
||||
+ client->tcpconn = NULL;
|
||||
|
||||
+ isc_refcount_decrement(&tconn->refs, &refs);
|
||||
if (refs == 0) {
|
||||
- isc_mem_free(ns_g_mctx, refs);
|
||||
- return (true);
|
||||
+ isc_quota_detach(&tconn->tcpquota);
|
||||
+ isc_mem_free(ns_g_mctx, tconn);
|
||||
}
|
||||
-
|
||||
- return (false);
|
||||
}
|
||||
|
||||
-/*
|
||||
- * Detach a client from the TCP client quota if appropriate, and set
|
||||
- * the quota pointer to NULL.
|
||||
- *
|
||||
- * Sometimes when the TCP client quota is exhausted but there are no other
|
||||
- * clients servicing the interface, a client will be allowed to continue
|
||||
- * running despite not having been attached to the quota. In this event,
|
||||
- * the TCP quota was never attached to the client, so when the client (or
|
||||
- * associated pipeline group) shuts down, the quota must NOT be detached.
|
||||
+/*%
|
||||
+ * Mark a client as active and increment the interface's 'ntcpactive'
|
||||
+ * counter, as a signal that there is at least one client servicing
|
||||
+ * TCP queries for the interface. If we reach the TCP client quota at
|
||||
+ * some point, this will be used to determine whether a quota overrun
|
||||
+ * should be permitted.
|
||||
*
|
||||
- * Otherwise, if the quota pointer is set, it should be detached. If not
|
||||
- * set at all, we just return without doing anything.
|
||||
+ * Marking the client active with the 'tcpactive' flag ensures proper
|
||||
+ * accounting, by preventing us from incrementing or decrementing
|
||||
+ * 'ntcpactive' more than once per client.
|
||||
*/
|
||||
static void
|
||||
-tcpquota_disconnect(ns_client_t *client) {
|
||||
- if (client->tcpquota == NULL) {
|
||||
- return;
|
||||
- }
|
||||
-
|
||||
- if (client->tcpattached) {
|
||||
- isc_quota_detach(&client->tcpquota);
|
||||
- client->tcpattached = false;
|
||||
- } else {
|
||||
- client->tcpquota = NULL;
|
||||
+mark_tcp_active(ns_client_t *client, bool active) {
|
||||
+ if (active && !client->tcpactive) {
|
||||
+ isc_atomic_xadd(&client->interface->ntcpactive, 1);
|
||||
+ client->tcpactive = active;
|
||||
+ } else if (!active && client->tcpactive) {
|
||||
+ uint32_t old =
|
||||
+ isc_atomic_xadd(&client->interface->ntcpactive, -1);
|
||||
+ INSIST(old > 0);
|
||||
+ client->tcpactive = active;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -484,7 +501,8 @@ exit_check(ns_client_t *client) {
|
||||
INSIST(client->recursionquota == NULL);
|
||||
|
||||
if (NS_CLIENTSTATE_READING == client->newstate) {
|
||||
- if (!client->pipelined) {
|
||||
+ INSIST(client->tcpconn != NULL);
|
||||
+ if (!client->tcpconn->pipelined) {
|
||||
client_read(client);
|
||||
client->newstate = NS_CLIENTSTATE_MAX;
|
||||
return (true); /* We're done. */
|
||||
@@ -507,8 +525,8 @@ exit_check(ns_client_t *client) {
|
||||
dns_tcpmsg_cancelread(&client->tcpmsg);
|
||||
}
|
||||
|
||||
- if (client->nreads != 0) {
|
||||
- /* Still waiting for read cancel completion. */
|
||||
+ /* Still waiting for read cancel completion. */
|
||||
+ if (client->nreads > 0) {
|
||||
return (true);
|
||||
}
|
||||
|
||||
@@ -518,43 +536,45 @@ exit_check(ns_client_t *client) {
|
||||
}
|
||||
|
||||
/*
|
||||
- * Detach from pipeline group and from TCP client quota,
|
||||
- * if appropriate.
|
||||
+ * Soon the client will be ready to accept a new TCP
|
||||
+ * connection or UDP request, but we may have enough
|
||||
+ * clients doing that already. Check whether this client
|
||||
+ * needs to remain active and allow it go inactive if
|
||||
+ * not.
|
||||
*
|
||||
- * - If no pipeline group is active, attempt to
|
||||
- * detach from the TCP client quota.
|
||||
+ * UDP clients always go inactive at this point, but a TCP
|
||||
+ * client may need to stay active and return to READY
|
||||
+ * state if no other clients are available to listen
|
||||
+ * for TCP requests on this interface.
|
||||
*
|
||||
- * - If a pipeline group is active, detach from it;
|
||||
- * if the return code indicates that there no more
|
||||
- * clients left if this pipeline group, we also detach
|
||||
- * from the TCP client quota.
|
||||
- *
|
||||
- * - Otherwise we don't try to detach, we just set the
|
||||
- * TCP quota pointer to NULL if it wasn't NULL already.
|
||||
- *
|
||||
- * tcpquota_disconnect() will set tcpquota to NULL, either
|
||||
- * by detaching it or by assignment, depending on the
|
||||
- * needs of the client. See the comments on that function
|
||||
- * for further information.
|
||||
+ * Regardless, if we're going to FREED state, that means
|
||||
+ * the system is shutting down and we don't need to
|
||||
+ * retain clients.
|
||||
*/
|
||||
- if (client->pipeline_refs == NULL || pipeline_detach(client)) {
|
||||
- tcpquota_disconnect(client);
|
||||
- } else {
|
||||
- client->tcpquota = NULL;
|
||||
- client->tcpattached = false;
|
||||
+ if (client->mortal && TCP_CLIENT(client) &&
|
||||
+ client->newstate != NS_CLIENTSTATE_FREED &&
|
||||
+ !ns_g_clienttest &&
|
||||
+ isc_atomic_xadd(&client->interface->ntcpaccepting, 0) == 0)
|
||||
+ {
|
||||
+ /* Nobody else is accepting */
|
||||
+ client->mortal = false;
|
||||
+ client->newstate = NS_CLIENTSTATE_READY;
|
||||
+ }
|
||||
+
|
||||
+ /*
|
||||
+ * Detach from TCP connection and TCP client quota,
|
||||
+ * if appropriate. If this is the last reference to
|
||||
+ * the TCP connection in our pipeline group, the
|
||||
+ * TCP quota slot will be released.
|
||||
+ */
|
||||
+ if (client->tcpconn) {
|
||||
+ tcpconn_detach(client);
|
||||
}
|
||||
|
||||
if (client->tcpsocket != NULL) {
|
||||
CTRACE("closetcp");
|
||||
isc_socket_detach(&client->tcpsocket);
|
||||
-
|
||||
- if (client->tcpactive) {
|
||||
- LOCK(&client->interface->lock);
|
||||
- INSIST(client->interface->ntcpactive > 0);
|
||||
- client->interface->ntcpactive--;
|
||||
- UNLOCK(&client->interface->lock);
|
||||
- client->tcpactive = false;
|
||||
- }
|
||||
+ mark_tcp_active(client, false);
|
||||
}
|
||||
|
||||
if (client->timerset) {
|
||||
@@ -567,35 +587,6 @@ exit_check(ns_client_t *client) {
|
||||
client->peeraddr_valid = false;
|
||||
|
||||
client->state = NS_CLIENTSTATE_READY;
|
||||
- INSIST(client->recursionquota == NULL);
|
||||
-
|
||||
- /*
|
||||
- * Now the client is ready to accept a new TCP connection
|
||||
- * or UDP request, but we may have enough clients doing
|
||||
- * that already. Check whether this client needs to remain
|
||||
- * active and force it to go inactive if not.
|
||||
- *
|
||||
- * UDP clients go inactive at this point, but a TCP client
|
||||
- * may need to remain active and go into ready state if
|
||||
- * no other clients are available to listen for TCP
|
||||
- * requests on this interface or (in the case of pipelined
|
||||
- * clients) to read for additional messages on the current
|
||||
- * connection.
|
||||
- */
|
||||
- if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) {
|
||||
- LOCK(&client->interface->lock);
|
||||
- if ((client->interface->ntcpaccepting == 0 ||
|
||||
- (client->pipelined &&
|
||||
- client->interface->ntcpactive < 2)) &&
|
||||
- client->newstate != NS_CLIENTSTATE_FREED)
|
||||
- {
|
||||
- client->mortal = false;
|
||||
- client->newstate = NS_CLIENTSTATE_READY;
|
||||
- }
|
||||
- UNLOCK(&client->interface->lock);
|
||||
- }
|
||||
-
|
||||
- client->pipelined = false;
|
||||
|
||||
/*
|
||||
* We don't need the client; send it to the inactive
|
||||
@@ -630,7 +621,7 @@ exit_check(ns_client_t *client) {
|
||||
}
|
||||
|
||||
/* Still waiting for accept cancel completion. */
|
||||
- if (! (client->naccepts == 0)) {
|
||||
+ if (client->naccepts > 0) {
|
||||
return (true);
|
||||
}
|
||||
|
||||
@@ -641,7 +632,7 @@ exit_check(ns_client_t *client) {
|
||||
}
|
||||
|
||||
/* Still waiting for recv cancel completion. */
|
||||
- if (! (client->nrecvs == 0)) {
|
||||
+ if (client->nrecvs > 0) {
|
||||
return (true);
|
||||
}
|
||||
|
||||
@@ -654,14 +645,7 @@ exit_check(ns_client_t *client) {
|
||||
INSIST(client->recursionquota == NULL);
|
||||
if (client->tcplistener != NULL) {
|
||||
isc_socket_detach(&client->tcplistener);
|
||||
-
|
||||
- if (client->tcpactive) {
|
||||
- LOCK(&client->interface->lock);
|
||||
- INSIST(client->interface->ntcpactive > 0);
|
||||
- client->interface->ntcpactive--;
|
||||
- UNLOCK(&client->interface->lock);
|
||||
- client->tcpactive = false;
|
||||
- }
|
||||
+ mark_tcp_active(client, false);
|
||||
}
|
||||
if (client->udpsocket != NULL) {
|
||||
isc_socket_detach(&client->udpsocket);
|
||||
@@ -816,7 +800,7 @@ client_start(isc_task_t *task, isc_event_t *event) {
|
||||
return;
|
||||
|
||||
if (TCP_CLIENT(client)) {
|
||||
- if (client->pipelined) {
|
||||
+ if (client->tcpconn != NULL) {
|
||||
client_read(client);
|
||||
} else {
|
||||
client_accept(client);
|
||||
@@ -2470,6 +2454,7 @@ client_request(isc_task_t *task, isc_event_t *event) {
|
||||
client->nrecvs--;
|
||||
} else {
|
||||
INSIST(TCP_CLIENT(client));
|
||||
+ INSIST(client->tcpconn != NULL);
|
||||
REQUIRE(event->ev_type == DNS_EVENT_TCPMSG);
|
||||
REQUIRE(event->ev_sender == &client->tcpmsg);
|
||||
buffer = &client->tcpmsg.buffer;
|
||||
@@ -2657,17 +2642,19 @@ client_request(isc_task_t *task, isc_event_t *event) {
|
||||
/*
|
||||
* Pipeline TCP query processing.
|
||||
*/
|
||||
- if (client->message->opcode != dns_opcode_query) {
|
||||
- client->pipelined = false;
|
||||
+ if (TCP_CLIENT(client) &&
|
||||
+ client->message->opcode != dns_opcode_query)
|
||||
+ {
|
||||
+ client->tcpconn->pipelined = false;
|
||||
}
|
||||
- if (TCP_CLIENT(client) && client->pipelined) {
|
||||
+ if (TCP_CLIENT(client) && client->tcpconn->pipelined) {
|
||||
/*
|
||||
* We're pipelining. Replace the client; the
|
||||
- * the replacement can read the TCP socket looking
|
||||
- * for new messages and this client can process the
|
||||
+ * replacement can read the TCP socket looking
|
||||
+ * for new messages and this one can process the
|
||||
* current message asynchronously.
|
||||
*
|
||||
- * There are now at least three clients using this
|
||||
+ * There will now be at least three clients using this
|
||||
* TCP socket - one accepting new connections,
|
||||
* one reading an existing connection to get new
|
||||
* messages, and one answering the message already
|
||||
@@ -2675,7 +2662,7 @@ client_request(isc_task_t *task, isc_event_t *event) {
|
||||
*/
|
||||
result = ns_client_replace(client);
|
||||
if (result != ISC_R_SUCCESS) {
|
||||
- client->pipelined = false;
|
||||
+ client->tcpconn->pipelined = false;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3233,10 +3220,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
|
||||
client->signer = NULL;
|
||||
dns_name_init(&client->signername, NULL);
|
||||
client->mortal = false;
|
||||
- client->pipelined = false;
|
||||
- client->pipeline_refs = NULL;
|
||||
- client->tcpquota = NULL;
|
||||
- client->tcpattached = false;
|
||||
+ client->tcpconn = NULL;
|
||||
client->recursionquota = NULL;
|
||||
client->interface = NULL;
|
||||
client->peeraddr_valid = false;
|
||||
@@ -3341,9 +3325,10 @@ client_read(ns_client_t *client) {
|
||||
|
||||
static void
|
||||
client_newconn(isc_task_t *task, isc_event_t *event) {
|
||||
+ isc_result_t result;
|
||||
ns_client_t *client = event->ev_arg;
|
||||
isc_socket_newconnev_t *nevent = (isc_socket_newconnev_t *)event;
|
||||
- isc_result_t result;
|
||||
+ uint32_t old;
|
||||
|
||||
REQUIRE(event->ev_type == ISC_SOCKEVENT_NEWCONN);
|
||||
REQUIRE(NS_CLIENT_VALID(client));
|
||||
@@ -3363,10 +3348,8 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
|
||||
INSIST(client->naccepts == 1);
|
||||
client->naccepts--;
|
||||
|
||||
- LOCK(&client->interface->lock);
|
||||
- INSIST(client->interface->ntcpaccepting > 0);
|
||||
- client->interface->ntcpaccepting--;
|
||||
- UNLOCK(&client->interface->lock);
|
||||
+ old = isc_atomic_xadd(&client->interface->ntcpaccepting, -1);
|
||||
+ INSIST(old > 0);
|
||||
|
||||
/*
|
||||
* We must take ownership of the new socket before the exit
|
||||
@@ -3399,7 +3382,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
|
||||
NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3),
|
||||
"accept failed: %s",
|
||||
isc_result_totext(nevent->result));
|
||||
- tcpquota_disconnect(client);
|
||||
+ tcpconn_detach(client);
|
||||
}
|
||||
|
||||
if (exit_check(client))
|
||||
@@ -3437,15 +3420,13 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
|
||||
* telnetting to port 53 (once per CPU) will
|
||||
* deny service to legitimate TCP clients.
|
||||
*/
|
||||
- client->pipelined = false;
|
||||
result = ns_client_replace(client);
|
||||
if (result == ISC_R_SUCCESS &&
|
||||
(ns_g_server->keepresporder == NULL ||
|
||||
!allowed(&netaddr, NULL, NULL, 0, NULL,
|
||||
ns_g_server->keepresporder)))
|
||||
{
|
||||
- pipeline_init(client);
|
||||
- client->pipelined = true;
|
||||
+ client->tcpconn->pipelined = true;
|
||||
}
|
||||
|
||||
client_read(client);
|
||||
@@ -3462,78 +3443,59 @@ client_accept(ns_client_t *client) {
|
||||
CTRACE("accept");
|
||||
|
||||
/*
|
||||
- * The tcpquota object can only be simultaneously referenced a
|
||||
- * pre-defined number of times; this is configured by 'tcp-clients'
|
||||
- * in named.conf. If we can't attach to it here, that means the TCP
|
||||
- * client quota has been exceeded.
|
||||
+ * Set up a new TCP connection. This means try to attach to the
|
||||
+ * TCP client quota (tcp-clients), but fail if we're over quota.
|
||||
*/
|
||||
- result = isc_quota_attach(&ns_g_server->tcpquota,
|
||||
- &client->tcpquota);
|
||||
+ result = tcpconn_init(client, false);
|
||||
if (result != ISC_R_SUCCESS) {
|
||||
- bool exit;
|
||||
+ bool exit;
|
||||
|
||||
- ns_client_log(client, NS_LOGCATEGORY_CLIENT,
|
||||
- NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(1),
|
||||
- "no more TCP clients: %s",
|
||||
- isc_result_totext(result));
|
||||
-
|
||||
- /*
|
||||
- * We have exceeded the system-wide TCP client
|
||||
- * quota. But, we can't just block this accept
|
||||
- * in all cases, because if we did, a heavy TCP
|
||||
- * load on other interfaces might cause this
|
||||
- * interface to be starved, with no clients able
|
||||
- * to accept new connections.
|
||||
- *
|
||||
- * So, we check here to see if any other clients
|
||||
- * are already servicing TCP queries on this
|
||||
- * interface (whether accepting, reading, or
|
||||
- * processing). If there are at least two
|
||||
- * (one reading and one processing a request)
|
||||
- * then it's okay *not* to call accept - we
|
||||
- * can let this client go inactive and another
|
||||
- * one will resume accepting when it's done.
|
||||
- *
|
||||
- * If there aren't enough active clients on the
|
||||
- * interface, then we can be a little bit
|
||||
- * flexible about the quota. We'll allow *one*
|
||||
- * extra client through to ensure we're listening
|
||||
- * on every interface.
|
||||
- *
|
||||
- * (Note: In practice this means that the real
|
||||
- * TCP client quota is tcp-clients plus the
|
||||
- * number of listening interfaces plus 2.)
|
||||
- */
|
||||
- LOCK(&client->interface->lock);
|
||||
- exit = (client->interface->ntcpactive > 1);
|
||||
- UNLOCK(&client->interface->lock);
|
||||
+ ns_client_log(client, NS_LOGCATEGORY_CLIENT,
|
||||
+ NS_LOGMODULE_CLIENT, ISC_LOG_WARNING,
|
||||
+ "TCP client quota reached: %s",
|
||||
+ isc_result_totext(result));
|
||||
|
||||
- if (exit) {
|
||||
- client->newstate = NS_CLIENTSTATE_INACTIVE;
|
||||
- (void)exit_check(client);
|
||||
- return;
|
||||
- }
|
||||
+ /*
|
||||
+ * We have exceeded the system-wide TCP client quota. But,
|
||||
+ * we can't just block this accept in all cases, because if
|
||||
+ * we did, a heavy TCP load on other interfaces might cause
|
||||
+ * this interface to be starved, with no clients able to
|
||||
+ * accept new connections.
|
||||
+ *
|
||||
+ * So, we check here to see if any other clients are
|
||||
+ * already servicing TCP queries on this interface (whether
|
||||
+ * accepting, reading, or processing). If we find at least
|
||||
+ * one, then it's okay *not* to call accept - we can let this
|
||||
+ * client go inactive and another will take over when it's
|
||||
+ * done.
|
||||
+ *
|
||||
+ * If there aren't enough active clients on the interface,
|
||||
+ * then we can be a little bit flexible about the quota.
|
||||
+ * We'll allow *one* extra client through to ensure we're
|
||||
+ * listening on every interface; we do this by setting the
|
||||
+ * 'force' option to tcpconn_init().
|
||||
+ *
|
||||
+ * (Note: In practice this means that the real TCP client
|
||||
+ * quota is tcp-clients plus the number of listening
|
||||
+ * interfaces plus 1.)
|
||||
+ */
|
||||
+ exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) > 0);
|
||||
+ if (exit) {
|
||||
+ client->newstate = NS_CLIENTSTATE_INACTIVE;
|
||||
+ (void)exit_check(client);
|
||||
+ return;
|
||||
+ }
|
||||
|
||||
- } else {
|
||||
- client->tcpattached = true;
|
||||
+ result = tcpconn_init(client, true);
|
||||
+ RUNTIME_CHECK(result == ISC_R_SUCCESS);
|
||||
}
|
||||
|
||||
/*
|
||||
- * By incrementing the interface's ntcpactive counter we signal
|
||||
- * that there is at least one client servicing TCP queries for the
|
||||
- * interface.
|
||||
- *
|
||||
- * We also make note of the fact in the client itself with the
|
||||
- * tcpactive flag. This ensures proper accounting by preventing
|
||||
- * us from accidentally incrementing or decrementing ntcpactive
|
||||
- * more than once per client object.
|
||||
+ * If this client was set up using get_client() or get_worker(),
|
||||
+ * then TCP is already marked active. However, if it was restarted
|
||||
+ * from exit_check(), it might not be, so we take care of it now.
|
||||
*/
|
||||
- if (!client->tcpactive) {
|
||||
- LOCK(&client->interface->lock);
|
||||
- client->interface->ntcpactive++;
|
||||
- UNLOCK(&client->interface->lock);
|
||||
- client->tcpactive = true;
|
||||
- }
|
||||
+ mark_tcp_active(client, true);
|
||||
|
||||
result = isc_socket_accept(client->tcplistener, client->task,
|
||||
client_newconn, client);
|
||||
@@ -3549,15 +3511,8 @@ client_accept(ns_client_t *client) {
|
||||
"isc_socket_accept() failed: %s",
|
||||
isc_result_totext(result));
|
||||
|
||||
- tcpquota_disconnect(client);
|
||||
-
|
||||
- if (client->tcpactive) {
|
||||
- LOCK(&client->interface->lock);
|
||||
- client->interface->ntcpactive--;
|
||||
- UNLOCK(&client->interface->lock);
|
||||
- client->tcpactive = false;
|
||||
- }
|
||||
-
|
||||
+ tcpconn_detach(client);
|
||||
+ mark_tcp_active(client, false);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -3582,9 +3537,7 @@ client_accept(ns_client_t *client) {
|
||||
* listening for connections itself to prevent the interface
|
||||
* going dead.
|
||||
*/
|
||||
- LOCK(&client->interface->lock);
|
||||
- client->interface->ntcpaccepting++;
|
||||
- UNLOCK(&client->interface->lock);
|
||||
+ isc_atomic_xadd(&client->interface->ntcpaccepting, 1);
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -3655,24 +3608,25 @@ ns_client_replace(ns_client_t *client) {
|
||||
REQUIRE(client->manager != NULL);
|
||||
|
||||
tcp = TCP_CLIENT(client);
|
||||
- if (tcp && client->pipelined) {
|
||||
+ if (tcp && client->tcpconn != NULL && client->tcpconn->pipelined) {
|
||||
result = get_worker(client->manager, client->interface,
|
||||
client->tcpsocket, client);
|
||||
} else {
|
||||
result = get_client(client->manager, client->interface,
|
||||
- client->dispatch, client, tcp);
|
||||
+ client->dispatch, tcp);
|
||||
|
||||
- /*
|
||||
- * The responsibility for listening for new requests is hereby
|
||||
- * transferred to the new client. Therefore, the old client
|
||||
- * should refrain from listening for any more requests.
|
||||
- */
|
||||
- client->mortal = true;
|
||||
}
|
||||
if (result != ISC_R_SUCCESS) {
|
||||
return (result);
|
||||
}
|
||||
|
||||
+ /*
|
||||
+ * The responsibility for listening for new requests is hereby
|
||||
+ * transferred to the new client. Therefore, the old client
|
||||
+ * should refrain from listening for any more requests.
|
||||
+ */
|
||||
+ client->mortal = true;
|
||||
+
|
||||
return (ISC_R_SUCCESS);
|
||||
}
|
||||
|
||||
@@ -3806,7 +3760,7 @@ ns_clientmgr_destroy(ns_clientmgr_t **managerp) {
|
||||
|
||||
static isc_result_t
|
||||
get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
|
||||
- dns_dispatch_t *disp, ns_client_t *oldclient, bool tcp)
|
||||
+ dns_dispatch_t *disp, bool tcp)
|
||||
{
|
||||
isc_result_t result = ISC_R_SUCCESS;
|
||||
isc_event_t *ev;
|
||||
@@ -3850,15 +3804,7 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
|
||||
client->dscp = ifp->dscp;
|
||||
|
||||
if (tcp) {
|
||||
- client->tcpattached = false;
|
||||
- if (oldclient != NULL) {
|
||||
- client->tcpattached = oldclient->tcpattached;
|
||||
- }
|
||||
-
|
||||
- LOCK(&client->interface->lock);
|
||||
- client->interface->ntcpactive++;
|
||||
- UNLOCK(&client->interface->lock);
|
||||
- client->tcpactive = true;
|
||||
+ mark_tcp_active(client, true);
|
||||
|
||||
client->attributes |= NS_CLIENTATTR_TCP;
|
||||
isc_socket_attach(ifp->tcpsocket,
|
||||
@@ -3923,16 +3869,14 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
|
||||
ns_interface_attach(ifp, &client->interface);
|
||||
client->newstate = client->state = NS_CLIENTSTATE_WORKING;
|
||||
INSIST(client->recursionquota == NULL);
|
||||
- client->tcpquota = &ns_g_server->tcpquota;
|
||||
- client->tcpattached = oldclient->tcpattached;
|
||||
|
||||
client->dscp = ifp->dscp;
|
||||
|
||||
client->attributes |= NS_CLIENTATTR_TCP;
|
||||
client->mortal = true;
|
||||
|
||||
- pipeline_attach(oldclient, client);
|
||||
- client->pipelined = true;
|
||||
+ tcpconn_attach(oldclient, client);
|
||||
+ mark_tcp_active(client, true);
|
||||
|
||||
isc_socket_attach(ifp->tcpsocket, &client->tcplistener);
|
||||
isc_socket_attach(sock, &client->tcpsocket);
|
||||
@@ -3940,11 +3884,6 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
|
||||
(void)isc_socket_getpeername(client->tcpsocket, &client->peeraddr);
|
||||
client->peeraddr_valid = true;
|
||||
|
||||
- LOCK(&client->interface->lock);
|
||||
- client->interface->ntcpactive++;
|
||||
- UNLOCK(&client->interface->lock);
|
||||
- client->tcpactive = true;
|
||||
-
|
||||
INSIST(client->tcpmsg_valid == false);
|
||||
dns_tcpmsg_init(client->mctx, client->tcpsocket, &client->tcpmsg);
|
||||
client->tcpmsg_valid = true;
|
||||
@@ -3970,8 +3909,7 @@ ns_clientmgr_createclients(ns_clientmgr_t *manager, unsigned int n,
|
||||
MTRACE("createclients");
|
||||
|
||||
for (disp = 0; disp < n; disp++) {
|
||||
- result = get_client(manager, ifp, ifp->udpdispatch[disp],
|
||||
- NULL, tcp);
|
||||
+ result = get_client(manager, ifp, ifp->udpdispatch[disp], tcp);
|
||||
if (result != ISC_R_SUCCESS)
|
||||
break;
|
||||
}
|
||||
diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
|
||||
index e2c40acd28..969ee4c08f 100644
|
||||
--- a/bin/named/include/named/client.h
|
||||
+++ b/bin/named/include/named/client.h
|
||||
@@ -78,6 +78,13 @@
|
||||
*** Types
|
||||
***/
|
||||
|
||||
+/*% reference-counted TCP connection object */
|
||||
+typedef struct ns_tcpconn {
|
||||
+ isc_refcount_t refs;
|
||||
+ isc_quota_t *tcpquota;
|
||||
+ bool pipelined;
|
||||
+} ns_tcpconn_t;
|
||||
+
|
||||
/*% nameserver client structure */
|
||||
struct ns_client {
|
||||
unsigned int magic;
|
||||
@@ -131,10 +138,7 @@ struct ns_client {
|
||||
dns_name_t signername; /*%< [T]SIG key name */
|
||||
dns_name_t *signer; /*%< NULL if not valid sig */
|
||||
bool mortal; /*%< Die after handling request */
|
||||
- bool pipelined; /*%< TCP queries not in sequence */
|
||||
- isc_refcount_t *pipeline_refs;
|
||||
- isc_quota_t *tcpquota;
|
||||
- bool tcpattached;
|
||||
+ ns_tcpconn_t *tcpconn;
|
||||
isc_quota_t *recursionquota;
|
||||
ns_interface_t *interface;
|
||||
|
||||
diff --git a/bin/named/include/named/interfacemgr.h b/bin/named/include/named/interfacemgr.h
|
||||
index 61b08826a6..3535ef22a8 100644
|
||||
--- a/bin/named/include/named/interfacemgr.h
|
||||
+++ b/bin/named/include/named/interfacemgr.h
|
||||
@@ -9,8 +9,6 @@
|
||||
* information regarding copyright ownership.
|
||||
*/
|
||||
|
||||
-/* $Id: interfacemgr.h,v 1.35 2011/07/28 23:47:58 tbox Exp $ */
|
||||
-
|
||||
#ifndef NAMED_INTERFACEMGR_H
|
||||
#define NAMED_INTERFACEMGR_H 1
|
||||
|
||||
@@ -77,11 +75,11 @@ struct ns_interface {
|
||||
/*%< UDP dispatchers. */
|
||||
isc_socket_t * tcpsocket; /*%< TCP socket. */
|
||||
isc_dscp_t dscp; /*%< "listen-on" DSCP value */
|
||||
- int ntcpaccepting; /*%< Number of clients
|
||||
+ int32_t ntcpaccepting; /*%< Number of clients
|
||||
ready to accept new
|
||||
TCP connections on this
|
||||
interface */
|
||||
- int ntcpactive; /*%< Number of clients
|
||||
+ int32_t ntcpactive; /*%< Number of clients
|
||||
servicing TCP queries
|
||||
(whether accepting or
|
||||
connected) */
|
||||
diff --git a/bin/named/interfacemgr.c b/bin/named/interfacemgr.c
|
||||
index 955096ef47..d9f6df5802 100644
|
||||
--- a/bin/named/interfacemgr.c
|
||||
+++ b/bin/named/interfacemgr.c
|
||||
@@ -388,6 +388,7 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr,
|
||||
*/
|
||||
ifp->ntcpaccepting = 0;
|
||||
ifp->ntcpactive = 0;
|
||||
+
|
||||
ifp->nudpdispatch = 0;
|
||||
|
||||
ifp->dscp = -1;
|
||||
diff --git a/lib/isc/include/isc/quota.h b/lib/isc/include/isc/quota.h
|
||||
index b9bf59877a..36c5830242 100644
|
||||
--- a/lib/isc/include/isc/quota.h
|
||||
+++ b/lib/isc/include/isc/quota.h
|
||||
@@ -100,6 +100,13 @@ isc_quota_attach(isc_quota_t *quota, isc_quota_t **p);
|
||||
* quota if successful (ISC_R_SUCCESS or ISC_R_SOFTQUOTA).
|
||||
*/
|
||||
|
||||
+isc_result_t
|
||||
+isc_quota_force(isc_quota_t *quota, isc_quota_t **p);
|
||||
+/*%<
|
||||
+ * Like isc_quota_attach, but will attach '*p' to the quota
|
||||
+ * even if the hard quota has been exceeded.
|
||||
+ */
|
||||
+
|
||||
void
|
||||
isc_quota_detach(isc_quota_t **p);
|
||||
/*%<
|
||||
diff --git a/lib/isc/quota.c b/lib/isc/quota.c
|
||||
index 3ddff0d875..556a61f21d 100644
|
||||
--- a/lib/isc/quota.c
|
||||
+++ b/lib/isc/quota.c
|
||||
@@ -74,20 +74,39 @@ isc_quota_release(isc_quota_t *quota) {
|
||||
UNLOCK("a->lock);
|
||||
}
|
||||
|
||||
-isc_result_t
|
||||
-isc_quota_attach(isc_quota_t *quota, isc_quota_t **p)
|
||||
-{
|
||||
+static isc_result_t
|
||||
+doattach(isc_quota_t *quota, isc_quota_t **p, bool force) {
|
||||
isc_result_t result;
|
||||
- INSIST(p != NULL && *p == NULL);
|
||||
+ REQUIRE(p != NULL && *p == NULL);
|
||||
+
|
||||
result = isc_quota_reserve(quota);
|
||||
- if (result == ISC_R_SUCCESS || result == ISC_R_SOFTQUOTA)
|
||||
+ if (result == ISC_R_SUCCESS || result == ISC_R_SOFTQUOTA) {
|
||||
+ *p = quota;
|
||||
+ } else if (result == ISC_R_QUOTA && force) {
|
||||
+ /* attach anyway */
|
||||
+ LOCK("a->lock);
|
||||
+ quota->used++;
|
||||
+ UNLOCK("a->lock);
|
||||
+
|
||||
*p = quota;
|
||||
+ result = ISC_R_SUCCESS;
|
||||
+ }
|
||||
+
|
||||
return (result);
|
||||
}
|
||||
|
||||
+isc_result_t
|
||||
+isc_quota_attach(isc_quota_t *quota, isc_quota_t **p) {
|
||||
+ return (doattach(quota, p, false));
|
||||
+}
|
||||
+
|
||||
+isc_result_t
|
||||
+isc_quota_force(isc_quota_t *quota, isc_quota_t **p) {
|
||||
+ return (doattach(quota, p, true));
|
||||
+}
|
||||
+
|
||||
void
|
||||
-isc_quota_detach(isc_quota_t **p)
|
||||
-{
|
||||
+isc_quota_detach(isc_quota_t **p) {
|
||||
INSIST(p != NULL && *p != NULL);
|
||||
isc_quota_release(*p);
|
||||
*p = NULL;
|
||||
diff --git a/lib/isc/win32/libisc.def.in b/lib/isc/win32/libisc.def.in
|
||||
index a82facec0f..7b9f23d776 100644
|
||||
--- a/lib/isc/win32/libisc.def.in
|
||||
+++ b/lib/isc/win32/libisc.def.in
|
||||
@@ -519,6 +519,7 @@ isc_portset_removerange
|
||||
isc_quota_attach
|
||||
isc_quota_destroy
|
||||
isc_quota_detach
|
||||
+isc_quota_force
|
||||
isc_quota_init
|
||||
isc_quota_max
|
||||
isc_quota_release
|
||||
--
|
||||
2.20.1
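
The following stand-alone sketch models the forced-attach behaviour that
isc_quota_force() introduces above. Names and types are simplified stand-ins,
not BIND's real declarations: a normal attach fails once the hard limit is
reached, while a forced attach still succeeds and deliberately overruns the
quota.

/* Hypothetical model of soft/hard/forced quota attachment (illustrative only). */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned used, soft, max; } quota_t;
typedef enum { Q_SUCCESS, Q_SOFT, Q_HARD } qresult_t;

static qresult_t quota_reserve(quota_t *q) {
	if (q->max != 0 && q->used >= q->max)
		return Q_HARD;                 /* hard quota reached */
	q->used++;
	return (q->soft != 0 && q->used > q->soft) ? Q_SOFT : Q_SUCCESS;
}

static qresult_t quota_attach(quota_t *q, quota_t **p, bool force) {
	qresult_t r = quota_reserve(q);
	if (r == Q_SUCCESS || r == Q_SOFT) {
		*p = q;
	} else if (force) {
		q->used++;                     /* attach anyway, overrunning the limit */
		*p = q;
		r = Q_SUCCESS;
	}
	return r;
}

int main(void) {
	quota_t tcpquota = { .used = 0, .soft = 0, .max = 1 };
	quota_t *a = NULL, *b = NULL;

	assert(quota_attach(&tcpquota, &a, false) == Q_SUCCESS);
	assert(quota_attach(&tcpquota, &b, false) == Q_HARD);    /* over quota */
	assert(quota_attach(&tcpquota, &b, true) == Q_SUCCESS);  /* forced through */
	printf("used=%u of max=%u\n", tcpquota.used, tcpquota.max);
	return 0;
}
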
@@ -0,0 +1,80 @@
|
||||
Backport patch to fix CVE-2018-5743.
|
||||
|
||||
Ref:
|
||||
https://security-tracker.debian.org/tracker/CVE-2018-5743
|
||||
|
||||
CVE: CVE-2018-5743
|
||||
Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/59434b9]
|
||||
|
||||
Signed-off-by: Kai Kang <kai.kang@windriver.com>
|
||||
|
||||
From 59434b987e8eb436b08c24e559ee094c4e939daa Mon Sep 17 00:00:00 2001
|
||||
From: Evan Hunt <each@isc.org>
|
||||
Date: Fri, 5 Apr 2019 16:26:19 -0700
|
||||
Subject: [PATCH 6/6] restore allowance for tcp-clients < interfaces
|
||||
|
||||
in the "refactor tcpquota and pipeline refs" commit, the counting
|
||||
of active interfaces was tightened in such a way that named could
|
||||
fail to listen on an interface if there were more interfaces than
|
||||
tcp-clients. when checking the quota to start accepting on an
|
||||
interface, if the number of active clients was above zero, then
|
||||
it was presumed that some other client was able to handle accepting
|
||||
new connections. this, however, ignored the fact that the current client
|
||||
could be included in that count, so if the quota was already exceeded
|
||||
before all the interfaces were listening, some interfaces would never
|
||||
listen.
|
||||
|
||||
we now check whether the current client has been marked active; if so,
|
||||
then the number of active clients on the interface must be greater
|
||||
than 1, not 0.
|
||||
|
||||
(cherry picked from commit 0b4e2cd4c3192ba88569dd344f542a8cc43742b5)
|
||||
(cherry picked from commit d01023aaac35543daffbdf48464e320150235d41)
|
||||
---
|
||||
bin/named/client.c | 8 +++++---
|
||||
doc/arm/Bv9ARM-book.xml | 3 ++-
|
||||
2 files changed, 7 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/bin/named/client.c b/bin/named/client.c
|
||||
index d826ab32bf..845326abc0 100644
|
||||
--- a/bin/named/client.c
|
||||
+++ b/bin/named/client.c
|
||||
@@ -3464,8 +3464,9 @@ client_accept(ns_client_t *client) {
|
||||
*
|
||||
* So, we check here to see if any other clients are
|
||||
* already servicing TCP queries on this interface (whether
|
||||
- * accepting, reading, or processing). If we find at least
|
||||
- * one, then it's okay *not* to call accept - we can let this
|
||||
+ * accepting, reading, or processing). If we find that at
|
||||
+ * least one client other than this one is active, then
|
||||
+ * it's okay *not* to call accept - we can let this
|
||||
* client go inactive and another will take over when it's
|
||||
* done.
|
||||
*
|
||||
@@ -3479,7 +3480,8 @@ client_accept(ns_client_t *client) {
|
||||
* quota is tcp-clients plus the number of listening
|
||||
* interfaces plus 1.)
|
||||
*/
|
||||
- exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) > 0);
|
||||
+ exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) >
|
||||
+ (client->tcpactive ? 1 : 0));
|
||||
if (exit) {
|
||||
client->newstate = NS_CLIENTSTATE_INACTIVE;
|
||||
(void)exit_check(client);
|
||||
diff --git a/doc/arm/Bv9ARM-book.xml b/doc/arm/Bv9ARM-book.xml
|
||||
index 381768d540..9c76d3cd6f 100644
|
||||
--- a/doc/arm/Bv9ARM-book.xml
|
||||
+++ b/doc/arm/Bv9ARM-book.xml
|
||||
@@ -8493,7 +8493,8 @@ avoid-v6-udp-ports { 40000; range 50000 60000; };
|
||||
<para>
|
||||
The number of file descriptors reserved for TCP, stdio,
|
||||
etc. This needs to be big enough to cover the number of
|
||||
- interfaces <command>named</command> listens on, <command>tcp-clients</command> as well as
|
||||
+ interfaces <command>named</command> listens on plus
|
||||
+ <command>tcp-clients</command>, as well as
|
||||
to provide room for outgoing TCP queries and incoming zone
|
||||
transfers. The default is <literal>512</literal>.
|
||||
The minimum value is <literal>128</literal> and the
|
||||
--
|
||||
2.20.1
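
A minimal stand-alone illustration of the corrected check above (the helper
name is a stand-in): when deciding whether it may stop accepting, a client
must not count itself, otherwise an interface whose only active TCP client is
the current one would wrongly go quiet.

#include <stdbool.h>
#include <stdio.h>

/* True if some client other than the caller is servicing TCP on the interface. */
static bool other_client_is_active(unsigned ntcpactive, bool self_active) {
	return ntcpactive > (self_active ? 1u : 0u);
}

int main(void) {
	/* One active client and it is us: keep listening (prints 0). */
	printf("%d\n", other_client_is_active(1, true));
	/* One active client and it is someone else: safe to go inactive (prints 1). */
	printf("%d\n", other_client_is_active(1, false));
	return 0;
}
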
@@ -0,0 +1,140 @@
|
||||
Backport commit to fix a compile error on arm caused by the commits which
fix CVE-2018-5743.
|
||||
|
||||
CVE: CVE-2018-5743
|
||||
Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/ef49780]
|
||||
|
||||
Signed-off-by: Kai Kang <kai.kang@windriver.com>
|
||||
|
||||
From ef49780d30d3ddc5735cfc32561b678a634fa72f Mon Sep 17 00:00:00 2001
|
||||
From: =?UTF-8?q?Ond=C5=99ej=20Sur=C3=BD?= <ondrej@sury.org>
|
||||
Date: Wed, 17 Apr 2019 15:22:27 +0200
|
||||
Subject: [PATCH] Replace atomic operations in bin/named/client.c with
|
||||
isc_refcount reference counting
|
||||
|
||||
---
|
||||
bin/named/client.c | 18 +++++++-----------
|
||||
bin/named/include/named/interfacemgr.h | 5 +++--
|
||||
bin/named/interfacemgr.c | 7 +++++--
|
||||
3 files changed, 15 insertions(+), 15 deletions(-)
|
||||
|
||||
diff --git a/bin/named/client.c b/bin/named/client.c
|
||||
index 845326abc0..29fecadca8 100644
|
||||
--- a/bin/named/client.c
|
||||
+++ b/bin/named/client.c
|
||||
@@ -402,12 +402,10 @@ tcpconn_detach(ns_client_t *client) {
|
||||
static void
|
||||
mark_tcp_active(ns_client_t *client, bool active) {
|
||||
if (active && !client->tcpactive) {
|
||||
- isc_atomic_xadd(&client->interface->ntcpactive, 1);
|
||||
+ isc_refcount_increment0(&client->interface->ntcpactive, NULL);
|
||||
client->tcpactive = active;
|
||||
} else if (!active && client->tcpactive) {
|
||||
- uint32_t old =
|
||||
- isc_atomic_xadd(&client->interface->ntcpactive, -1);
|
||||
- INSIST(old > 0);
|
||||
+ isc_refcount_decrement(&client->interface->ntcpactive, NULL);
|
||||
client->tcpactive = active;
|
||||
}
|
||||
}
|
||||
@@ -554,7 +552,7 @@ exit_check(ns_client_t *client) {
|
||||
if (client->mortal && TCP_CLIENT(client) &&
|
||||
client->newstate != NS_CLIENTSTATE_FREED &&
|
||||
!ns_g_clienttest &&
|
||||
- isc_atomic_xadd(&client->interface->ntcpaccepting, 0) == 0)
|
||||
+ isc_refcount_current(&client->interface->ntcpaccepting) == 0)
|
||||
{
|
||||
/* Nobody else is accepting */
|
||||
client->mortal = false;
|
||||
@@ -3328,7 +3326,6 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
|
||||
isc_result_t result;
|
||||
ns_client_t *client = event->ev_arg;
|
||||
isc_socket_newconnev_t *nevent = (isc_socket_newconnev_t *)event;
|
||||
- uint32_t old;
|
||||
|
||||
REQUIRE(event->ev_type == ISC_SOCKEVENT_NEWCONN);
|
||||
REQUIRE(NS_CLIENT_VALID(client));
|
||||
@@ -3348,8 +3345,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
|
||||
INSIST(client->naccepts == 1);
|
||||
client->naccepts--;
|
||||
|
||||
- old = isc_atomic_xadd(&client->interface->ntcpaccepting, -1);
|
||||
- INSIST(old > 0);
|
||||
+ isc_refcount_decrement(&client->interface->ntcpaccepting, NULL);
|
||||
|
||||
/*
|
||||
* We must take ownership of the new socket before the exit
|
||||
@@ -3480,8 +3476,8 @@ client_accept(ns_client_t *client) {
|
||||
* quota is tcp-clients plus the number of listening
|
||||
* interfaces plus 1.)
|
||||
*/
|
||||
- exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) >
|
||||
- (client->tcpactive ? 1 : 0));
|
||||
+ exit = (isc_refcount_current(&client->interface->ntcpactive) >
|
||||
+ (client->tcpactive ? 1U : 0U));
|
||||
if (exit) {
|
||||
client->newstate = NS_CLIENTSTATE_INACTIVE;
|
||||
(void)exit_check(client);
|
||||
@@ -3539,7 +3535,7 @@ client_accept(ns_client_t *client) {
|
||||
* listening for connections itself to prevent the interface
|
||||
* going dead.
|
||||
*/
|
||||
- isc_atomic_xadd(&client->interface->ntcpaccepting, 1);
|
||||
+ isc_refcount_increment0(&client->interface->ntcpaccepting, NULL);
|
||||
}
|
||||
|
||||
static void
|
||||
diff --git a/bin/named/include/named/interfacemgr.h b/bin/named/include/named/interfacemgr.h
|
||||
index 3535ef22a8..6e10f210fd 100644
|
||||
--- a/bin/named/include/named/interfacemgr.h
|
||||
+++ b/bin/named/include/named/interfacemgr.h
|
||||
@@ -45,6 +45,7 @@
|
||||
#include <isc/magic.h>
|
||||
#include <isc/mem.h>
|
||||
#include <isc/socket.h>
|
||||
+#include <isc/refcount.h>
|
||||
|
||||
#include <dns/result.h>
|
||||
|
||||
@@ -75,11 +76,11 @@ struct ns_interface {
|
||||
/*%< UDP dispatchers. */
|
||||
isc_socket_t * tcpsocket; /*%< TCP socket. */
|
||||
isc_dscp_t dscp; /*%< "listen-on" DSCP value */
|
||||
- int32_t ntcpaccepting; /*%< Number of clients
|
||||
+ isc_refcount_t ntcpaccepting; /*%< Number of clients
|
||||
ready to accept new
|
||||
TCP connections on this
|
||||
interface */
|
||||
- int32_t ntcpactive; /*%< Number of clients
|
||||
+ isc_refcount_t ntcpactive; /*%< Number of clients
|
||||
servicing TCP queries
|
||||
(whether accepting or
|
||||
connected) */
|
||||
diff --git a/bin/named/interfacemgr.c b/bin/named/interfacemgr.c
|
||||
index d9f6df5802..135533be6b 100644
|
||||
--- a/bin/named/interfacemgr.c
|
||||
+++ b/bin/named/interfacemgr.c
|
||||
@@ -386,8 +386,8 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr,
|
||||
* connections will be handled in parallel even though there is
|
||||
* only one client initially.
|
||||
*/
|
||||
- ifp->ntcpaccepting = 0;
|
||||
- ifp->ntcpactive = 0;
|
||||
+ isc_refcount_init(&ifp->ntcpaccepting, 0);
|
||||
+ isc_refcount_init(&ifp->ntcpactive, 0);
|
||||
|
||||
ifp->nudpdispatch = 0;
|
||||
|
||||
@@ -618,6 +618,9 @@ ns_interface_destroy(ns_interface_t *ifp) {
|
||||
|
||||
ns_interfacemgr_detach(&ifp->mgr);
|
||||
|
||||
+ isc_refcount_destroy(&ifp->ntcpactive);
|
||||
+ isc_refcount_destroy(&ifp->ntcpaccepting);
|
||||
+
|
||||
ifp->magic = 0;
|
||||
isc_mem_put(mctx, ifp, sizeof(*ifp));
|
||||
}
|
||||
--
|
||||
2.20.1
|
||||
|
||||
@@ -20,6 +20,14 @@ SRC_URI = "https://ftp.isc.org/isc/bind9/${PV}/${BPN}-${PV}.tar.gz \
|
||||
file://0001-configure.in-remove-useless-L-use_openssl-lib.patch \
|
||||
file://0001-named-lwresd-V-and-start-log-hide-build-options.patch \
|
||||
file://0001-avoid-start-failure-with-bind-user.patch \
|
||||
file://0001-bind-fix-CVE-2019-6471.patch \
|
||||
file://0001-fix-enforcement-of-tcp-clients-v1.patch \
|
||||
file://0002-tcp-clients-could-still-be-exceeded-v2.patch \
|
||||
file://0003-use-reference-counter-for-pipeline-groups-v3.patch \
|
||||
file://0004-better-tcpquota-accounting-and-client-mortality-chec.patch \
|
||||
file://0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch \
|
||||
file://0006-restore-allowance-for-tcp-clients-interfaces.patch \
|
||||
file://0007-Replace-atomic-operations-in-bin-named-client.c-with.patch \
|
||||
"
|
||||
|
||||
SRC_URI[md5sum] = "8ddab4b61fa4516fe404679c74e37960"
|
||||
|
||||
@@ -59,7 +59,7 @@ INITSCRIPT_NAME = "connman"
|
||||
INITSCRIPT_PARAMS = "start 05 5 2 3 . stop 22 0 1 6 ."
|
||||
|
||||
python __anonymous () {
|
||||
systemd_packages = "${PN}"
|
||||
systemd_packages = "${PN} ${PN}-wait-online"
|
||||
pkgconfig = d.getVar('PACKAGECONFIG')
|
||||
if ('openvpn' or 'vpnc' or 'l2tp' or 'pptp') in pkgconfig.split():
|
||||
systemd_packages += " ${PN}-vpn"
|
||||
|
||||
@@ -0,0 +1,40 @@
|
||||
From 2014fad3d28090b59d2f8a0971166c06e5fa6da6 Mon Sep 17 00:00:00 2001
|
||||
From: Hongxu Jia <hongxu.jia@windriver.com>
|
||||
Date: Fri, 18 Oct 2019 14:56:58 +0800
|
||||
Subject: [PATCH] upstream: fix integer overflow in XMSS private key parsing.
|
||||
|
||||
Reported by Adam Zabrocki via SecuriTeam's SSH program.
|
||||
|
||||
Note that this code is experimental and not compiled by default.
|
||||
|
||||
ok markus@
|
||||
|
||||
OpenBSD-Commit-ID: cd0361896d15e8a1bac495ac583ff065ffca2be1
|
||||
|
||||
Signed-off-by: "djm@openbsd.org" <djm@openbsd.org>
|
||||
|
||||
Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/a546b17bbaeb12beac4c9aeed56f74a42b18a93a]
|
||||
CVE: CVE-2019-16905
|
||||
|
||||
Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
|
||||
---
|
||||
sshkey-xmss.c | 3 ++-
|
||||
1 file changed, 2 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/sshkey-xmss.c b/sshkey-xmss.c
|
||||
index aaae702..c57681a 100644
|
||||
--- a/sshkey-xmss.c
|
||||
+++ b/sshkey-xmss.c
|
||||
@@ -977,7 +977,8 @@ sshkey_xmss_decrypt_state(const struct sshkey *k, struct sshbuf *encoded,
|
||||
goto out;
|
||||
}
|
||||
/* check that an appropriate amount of auth data is present */
|
||||
- if (sshbuf_len(encoded) < encrypted_len + authlen) {
|
||||
+ if (sshbuf_len(encoded) < authlen ||
|
||||
+ sshbuf_len(encoded) - authlen < encrypted_len) {
|
||||
r = SSH_ERR_INVALID_FORMAT;
|
||||
goto out;
|
||||
}
|
||||
--
|
||||
2.7.4
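
The sketch below uses plain size_t values standing in for sshbuf lengths
(illustrative only) to show why the rewritten comparison is safe: the naive
form "buf_len < encrypted_len + authlen" can wrap around and pass, whereas
checking authlen first and then subtracting cannot.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Overflow-safe version of: buf_len >= encrypted_len + authlen */
static bool auth_data_fits(size_t buf_len, size_t encrypted_len, size_t authlen) {
	return !(buf_len < authlen || buf_len - authlen < encrypted_len);
}

int main(void) {
	size_t authlen = 16;
	size_t huge = (size_t)-1 - 10;   /* huge + authlen wraps to a small value */

	printf("fits=%d\n", auth_data_fits(100, huge, authlen)); /* 0: rejected */
	printf("fits=%d\n", auth_data_fits(100, 80, authlen));   /* 1: accepted */
	return 0;
}
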
@@ -24,6 +24,7 @@ SRC_URI = "http://ftp.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-${PV}.tar
|
||||
file://fix-potential-signed-overflow-in-pointer-arithmatic.patch \
|
||||
file://sshd_check_keys \
|
||||
file://add-test-support-for-busybox.patch \
|
||||
file://0001-upstream-fix-integer-overflow-in-XMSS-private-key-pa.patch \
|
||||
"
|
||||
SRC_URI[md5sum] = "bf050f002fe510e1daecd39044e1122d"
|
||||
SRC_URI[sha256sum] = "bd943879e69498e8031eb6b7f44d08cdc37d59a7ab689aa0b437320c3481fd68"
|
||||
|
||||
@@ -148,7 +148,7 @@ do_install_append_class-native () {
|
||||
OPENSSL_CONF=${libdir}/ssl-1.1/openssl.cnf \
|
||||
SSL_CERT_DIR=${libdir}/ssl-1.1/certs \
|
||||
SSL_CERT_FILE=${libdir}/ssl-1.1/cert.pem \
|
||||
OPENSSL_ENGINES=${libdir}/ssl-1.1/engines
|
||||
OPENSSL_ENGINES=${libdir}/engines-1.1
|
||||
}
|
||||
|
||||
do_install_append_class-nativesdk () {
|
||||
|
||||
@@ -0,0 +1,82 @@
|
||||
hostapd before 2.10 and wpa_supplicant before 2.10 allow an incorrect indication
|
||||
of disconnection in certain situations because source address validation is
|
||||
mishandled. This is a denial of service that should have been prevented by PMF
|
||||
(aka management frame protection). The attacker must send a crafted 802.11 frame
|
||||
from a location that is within the 802.11 communications range.
|
||||
|
||||
CVE: CVE-2019-16275
|
||||
Upstream-Status: Backport
|
||||
Signed-off-by: Ross Burton <ross.burton@intel.com>
|
||||
|
||||
From 8c07fa9eda13e835f3f968b2e1c9a8be3a851ff9 Mon Sep 17 00:00:00 2001
|
||||
From: Jouni Malinen <j@w1.fi>
|
||||
Date: Thu, 29 Aug 2019 11:52:04 +0300
|
||||
Subject: [PATCH] AP: Silently ignore management frame from unexpected source
|
||||
address
|
||||
|
||||
Do not process any received Management frames with unexpected/invalid SA
|
||||
so that we do not add any state for unexpected STA addresses or end up
|
||||
sending out frames to unexpected destination. This prevents unexpected
|
||||
sequences where an unprotected frame might end up causing the AP to send
|
||||
out a response to another device and that other device processing the
|
||||
unexpected response.
|
||||
|
||||
In particular, this prevents some potential denial of service cases
|
||||
where the unexpected response frame from the AP might result in a
|
||||
connected station dropping its association.
|
||||
|
||||
Signed-off-by: Jouni Malinen <j@w1.fi>
|
||||
---
|
||||
src/ap/drv_callbacks.c | 13 +++++++++++++
|
||||
src/ap/ieee802_11.c | 12 ++++++++++++
|
||||
2 files changed, 25 insertions(+)
|
||||
|
||||
diff --git a/src/ap/drv_callbacks.c b/src/ap/drv_callbacks.c
|
||||
index 31587685fe3b..34ca379edc3d 100644
|
||||
--- a/src/ap/drv_callbacks.c
|
||||
+++ b/src/ap/drv_callbacks.c
|
||||
@@ -131,6 +131,19 @@ int hostapd_notif_assoc(struct hostapd_data *hapd, const u8 *addr,
|
||||
"hostapd_notif_assoc: Skip event with no address");
|
||||
return -1;
|
||||
}
|
||||
+
|
||||
+ if (is_multicast_ether_addr(addr) ||
|
||||
+ is_zero_ether_addr(addr) ||
|
||||
+ os_memcmp(addr, hapd->own_addr, ETH_ALEN) == 0) {
|
||||
+ /* Do not process any frames with unexpected/invalid SA so that
|
||||
+ * we do not add any state for unexpected STA addresses or end
|
||||
+ * up sending out frames to unexpected destination. */
|
||||
+ wpa_printf(MSG_DEBUG, "%s: Invalid SA=" MACSTR
|
||||
+ " in received indication - ignore this indication silently",
|
||||
+ __func__, MAC2STR(addr));
|
||||
+ return 0;
|
||||
+ }
|
||||
+
|
||||
random_add_randomness(addr, ETH_ALEN);
|
||||
|
||||
hostapd_logger(hapd, addr, HOSTAPD_MODULE_IEEE80211,
|
||||
diff --git a/src/ap/ieee802_11.c b/src/ap/ieee802_11.c
|
||||
index c85a28db44b7..e7065372e158 100644
|
||||
--- a/src/ap/ieee802_11.c
|
||||
+++ b/src/ap/ieee802_11.c
|
||||
@@ -4626,6 +4626,18 @@ int ieee802_11_mgmt(struct hostapd_data *hapd, const u8 *buf, size_t len,
|
||||
fc = le_to_host16(mgmt->frame_control);
|
||||
stype = WLAN_FC_GET_STYPE(fc);
|
||||
|
||||
+ if (is_multicast_ether_addr(mgmt->sa) ||
|
||||
+ is_zero_ether_addr(mgmt->sa) ||
|
||||
+ os_memcmp(mgmt->sa, hapd->own_addr, ETH_ALEN) == 0) {
|
||||
+ /* Do not process any frames with unexpected/invalid SA so that
|
||||
+ * we do not add any state for unexpected STA addresses or end
|
||||
+ * up sending out frames to unexpected destination. */
|
||||
+ wpa_printf(MSG_DEBUG, "MGMT: Invalid SA=" MACSTR
|
||||
+ " in received frame - ignore this frame silently",
|
||||
+ MAC2STR(mgmt->sa));
|
||||
+ return 0;
|
||||
+ }
|
||||
+
|
||||
if (stype == WLAN_FC_STYPE_BEACON) {
|
||||
handle_beacon(hapd, mgmt, len, fi);
|
||||
return 1;
|
||||
--
|
||||
2.20.1
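
A small stand-alone sketch of the source-address sanity check added above
(simplified MAC handling, not hostapd's is_multicast_ether_addr() helpers):
frames whose SA is multicast, all-zero, or the AP's own address are dropped
before any state is created for them.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static bool sa_is_unexpected(const unsigned char *sa, const unsigned char *own) {
	static const unsigned char zero[ETH_ALEN] = { 0 };
	return (sa[0] & 0x01) != 0 ||              /* multicast/broadcast bit set */
	       memcmp(sa, zero, ETH_ALEN) == 0 ||  /* all-zero source address */
	       memcmp(sa, own, ETH_ALEN) == 0;     /* claims to be the AP itself */
}

int main(void) {
	unsigned char own[ETH_ALEN]   = { 0x02, 0, 0, 0, 0, 0x01 };
	unsigned char sta[ETH_ALEN]   = { 0x02, 0, 0, 0, 0, 0x02 };
	unsigned char bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("%d %d %d\n",
	       sa_is_unexpected(sta, own),    /* 0: process normally */
	       sa_is_unexpected(bcast, own),  /* 1: ignore silently */
	       sa_is_unexpected(own, own));   /* 1: ignore silently */
	return 0;
}
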
@@ -25,6 +25,7 @@ SRC_URI = "http://w1.fi/releases/wpa_supplicant-${PV}.tar.gz \
|
||||
file://wpa_supplicant.conf-sane \
|
||||
file://99_wpa_supplicant \
|
||||
file://0001-replace-systemd-install-Alias-with-WantedBy.patch \
|
||||
file://0001-AP-Silently-ignore-management-frame-from-unexpected-.patch \
|
||||
"
|
||||
SRC_URI[md5sum] = "2d2958c782576dc9901092fbfecb4190"
|
||||
SRC_URI[sha256sum] = "fcbdee7b4a64bea8177973299c8c824419c413ec2e3a95db63dd6a5dc3541f17"
|
||||
|
||||
@@ -21,8 +21,8 @@
|
||||
|
||||
DAEMON=@bindir@/dbus-daemon
|
||||
NAME=dbus
|
||||
DAEMONUSER=messagebus # must match /etc/dbus-1/system.conf
|
||||
PIDFILE=/var/run/messagebus.pid # must match /etc/dbus-1/system.conf
|
||||
DAEMONUSER=messagebus # must match /usr/share/dbus-1/system.conf
|
||||
PIDFILE=/var/run/dbus/pid # must match /usr/share/dbus-1/system.conf
|
||||
UUIDDIR=/var/lib/dbus
|
||||
DESC="system message bus"
|
||||
EVENTDIR=/etc/dbus-1/event.d
|
||||
|
||||
@@ -24,8 +24,8 @@ IMAGE_FSTYPES = "wic.vmdk"
|
||||
|
||||
inherit core-image module-base setuptools3
|
||||
|
||||
SRCREV ?= "8181681b33da272fef83276104d5c7a93f84da46"
|
||||
SRC_URI = "git://git.yoctoproject.org/poky \
|
||||
SRCREV ?= "cf0cefd53c5d4f72e26c74571a10e098996a1ff2"
|
||||
SRC_URI = "git://git.yoctoproject.org/poky;branch=zeus \
|
||||
file://Yocto_Build_Appliance.vmx \
|
||||
file://Yocto_Build_Appliance.vmxf \
|
||||
file://README_VirtualBox_Guest_Additions.txt \
|
||||
|
||||
@@ -26,6 +26,10 @@ if [ -e /sys/kernel/config ] && grep -q configfs /proc/filesystems; then
|
||||
mount -t configfs configfs /sys/kernel/config
|
||||
fi
|
||||
|
||||
if [ -e /sys/firmware/efi/efivars ] && grep -q efivarfs /proc/filesystems; then
|
||||
mount -t efivarfs efivarfs /sys/firmware/efi/efivars
|
||||
fi
|
||||
|
||||
if ! [ -e /dev/zero ] && [ -e /dev ] && grep -q devtmpfs /proc/filesystems; then
|
||||
mount -n -t devtmpfs devtmpfs /dev
|
||||
fi
|
||||
|
||||
@@ -21,26 +21,26 @@ python do_populate_cve_db() {
|
||||
"""
|
||||
Update NVD database with json data feed
|
||||
"""
|
||||
|
||||
import bb.utils
|
||||
import sqlite3, urllib, urllib.parse, shutil, gzip
|
||||
from datetime import date
|
||||
|
||||
bb.utils.export_proxies(d)
|
||||
|
||||
BASE_URL = "https://nvd.nist.gov/feeds/json/cve/1.0/nvdcve-1.0-"
|
||||
YEAR_START = 2002
|
||||
|
||||
db_dir = os.path.join(d.getVar("DL_DIR"), 'CVE_CHECK')
|
||||
db_file = os.path.join(db_dir, 'nvdcve_1.0.db')
|
||||
db_file = d.getVar("CVE_CHECK_DB_FILE")
|
||||
db_dir = os.path.dirname(db_file)
|
||||
json_tmpfile = os.path.join(db_dir, 'nvd.json.gz')
|
||||
proxy = d.getVar("https_proxy")
|
||||
|
||||
if proxy:
|
||||
# instantiate an opener but do not install it as the global
|
||||
# opener unless if we're really sure it's applicable for all
|
||||
# urllib requests
|
||||
proxy_handler = urllib.request.ProxyHandler({'https': proxy})
|
||||
proxy_opener = urllib.request.build_opener(proxy_handler)
|
||||
else:
|
||||
proxy_opener = None
|
||||
# Don't refresh the database more than once an hour
|
||||
try:
|
||||
import time
|
||||
if time.time() - os.path.getmtime(db_file) < (60*60):
|
||||
return
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
cve_f = open(os.path.join(d.getVar("TMPDIR"), 'cve_check'), 'a')
|
||||
|
||||
@@ -59,15 +59,7 @@ python do_populate_cve_db() {
|
||||
json_url = year_url + ".json.gz"
|
||||
|
||||
# Retrieve meta last modified date
|
||||
|
||||
response = None
|
||||
|
||||
if proxy_opener:
|
||||
response = proxy_opener.open(meta_url)
|
||||
else:
|
||||
req = urllib.request.Request(meta_url)
|
||||
response = urllib.request.urlopen(req)
|
||||
|
||||
response = urllib.request.urlopen(meta_url)
|
||||
if response:
|
||||
for l in response.read().decode("utf-8").splitlines():
|
||||
key, value = l.split(":", 1)
|
||||
@@ -87,12 +79,7 @@ python do_populate_cve_db() {
|
||||
|
||||
# Update db with current year json file
|
||||
try:
|
||||
if proxy_opener:
|
||||
response = proxy_opener.open(json_url)
|
||||
else:
|
||||
req = urllib.request.Request(json_url)
|
||||
response = urllib.request.urlopen(req)
|
||||
|
||||
response = urllib.request.urlopen(json_url)
|
||||
if response:
|
||||
update_db(c, gzip.decompress(response.read()).decode('utf-8'))
|
||||
c.execute("insert or replace into META values (?, ?)", [year, last_modified])
|
||||
@@ -112,11 +99,14 @@ python do_populate_cve_db() {
|
||||
|
||||
def initialize_db(c):
|
||||
c.execute("CREATE TABLE IF NOT EXISTS META (YEAR INTEGER UNIQUE, DATE TEXT)")
|
||||
|
||||
c.execute("CREATE TABLE IF NOT EXISTS NVD (ID TEXT UNIQUE, SUMMARY TEXT, \
|
||||
SCOREV2 TEXT, SCOREV3 TEXT, MODIFIED INTEGER, VECTOR TEXT)")
|
||||
|
||||
c.execute("CREATE TABLE IF NOT EXISTS PRODUCTS (ID TEXT, \
|
||||
VENDOR TEXT, PRODUCT TEXT, VERSION_START TEXT, OPERATOR_START TEXT, \
|
||||
VERSION_END TEXT, OPERATOR_END TEXT)")
|
||||
c.execute("CREATE INDEX IF NOT EXISTS PRODUCT_ID_IDX on PRODUCTS(ID);")
|
||||
|
||||
def parse_node_and_insert(c, node, cveId):
|
||||
# Parse children node if needed
|
||||
|
||||
@@ -0,0 +1,169 @@
|
||||
From 064b77f173337aa790f1cec0d741bfbc61a33d31 Mon Sep 17 00:00:00 2001
|
||||
From: Trevor Gamblin <trevor.gamblin@windriver.com>
|
||||
Date: Fri, 18 Oct 2019 09:57:43 -0400
|
||||
Subject: [PATCH] ncurses: selective backport of 20191012 patch
|
||||
|
||||
Upstream-Status: Backport [https://salsa.debian.org/debian/ncurses/commit/243908b1e3d81]
|
||||
|
||||
Contents of the upstream patch that are not applied to comp_hash.c,
|
||||
parse_entry.c, or dump_entry.c have been omitted.
|
||||
|
||||
CVE: CVE-2019-17594
|
||||
CVE: CVE-2019-17595
|
||||
|
||||
Signed-off-by: Trevor Gamblin <trevor.gamblin@windriver.com>
|
||||
|
||||
---
|
||||
ncurses/tinfo/comp_hash.c | 14 ++++++++++----
|
||||
ncurses/tinfo/parse_entry.c | 32 ++++++++++++++++----------------
|
||||
progs/dump_entry.c | 7 ++++---
|
||||
3 files changed, 30 insertions(+), 23 deletions(-)
|
||||
|
||||
diff --git a/ncurses/tinfo/comp_hash.c b/ncurses/tinfo/comp_hash.c
|
||||
index 21f165ca..a62d38f9 100644
|
||||
--- a/ncurses/tinfo/comp_hash.c
|
||||
+++ b/ncurses/tinfo/comp_hash.c
|
||||
@@ -44,7 +44,7 @@
|
||||
#include <tic.h>
|
||||
#include <hashsize.h>
|
||||
|
||||
-MODULE_ID("$Id: comp_hash.c,v 1.49 2019/03/10 00:06:48 tom Exp $")
|
||||
+MODULE_ID("$Id: comp_hash.c,v 1.51 2019/10/12 16:32:13 tom Exp $")
|
||||
|
||||
/*
|
||||
* Finds the entry for the given string in the hash table if present.
|
||||
@@ -63,7 +63,9 @@ _nc_find_entry(const char *string,
|
||||
|
||||
hashvalue = data->hash_of(string);
|
||||
|
||||
- if (data->table_data[hashvalue] >= 0) {
|
||||
+ if (hashvalue >= 0
|
||||
+ && (unsigned) hashvalue < data->table_size
|
||||
+ && data->table_data[hashvalue] >= 0) {
|
||||
|
||||
real_table = _nc_get_table(termcap);
|
||||
ptr = real_table + data->table_data[hashvalue];
|
||||
@@ -96,7 +98,9 @@ _nc_find_type_entry(const char *string,
|
||||
const HashData *data = _nc_get_hash_info(termcap);
|
||||
int hashvalue = data->hash_of(string);
|
||||
|
||||
- if (data->table_data[hashvalue] >= 0) {
|
||||
+ if (hashvalue >= 0
|
||||
+ && (unsigned) hashvalue < data->table_size
|
||||
+ && data->table_data[hashvalue] >= 0) {
|
||||
const struct name_table_entry *const table = _nc_get_table(termcap);
|
||||
|
||||
ptr = table + data->table_data[hashvalue];
|
||||
@@ -124,7 +128,9 @@ _nc_find_user_entry(const char *string)
|
||||
|
||||
hashvalue = data->hash_of(string);
|
||||
|
||||
- if (data->table_data[hashvalue] >= 0) {
|
||||
+ if (hashvalue >= 0
|
||||
+ && (unsigned) hashvalue < data->table_size
|
||||
+ && data->table_data[hashvalue] >= 0) {
|
||||
|
||||
real_table = _nc_get_userdefs_table();
|
||||
ptr = real_table + data->table_data[hashvalue];
|
||||
diff --git a/ncurses/tinfo/parse_entry.c b/ncurses/tinfo/parse_entry.c
|
||||
index f8cca8b5..064376c5 100644
|
||||
--- a/ncurses/tinfo/parse_entry.c
|
||||
+++ b/ncurses/tinfo/parse_entry.c
|
||||
@@ -47,7 +47,7 @@
|
||||
#include <ctype.h>
|
||||
#include <tic.h>
|
||||
|
||||
-MODULE_ID("$Id: parse_entry.c,v 1.97 2019/08/03 23:10:38 tom Exp $")
|
||||
+MODULE_ID("$Id: parse_entry.c,v 1.98 2019/10/12 00:50:31 tom Exp $")
|
||||
|
||||
#ifdef LINT
|
||||
static short const parametrized[] =
|
||||
@@ -654,12 +654,12 @@ _nc_capcmp(const char *s, const char *t)
|
||||
}
|
||||
|
||||
static void
|
||||
-append_acs0(string_desc * dst, int code, int src)
|
||||
+append_acs0(string_desc * dst, int code, char *src, size_t off)
|
||||
{
|
||||
- if (src != 0) {
|
||||
+ if (src != 0 && off < strlen(src)) {
|
||||
char temp[3];
|
||||
temp[0] = (char) code;
|
||||
- temp[1] = (char) src;
|
||||
+ temp[1] = src[off];
|
||||
temp[2] = 0;
|
||||
_nc_safe_strcat(dst, temp);
|
||||
}
|
||||
@@ -669,7 +669,7 @@ static void
|
||||
append_acs(string_desc * dst, int code, char *src)
|
||||
{
|
||||
if (VALID_STRING(src) && strlen(src) == 1) {
|
||||
- append_acs0(dst, code, *src);
|
||||
+ append_acs0(dst, code, src, 0);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1038,17 +1038,17 @@ postprocess_terminfo(TERMTYPE2 *tp)
|
||||
_nc_str_init(&result, buf2, sizeof(buf2));
|
||||
_nc_safe_strcat(&result, acs_chars);
|
||||
|
||||
- append_acs0(&result, 'l', box_chars_1[0]); /* ACS_ULCORNER */
|
||||
- append_acs0(&result, 'q', box_chars_1[1]); /* ACS_HLINE */
|
||||
- append_acs0(&result, 'k', box_chars_1[2]); /* ACS_URCORNER */
|
||||
- append_acs0(&result, 'x', box_chars_1[3]); /* ACS_VLINE */
|
||||
- append_acs0(&result, 'j', box_chars_1[4]); /* ACS_LRCORNER */
|
||||
- append_acs0(&result, 'm', box_chars_1[5]); /* ACS_LLCORNER */
|
||||
- append_acs0(&result, 'w', box_chars_1[6]); /* ACS_TTEE */
|
||||
- append_acs0(&result, 'u', box_chars_1[7]); /* ACS_RTEE */
|
||||
- append_acs0(&result, 'v', box_chars_1[8]); /* ACS_BTEE */
|
||||
- append_acs0(&result, 't', box_chars_1[9]); /* ACS_LTEE */
|
||||
- append_acs0(&result, 'n', box_chars_1[10]); /* ACS_PLUS */
|
||||
+ append_acs0(&result, 'l', box_chars_1, 0); /* ACS_ULCORNER */
|
||||
+ append_acs0(&result, 'q', box_chars_1, 1); /* ACS_HLINE */
|
||||
+ append_acs0(&result, 'k', box_chars_1, 2); /* ACS_URCORNER */
|
||||
+ append_acs0(&result, 'x', box_chars_1, 3); /* ACS_VLINE */
|
||||
+ append_acs0(&result, 'j', box_chars_1, 4); /* ACS_LRCORNER */
|
||||
+ append_acs0(&result, 'm', box_chars_1, 5); /* ACS_LLCORNER */
|
||||
+ append_acs0(&result, 'w', box_chars_1, 6); /* ACS_TTEE */
|
||||
+ append_acs0(&result, 'u', box_chars_1, 7); /* ACS_RTEE */
|
||||
+ append_acs0(&result, 'v', box_chars_1, 8); /* ACS_BTEE */
|
||||
+ append_acs0(&result, 't', box_chars_1, 9); /* ACS_LTEE */
|
||||
+ append_acs0(&result, 'n', box_chars_1, 10); /* ACS_PLUS */
|
||||
|
||||
if (buf2[0]) {
|
||||
acs_chars = _nc_save_str(buf2);
|
||||
diff --git a/progs/dump_entry.c b/progs/dump_entry.c
|
||||
index d0e420ec..8a47084a 100644
|
||||
--- a/progs/dump_entry.c
|
||||
+++ b/progs/dump_entry.c
|
||||
@@ -39,7 +39,7 @@
|
||||
#include "termsort.c" /* this C file is generated */
|
||||
#include <parametrized.h> /* so is this */
|
||||
|
||||
-MODULE_ID("$Id: dump_entry.c,v 1.173 2019/05/11 21:02:24 tom Exp $")
|
||||
+MODULE_ID("$Id: dump_entry.c,v 1.175 2019/10/12 15:59:07 tom Exp $")
|
||||
|
||||
#define DISCARD(string) string = ABSENT_STRING
|
||||
#define PRINTF (void) printf
|
||||
@@ -1136,7 +1136,8 @@ fmt_entry(TERMTYPE2 *tterm,
|
||||
*d++ = '\\';
|
||||
*d = ':';
|
||||
} else if (*d == '\\') {
|
||||
- *++d = *s++;
|
||||
+ if ((*++d = *s++) == '\0')
|
||||
+ break;
|
||||
}
|
||||
d++;
|
||||
*d = '\0';
|
||||
@@ -1396,7 +1397,7 @@ one_one_mapping(const char *mapping)
|
||||
|
||||
if (VALID_STRING(mapping)) {
|
||||
int n = 0;
|
||||
- while (mapping[n] != '\0') {
|
||||
+ while (mapping[n] != '\0' && mapping[n + 1] != '\0') {
|
||||
if (isLine(mapping[n]) &&
|
||||
mapping[n] != mapping[n + 1]) {
|
||||
result = FALSE;
|
||||
--
|
||||
2.17.1
|
||||
|
||||
@@ -3,6 +3,7 @@ require ncurses.inc
|
||||
SRC_URI += "file://0001-tic-hang.patch \
|
||||
file://0002-configure-reproducible.patch \
|
||||
file://config.cache \
|
||||
file://0001-ncurses-selective-backport-of-20191012-patch.patch \
|
||||
"
|
||||
# commit id corresponds to the revision in package version
|
||||
SRCREV = "3c9b2677c96c645496997321bf2fe465a5e7e21f"
|
||||
|
||||
@@ -0,0 +1,36 @@
|
||||
From e3bb9bfb76c17b1d05814436ced62c05c4011f48 Mon Sep 17 00:00:00 2001
|
||||
From: Karel Zak <kzak@redhat.com>
|
||||
Date: Thu, 27 Jun 2019 09:22:18 +0200
|
||||
Subject: [PATCH] lsblk: force to print PKNAME for partition
|
||||
|
||||
PKNAME (parent kernel device name) is based on printed tree according
|
||||
to parent -> child relationship. The tree is optional and not printed
|
||||
if partition specified (.e.g "lsblk -o+PKNAME /dev/sda1"), but old
|
||||
versions print the PKNAME also in this case.
|
||||
|
||||
Upstream-Status: Backport [https://github.com/karelzak/util-linux/commit/e3bb9bfb76c17b1d05814436ced62c05c4011f48]
|
||||
|
||||
Addresses: https://github.com/karelzak/util-linux/issues/813
|
||||
Signed-off-by: Karel Zak <kzak@redhat.com>
|
||||
Signed-off-by: Liwei Song <liwei.song@windriver.com>
|
||||
---
|
||||
misc-utils/lsblk.c | 3 +++
|
||||
1 file changed, 3 insertions(+)
|
||||
|
||||
diff --git a/misc-utils/lsblk.c b/misc-utils/lsblk.c
|
||||
index e95af7af0256..3ce6da730264 100644
|
||||
--- a/misc-utils/lsblk.c
|
||||
+++ b/misc-utils/lsblk.c
|
||||
@@ -1019,6 +1019,9 @@ static void device_to_scols(
|
||||
DBG(DEV, ul_debugobj(dev, "add '%s' to scols", dev->name));
|
||||
ON_DBG(DEV, if (ul_path_isopen_dirfd(dev->sysfs)) ul_debugobj(dev, " %s ---> is open!", dev->name));
|
||||
|
||||
+ if (!parent && dev->wholedisk)
|
||||
+ parent = dev->wholedisk;
|
||||
+
|
||||
/* Do not print device more than one in --list mode */
|
||||
if (!(lsblk->flags & LSBLK_TREE) && dev->is_printed)
|
||||
return;
|
||||
--
|
||||
2.17.1
|
||||
|
||||
@@ -7,6 +7,7 @@ SRC_URI += "file://configure-sbindir.patch \
|
||||
file://run-ptest \
|
||||
file://display_testname_for_subtest.patch \
|
||||
file://avoid_parallel_tests.patch \
|
||||
file://0001-lsblk-force-to-print-PKNAME-for-partition.patch \
|
||||
"
|
||||
SRC_URI[md5sum] = "a78cbeaed9c39094b96a48ba8f891d50"
|
||||
SRC_URI[sha256sum] = "743f9d0c7252b6db246b659c1e1ce0bd45d8d4508b4dfa427bbb4a3e9b9f62b5"
|
||||
|
||||
@@ -49,6 +49,8 @@ SRC_URI = "\
|
||||
file://CVE-2019-12972.patch \
|
||||
file://CVE-2019-14250.patch \
|
||||
file://CVE-2019-14444.patch \
|
||||
file://CVE-2019-17450.patch \
|
||||
file://CVE-2019-17451.patch \
|
||||
"
|
||||
S = "${WORKDIR}/git"
|
||||
|
||||
|
||||
99
meta/recipes-devtools/binutils/binutils/CVE-2019-17450.patch
Normal file
@@ -0,0 +1,99 @@
|
||||
From 09dd135df9ebc7a4b640537e23e26a03a288a789 Mon Sep 17 00:00:00 2001
|
||||
From: Alan Modra <amodra@gmail.com>
|
||||
Date: Wed, 9 Oct 2019 00:07:29 +1030
|
||||
Subject: [PATCH] PR25078, stack overflow in function find_abstract_instance
|
||||
|
||||
Selectively backporting fix for bfd/dwarf2.c, but not the ChangeLog
|
||||
file. There are newer versions of binutils, but none of them contain the
|
||||
commit fixing CVE-2019-17450, so backport it to master and zeus.
|
||||
|
||||
Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=063c511bd79]
|
||||
CVE: CVE-2019-17450
|
||||
Signed-off-by: Trevor Gamblin <trevor.gamblin@windriver.com>
|
||||
|
||||
PR 25078
|
||||
* dwarf2.c (find_abstract_instance): Delete orig_info_ptr, add
|
||||
recur_count. Error on recur_count reaching 100 rather than
|
||||
info_ptr matching orig_info_ptr. Adjust calls.
|
||||
|
||||
---
|
||||
bfd/dwarf2.c | 35 +++++++++++++++++------------------
|
||||
1 file changed, 17 insertions(+), 18 deletions(-)
|
||||
|
||||
diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
|
||||
index 0b4e485582..20ec9e2e56 100644
|
||||
--- a/bfd/dwarf2.c
|
||||
+++ b/bfd/dwarf2.c
|
||||
@@ -2803,13 +2803,13 @@ lookup_symbol_in_variable_table (struct comp_unit *unit,
|
||||
}
|
||||
|
||||
static bfd_boolean
|
||||
-find_abstract_instance (struct comp_unit * unit,
|
||||
- bfd_byte * orig_info_ptr,
|
||||
- struct attribute * attr_ptr,
|
||||
- const char ** pname,
|
||||
- bfd_boolean * is_linkage,
|
||||
- char ** filename_ptr,
|
||||
- int * linenumber_ptr)
|
||||
+find_abstract_instance (struct comp_unit *unit,
|
||||
+ struct attribute *attr_ptr,
|
||||
+ unsigned int recur_count,
|
||||
+ const char **pname,
|
||||
+ bfd_boolean *is_linkage,
|
||||
+ char **filename_ptr,
|
||||
+ int *linenumber_ptr)
|
||||
{
|
||||
bfd *abfd = unit->abfd;
|
||||
bfd_byte *info_ptr;
|
||||
@@ -2820,6 +2820,14 @@ find_abstract_instance (struct comp_unit * unit,
|
||||
struct attribute attr;
|
||||
const char *name = NULL;
+ if (recur_count == 100)
|
||||
+ {
|
||||
+ _bfd_error_handler
|
||||
+ (_("DWARF error: abstract instance recursion detected"));
|
||||
+ bfd_set_error (bfd_error_bad_value);
|
||||
+ return FALSE;
|
||||
+ }
|
||||
+
|
||||
/* DW_FORM_ref_addr can reference an entry in a different CU. It
|
||||
is an offset from the .debug_info section, not the current CU. */
|
||||
if (attr_ptr->form == DW_FORM_ref_addr)
|
||||
@@ -2939,15 +2947,6 @@ find_abstract_instance (struct comp_unit * unit,
|
||||
info_ptr, info_ptr_end);
|
||||
if (info_ptr == NULL)
|
||||
break;
|
||||
- /* It doesn't ever make sense for DW_AT_specification to
|
||||
- refer to the same DIE. Stop simple recursion. */
|
||||
- if (info_ptr == orig_info_ptr)
|
||||
- {
|
||||
- _bfd_error_handler
|
||||
- (_("DWARF error: abstract instance recursion detected"));
|
||||
- bfd_set_error (bfd_error_bad_value);
|
||||
- return FALSE;
|
||||
- }
|
||||
switch (attr.name)
|
||||
{
|
||||
case DW_AT_name:
|
||||
@@ -2961,7 +2960,7 @@ find_abstract_instance (struct comp_unit * unit,
|
||||
}
|
||||
break;
|
||||
case DW_AT_specification:
|
||||
- if (!find_abstract_instance (unit, info_ptr, &attr,
|
||||
+ if (!find_abstract_instance (unit, &attr, recur_count + 1,
|
||||
&name, is_linkage,
|
||||
filename_ptr, linenumber_ptr))
|
||||
return FALSE;
|
||||
@@ -3175,7 +3174,7 @@ scan_unit_for_symbols (struct comp_unit *unit)
case DW_AT_abstract_origin:
|
||||
case DW_AT_specification:
|
||||
- if (!find_abstract_instance (unit, info_ptr, &attr,
|
||||
+ if (!find_abstract_instance (unit, &attr, 0,
|
||||
&func->name,
|
||||
&func->is_linkage,
|
||||
&func->file,
|
||||
--
|
||||
2.23.0
51
meta/recipes-devtools/binutils/binutils/CVE-2019-17451.patch
Normal file
@@ -0,0 +1,51 @@
|
||||
From 0192438051a7e781585647d5581a2a6f62fda362 Mon Sep 17 00:00:00 2001
|
||||
From: Alan Modra <amodra@gmail.com>
|
||||
Date: Wed, 9 Oct 2019 10:47:13 +1030
|
||||
Subject: [PATCH] PR25070, SEGV in function _bfd_dwarf2_find_nearest_line
Selectively backporting fix for bfd/dwarf2.c, but not the ChangeLog
|
||||
file. There are newer versions of binutils, but none of them contain the
|
||||
commit fixing CVE-2019-17451, so backport it to master and zeus.
Upstream-Status: Backport
|
||||
[https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=336bfbeb1848]
|
||||
CVE: CVE-2019-17451
|
||||
Signed-off-by: Trevor Gamblin <trevor.gamblin@windriver.com>
Evil testcase with two debug info sections, with sizes of 2aaaabac4ec1
|
||||
and ffffd5555453b140 result in a total size of 1. Reading the first
|
||||
section of course overflows the buffer and tramples on other memory.
PR 25070
|
||||
* dwarf2.c (_bfd_dwarf2_slurp_debug_info): Catch overflow of
|
||||
total_size calculation.
|
||||
---
|
||||
bfd/dwarf2.c | 11 ++++++++++-
|
||||
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
|
||||
index 0b4e485582..a91597b1d0 100644
|
||||
--- a/bfd/dwarf2.c
|
||||
+++ b/bfd/dwarf2.c
|
||||
@@ -4426,7 +4426,16 @@ _bfd_dwarf2_slurp_debug_info (bfd *abfd, bfd *debug_bfd,
|
||||
for (total_size = 0;
|
||||
msec;
|
||||
msec = find_debug_info (debug_bfd, debug_sections, msec))
|
||||
- total_size += msec->size;
|
||||
+ {
|
||||
+ /* Catch PR25070 testcase overflowing size calculation here. */
|
||||
+ if (total_size + msec->size < total_size
|
||||
+ || total_size + msec->size < msec->size)
|
||||
+ {
|
||||
+ bfd_set_error (bfd_error_no_memory);
|
||||
+ return FALSE;
|
||||
+ }
|
||||
+ total_size += msec->size;
|
||||
+ }
stash->info_ptr_memory = (bfd_byte *) bfd_malloc (total_size);
|
||||
if (stash->info_ptr_memory == NULL)
|
||||
--
|
||||
2.23.0
217
meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2019-5094.patch
Normal file
@@ -0,0 +1,217 @@
|
||||
From 8dbe7b475ec5e91ed767239f0e85880f416fc384 Mon Sep 17 00:00:00 2001
|
||||
From: Theodore Ts'o <tytso@mit.edu>
|
||||
Date: Sun, 1 Sep 2019 00:59:16 -0400
|
||||
Subject: libsupport: add checks to prevent buffer overrun bugs in quota code
A maliciously corrupted file systems can trigger buffer overruns in
|
||||
the quota code used by e2fsck. To fix this, add sanity checks to the
|
||||
quota header fields as well as to block number references in the quota
|
||||
tree.
Addresses: CVE-2019-5094
|
||||
Addresses: TALOS-2019-0887
|
||||
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Upstream-Status: Backport [https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/commit/?h=maint&id=8dbe7b475ec5e91ed767239f0e85880f416fc384]
|
||||
CVE: CVE-2019-5094
Signed-off-by: Changqing Li <changqing.li@windriver.com>
|
||||
---
|
||||
lib/support/mkquota.c | 1 +
|
||||
lib/support/quotaio_tree.c | 71 ++++++++++++++++++++++++++++++----------------
|
||||
lib/support/quotaio_v2.c | 28 ++++++++++++++++++
|
||||
3 files changed, 76 insertions(+), 24 deletions(-)
diff --git a/lib/support/mkquota.c b/lib/support/mkquota.c
|
||||
index 0b9e7665..ddb53124 100644
|
||||
--- a/lib/support/mkquota.c
|
||||
+++ b/lib/support/mkquota.c
|
||||
@@ -671,6 +671,7 @@ errcode_t quota_compare_and_update(quota_ctx_t qctx, enum quota_type qtype,
|
||||
err = qh.qh_ops->scan_dquots(&qh, scan_dquots_callback, &scan_data);
|
||||
if (err) {
|
||||
log_debug("Error scanning dquots");
|
||||
+ *usage_inconsistent = 1;
|
||||
goto out_close_qh;
|
||||
}
diff --git a/lib/support/quotaio_tree.c b/lib/support/quotaio_tree.c
|
||||
index a7c2028c..6cc4fb5b 100644
|
||||
--- a/lib/support/quotaio_tree.c
|
||||
+++ b/lib/support/quotaio_tree.c
|
||||
@@ -540,6 +540,17 @@ struct dquot *qtree_read_dquot(struct quota_handle *h, qid_t id)
|
||||
return dquot;
|
||||
}
+static int check_reference(struct quota_handle *h, unsigned int blk)
|
||||
+{
|
||||
+ if (blk >= h->qh_info.u.v2_mdqi.dqi_qtree.dqi_blocks) {
|
||||
+ log_err("Illegal reference (%u >= %u) in %s quota file",
|
||||
+ blk, h->qh_info.u.v2_mdqi.dqi_qtree.dqi_blocks,
|
||||
+ quota_type2name(h->qh_type));
|
||||
+ return -1;
|
||||
+ }
|
||||
+ return 0;
|
||||
+}
||||
/*
|
||||
* Scan all dquots in file and call callback on each
|
||||
*/
|
||||
@@ -558,7 +569,7 @@ static int report_block(struct dquot *dquot, unsigned int blk, char *bitmap,
|
||||
int entries, i;
|
||||
||||
- return 0;
|
||||
+ return -1;
|
||||
||||
read_blk(dquot->dq_h, blk, buf);
|
||||
@@ -580,23 +591,12 @@ static int report_block(struct dquot *dquot, unsigned int blk, char *bitmap,
|
||||
return entries;
|
||||
}
|
||||
||||
-{
|
||||
- if (blk >= h->qh_info.u.v2_mdqi.dqi_qtree.dqi_blocks)
|
||||
- log_err("Illegal reference (%u >= %u) in %s quota file. "
|
||||
- "Quota file is probably corrupted.\n"
|
||||
- "Please run e2fsck (8) to fix it.",
|
||||
- blk,
|
||||
- h->qh_info.u.v2_mdqi.dqi_qtree.dqi_blocks,
|
||||
- quota_type2name(h->qh_type));
|
||||
-}
|
||||
-
|
||||
static int report_tree(struct dquot *dquot, unsigned int blk, int depth,
|
||||
char *bitmap,
|
||||
int (*process_dquot) (struct dquot *, void *),
|
||||
void *data)
|
||||
{
|
||||
- int entries = 0, i;
|
||||
+ int entries = 0, ret, i;
|
||||
dqbuf_t buf = getdqbuf();
|
||||
__le32 *ref = (__le32 *) buf;
@@ -607,22 +607,40 @@ static int report_tree(struct dquot *dquot, unsigned int blk, int depth,
|
||||
if (depth == QT_TREEDEPTH - 1) {
|
||||
for (i = 0; i < QT_BLKSIZE >> 2; i++) {
|
||||
blk = ext2fs_le32_to_cpu(ref[i]);
|
||||
- check_reference(dquot->dq_h, blk);
|
||||
- if (blk && !get_bit(bitmap, blk))
|
||||
- entries += report_block(dquot, blk, bitmap,
|
||||
- process_dquot, data);
|
||||
+ if (check_reference(dquot->dq_h, blk)) {
|
||||
+ entries = -1;
|
||||
+ goto errout;
|
||||
+ }
|
||||
+ if (blk && !get_bit(bitmap, blk)) {
|
||||
+ ret = report_block(dquot, blk, bitmap,
|
||||
+ process_dquot, data);
|
||||
+ if (ret < 0) {
|
||||
+ entries = ret;
|
||||
+ goto errout;
|
||||
+ }
|
||||
+ entries += ret;
|
||||
+ }
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < QT_BLKSIZE >> 2; i++) {
|
||||
blk = ext2fs_le32_to_cpu(ref[i]);
|
||||
if (blk) {
|
||||
- check_reference(dquot->dq_h, blk);
|
||||
- entries += report_tree(dquot, blk, depth + 1,
|
||||
- bitmap, process_dquot,
|
||||
- data);
|
||||
+ if (check_reference(dquot->dq_h, blk)) {
|
||||
+ entries = -1;
|
||||
+ goto errout;
|
||||
+ }
|
||||
+ ret = report_tree(dquot, blk, depth + 1,
|
||||
+ bitmap, process_dquot,
|
||||
+ data);
|
||||
+ if (ret < 0) {
|
||||
+ entries = ret;
|
||||
+ goto errout;
|
||||
+ }
|
||||
+ entries += ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
+errout:
|
||||
freedqbuf(buf);
|
||||
return entries;
|
||||
}
|
||||
@@ -642,6 +660,7 @@ int qtree_scan_dquots(struct quota_handle *h,
|
||||
int (*process_dquot) (struct dquot *, void *),
|
||||
void *data)
|
||||
{
|
||||
+ int ret;
|
||||
char *bitmap;
|
||||
struct v2_mem_dqinfo *v2info = &h->qh_info.u.v2_mdqi;
|
||||
struct qtree_mem_dqinfo *info = &v2info->dqi_qtree;
|
||||
@@ -655,10 +674,14 @@ int qtree_scan_dquots(struct quota_handle *h,
|
||||
ext2fs_free_mem(&dquot);
|
||||
return -1;
|
||||
}
|
||||
- v2info->dqi_used_entries = report_tree(dquot, QT_TREEOFF, 0, bitmap,
|
||||
- process_dquot, data);
|
||||
+ ret = report_tree(dquot, QT_TREEOFF, 0, bitmap, process_dquot, data);
|
||||
+ if (ret < 0)
|
||||
+ goto errout;
|
||||
+ v2info->dqi_used_entries = ret;
|
||||
v2info->dqi_data_blocks = find_set_bits(bitmap, info->dqi_blocks);
|
||||
+ ret = 0;
|
||||
+errout:
|
||||
ext2fs_free_mem(&bitmap);
|
||||
ext2fs_free_mem(&dquot);
|
||||
- return 0;
|
||||
+ return ret;
|
||||
}
|
||||
diff --git a/lib/support/quotaio_v2.c b/lib/support/quotaio_v2.c
|
||||
index 38be2a34..73906676 100644
|
||||
--- a/lib/support/quotaio_v2.c
|
||||
+++ b/lib/support/quotaio_v2.c
|
||||
@@ -175,6 +175,8 @@ static int v2_check_file(struct quota_handle *h, int type, int fmt)
|
||||
static int v2_init_io(struct quota_handle *h)
|
||||
{
|
||||
struct v2_disk_dqinfo ddqinfo;
|
||||
+ struct v2_mem_dqinfo *info;
|
||||
+ __u64 filesize;
h->qh_info.u.v2_mdqi.dqi_qtree.dqi_entry_size =
|
||||
sizeof(struct v2r1_disk_dqblk);
|
||||
@@ -185,6 +187,32 @@ static int v2_init_io(struct quota_handle *h)
|
||||
sizeof(ddqinfo)) != sizeof(ddqinfo))
|
||||
return -1;
|
||||
v2_disk2memdqinfo(&h->qh_info, &ddqinfo);
|
||||
+
|
||||
+ /* Check to make sure quota file info is sane */
|
||||
+ info = &h->qh_info.u.v2_mdqi;
|
||||
+ if (ext2fs_file_get_lsize(h->qh_qf.e2_file, &filesize))
|
||||
+ return -1;
|
||||
+ if ((filesize > (1U << 31)) ||
|
||||
+ (info->dqi_qtree.dqi_blocks >
|
||||
+ (filesize + QT_BLKSIZE - 1) >> QT_BLKSIZE_BITS)) {
|
||||
+ log_err("Quota inode %u corrupted: file size %llu; "
|
||||
+ "dqi_blocks %u", h->qh_qf.ino,
|
||||
+ filesize, info->dqi_qtree.dqi_blocks);
|
||||
+ return -1;
|
||||
+ }
|
||||
+ if (info->dqi_qtree.dqi_free_blk >= info->dqi_qtree.dqi_blocks) {
|
||||
+ log_err("Quota inode %u corrupted: free_blk %u; dqi_blocks %u",
|
||||
+ h->qh_qf.ino, info->dqi_qtree.dqi_free_blk,
|
||||
+ info->dqi_qtree.dqi_blocks);
|
||||
+ return -1;
|
||||
+ }
|
||||
+ if (info->dqi_qtree.dqi_free_entry >= info->dqi_qtree.dqi_blocks) {
|
||||
+ log_err("Quota inode %u corrupted: free_entry %u; "
|
||||
+ "dqi_blocks %u", h->qh_qf.ino,
|
||||
+ info->dqi_qtree.dqi_free_entry,
|
||||
+ info->dqi_qtree.dqi_blocks);
|
||||
+ return -1;
|
||||
+ }
|
||||
return 0;
|
||||
}
|
||||
|
||||
--
|
||||
cgit 1.2-0.3.lf.el7
|
||||
|
||||
@@ -5,6 +5,7 @@ SRC_URI += "file://remove.ldconfig.call.patch \
|
||||
file://ptest.patch \
|
||||
file://mkdir_p.patch \
|
||||
file://0001-misc-create_inode.c-set-dir-s-mode-correctly.patch \
|
||||
file://CVE-2019-5094.patch \
|
||||
"
SRC_URI_append_class-native = " file://e2fsprogs-fix-missing-check-for-permission-denied.patch \
55
meta/recipes-devtools/file/file/CVE-2019-18218.patch
Normal file
@@ -0,0 +1,55 @@
|
||||
cdf_read_property_info in cdf.c in file through 5.37 does not restrict the
|
||||
number of CDF_VECTOR elements, which allows a heap-based buffer overflow (4-byte
|
||||
out-of-bounds write).
CVE: CVE-2019-18218
|
||||
Upstream-Status: Backport
|
||||
Signed-off-by: Ross Burton <ross.burton@intel.com>
From 46a8443f76cec4b41ec736eca396984c74664f84 Mon Sep 17 00:00:00 2001
|
||||
From: Christos Zoulas <christos@zoulas.com>
|
||||
Date: Mon, 26 Aug 2019 14:31:39 +0000
|
||||
Subject: [PATCH] Limit the number of elements in a vector (found by oss-fuzz)
---
|
||||
src/cdf.c | 9 ++++-----
|
||||
src/cdf.h | 1 +
|
||||
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/src/cdf.c b/src/cdf.c
|
||||
index 9d6396742..bb81d6374 100644
|
||||
--- a/src/cdf.c
|
||||
+++ b/src/cdf.c
|
||||
@@ -1016,8 +1016,9 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
|
||||
goto out;
|
||||
}
|
||||
nelements = CDF_GETUINT32(q, 1);
|
||||
- if (nelements == 0) {
|
||||
- DPRINTF(("CDF_VECTOR with nelements == 0\n"));
|
||||
+ if (nelements > CDF_ELEMENT_LIMIT || nelements == 0) {
|
||||
+ DPRINTF(("CDF_VECTOR with nelements == %"
|
||||
+ SIZE_T_FORMAT "u\n", nelements));
|
||||
goto out;
|
||||
}
|
||||
slen = 2;
|
||||
@@ -1060,8 +1061,6 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
|
||||
goto out;
|
||||
inp += nelem;
|
||||
}
|
||||
- DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n",
|
||||
- nelements));
|
||||
for (j = 0; j < nelements && i < sh.sh_properties;
|
||||
j++, i++)
|
||||
{
|
||||
diff --git a/src/cdf.h b/src/cdf.h
|
||||
index 2f7e554b7..05056668f 100644
|
||||
--- a/src/cdf.h
|
||||
+++ b/src/cdf.h
|
||||
@@ -48,6 +48,7 @@
|
||||
typedef int32_t cdf_secid_t;
|
||||
|
||||
#define CDF_LOOP_LIMIT 10000
|
||||
+#define CDF_ELEMENT_LIMIT 100000
|
||||
|
||||
#define CDF_SECID_NULL 0
|
||||
#define CDF_SECID_FREE -1
|
||||
@@ -14,13 +14,16 @@ DEPENDS_class-native = "zlib-native"
|
||||
# Blacklist a bogus tag in upstream check
|
||||
UPSTREAM_CHECK_GITTAGREGEX = "FILE(?P<pver>(?!6_23).+)"
|
||||
|
||||
SRC_URI = "git://github.com/file/file.git"
|
||||
SRC_URI = "git://github.com/file/file.git \
|
||||
file://CVE-2019-18218.patch"
|
||||
|
||||
SRCREV = "a0d5b0e4e9f97d74a9911e95cedd579852e25398"
|
||||
S = "${WORKDIR}/git"
|
||||
|
||||
inherit autotools update-alternatives
|
||||
|
||||
EXTRA_OECONF += "--disable-libseccomp"
|
||||
|
||||
ALTERNATIVE_${PN} = "file"
|
||||
ALTERNATIVE_LINK_NAME[file] = "${bindir}/file"
|
||||
|
||||
@@ -29,6 +32,10 @@ EXTRA_OEMAKE_append_class-nativesdk = "-e FILE_COMPILE=${STAGING_BINDIR_NATIVE}/
|
||||
|
||||
FILES_${PN} += "${datadir}/misc/*.mgc"
|
||||
|
||||
do_compile_append_class-native() {
|
||||
oe_runmake check
|
||||
}
|
||||
|
||||
do_install_append_class-native() {
|
||||
create_cmdline_wrapper ${D}/${bindir}/file \
|
||||
--magic-file ${datadir}/misc/magic.mgc
|
||||
|
||||
@@ -16,6 +16,7 @@ SRC_URI += "\
|
||||
file://0006-cmd-dist-separate-host-and-target-builds.patch \
|
||||
file://0007-cmd-go-make-GOROOT-precious-by-default.patch \
|
||||
file://0008-use-GOBUILDMODE-to-set-buildmode.patch \
|
||||
file://0001-release-branch.go1.12-security-net-textproto-don-t-n.patch \
|
||||
"
|
||||
SRC_URI_append_libc-musl = " file://0009-ld-replace-glibc-dynamic-linker-with-musl.patch"
|
||||
|
||||
|
||||
@@ -0,0 +1,163 @@
|
||||
From 265b691ac440bfb711d8de323346f7d72e620efe Mon Sep 17 00:00:00 2001
|
||||
From: Filippo Valsorda <filippo@golang.org>
|
||||
Date: Thu, 12 Sep 2019 12:37:36 -0400
|
||||
Subject: [PATCH] [release-branch.go1.12-security] net/textproto: don't
|
||||
normalize headers with spaces before the colon
RFC 7230 is clear about headers with a space before the colon, like
X-Answer : 42
being invalid, but we've been accepting and normalizing them for compatibility
|
||||
purposes since CL 5690059 in 2012.
On the client side, this is harmless and indeed most browsers behave the same
|
||||
to this day. On the server side, this becomes a security issue when the
|
||||
behavior doesn't match that of a reverse proxy sitting in front of the server.
For example, if a WAF accepts them without normalizing them, it might be
|
||||
possible to bypass its filters, because the Go server would interpret the
|
||||
header differently. Worse, if the reverse proxy coalesces requests onto a
|
||||
single HTTP/1.1 connection to a Go server, the understanding of the request
|
||||
boundaries can get out of sync between them, allowing an attacker to tack an
|
||||
arbitrary method and path onto a request by other clients, including
|
||||
authentication headers unknown to the attacker.
This was recently presented at multiple security conferences:
|
||||
https://portswigger.net/blog/http-desync-attacks-request-smuggling-reborn
net/http servers already reject header keys with invalid characters.
|
||||
Simply stop normalizing extra spaces in net/textproto, let it return them
|
||||
unchanged like it does for other invalid headers, and let net/http enforce
|
||||
RFC 7230, which is HTTP specific. This loses us normalization on the client
|
||||
side, but there's no right answer on the client side anyway, and hiding the
|
||||
issue sounds worse than letting the application decide.
Fixes CVE-2019-16276
Change-Id: I6d272de827e0870da85d93df770d6a0e161bbcf1
|
||||
Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/549719
|
||||
Reviewed-by: Brad Fitzpatrick <bradfitz@google.com>
|
||||
(cherry picked from commit 1280b868e82bf173ea3e988be3092d160ee66082)
|
||||
Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/558776
|
||||
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
|
||||
|
||||
CVE: CVE-2019-16276
|
||||
|
||||
Upstream-Status: Backport [https://github.com/golang/go/commit/6e6f4aaf70c8b1cc81e65a26332aa9409de03ad8]
|
||||
|
||||
Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
|
||||
---
|
||||
src/net/http/serve_test.go | 4 ++++
|
||||
src/net/http/transport_test.go | 27 +++++++++++++++++++++++++++
|
||||
src/net/textproto/reader.go | 10 ++--------
|
||||
src/net/textproto/reader_test.go | 13 ++++++-------
|
||||
4 files changed, 39 insertions(+), 15 deletions(-)
|
||||
|
||||
diff --git a/src/net/http/serve_test.go b/src/net/http/serve_test.go
|
||||
index 6eb0088a96..89bfdfbb82 100644
|
||||
--- a/src/net/http/serve_test.go
|
||||
+++ b/src/net/http/serve_test.go
|
||||
@@ -4748,6 +4748,10 @@ func TestServerValidatesHeaders(t *testing.T) {
|
||||
{"foo\xffbar: foo\r\n", 400}, // binary in header
|
||||
{"foo\x00bar: foo\r\n", 400}, // binary in header
|
||||
{"Foo: " + strings.Repeat("x", 1<<21) + "\r\n", 431}, // header too large
|
||||
+ // Spaces between the header key and colon are not allowed.
|
||||
+ // See RFC 7230, Section 3.2.4.
|
||||
+ {"Foo : bar\r\n", 400},
|
||||
+ {"Foo\t: bar\r\n", 400},
|
||||
|
||||
{"foo: foo foo\r\n", 200}, // LWS space is okay
|
||||
{"foo: foo\tfoo\r\n", 200}, // LWS tab is okay
|
||||
diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go
|
||||
index 5c329543e2..5e5438a708 100644
|
||||
--- a/src/net/http/transport_test.go
|
||||
+++ b/src/net/http/transport_test.go
|
||||
@@ -5133,3 +5133,30 @@ func TestTransportIgnores408(t *testing.T) {
|
||||
}
|
||||
t.Fatalf("timeout after %v waiting for Transport connections to die off", time.Since(t0))
|
||||
}
|
||||
+
|
||||
+func TestInvalidHeaderResponse(t *testing.T) {
|
||||
+ setParallel(t)
|
||||
+ defer afterTest(t)
|
||||
+ cst := newClientServerTest(t, h1Mode, HandlerFunc(func(w ResponseWriter, r *Request) {
|
||||
+ conn, buf, _ := w.(Hijacker).Hijack()
|
||||
+ buf.Write([]byte("HTTP/1.1 200 OK\r\n" +
|
||||
+ "Date: Wed, 30 Aug 2017 19:09:27 GMT\r\n" +
|
||||
+ "Content-Type: text/html; charset=utf-8\r\n" +
|
||||
+ "Content-Length: 0\r\n" +
|
||||
+ "Foo : bar\r\n\r\n"))
|
||||
+ buf.Flush()
|
||||
+ conn.Close()
|
||||
+ }))
|
||||
+ defer cst.close()
|
||||
+ res, err := cst.c.Get(cst.ts.URL)
|
||||
+ if err != nil {
|
||||
+ t.Fatal(err)
|
||||
+ }
|
||||
+ defer res.Body.Close()
|
||||
+ if v := res.Header.Get("Foo"); v != "" {
|
||||
+ t.Errorf(`unexpected "Foo" header: %q`, v)
|
||||
+ }
|
||||
+ if v := res.Header.Get("Foo "); v != "bar" {
|
||||
+ t.Errorf(`bad "Foo " header value: %q, want %q`, v, "bar")
|
||||
+ }
|
||||
+}
|
||||
diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go
|
||||
index 2c4f25d5ae..1a5e364cf7 100644
|
||||
--- a/src/net/textproto/reader.go
|
||||
+++ b/src/net/textproto/reader.go
|
||||
@@ -493,18 +493,12 @@ func (r *Reader) ReadMIMEHeader() (MIMEHeader, error) {
|
||||
return m, err
|
||||
}
|
||||
|
||||
- // Key ends at first colon; should not have trailing spaces
|
||||
- // but they appear in the wild, violating specs, so we remove
|
||||
- // them if present.
|
||||
+ // Key ends at first colon.
|
||||
i := bytes.IndexByte(kv, ':')
|
||||
if i < 0 {
|
||||
return m, ProtocolError("malformed MIME header line: " + string(kv))
|
||||
}
|
||||
- endKey := i
|
||||
- for endKey > 0 && kv[endKey-1] == ' ' {
|
||||
- endKey--
|
||||
- }
|
||||
- key := canonicalMIMEHeaderKey(kv[:endKey])
|
||||
+ key := canonicalMIMEHeaderKey(kv[:i])
|
||||
|
||||
// As per RFC 7230 field-name is a token, tokens consist of one or more chars.
|
||||
// We could return a ProtocolError here, but better to be liberal in what we
|
||||
diff --git a/src/net/textproto/reader_test.go b/src/net/textproto/reader_test.go
|
||||
index f85fbdc36d..b92fdcd3c7 100644
|
||||
--- a/src/net/textproto/reader_test.go
|
||||
+++ b/src/net/textproto/reader_test.go
|
||||
@@ -188,11 +188,10 @@ func TestLargeReadMIMEHeader(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
-// Test that we read slightly-bogus MIME headers seen in the wild,
|
||||
-// with spaces before colons, and spaces in keys.
|
||||
+// TestReadMIMEHeaderNonCompliant checks that we don't normalize headers
|
||||
+// with spaces before colons, and accept spaces in keys.
|
||||
func TestReadMIMEHeaderNonCompliant(t *testing.T) {
|
||||
- // Invalid HTTP response header as sent by an Axis security
|
||||
- // camera: (this is handled by IE, Firefox, Chrome, curl, etc.)
|
||||
+ // These invalid headers will be rejected by net/http according to RFC 7230.
|
||||
r := reader("Foo: bar\r\n" +
|
||||
"Content-Language: en\r\n" +
|
||||
"SID : 0\r\n" +
|
||||
@@ -202,9 +201,9 @@ func TestReadMIMEHeaderNonCompliant(t *testing.T) {
|
||||
want := MIMEHeader{
|
||||
"Foo": {"bar"},
|
||||
"Content-Language": {"en"},
|
||||
- "Sid": {"0"},
|
||||
- "Audio Mode": {"None"},
|
||||
- "Privilege": {"127"},
|
||||
+ "SID ": {"0"},
|
||||
+ "Audio Mode ": {"None"},
|
||||
+ "Privilege ": {"127"},
|
||||
}
|
||||
if !reflect.DeepEqual(m, want) || err != nil {
|
||||
t.Fatalf("ReadMIMEHeader =\n%v, %v; want:\n%v", m, err, want)
|
||||
@@ -31,6 +31,7 @@ FILES_${PN}-misc = "${sbindir}/i2c-stub-from-dump \
|
||||
RDEPENDS_${PN}-misc = "${PN} perl perl-module-posix \
|
||||
perl-module-constant perl-module-file-basename \
|
||||
perl-module-fcntl perl-module-strict perl-module-vars \
|
||||
perl-module-carp \
|
||||
"
ALTERNATIVE_PRIORITY = "100"
31
meta/recipes-devtools/opkg-utils/opkg-utils/pipefail.patch
Normal file
@@ -0,0 +1,31 @@
|
||||
We need opkg-build to fail if for example the tar command is passed invalid
|
||||
options. Without this, we see silently created empty packaged where data.tar
|
||||
is zero bytes in size. This creates hard to debug problems.
An example is when reproducible builds are enabled and run on old hosts like
|
||||
centos7 which has tar < 1.28:
Subprocess output:tar: unrecognized option '--clamp-mtime'
|
||||
Try `tar --help' or `tar --usage' for more information.
Upstream-Status: Pending
|
||||
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Index: opkg-utils-0.4.1/opkg-build
|
||||
===================================================================
|
||||
--- opkg-utils-0.4.1.orig/opkg-build
|
||||
+++ opkg-utils-0.4.1/opkg-build
|
||||
@@ -1,4 +1,4 @@
|
||||
-#!/bin/sh
|
||||
+#!/bin/bash
: <<=cut
|
||||
=head1 NAME
|
||||
@@ -12,6 +12,7 @@ opkg-build - construct an .opk from a di
|
||||
# Updated to work on Familiar Pre0.7rc1, with busybox tar.
|
||||
# Note it Requires: binutils-ar (since the busybox ar can't create)
|
||||
set -e
|
||||
+set -o pipefail
|
||||
|
||||
version=1.0
|
||||
|
||||
@@ -10,6 +10,7 @@ PROVIDES += "${@bb.utils.contains('PACKAGECONFIG', 'update-alternatives', 'virtu
|
||||
SRC_URI = "http://git.yoctoproject.org/cgit/cgit.cgi/${BPN}/snapshot/${BPN}-${PV}.tar.gz \
|
||||
file://0001-Switch-all-scripts-to-use-Python-3.x.patch \
|
||||
file://0001-opkg-build-clamp-mtimes-to-SOURCE_DATE_EPOCH.patch \
|
||||
file://pipefail.patch \
|
||||
"
|
||||
UPSTREAM_CHECK_URI = "http://git.yoctoproject.org/cgit/cgit.cgi/opkg-utils/refs/"
|
||||
|
||||
@@ -19,6 +20,8 @@ SRC_URI[sha256sum] = "9ea9efdd9fe13661ad251e3a2860c1c93045adcfaa6659c3e86d9748ec
|
||||
|
||||
TARGET_CC_ARCH += "${LDFLAGS}"
|
||||
|
||||
RDEPENDS_${PN} += "bash"
|
||||
|
||||
# For native builds we use the host Python
|
||||
PYTHONRDEPS = "python3 python3-shell python3-io python3-math python3-crypt python3-logging python3-fcntl python3-pickle python3-compression python3-stringold"
|
||||
PYTHONRDEPS_class-native = ""
46
meta/recipes-devtools/opkg/opkg/open_inner.patch
Normal file
@@ -0,0 +1,46 @@
|
||||
From alejandro.delcastillo@ni.com Wed Nov 20 22:35:02 2019
|
||||
From: Alejandro del Castillo <alejandro.delcastillo@ni.com>
|
||||
To: <opkg-devel@googlegroups.com>, <richard.purdie@linuxfoundation.org>
|
||||
CC: Alejandro del Castillo <alejandro.delcastillo@ni.com>
|
||||
Subject: [opkg][PATCH 2/2] open_inner: add support for empty payloads
|
||||
Date: Wed, 20 Nov 2019 16:34:48 -0600
|
||||
Message-ID: <20191120223448.26522-3-alejandro.delcastillo@ni.com>
|
||||
X-Mailer: git-send-email 2.22.0
|
||||
In-Reply-To: <20191120223448.26522-1-alejandro.delcastillo@ni.com>
|
||||
References: <20191120223448.26522-1-alejandro.delcastillo@ni.com>
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain
|
||||
Content-Transfer-Encoding: 8bit
Support for empty compressed payloads need to be explicitly enabled on
|
||||
libarchive.
Signed-off-by: Alejandro del Castillo <alejandro.delcastillo@ni.com>
Upstream-Status: Backport
|
||||
---
|
||||
libopkg/opkg_archive.c | 7 +++++++
|
||||
1 file changed, 7 insertions(+)
diff --git a/libopkg/opkg_archive.c b/libopkg/opkg_archive.c
|
||||
index 0e9ccea..f19cece 100644
|
||||
--- a/libopkg/opkg_archive.c
|
||||
+++ b/libopkg/opkg_archive.c
|
||||
@@ -618,6 +618,13 @@ static struct archive *open_inner(struct archive *outer)
|
||||
goto err_cleanup;
|
||||
}
|
||||
|
||||
+ r = archive_read_support_format_empty(inner);
|
||||
+ if (r != ARCHIVE_OK) {
|
||||
+ opkg_msg(ERROR, "Empty format not supported: %s\n",
|
||||
+ archive_error_string(inner));
|
||||
+ goto err_cleanup;
|
||||
+ }
|
||||
+
|
||||
r = archive_read_open(inner, data, NULL, inner_read, inner_close);
|
||||
if (r != ARCHIVE_OK) {
|
||||
opkg_msg(ERROR, "Failed to open inner archive: %s\n",
|
||||
--
|
||||
2.22.0
|
||||
54
meta/recipes-devtools/opkg/opkg/opkg_archive.patch
Normal file
@@ -0,0 +1,54 @@
|
||||
From alejandro.delcastillo@ni.com Wed Nov 20 22:35:01 2019
|
||||
Return-Path: <richard.purdie+caf_=rpurdie=rpsys.net@linuxfoundation.org>
|
||||
From: Alejandro del Castillo <alejandro.delcastillo@ni.com>
|
||||
To: <opkg-devel@googlegroups.com>, <richard.purdie@linuxfoundation.org>
|
||||
CC: Alejandro del Castillo <alejandro.delcastillo@ni.com>
|
||||
Subject: [opkg][PATCH 1/2] opkg_archive.c: avoid double free on uncompress
|
||||
error
|
||||
Date: Wed, 20 Nov 2019 16:34:47 -0600
|
||||
Message-ID: <20191120223448.26522-2-alejandro.delcastillo@ni.com>
|
||||
X-Mailer: git-send-email 2.22.0
|
||||
In-Reply-To: <20191120223448.26522-1-alejandro.delcastillo@ni.com>
|
||||
References: <20191120223448.26522-1-alejandro.delcastillo@ni.com>
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain
|
||||
Content-Transfer-Encoding: 8bit
The open-inner function calls archive_read_open. On error,
|
||||
archive_read_open calls inner_close, which also closes the outter
|
||||
archive. On error, return NULL directly to avoid double free.
Upstream-Status: Backport
Signed-off-by: Alejandro del Castillo <alejandro.delcastillo@ni.com>
|
||||
---
|
||||
libopkg/opkg_archive.c | 4 ++--
|
||||
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/libopkg/opkg_archive.c b/libopkg/opkg_archive.c
|
||||
index 3d87db1..0e9ccea 100644
|
||||
--- a/libopkg/opkg_archive.c
|
||||
+++ b/libopkg/opkg_archive.c
|
||||
@@ -622,7 +622,7 @@ static struct archive *open_inner(struct archive *outer)
|
||||
if (r != ARCHIVE_OK) {
|
||||
opkg_msg(ERROR, "Failed to open inner archive: %s\n",
|
||||
archive_error_string(inner));
|
||||
- goto err_cleanup;
|
||||
+ return NULL;
|
||||
}
|
||||
|
||||
return inner;
|
||||
@@ -683,7 +683,7 @@ static struct archive *extract_outer(const char *filename, const char *arname)
|
||||
|
||||
inner = open_inner(outer);
|
||||
if (!inner)
|
||||
- goto err_cleanup;
|
||||
+ return NULL;
|
||||
|
||||
return inner;
|
||||
|
||||
--
|
||||
2.22.0
@@ -14,6 +14,8 @@ PE = "1"
|
||||
SRC_URI = "http://downloads.yoctoproject.org/releases/${BPN}/${BPN}-${PV}.tar.gz \
|
||||
file://opkg.conf \
|
||||
file://0001-opkg_conf-create-opkg.lock-in-run-instead-of-var-run.patch \
|
||||
file://opkg_archive.patch \
|
||||
file://open_inner.patch \
|
||||
file://run-ptest \
|
||||
"
@@ -6,8 +6,8 @@ Subject: [PATCH] Invoke ed directly instead of using the shell
|
||||
* src/pch.c (do_ed_script): Invoke ed directly instead of using a shell
|
||||
command to avoid quoting vulnerabilities.
|
||||
|
||||
CVE: CVE-2019-13638
|
||||
Upstream-Status: Backport[https://git.savannah.gnu.org/cgit/patch.git/patch/?id=3fcd042d26d70856e826a42b5f93dc4854d80bf0]
|
||||
CVE: CVE-2019-13638 CVE-2018-20969
|
||||
Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/patch.git/patch/?id=3fcd042d26d70856e826a42b5f93dc4854d80bf0]
|
||||
Signed-off-by: Trevor Gamblin <trevor.gamblin@windriver.com>
|
||||
|
||||
---
106
meta/recipes-devtools/pseudo/files/0001-Add-statx.patch
Normal file
@@ -0,0 +1,106 @@
|
||||
From 4e41a05de1f34ba00a68ca4f20fb49c4d1cbd2d0 Mon Sep 17 00:00:00 2001
|
||||
From: Richard Purdie <richard.purdie@linuxfoundation.org>
|
||||
Date: Wed, 6 Nov 2019 12:17:46 +0000
|
||||
Subject: [PATCH] Add statx glibc/syscall support
Modern distros (e.g. fedora30) are starting to use the new statx() syscall through
|
||||
the newly exposed glibc wrapper function in software like coreutils (e.g. the ls
|
||||
command). Add support to intercept this to pseudo.
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
|
||||
Upstream-Status: Submitted [Emailed to seebs]
|
||||
---
|
||||
ports/linux/guts/statx.c | 48 ++++++++++++++++++++++++++++++++++++++++
|
||||
ports/linux/portdefs.h | 1 +
|
||||
ports/linux/wrapfuncs.in | 1 +
|
||||
3 files changed, 50 insertions(+)
|
||||
create mode 100644 ports/linux/guts/statx.c
|
||||
|
||||
diff --git a/ports/linux/statx/guts/statx.c b/ports/linux/statx/guts/statx.c
|
||||
new file mode 100644
|
||||
index 0000000..a3259c4
|
||||
--- /dev/null
|
||||
+++ b/ports/linux/statx/guts/statx.c
|
||||
@@ -0,0 +1,42 @@
|
||||
+/*
|
||||
+ * Copyright (c) 2019 Linux Foundation
|
||||
+ * Author: Richard Purdie
|
||||
+ *
|
||||
+ * SPDX-License-Identifier: LGPL-2.1-only
|
||||
+ *
|
||||
+ * int
|
||||
+ * statx(int dirfd, const char *pathname, int flags, unsigned int mask, struct statx *statxbuf) {
|
||||
+ * int rc = -1;
|
||||
+ */
|
||||
+ pseudo_msg_t *msg;
|
||||
+ PSEUDO_STATBUF buf;
|
||||
+ int save_errno;
|
||||
+
|
||||
+ rc = real_statx(dirfd, pathname, flags, mask, statxbuf);
|
||||
+ save_errno = errno;
|
||||
+ if (rc == -1) {
|
||||
+ return rc;
|
||||
+ }
|
||||
+
|
||||
+ buf.st_uid = statxbuf->stx_uid;
|
||||
+ buf.st_gid = statxbuf->stx_gid;
|
||||
+ buf.st_dev = makedev(statxbuf->stx_dev_major, statxbuf->stx_dev_minor);
|
||||
+ buf.st_ino = statxbuf->stx_ino;
|
||||
+ buf.st_mode = statxbuf->stx_mode;
|
||||
+ buf.st_rdev = makedev(statxbuf->stx_rdev_major, statxbuf->stx_rdev_minor);
|
||||
+ buf.st_nlink = statxbuf->stx_nlink;
|
||||
+ msg = pseudo_client_op(OP_STAT, 0, -1, dirfd, pathname, &buf);
|
||||
+ if (msg && msg->result == RESULT_SUCCEED) {
|
||||
+ pseudo_debug(PDBGF_FILE, "statx(path %s), flags %o, stat rc %d, stat uid %o\n", pathname, flags, rc, statxbuf->stx_uid);
|
||||
+ statxbuf->stx_uid = msg->uid;
|
||||
+ statxbuf->stx_gid = msg->gid;
|
||||
+ statxbuf->stx_mode = msg->mode;
|
||||
+ statxbuf->stx_rdev_major = major(msg->rdev);
|
||||
+ statxbuf->stx_rdev_minor = minor(msg->rdev);
|
||||
+ } else {
|
||||
+ pseudo_debug(PDBGF_FILE, "statx(path %s) failed, flags %o, stat rc %d, stat uid %o\n", pathname, flags, rc, statxbuf->stx_uid);
|
||||
+ }
|
||||
+ errno = save_errno;
|
||||
+/* return rc;
|
||||
+ * }
|
||||
+ */
|
||||
diff --git a/ports/linux/statx/portdefs.h b/ports/linux/statx/portdefs.h
|
||||
new file mode 100644
|
||||
index 0000000..bf934dc
|
||||
--- /dev/null
|
||||
+++ b/ports/linux/statx/portdefs.h
|
||||
@@ -0,0 +1,6 @@
|
||||
+/*
|
||||
+ * SPDX-License-Identifier: LGPL-2.1-only
|
||||
+ *
|
||||
+ */
|
||||
+#include <sys/stat.h>
|
||||
+#include <sys/sysmacros.h>
|
||||
diff --git a/ports/linux/statx/wrapfuncs.in b/ports/linux/statx/wrapfuncs.in
|
||||
new file mode 100644
|
||||
index 0000000..c9cd4c3
|
||||
--- /dev/null
|
||||
+++ b/ports/linux/statx/wrapfuncs.in
|
||||
@@ -0,0 +1 @@
|
||||
+int statx(int dirfd, const char *pathname, int flags, unsigned int mask, struct statx *statxbuf);
|
||||
diff --git a/ports/linux/subports b/ports/linux/subports
|
||||
index a29044a..49081bf 100755
|
||||
--- a/ports/linux/subports
|
||||
+++ b/ports/linux/subports
|
||||
@@ -54,3 +54,13 @@ else
|
||||
fi
|
||||
rm -f dummy.c dummy.o
|
||||
|
||||
+cat > dummy.c <<EOF
|
||||
+#define _GNU_SOURCE
|
||||
+#include <sys/stat.h>
|
||||
+struct statx x;
|
||||
+EOF
|
||||
+if ${CC} -c -o dummy.o dummy.c >/dev/null 2>&1; then
|
||||
+ echo "linux/statx"
|
||||
+fi
|
||||
+rm -f dummy.c dummy.o
|
||||
+
|
||||
--
|
||||
2.17.1
|
||||
|
||||
@@ -30,23 +30,10 @@ PSEUDO_EXTRA_OPTS ?= "--enable-force-async --without-passwd-fallback --enable-ep
|
||||
|
||||
# Compile for the local machine arch...
|
||||
do_compile () {
|
||||
SQLITE_LDADD='$(SQLITE)/$(SQLITE_LIB)/libsqlite3.a'
|
||||
for sqlite_link_opt in $(pkg-config sqlite3 --libs --static)
|
||||
do
|
||||
case "$sqlite_link_opt" in
|
||||
-lsqlite3)
|
||||
;;
|
||||
-l*)
|
||||
SQLITE_LDADD="${SQLITE_LDADD} ${sqlite_link_opt}"
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
done
|
||||
if [ "${SITEINFO_BITS}" = "64" ]; then
|
||||
${S}/configure ${PSEUDO_EXTRA_OPTS} --prefix=${prefix} --libdir=${prefix}/lib/pseudo/lib${SITEINFO_BITS} --with-sqlite-lib=${baselib} --with-sqlite=${STAGING_DIR_TARGET}${exec_prefix} --cflags="${CFLAGS}" --bits=${SITEINFO_BITS} --with-static-sqlite="$SQLITE_LDADD" --without-rpath
|
||||
${S}/configure ${PSEUDO_EXTRA_OPTS} --prefix=${prefix} --libdir=${prefix}/lib/pseudo/lib${SITEINFO_BITS} --with-sqlite-lib=${baselib} --with-sqlite=${STAGING_DIR_TARGET}${exec_prefix} --cflags="${CFLAGS}" --bits=${SITEINFO_BITS} --without-rpath
|
||||
else
|
||||
${S}/configure ${PSEUDO_EXTRA_OPTS} --prefix=${prefix} --libdir=${prefix}/lib/pseudo/lib --with-sqlite-lib=${baselib} --with-sqlite=${STAGING_DIR_TARGET}${exec_prefix} --cflags="${CFLAGS}" --bits=${SITEINFO_BITS} --with-static-sqlite="$SQLITE_LDADD" --without-rpath
|
||||
${S}/configure ${PSEUDO_EXTRA_OPTS} --prefix=${prefix} --libdir=${prefix}/lib/pseudo/lib --with-sqlite-lib=${baselib} --with-sqlite=${STAGING_DIR_TARGET}${exec_prefix} --cflags="${CFLAGS}" --bits=${SITEINFO_BITS} --without-rpath
|
||||
fi
|
||||
oe_runmake ${MAKEOPTS}
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ SRC_URI = "git://git.yoctoproject.org/pseudo \
|
||||
file://moreretries.patch \
|
||||
file://toomanyfiles.patch \
|
||||
file://0001-maketables-wrappers-use-Python-3.patch \
|
||||
file://0001-Add-statx.patch \
|
||||
"
|
||||
|
||||
SRCREV = "060058bb29f70b244e685b3c704eb0641b736f73"
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
From 12292444e1b3662b994bc223d92b8338fb0895ff Mon Sep 17 00:00:00 2001
|
||||
From 6cbb7529cf7ff0da3ca649fb3486facd9620d625 Mon Sep 17 00:00:00 2001
|
||||
From: Changqing Li <changqing.li@windriver.com>
|
||||
Date: Thu, 25 Oct 2018 07:32:14 +0000
|
||||
Subject: [PATCH] python-native: fix one do_populate_sysroot warning
|
||||
@@ -17,23 +17,24 @@ when do_populate_sysroot. use append to fix it.
|
||||
Upstream-Status: Inappropriate [oe-specific]
|
||||
|
||||
Signed-off-by: Changqing Li <changqing.li@windriver.com>
|
||||
|
||||
---
|
||||
setup.py | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/setup.py b/setup.py
|
||||
index 7bf13ed..6c0f29b 100644
|
||||
index a2c8127..22f9e23 100644
|
||||
--- a/setup.py
|
||||
+++ b/setup.py
|
||||
@@ -40,7 +40,7 @@ def add_dir_to_list(dirlist, dir):
|
||||
1) 'dir' is not already in 'dirlist'
|
||||
2) 'dir' actually exists, and is a directory."""
|
||||
if dir is not None and os.path.isdir(dir) and dir not in dirlist:
|
||||
- dirlist.insert(0, dir)
|
||||
+ dirlist.append(dir)
|
||||
|
||||
def macosx_sdk_root():
|
||||
"""
|
||||
@@ -47,7 +47,7 @@ def add_dir_to_list(dirlist, dir):
|
||||
else:
|
||||
dir_exists = os.path.isdir(dir)
|
||||
if dir_exists:
|
||||
- dirlist.insert(0, dir)
|
||||
+ dirlist.append(dir)
|
||||
|
||||
MACOS_SDK_ROOT = None
|
||||
|
||||
--
|
||||
2.18.0
|
||||
2.17.1
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ SRC_URI += "\
|
||||
file://nohostlibs.patch \
|
||||
file://multilib.patch \
|
||||
file://add-md5module-support.patch \
|
||||
file://builddir.patch \
|
||||
file://0001-python-Resolve-intermediate-staging-issues.patch \
|
||||
file://parallel-makeinst-create-bindir.patch \
|
||||
file://revert_use_of_sysconfigdata.patch \
|
||||
file://0001-python-native-fix-one-do_populate_sysroot-warning.patch \
|
||||
@@ -8,16 +8,10 @@ INC_PR = "r1"
|
||||
LIC_FILES_CHKSUM = "file://LICENSE;md5=e466242989bd33c1bd2b6a526a742498"
|
||||
|
||||
SRC_URI = "http://www.python.org/ftp/python/${PV}/Python-${PV}.tar.xz \
|
||||
file://bpo-35907-cve-2019-9948.patch \
|
||||
file://bpo-35907-cve-2019-9948-fix.patch \
|
||||
file://bpo-36216-cve-2019-9636.patch \
|
||||
file://bpo-36216-cve-2019-9636-fix.patch \
|
||||
file://CVE-2019-9740.patch \
|
||||
file://CVE-2018-20852.patch \
|
||||
"
|
||||
|
||||
SRC_URI[md5sum] = "30157d85a2c0479c09ea2cbe61f2aaf5"
|
||||
SRC_URI[sha256sum] = "f222ef602647eecb6853681156d32de4450a2c39f4de93bd5b20235f2e660ed7"
|
||||
SRC_URI[md5sum] = "b3b6d2c92f42a60667814358ab9f0cfd"
|
||||
SRC_URI[sha256sum] = "4d43f033cdbd0aa7b7023c81b0e986fd11e653b5248dac9144d508f11812ba41"
|
||||
|
||||
# python recipe is actually python 2.x
|
||||
# also, exclude pre-releases for both python 2.x and 3.x
|
||||
|
||||
@@ -1,5 +1,10 @@
|
||||
When cross compiling python, we used to need to install the Makefile, pyconfig.h
|
||||
and the python library to their final location before being able to compile the
|
||||
From 77bcb3238b2853d511714544e0f84a37be6c79bf Mon Sep 17 00:00:00 2001
|
||||
From: Richard Purdie <richard.purdie@linuxfoundation.org>
|
||||
Date: Wed, 14 Nov 2012 14:31:24 +0000
|
||||
Subject: [PATCH] python: Resolve intermediate staging issues
|
||||
|
||||
When cross compiling python, we used to need to install the Makefile, pyconfig.h
|
||||
and the python library to their final location before being able to compile the
|
||||
rest of python. This change allows us to point python at its own source when
|
||||
building, avoiding a variety of sysroot staging issues and simplifying the main
|
||||
python recipe.
|
||||
@@ -7,10 +12,29 @@ python recipe.
|
||||
Upstream-Status: Inappropriate
|
||||
RP 2012/11/13
|
||||
|
||||
Index: Python-2.7.9/Lib/sysconfig.py
|
||||
===================================================================
|
||||
--- Python-2.7.9.orig/Lib/sysconfig.py
|
||||
+++ Python-2.7.9/Lib/sysconfig.py
|
||||
---
|
||||
Lib/distutils/sysconfig.py | 3 +++
|
||||
Lib/sysconfig.py | 5 ++++-
|
||||
2 files changed, 7 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/Lib/distutils/sysconfig.py b/Lib/distutils/sysconfig.py
|
||||
index 2f4b8ca..15bceb5 100644
|
||||
--- a/Lib/distutils/sysconfig.py
|
||||
+++ b/Lib/distutils/sysconfig.py
|
||||
@@ -31,6 +31,9 @@ else:
|
||||
# sys.executable can be empty if argv[0] has been changed and Python is
|
||||
# unable to retrieve the real program name
|
||||
project_base = os.getcwd()
|
||||
+_PYTHONBUILDDIR = os.environ.get("PYTHONBUILDDIR", None)
|
||||
+if _PYTHONBUILDDIR:
|
||||
+ project_base = _PYTHONBUILDDIR
|
||||
if os.name == "nt" and "pcbuild" in project_base[-8:].lower():
|
||||
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir))
|
||||
# PC/VS7.1
|
||||
diff --git a/Lib/sysconfig.py b/Lib/sysconfig.py
|
||||
index 9c8350d..bddbe2e 100644
|
||||
--- a/Lib/sysconfig.py
|
||||
+++ b/Lib/sysconfig.py
|
||||
@@ -93,6 +93,7 @@ _PREFIX = os.path.normpath(sys.prefix)
|
||||
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
|
||||
_CONFIG_VARS = None
|
||||
@@ -30,17 +54,6 @@ Index: Python-2.7.9/Lib/sysconfig.py
|
||||
_PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
|
||||
else:
|
||||
# sys.executable can be empty if argv[0] has been changed and Python is
|
||||
Index: Python-2.7.9/Lib/distutils/sysconfig.py
|
||||
===================================================================
|
||||
--- Python-2.7.9.orig/Lib/distutils/sysconfig.py
|
||||
+++ Python-2.7.9/Lib/distutils/sysconfig.py
|
||||
@@ -26,6 +26,9 @@ EXEC_PREFIX = os.path.normpath(sys.exec_
|
||||
# live in project/PCBuild9. If we're dealing with an x64 Windows build,
|
||||
# it'll live in project/PCbuild/amd64.
|
||||
project_base = os.path.dirname(os.path.abspath(sys.executable))
|
||||
+_PYTHONBUILDDIR = os.environ.get("PYTHONBUILDDIR", None)
|
||||
+if _PYTHONBUILDDIR:
|
||||
+ project_base = _PYTHONBUILDDIR
|
||||
if os.name == "nt" and "pcbuild" in project_base[-8:].lower():
|
||||
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir))
|
||||
# PC/VS7.1
|
||||
--
|
||||
2.17.1
|
||||
|
||||
@@ -1,123 +0,0 @@
|
||||
From 979daae300916adb399ab5b51410b6ebd0888f13 Mon Sep 17 00:00:00 2001
|
||||
From: Xtreak <tir.karthi@gmail.com>
|
||||
Date: Sat, 15 Jun 2019 20:59:43 +0530
|
||||
Subject: [PATCH] [2.7] bpo-35121: prefix dot in domain for proper subdomain
|
||||
validation (GH-10258) (GH-13426)
|
||||
|
||||
This is a manual backport of ca7fe5063593958e5efdf90f068582837f07bd14 since 2.7 has `http.cookiejar` in `cookielib`
|
||||
|
||||
|
||||
https://bugs.python.org/issue35121
|
||||
CVE: CVE-2018-20852
|
||||
Upstream-Status: Backport [https://github.com/python/cpython/pull/13426]
|
||||
Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
|
||||
---
|
||||
Lib/cookielib.py | 13 ++++++--
|
||||
Lib/test/test_cookielib.py | 30 +++++++++++++++++++
|
||||
.../2019-05-20-00-35-12.bpo-35121.RRi-HU.rst | 4 +++
|
||||
3 files changed, 45 insertions(+), 2 deletions(-)
|
||||
create mode 100644 Misc/NEWS.d/next/Security/2019-05-20-00-35-12.bpo-35121.RRi-HU.rst
|
||||
|
||||
diff --git a/Lib/cookielib.py b/Lib/cookielib.py
|
||||
index 2dd7c48728e0..0b471a42f296 100644
|
||||
--- a/Lib/cookielib.py
|
||||
+++ b/Lib/cookielib.py
|
||||
@@ -1139,6 +1139,11 @@ def return_ok_domain(self, cookie, request):
|
||||
req_host, erhn = eff_request_host(request)
|
||||
domain = cookie.domain
|
||||
|
||||
+ if domain and not domain.startswith("."):
|
||||
+ dotdomain = "." + domain
|
||||
+ else:
|
||||
+ dotdomain = domain
|
||||
+
|
||||
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
|
||||
if (cookie.version == 0 and
|
||||
(self.strict_ns_domain & self.DomainStrictNonDomain) and
|
||||
@@ -1151,7 +1156,7 @@ def return_ok_domain(self, cookie, request):
|
||||
_debug(" effective request-host name %s does not domain-match "
|
||||
"RFC 2965 cookie domain %s", erhn, domain)
|
||||
return False
|
||||
- if cookie.version == 0 and not ("."+erhn).endswith(domain):
|
||||
+ if cookie.version == 0 and not ("."+erhn).endswith(dotdomain):
|
||||
_debug(" request-host %s does not match Netscape cookie domain "
|
||||
"%s", req_host, domain)
|
||||
return False
|
||||
@@ -1165,7 +1170,11 @@ def domain_return_ok(self, domain, request):
|
||||
req_host = "."+req_host
|
||||
if not erhn.startswith("."):
|
||||
erhn = "."+erhn
|
||||
- if not (req_host.endswith(domain) or erhn.endswith(domain)):
|
||||
+ if domain and not domain.startswith("."):
|
||||
+ dotdomain = "." + domain
|
||||
+ else:
|
||||
+ dotdomain = domain
|
||||
+ if not (req_host.endswith(dotdomain) or erhn.endswith(dotdomain)):
|
||||
#_debug(" request domain %s does not match cookie domain %s",
|
||||
# req_host, domain)
|
||||
return False
|
||||
diff --git a/Lib/test/test_cookielib.py b/Lib/test/test_cookielib.py
|
||||
index f2dd9727d137..7f7ff614d61d 100644
|
||||
--- a/Lib/test/test_cookielib.py
|
||||
+++ b/Lib/test/test_cookielib.py
|
||||
@@ -368,6 +368,7 @@ def test_domain_return_ok(self):
|
||||
("http://foo.bar.com/", ".foo.bar.com", True),
|
||||
("http://foo.bar.com/", "foo.bar.com", True),
|
||||
("http://foo.bar.com/", ".bar.com", True),
|
||||
+ ("http://foo.bar.com/", "bar.com", True),
|
||||
("http://foo.bar.com/", "com", True),
|
||||
("http://foo.com/", "rhubarb.foo.com", False),
|
||||
("http://foo.com/", ".foo.com", True),
|
||||
@@ -378,6 +379,8 @@ def test_domain_return_ok(self):
|
||||
("http://foo/", "foo", True),
|
||||
("http://foo/", "foo.local", True),
|
||||
("http://foo/", ".local", True),
|
||||
+ ("http://barfoo.com", ".foo.com", False),
|
||||
+ ("http://barfoo.com", "foo.com", False),
|
||||
]:
|
||||
request = urllib2.Request(url)
|
||||
r = pol.domain_return_ok(domain, request)
|
||||
@@ -938,6 +941,33 @@ def test_domain_block(self):
|
||||
c.add_cookie_header(req)
|
||||
self.assertFalse(req.has_header("Cookie"))
|
||||
|
||||
+ c.clear()
|
||||
+
|
||||
+ pol.set_blocked_domains([])
|
||||
+ req = Request("http://acme.com/")
|
||||
+ res = FakeResponse(headers, "http://acme.com/")
|
||||
+ cookies = c.make_cookies(res, req)
|
||||
+ c.extract_cookies(res, req)
|
||||
+ self.assertEqual(len(c), 1)
|
||||
+
|
||||
+ req = Request("http://acme.com/")
|
||||
+ c.add_cookie_header(req)
|
||||
+ self.assertTrue(req.has_header("Cookie"))
|
||||
+
|
||||
+ req = Request("http://badacme.com/")
|
||||
+ c.add_cookie_header(req)
|
||||
+ self.assertFalse(pol.return_ok(cookies[0], req))
|
||||
+ self.assertFalse(req.has_header("Cookie"))
|
||||
+
|
||||
+ p = pol.set_blocked_domains(["acme.com"])
|
||||
+ req = Request("http://acme.com/")
|
||||
+ c.add_cookie_header(req)
|
||||
+ self.assertFalse(req.has_header("Cookie"))
|
||||
+
|
||||
+ req = Request("http://badacme.com/")
|
||||
+ c.add_cookie_header(req)
|
||||
+ self.assertFalse(req.has_header("Cookie"))
|
||||
+
|
||||
def test_secure(self):
|
||||
from cookielib import CookieJar, DefaultCookiePolicy
|
||||
|
||||
diff --git a/Misc/NEWS.d/next/Security/2019-05-20-00-35-12.bpo-35121.RRi-HU.rst b/Misc/NEWS.d/next/Security/2019-05-20-00-35-12.bpo-35121.RRi-HU.rst
|
||||
new file mode 100644
|
||||
index 000000000000..77251806163b
|
||||
--- /dev/null
|
||||
+++ b/Misc/NEWS.d/next/Security/2019-05-20-00-35-12.bpo-35121.RRi-HU.rst
|
||||
@@ -0,0 +1,4 @@
|
||||
+Don't send cookies of domain A without Domain attribute to domain B when
|
||||
+domain A is a suffix match of domain B while using a cookiejar with
|
||||
+:class:`cookielib.DefaultCookiePolicy` policy. Patch by Karthikeyan
|
||||
+Singaravelan.
|
||||
@@ -1,216 +0,0 @@
|
||||
From bb8071a4cae5ab3fe321481dd3d73662ffb26052 Mon Sep 17 00:00:00 2001
|
||||
From: Victor Stinner <victor.stinner@gmail.com>
|
||||
Date: Tue, 21 May 2019 15:12:33 +0200
|
||||
Subject: [PATCH] bpo-30458: Disallow control chars in http URLs (GH-12755)
|
||||
(GH-13154) (GH-13315)
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=UTF-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
Disallow control chars in http URLs in urllib2.urlopen. This
|
||||
addresses a potential security problem for applications that do not
|
||||
sanity check their URLs where http request headers could be injected.
|
||||
|
||||
Disable https related urllib tests on a build without ssl (GH-13032)
|
||||
These tests require an SSL enabled build. Skip these tests when
|
||||
python is built without SSL to fix test failures.
|
||||
|
||||
Use httplib.InvalidURL instead of ValueError as the new error case's
|
||||
exception. (GH-13044)
|
||||
|
||||
Backport Co-Authored-By: Miro Hrončok <miro@hroncok.cz>
|
||||
|
||||
(cherry picked from commit 7e200e0763f5b71c199aaf98bd5588f291585619)
|
||||
|
||||
Notes on backport to Python 2.7:
|
||||
|
||||
* test_urllib tests urllib.urlopen() which quotes the URL and so is
|
||||
not vulerable to HTTP Header Injection.
|
||||
* Add tests to test_urllib2 on urllib2.urlopen().
|
||||
* Reject non-ASCII characters: range 0x80-0xff.
|
||||
|
||||
Upstream-Status: Backport
|
||||
CVE: CVE-2019-9740
|
||||
CVE: CVE-2019-9947
|
||||
Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
|
||||
---
|
||||
Lib/httplib.py | 16 ++++++
|
||||
Lib/test/test_urllib.py | 25 +++++++++
|
||||
Lib/test/test_urllib2.py | 51 ++++++++++++++++++-
|
||||
Lib/test/test_xmlrpc.py | 8 ++-
|
||||
.../2019-04-10-08-53-30.bpo-30458.51E-DA.rst | 1 +
|
||||
5 files changed, 99 insertions(+), 2 deletions(-)
|
||||
create mode 100644 Misc/NEWS.d/next/Security/2019-04-10-08-53-30.bpo-30458.51E-DA.rst
|
||||
|
||||
diff --git a/Lib/httplib.py b/Lib/httplib.py
|
||||
index 60a8fb4e355f..1b41c346e090 100644
|
||||
--- a/Lib/httplib.py
|
||||
+++ b/Lib/httplib.py
|
||||
@@ -247,6 +247,16 @@
|
||||
_is_legal_header_name = re.compile(r'\A[^:\s][^:\r\n]*\Z').match
|
||||
_is_illegal_header_value = re.compile(r'\n(?![ \t])|\r(?![ \t\n])').search
|
||||
|
||||
+# These characters are not allowed within HTTP URL paths.
|
||||
+# See https://tools.ietf.org/html/rfc3986#section-3.3 and the
|
||||
+# https://tools.ietf.org/html/rfc3986#appendix-A pchar definition.
|
||||
+# Prevents CVE-2019-9740. Includes control characters such as \r\n.
|
||||
+# Restrict non-ASCII characters above \x7f (0x80-0xff).
|
||||
+_contains_disallowed_url_pchar_re = re.compile('[\x00-\x20\x7f-\xff]')
|
||||
+# Arguably only these _should_ allowed:
|
||||
+# _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$")
|
||||
+# We are more lenient for assumed real world compatibility purposes.
|
||||
+
|
||||
# We always set the Content-Length header for these methods because some
|
||||
# servers will otherwise respond with a 411
|
||||
_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
|
||||
@@ -927,6 +937,12 @@ def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
|
||||
self._method = method
|
||||
if not url:
|
||||
url = '/'
|
||||
+ # Prevent CVE-2019-9740.
|
||||
+ match = _contains_disallowed_url_pchar_re.search(url)
|
||||
+ if match:
|
||||
+ raise InvalidURL("URL can't contain control characters. %r "
|
||||
+ "(found at least %r)"
|
||||
+ % (url, match.group()))
|
||||
hdr = '%s %s %s' % (method, url, self._http_vsn_str)
|
||||
|
||||
self._output(hdr)
|
||||
diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
|
||||
index 1ce9201c0693..d7778d4194f3 100644
|
||||
--- a/Lib/test/test_urllib.py
|
||||
+++ b/Lib/test/test_urllib.py
|
||||
@@ -257,6 +257,31 @@ def test_url_fragment(self):
|
||||
finally:
|
||||
self.unfakehttp()
|
||||
|
||||
+ def test_url_with_control_char_rejected(self):
|
||||
+ for char_no in range(0, 0x21) + range(0x7f, 0x100):
|
||||
+ char = chr(char_no)
|
||||
+ schemeless_url = "//localhost:7777/test%s/" % char
|
||||
+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
|
||||
+ try:
|
||||
+ # urllib quotes the URL so there is no injection.
|
||||
+ resp = urllib.urlopen("http:" + schemeless_url)
|
||||
+ self.assertNotIn(char, resp.geturl())
|
||||
+ finally:
|
||||
+ self.unfakehttp()
|
||||
+
|
||||
+ def test_url_with_newline_header_injection_rejected(self):
|
||||
+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
|
||||
+ host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
|
||||
+ schemeless_url = "//" + host + ":8080/test/?test=a"
|
||||
+ try:
|
||||
+ # urllib quotes the URL so there is no injection.
|
||||
+ resp = urllib.urlopen("http:" + schemeless_url)
|
||||
+ self.assertNotIn(' ', resp.geturl())
|
||||
+ self.assertNotIn('\r', resp.geturl())
|
||||
+ self.assertNotIn('\n', resp.geturl())
|
||||
+ finally:
|
||||
+ self.unfakehttp()
|
||||
+
|
||||
def test_read_bogus(self):
|
||||
# urlopen() should raise IOError for many error codes.
|
||||
self.fakehttp('''HTTP/1.1 401 Authentication Required
|
||||
diff --git a/Lib/test/test_urllib2.py b/Lib/test/test_urllib2.py
index 6d24d5ddf83c..9531818e16b2 100644
--- a/Lib/test/test_urllib2.py
+++ b/Lib/test/test_urllib2.py
@@ -15,6 +15,9 @@
 except ImportError:
     ssl = None
 
+from test.test_urllib import FakeHTTPMixin
+
+
 # XXX
 # Request
 # CacheFTPHandler (hard to write)
@@ -1262,7 +1265,7 @@ def _test_basic_auth(self, opener, auth_handler, auth_header,
         self.assertEqual(len(http_handler.requests), 1)
         self.assertFalse(http_handler.requests[0].has_header(auth_header))
 
-class MiscTests(unittest.TestCase):
+class MiscTests(unittest.TestCase, FakeHTTPMixin):
 
     def test_build_opener(self):
         class MyHTTPHandler(urllib2.HTTPHandler): pass
@@ -1317,6 +1320,52 @@ def test_unsupported_algorithm(self):
             "Unsupported digest authentication algorithm 'invalid'"
         )
 
+    @unittest.skipUnless(ssl, "ssl module required")
+    def test_url_with_control_char_rejected(self):
+        for char_no in range(0, 0x21) + range(0x7f, 0x100):
+            char = chr(char_no)
+            schemeless_url = "//localhost:7777/test%s/" % char
+            self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
+            try:
+                # We explicitly test urllib.request.urlopen() instead of the top
+                # level 'def urlopen()' function defined in this... (quite ugly)
+                # test suite. They use different url opening codepaths. Plain
+                # urlopen uses FancyURLOpener which goes via a codepath that
+                # calls urllib.parse.quote() on the URL which makes all of the
+                # above attempts at injection within the url _path_ safe.
+                escaped_char_repr = repr(char).replace('\\', r'\\')
+                InvalidURL = httplib.InvalidURL
+                with self.assertRaisesRegexp(
+                    InvalidURL, "contain control.*" + escaped_char_repr):
+                    urllib2.urlopen("http:" + schemeless_url)
+                with self.assertRaisesRegexp(
+                    InvalidURL, "contain control.*" + escaped_char_repr):
+                    urllib2.urlopen("https:" + schemeless_url)
+            finally:
+                self.unfakehttp()
+
+    @unittest.skipUnless(ssl, "ssl module required")
+    def test_url_with_newline_header_injection_rejected(self):
+        self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
+        host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
+        schemeless_url = "//" + host + ":8080/test/?test=a"
+        try:
+            # We explicitly test urllib2.urlopen() instead of the top
+            # level 'def urlopen()' function defined in this... (quite ugly)
+            # test suite. They use different url opening codepaths. Plain
+            # urlopen uses FancyURLOpener which goes via a codepath that
+            # calls urllib.parse.quote() on the URL which makes all of the
+            # above attempts at injection within the url _path_ safe.
+            InvalidURL = httplib.InvalidURL
+            with self.assertRaisesRegexp(
+                InvalidURL, r"contain control.*\\r.*(found at least . .)"):
+                urllib2.urlopen("http:" + schemeless_url)
+            with self.assertRaisesRegexp(InvalidURL, r"contain control.*\\n"):
+                urllib2.urlopen("https:" + schemeless_url)
+        finally:
+            self.unfakehttp()
+
+
 
 class RequestTests(unittest.TestCase):
 
diff --git a/Lib/test/test_xmlrpc.py b/Lib/test/test_xmlrpc.py
index 36b3be67fd6b..90ccb30716ff 100644
--- a/Lib/test/test_xmlrpc.py
+++ b/Lib/test/test_xmlrpc.py
@@ -659,7 +659,13 @@ def test_dotted_attribute(self):
     def test_partial_post(self):
         # Check that a partial POST doesn't make the server loop: issue #14001.
         conn = httplib.HTTPConnection(ADDR, PORT)
-        conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye')
+        conn.send('POST /RPC2 HTTP/1.0\r\n'
+                  'Content-Length: 100\r\n\r\n'
+                  'bye HTTP/1.1\r\n'
+                  'Host: %s:%s\r\n'
+                  'Accept-Encoding: identity\r\n'
+                  'Content-Length: 0\r\n\r\n'
+                  % (ADDR, PORT))
         conn.close()
 
 class SimpleServerEncodingTestCase(BaseServerTestCase):
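
The test_xmlrpc.py hunk above switches from conn.request() to a raw conn.send() because, once the control-character check is in place, httplib refuses the old request line before anything reaches the test server. A minimal sketch of that behaviour, assuming a Python 2.7 interpreter with the CVE-2019-9740 fix applied (the host and port below are placeholders; no connection is attempted because the URL is rejected first):

    import httplib

    conn = httplib.HTTPConnection("localhost", 8000)  # placeholder address
    try:
        # The embedded whitespace and CR/LF make this selector invalid now.
        conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye')
    except httplib.InvalidURL as exc:
        print("rejected before sending: %s" % exc)
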
diff --git a/Misc/NEWS.d/next/Security/2019-04-10-08-53-30.bpo-30458.51E-DA.rst b/Misc/NEWS.d/next/Security/2019-04-10-08-53-30.bpo-30458.51E-DA.rst
new file mode 100644
index 000000000000..47cb899df1af
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2019-04-10-08-53-30.bpo-30458.51E-DA.rst
@@ -0,0 +1 @@
+Address CVE-2019-9740 by disallowing URL paths with embedded whitespace or control characters through into the underlying http client request. Such potentially malicious header injection URLs now cause an httplib.InvalidURL exception to be raised.
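
As the NEWS entry describes, the caller-visible effect of the fix is that a URL whose path still carries raw control characters is refused with httplib.InvalidURL before any request line is emitted. A minimal sketch on a patched Python 2.7 interpreter (the host below is a placeholder and is never contacted, since the exception is raised before a connection is made):

    import httplib
    import urllib2

    url = "http://localhost:7777/test\r\nX-injected: header/"
    try:
        urllib2.urlopen(url)
    except httplib.InvalidURL as exc:
        print("rejected: %s" % exc)
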
@@ -1,55 +0,0 @@
From 179a5f75f1121dab271fe8f90eb35145f9dcbbda Mon Sep 17 00:00:00 2001
From: Sihoon Lee <push0ebp@gmail.com>
Date: Fri, 17 May 2019 02:41:06 +0900
Subject: [PATCH] Update test_urllib.py and urllib.py\nchange assertEqual into
 assertRasies in DummyURLopener test, and simplify mitigation

Upstream-Status: Submitted https://github.com/python/cpython/pull/11842

CVE: CVE-2019-9948

Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
---
 Lib/test/test_urllib.py | 11 +++--------
 Lib/urllib.py           |  4 ++--
 2 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
index e5f210e62a18..1e23dfb0bb16 100644
--- a/Lib/test/test_urllib.py
+++ b/Lib/test/test_urllib.py
@@ -1027,14 +1027,9 @@ def test_local_file_open(self):
         class DummyURLopener(urllib.URLopener):
             def open_local_file(self, url):
                 return url
-        self.assertEqual(DummyURLopener().open(
-            'local-file://example'), '//example')
-        self.assertEqual(DummyURLopener().open(
-            'local_file://example'), '//example')
-        self.assertRaises(IOError, urllib.urlopen,
-                          'local-file://example')
-        self.assertRaises(IOError, urllib.urlopen,
-                          'local_file://example')
+        for url in ('local_file://example', 'local-file://example'):
+            self.assertRaises(IOError, DummyURLopener().open, url)
+            self.assertRaises(IOError, urllib.urlopen, url)
 
 # Just commented them out.
 # Can't really tell why keep failing in windows and sparc.
diff --git a/Lib/urllib.py b/Lib/urllib.py
index a24e9a5c68fb..39b834054e9e 100644
--- a/Lib/urllib.py
+++ b/Lib/urllib.py
@@ -203,10 +203,10 @@ def open(self, fullurl, data=None):
         name = 'open_' + urltype
         self.type = urltype
         name = name.replace('-', '_')
-
+
         # bpo-35907: # disallow the file reading with the type not allowed
         if not hasattr(self, name) or \
-           (self == _urlopener and name == 'open_local_file'):
+           getattr(self, name) == self.open_local_file:
             if proxy:
                 return self.open_unknown_proxy(proxy, fullurl, data)
             else:
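
With the CVE-2019-9948 mitigation above applied, schemes that would be dispatched straight to URLopener.open_local_file are treated as unknown, so urllib falls back to open_unknown() and raises IOError instead of reading a local file; the regular file:// scheme still resolves through open_file and keeps working. A small sketch of what the updated test asserts, assuming a patched Python 2.7 interpreter:

    import urllib

    for url in ('local_file://example', 'local-file://example'):
        try:
            urllib.urlopen(url)
        except IOError as exc:
            print("blocked %s: %s" % (url, exc))
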
@@ -1,55 +0,0 @@
From 8f99cc799e4393bf1112b9395b2342f81b3f45ef Mon Sep 17 00:00:00 2001
From: push0ebp <push0ebp@shl-MacBook-Pro.local>
Date: Thu, 14 Feb 2019 02:05:46 +0900
Subject: [PATCH] bpo-35907: Avoid file reading as disallowing the unnecessary
 URL scheme in urllib

Upstream-Status: Submitted https://github.com/python/cpython/pull/11842

CVE: CVE-2019-9948

Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
---
 Lib/test/test_urllib.py | 12 ++++++++++++
 Lib/urllib.py           |  5 ++++-
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
index 1ce9201c0693..e5f210e62a18 100644
--- a/Lib/test/test_urllib.py
+++ b/Lib/test/test_urllib.py
@@ -1023,6 +1023,18 @@ def open_spam(self, url):
             "spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
             "//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
 
+    def test_local_file_open(self):
+        class DummyURLopener(urllib.URLopener):
+            def open_local_file(self, url):
+                return url
+        self.assertEqual(DummyURLopener().open(
+            'local-file://example'), '//example')
+        self.assertEqual(DummyURLopener().open(
+            'local_file://example'), '//example')
+        self.assertRaises(IOError, urllib.urlopen,
+                          'local-file://example')
+        self.assertRaises(IOError, urllib.urlopen,
+                          'local_file://example')
 
 # Just commented them out.
 # Can't really tell why keep failing in windows and sparc.
diff --git a/Lib/urllib.py b/Lib/urllib.py
index d85504a5cb7e..a24e9a5c68fb 100644
--- a/Lib/urllib.py
+++ b/Lib/urllib.py
@@ -203,7 +203,10 @@ def open(self, fullurl, data=None):
         name = 'open_' + urltype
         self.type = urltype
         name = name.replace('-', '_')
-        if not hasattr(self, name):
+
+        # bpo-35907: # disallow the file reading with the type not allowed
+        if not hasattr(self, name) or \
+           (self == _urlopener and name == 'open_local_file'):
             if proxy:
                 return self.open_unknown_proxy(proxy, fullurl, data)
             else:
@@ -1,28 +0,0 @@
From 06b5ee585d6e76bdbb4002f642d864d860cbbd2b Mon Sep 17 00:00:00 2001
From: Steve Dower <steve.dower@python.org>
Date: Tue, 12 Mar 2019 08:23:33 -0700
Subject: [PATCH] bpo-36216: Only print test messages when verbose

CVE: CVE-2019-9636

Upstream-Status: Backport https://github.com/python/cpython/pull/12291/commits/06b5ee585d6e76bdbb4002f642d864d860cbbd2b

Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
---
 Lib/test/test_urlparse.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index 73b0228ea8e3..1830d0b28688 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -644,7 +644,8 @@ def test_urlsplit_normalization(self):
         for scheme in [u"http", u"https", u"ftp"]:
             for c in denorm_chars:
                 url = u"{}://netloc{}false.netloc/path".format(scheme, c)
-                print "Checking %r" % url
+                if test_support.verbose:
+                    print "Checking %r" % url
                 with self.assertRaises(ValueError):
                     urlparse.urlsplit(url)
 
@@ -1,111 +0,0 @@
From 3e3669c9c41a27e1466e2c28b3906e3dd0ce3e7e Mon Sep 17 00:00:00 2001
From: Steve Dower <steve.dower@python.org>
Date: Thu, 7 Mar 2019 08:25:22 -0800
Subject: [PATCH] bpo-36216: Add check for characters in netloc that normalize
 to separators (GH-12201)

CVE: CVE-2019-9636

Upstream-Status: Backport https://github.com/python/cpython/pull/12216/commits/3e3669c9c41a27e1466e2c28b3906e3dd0ce3e7e

Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
---
 Doc/library/urlparse.rst                      | 20 ++++++++++++++++
 Lib/test/test_urlparse.py                     | 24 +++++++++++++++++++
 Lib/urlparse.py                               | 17 +++++++++++++
 .../2019-03-06-09-38-40.bpo-36216.6q1m4a.rst  |  3 +++
 4 files changed, 64 insertions(+)
 create mode 100644 Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst

diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index 4e1ded73c266..73b0228ea8e3 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -1,4 +1,6 @@
 from test import test_support
+import sys
+import unicodedata
 import unittest
 import urlparse
 
@@ -624,6 +626,28 @@ def test_portseparator(self):
         self.assertEqual(urlparse.urlparse("http://www.python.org:80"),
                 ('http','www.python.org:80','','','',''))
 
+    def test_urlsplit_normalization(self):
+        # Certain characters should never occur in the netloc,
+        # including under normalization.
+        # Ensure that ALL of them are detected and cause an error
+        illegal_chars = u'/:#?@'
+        hex_chars = {'{:04X}'.format(ord(c)) for c in illegal_chars}
+        denorm_chars = [
+            c for c in map(unichr, range(128, sys.maxunicode))
+            if (hex_chars & set(unicodedata.decomposition(c).split()))
+            and c not in illegal_chars
+        ]
+        # Sanity check that we found at least one such character
+        self.assertIn(u'\u2100', denorm_chars)
+        self.assertIn(u'\uFF03', denorm_chars)
+
+        for scheme in [u"http", u"https", u"ftp"]:
+            for c in denorm_chars:
+                url = u"{}://netloc{}false.netloc/path".format(scheme, c)
+                print "Checking %r" % url
+                with self.assertRaises(ValueError):
+                    urlparse.urlsplit(url)
+
 def test_main():
     test_support.run_unittest(UrlParseTestCase)
 
diff --git a/Lib/urlparse.py b/Lib/urlparse.py
index f7c2b032b097..54eda08651ab 100644
--- a/Lib/urlparse.py
+++ b/Lib/urlparse.py
@@ -165,6 +165,21 @@ def _splitnetloc(url, start=0):
     delim = min(delim, wdelim)   # use earliest delim position
     return url[start:delim], url[delim:]   # return (domain, rest)
 
+def _checknetloc(netloc):
+    if not netloc or not isinstance(netloc, unicode):
+        return
+    # looking for characters like \u2100 that expand to 'a/c'
+    # IDNA uses NFKC equivalence, so normalize for this check
+    import unicodedata
+    netloc2 = unicodedata.normalize('NFKC', netloc)
+    if netloc == netloc2:
+        return
+    _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
+    for c in '/?#@:':
+        if c in netloc2:
+            raise ValueError("netloc '" + netloc2 + "' contains invalid " +
+                             "characters under NFKC normalization")
+
 def urlsplit(url, scheme='', allow_fragments=True):
     """Parse a URL into 5 components:
     <scheme>://<netloc>/<path>?<query>#<fragment>
@@ -193,6 +208,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
                 url, fragment = url.split('#', 1)
             if '?' in url:
                 url, query = url.split('?', 1)
+            _checknetloc(netloc)
             v = SplitResult(scheme, netloc, url, query, fragment)
             _parse_cache[key] = v
             return v
@@ -216,6 +232,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
         url, fragment = url.split('#', 1)
     if '?' in url:
         url, query = url.split('?', 1)
+    _checknetloc(netloc)
     v = SplitResult(scheme, netloc, url, query, fragment)
     _parse_cache[key] = v
     return v
diff --git a/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst b/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst
new file mode 100644
index 000000000000..1e1ad92c6feb
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst
@@ -0,0 +1,3 @@
+Changes urlsplit() to raise ValueError when the URL contains characters that
+decompose under IDNA encoding (NFKC-normalization) into characters that
+affect how the URL is parsed.
\ No newline at end of file
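
The _checknetloc() helper added above rejects URLs whose netloc only becomes dangerous after NFKC normalization (the equivalence IDNA applies), for example U+2100, which decomposes to 'a/c'. A small sketch of the resulting behaviour, mirroring the new test and assuming a Python 2.7 interpreter with this backport applied; byte-string URLs and netlocs that do not change under normalization are unaffected:

    import urlparse

    url = u"https://netloc\u2100false.netloc/path"  # \u2100 normalizes to 'a/c'
    try:
        urlparse.urlsplit(url)
    except ValueError as exc:
        print("rejected: %s" % exc)
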
@@ -70,7 +70,7 @@ index 6e81b2f..671a20e 100644
 
 Programs/python.o: $(srcdir)/Programs/python.c
@@ -856,7 +857,7 @@ regen-opcode:
 Python/compile.o Python/symtable.o Python/ast_unparse.o Python/ast.o: $(srcdir)/Include/graminit.h $(srcdir)/Include/Python-ast.h
 Python/compile.o Python/symtable.o Python/ast_unparse.o Python/ast.o Python/future.o Parser/parsetok.o: $(srcdir)/Include/graminit.h $(srcdir)/Include/Python-ast.h
 
 Python/getplatform.o: $(srcdir)/Python/getplatform.c
-	$(CC) -c $(PY_CORE_CFLAGS) -DPLATFORM='"$(MACHDEP)"' -o $@ $(srcdir)/Python/getplatform.c
 
@@ -63,7 +63,7 @@ index a7de901..4a3681f 100644
+	case $cc_basename in
 	*clang*)
 	  AC_SUBST(LLVM_AR)
 	  AC_PATH_TARGET_TOOL(LLVM_AR, llvm-ar, '', ${llvm_path})
 	  AC_PATH_TOOL(LLVM_AR, llvm-ar, '', ${llvm_path})
@@ -1426,7 +1427,7 @@ then
 fi
 fi
 
@@ -498,7 +498,8 @@
             "${libdir}/pkgconfig"
         ],
         "rdepends": [
-            "core"
+            "core",
+            "distutils"
         ],
         "summary": "Python development package"
     },
Some files were not shown because too many files have changed in this diff.