mirror of
https://git.yoctoproject.org/poky
synced 2026-02-20 08:29:42 +01:00
Compare commits
153 Commits
langdale-4
...
honister-3
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b53230c08d | ||
|
|
3837e8bb9f | ||
|
|
e3d86eb738 | ||
|
|
b4c64791a0 | ||
|
|
5bcb2b1732 | ||
|
|
07c12415c6 | ||
|
|
10a700c094 | ||
|
|
fb09b37f2a | ||
|
|
8178470ec6 | ||
|
|
c4f08fc43d | ||
|
|
243758b8f4 | ||
|
|
b6b0af0889 | ||
|
|
9594c5893b | ||
|
|
ca1d3dee3c | ||
|
|
719728fe47 | ||
|
|
2bfad06778 | ||
|
|
c04d6ccd4a | ||
|
|
e8d40a2dab | ||
|
|
7884e05de9 | ||
|
|
7752c9e87b | ||
|
|
40cefcce5d | ||
|
|
3e1a0f0d09 | ||
|
|
fbb7df5adb | ||
|
|
8dbdb9e1e2 | ||
|
|
e8bdd45fe8 | ||
|
|
f8ad42fc49 | ||
|
|
4b28378957 | ||
|
|
c43f22e42e | ||
|
|
79964afc90 | ||
|
|
6108762d02 | ||
|
|
fe1583b445 | ||
|
|
e180b85efb | ||
|
|
da5d1b540e | ||
|
|
7acf465dd5 | ||
|
|
30fb5da7ee | ||
|
|
543567bb8a | ||
|
|
9c87e0204b | ||
|
|
7836a7c4d4 | ||
|
|
5258dd0cd0 | ||
|
|
aaa0c06d85 | ||
|
|
b989276b40 | ||
|
|
59eed5965c | ||
|
|
443816b4dd | ||
|
|
025b64f23e | ||
|
|
f57e4968b3 | ||
|
|
27d151c032 | ||
|
|
dfca86c200 | ||
|
|
5045ce3c04 | ||
|
|
ed66d58ed6 | ||
|
|
ce68ec010f | ||
|
|
be28d98b3a | ||
|
|
d6768d9d52 | ||
|
|
4aa48aeab8 | ||
|
|
77e8fa92c2 | ||
|
|
a9ed4b6357 | ||
|
|
520493bef5 | ||
|
|
ef62cff62e | ||
|
|
b9a58411fc | ||
|
|
b0db46f667 | ||
|
|
fc46b14304 | ||
|
|
6e02c340bf | ||
|
|
903333da5b | ||
|
|
679e630732 | ||
|
|
a48e0bb5ec | ||
|
|
a498f39e5b | ||
|
|
164c944983 | ||
|
|
98d101475a | ||
|
|
9b66d8fb60 | ||
|
|
72f8934284 | ||
|
|
0979299bb9 | ||
|
|
a10db5945e | ||
|
|
b922f5cfa1 | ||
|
|
ceb1f52dff | ||
|
|
4c9414b35d | ||
|
|
c7b2db1fa6 | ||
|
|
17be7973d6 | ||
|
|
d70d12d140 | ||
|
|
2f3ee2aff2 | ||
|
|
1eb9a963ff | ||
|
|
ae397dcedc | ||
|
|
b254cbfcff | ||
|
|
88e6b99201 | ||
|
|
25e649f67e | ||
|
|
3fe7557392 | ||
|
|
4d2d21ef4d | ||
|
|
e11d7bf851 | ||
|
|
ef4175b82f | ||
|
|
49e0ce0e29 | ||
|
|
5254f0af57 | ||
|
|
d35822d09a | ||
|
|
47e8cde01f | ||
|
|
d74def94da | ||
|
|
907ca04187 | ||
|
|
e0218edf84 | ||
|
|
fb2300c144 | ||
|
|
1a344c3242 | ||
|
|
57e91d6136 | ||
|
|
13a4588253 | ||
|
|
43cfa130d9 | ||
|
|
7ff34cf041 | ||
|
|
41320cf8c3 | ||
|
|
e6ef65341e | ||
|
|
91527514f3 | ||
|
|
4f97823c12 | ||
|
|
2610a43530 | ||
|
|
1eb0c9e32e | ||
|
|
1fb6bc3cbd | ||
|
|
cfc03f903a | ||
|
|
792d4036e2 | ||
|
|
6af7e42820 | ||
|
|
095aa7adfd | ||
|
|
b332b342b4 | ||
|
|
8e140d5d12 | ||
|
|
d484fcd8df | ||
|
|
6c3a8ae1f9 | ||
|
|
be011b75c0 | ||
|
|
984e5e04aa | ||
|
|
5fa3c638ed | ||
|
|
b26b40bbf7 | ||
|
|
a02cfe25a5 | ||
|
|
3bf9467bcb | ||
|
|
48efa6a229 | ||
|
|
c53f1f413a | ||
|
|
8f140e272a | ||
|
|
fa13f88d19 | ||
|
|
11ff9d080a | ||
|
|
57703b457c | ||
|
|
eb7bcac465 | ||
|
|
86d6c4cbfe | ||
|
|
9b197bef11 | ||
|
|
5d8c7e6ab6 | ||
|
|
73ebd1f04d | ||
|
|
dc851c427b | ||
|
|
9d87c1c9d4 | ||
|
|
291da72ce1 | ||
|
|
0b500dba7a | ||
|
|
3c5a5bbc19 | ||
|
|
a9fdfc41ba | ||
|
|
8eb5dd8757 | ||
|
|
80f109a7da | ||
|
|
1b1369d52c | ||
|
|
14d4106f56 | ||
|
|
3507345f40 | ||
|
|
42322ad2ce | ||
|
|
34d69cc1b3 | ||
|
|
9bdb2db854 | ||
|
|
d03d151093 | ||
|
|
2abbaa3319 | ||
|
|
f0428cea44 | ||
|
|
cc8df6ce5a | ||
|
|
5c6455f1af | ||
|
|
c3223e3101 | ||
|
|
7f4efed145 |
@@ -1,2 +1,2 @@
|
||||
# Template settings
|
||||
TEMPLATECONF=${TEMPLATECONF:-meta-poky/conf/templates/default}
|
||||
TEMPLATECONF=${TEMPLATECONF:-meta-poky/conf}
|
||||
|
||||
@@ -53,6 +53,7 @@ Maintainers needed
|
||||
* wic
|
||||
* Patchwork
|
||||
* Patchtest
|
||||
* Prelink-cross
|
||||
* Matchbox
|
||||
* Sato
|
||||
* Autobuilder
|
||||
|
||||
@@ -6,24 +6,24 @@ of OpenEmbedded. It is distro-less (can build a functional image with
|
||||
DISTRO = "nodistro") and contains only emulated machine support.
|
||||
|
||||
For information about OpenEmbedded, see the OpenEmbedded website:
|
||||
https://www.openembedded.org/
|
||||
http://www.openembedded.org/
|
||||
|
||||
The Yocto Project has extensive documentation about OE including a reference manual
|
||||
which can be found at:
|
||||
https://docs.yoctoproject.org/
|
||||
http://yoctoproject.org/documentation
|
||||
|
||||
|
||||
Contributing
|
||||
------------
|
||||
|
||||
Please refer to
|
||||
https://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
|
||||
http://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
|
||||
for guidelines on how to submit patches.
|
||||
|
||||
Mailing list:
|
||||
|
||||
https://lists.openembedded.org/g/openembedded-core
|
||||
http://lists.openembedded.org/mailman/listinfo/openembedded-core
|
||||
|
||||
Source code:
|
||||
|
||||
https://git.openembedded.org/openembedded-core/
|
||||
http://git.openembedded.org/openembedded-core/
|
||||
|
||||
@@ -28,7 +28,7 @@ from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException
|
||||
if sys.getfilesystemencoding() != "utf-8":
|
||||
sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
|
||||
|
||||
__version__ = "2.2.0"
|
||||
__version__ = "1.52.0"
|
||||
|
||||
if __name__ == "__main__":
|
||||
if __version__ != bb.__version__:
|
||||
|
||||
@@ -11,7 +11,6 @@
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
warnings.simplefilter("default")
|
||||
import argparse
|
||||
import logging
|
||||
@@ -28,7 +27,6 @@ logger = bb.msg.logger_create(myname)
|
||||
|
||||
is_dump = myname == 'bitbake-dumpsig'
|
||||
|
||||
|
||||
def find_siginfo(tinfoil, pn, taskname, sigs=None):
|
||||
result = None
|
||||
tinfoil.set_event_mask(['bb.event.FindSigInfoResult',
|
||||
@@ -54,7 +52,6 @@ def find_siginfo(tinfoil, pn, taskname, sigs=None):
|
||||
sys.exit(2)
|
||||
return result
|
||||
|
||||
|
||||
def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
|
||||
""" Find the most recent signature files for the specified PN/task """
|
||||
|
||||
@@ -63,13 +60,13 @@ def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
|
||||
|
||||
if sig1 and sig2:
|
||||
sigfiles = find_siginfo(bbhandler, pn, taskname, [sig1, sig2])
|
||||
if not sigfiles:
|
||||
if len(sigfiles) == 0:
|
||||
logger.error('No sigdata files found matching %s %s matching either %s or %s' % (pn, taskname, sig1, sig2))
|
||||
sys.exit(1)
|
||||
elif sig1 not in sigfiles:
|
||||
elif not sig1 in sigfiles:
|
||||
logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig1))
|
||||
sys.exit(1)
|
||||
elif sig2 not in sigfiles:
|
||||
elif not sig2 in sigfiles:
|
||||
logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig2))
|
||||
sys.exit(1)
|
||||
latestfiles = [sigfiles[sig1], sigfiles[sig2]]
|
||||
@@ -89,11 +86,11 @@ def recursecb(key, hash1, hash2):
|
||||
hashfiles = find_siginfo(tinfoil, key, None, hashes)
|
||||
|
||||
recout = []
|
||||
if not hashfiles:
|
||||
if len(hashfiles) == 0:
|
||||
recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
|
||||
elif hash1 not in hashfiles:
|
||||
elif not hash1 in hashfiles:
|
||||
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash1))
|
||||
elif hash2 not in hashfiles:
|
||||
elif not hash2 in hashfiles:
|
||||
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash2))
|
||||
else:
|
||||
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, color=color)
|
||||
@@ -113,36 +110,36 @@ parser.add_argument('-D', '--debug',
|
||||
|
||||
if is_dump:
|
||||
parser.add_argument("-t", "--task",
|
||||
help="find the signature data file for the last run of the specified task",
|
||||
action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
|
||||
help="find the signature data file for the last run of the specified task",
|
||||
action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
|
||||
|
||||
parser.add_argument("sigdatafile1",
|
||||
help="Signature file to dump. Not used when using -t/--task.",
|
||||
action="store", nargs='?', metavar="sigdatafile")
|
||||
help="Signature file to dump. Not used when using -t/--task.",
|
||||
action="store", nargs='?', metavar="sigdatafile")
|
||||
else:
|
||||
parser.add_argument('-c', '--color',
|
||||
help='Colorize the output (where %(metavar)s is %(choices)s)',
|
||||
choices=['auto', 'always', 'never'], default='auto', metavar='color')
|
||||
help='Colorize the output (where %(metavar)s is %(choices)s)',
|
||||
choices=['auto', 'always', 'never'], default='auto', metavar='color')
|
||||
|
||||
parser.add_argument('-d', '--dump',
|
||||
help='Dump the last signature data instead of comparing (equivalent to using bitbake-dumpsig)',
|
||||
action='store_true')
|
||||
help='Dump the last signature data instead of comparing (equivalent to using bitbake-dumpsig)',
|
||||
action='store_true')
|
||||
|
||||
parser.add_argument("-t", "--task",
|
||||
help="find the signature data files for the last two runs of the specified task and compare them",
|
||||
action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
|
||||
help="find the signature data files for the last two runs of the specified task and compare them",
|
||||
action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
|
||||
|
||||
parser.add_argument("-s", "--signature",
|
||||
help="With -t/--task, specify the signatures to look for instead of taking the last two",
|
||||
action="store", dest="sigargs", nargs=2, metavar=('fromsig', 'tosig'))
|
||||
help="With -t/--task, specify the signatures to look for instead of taking the last two",
|
||||
action="store", dest="sigargs", nargs=2, metavar=('fromsig', 'tosig'))
|
||||
|
||||
parser.add_argument("sigdatafile1",
|
||||
help="First signature file to compare (or signature file to dump, if second not specified). Not used when using -t/--task.",
|
||||
action="store", nargs='?')
|
||||
help="First signature file to compare (or signature file to dump, if second not specified). Not used when using -t/--task.",
|
||||
action="store", nargs='?')
|
||||
|
||||
parser.add_argument("sigdatafile2",
|
||||
help="Second signature file to compare",
|
||||
action="store", nargs='?')
|
||||
help="Second signature file to compare",
|
||||
action="store", nargs='?')
|
||||
|
||||
options = parser.parse_args()
|
||||
if is_dump:
|
||||
@@ -160,8 +157,7 @@ if options.taskargs:
|
||||
with bb.tinfoil.Tinfoil() as tinfoil:
|
||||
tinfoil.prepare(config_only=True)
|
||||
if not options.dump and options.sigargs:
|
||||
files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1], options.sigargs[0],
|
||||
options.sigargs[1])
|
||||
files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1], options.sigargs[0], options.sigargs[1])
|
||||
else:
|
||||
files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1])
|
||||
|
||||
@@ -170,8 +166,7 @@ if options.taskargs:
|
||||
output = bb.siggen.dump_sigfile(files[-1])
|
||||
else:
|
||||
if len(files) < 2:
|
||||
logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (
|
||||
options.taskargs[0], options.taskargs[1]))
|
||||
logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (options.taskargs[0], options.taskargs[1]))
|
||||
sys.exit(1)
|
||||
|
||||
# Recurse into signature comparison
|
||||
|
||||
@@ -68,11 +68,11 @@ def main():
|
||||
|
||||
registered = False
|
||||
for plugin in plugins:
|
||||
if hasattr(plugin, 'tinfoil_init'):
|
||||
plugin.tinfoil_init(tinfoil)
|
||||
if hasattr(plugin, 'register_commands'):
|
||||
registered = True
|
||||
plugin.register_commands(subparsers)
|
||||
if hasattr(plugin, 'tinfoil_init'):
|
||||
plugin.tinfoil_init(tinfoil)
|
||||
|
||||
if not registered:
|
||||
logger.error("No commands registered - missing plugins?")
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -154,10 +152,6 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskha
|
||||
fakeenv = {}
|
||||
umask = None
|
||||
|
||||
uid = os.getuid()
|
||||
gid = os.getgid()
|
||||
|
||||
|
||||
taskdep = workerdata["taskdeps"][fn]
|
||||
if 'umask' in taskdep and taskname in taskdep['umask']:
|
||||
umask = taskdep['umask'][taskname]
|
||||
@@ -243,7 +237,6 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskha
|
||||
the_data = databuilder.mcdata[mc]
|
||||
the_data.setVar("BB_WORKERCONTEXT", "1")
|
||||
the_data.setVar("BB_TASKDEPDATA", taskdepdata)
|
||||
the_data.setVar('BB_CURRENTTASK', taskname.replace("do_", ""))
|
||||
if cfg.limited_deps:
|
||||
the_data.setVar("BB_LIMITEDDEPS", "1")
|
||||
the_data.setVar("BUILDNAME", workerdata["buildname"])
|
||||
@@ -263,13 +256,6 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskha
|
||||
|
||||
bb.utils.set_process_name("%s:%s" % (the_data.getVar("PN"), taskname.replace("do_", "")))
|
||||
|
||||
if not the_data.getVarFlag(taskname, 'network', False):
|
||||
if bb.utils.is_local_uid(uid):
|
||||
logger.debug("Attempting to disable network for %s" % taskname)
|
||||
bb.utils.disable_network(uid, gid)
|
||||
else:
|
||||
logger.debug("Skipping disable network for %s since %s is not a local uid." % (taskname, uid))
|
||||
|
||||
# exported_vars() returns a generator which *cannot* be passed to os.environ.update()
|
||||
# successfully. We also need to unset anything from the environment which shouldn't be there
|
||||
exports = bb.data.exported_vars(the_data)
|
||||
@@ -442,7 +428,7 @@ class BitbakeWorker(object):
|
||||
def handle_cookercfg(self, data):
|
||||
self.cookercfg = pickle.loads(data)
|
||||
self.databuilder = bb.cookerdata.CookerDataBuilder(self.cookercfg, worker=True)
|
||||
self.databuilder.parseBaseConfiguration(worker=True)
|
||||
self.databuilder.parseBaseConfiguration()
|
||||
self.data = self.databuilder.data
|
||||
|
||||
def handle_extraconfigdata(self, data):
|
||||
@@ -457,7 +443,6 @@ class BitbakeWorker(object):
|
||||
for mc in self.databuilder.mcdata:
|
||||
self.databuilder.mcdata[mc].setVar("PRSERV_HOST", self.workerdata["prhost"])
|
||||
self.databuilder.mcdata[mc].setVar("BB_HASHSERVE", self.workerdata["hashservaddr"])
|
||||
self.databuilder.mcdata[mc].setVar("__bbclasstype", "recipe")
|
||||
|
||||
def handle_newtaskhashes(self, data):
|
||||
self.workerdata["newhashes"] = pickle.loads(data)
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ databaseCheck()
|
||||
$MANAGE migrate --noinput || retval=1
|
||||
|
||||
if [ $retval -eq 1 ]; then
|
||||
echo "Failed migrations, halting system start" 1>&2
|
||||
echo "Failed migrations, aborting system start" 1>&2
|
||||
return $retval
|
||||
fi
|
||||
# Make sure that checksettings can pick up any value for TEMPLATECONF
|
||||
@@ -41,7 +41,7 @@ databaseCheck()
|
||||
$MANAGE checksettings --traceback || retval=1
|
||||
|
||||
if [ $retval -eq 1 ]; then
|
||||
printf "\nError while checking settings; exiting\n"
|
||||
printf "\nError while checking settings; aborting\n"
|
||||
return $retval
|
||||
fi
|
||||
|
||||
@@ -248,7 +248,7 @@ fi
|
||||
# 3) the sqlite db if that is being used.
|
||||
# 4) pid's we need to clean up on exit/shutdown
|
||||
export TOASTER_DIR=$TOASTERDIR
|
||||
export BB_ENV_PASSTHROUGH_ADDITIONS="$BB_ENV_PASSTHROUGH_ADDITIONS TOASTER_DIR"
|
||||
export BB_ENV_EXTRAWHITE="$BB_ENV_EXTRAWHITE TOASTER_DIR"
|
||||
|
||||
# Determine the action. If specified by arguments, fine, if not, toggle it
|
||||
if [ "$CMD" = "start" ] ; then
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
# Copyright (c) 2021 Joshua Watt <JPEWhacker@gmail.com>
|
||||
#
|
||||
#
|
||||
# Dockerfile to build a bitbake hash equivalence server container
|
||||
#
|
||||
# From the root of the bitbake repository, run:
|
||||
@@ -15,9 +15,5 @@ RUN apk add --no-cache python3
|
||||
|
||||
COPY bin/bitbake-hashserv /opt/bbhashserv/bin/
|
||||
COPY lib/hashserv /opt/bbhashserv/lib/hashserv/
|
||||
COPY lib/bb /opt/bbhashserv/lib/bb/
|
||||
COPY lib/codegen.py /opt/bbhashserv/lib/codegen.py
|
||||
COPY lib/ply /opt/bbhashserv/lib/ply/
|
||||
COPY lib/bs4 /opt/bbhashserv/lib/bs4/
|
||||
|
||||
ENTRYPOINT ["/opt/bbhashserv/bin/bitbake-hashserv"]
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
# Copyright (c) 2022 Daniel Gomez <daniel@qtec.com>
|
||||
#
|
||||
# Dockerfile to build a bitbake PR service container
|
||||
#
|
||||
# From the root of the bitbake repository, run:
|
||||
#
|
||||
# docker build -f contrib/prserv/Dockerfile . -t prserv
|
||||
#
|
||||
# Running examples:
|
||||
#
|
||||
# 1. PR Service in RW mode, port 18585:
|
||||
#
|
||||
# docker run --detach --tty \
|
||||
# --env PORT=18585 \
|
||||
# --publish 18585:18585 \
|
||||
# --volume $PWD:/var/lib/bbprserv \
|
||||
# prserv
|
||||
#
|
||||
# 2. PR Service in RO mode, default port (8585) and custom LOGFILE:
|
||||
#
|
||||
# docker run --detach --tty \
|
||||
# --env DBMODE="--read-only" \
|
||||
# --env LOGFILE=/var/lib/bbprserv/prservro.log \
|
||||
# --publish 8585:8585 \
|
||||
# --volume $PWD:/var/lib/bbprserv \
|
||||
# prserv
|
||||
#
|
||||
|
||||
FROM alpine:3.14.4
|
||||
|
||||
RUN apk add --no-cache python3
|
||||
|
||||
COPY bin/bitbake-prserv /opt/bbprserv/bin/
|
||||
COPY lib/prserv /opt/bbprserv/lib/prserv/
|
||||
COPY lib/bb /opt/bbprserv/lib/bb/
|
||||
COPY lib/codegen.py /opt/bbprserv/lib/codegen.py
|
||||
COPY lib/ply /opt/bbprserv/lib/ply/
|
||||
COPY lib/bs4 /opt/bbprserv/lib/bs4/
|
||||
|
||||
ENV PATH=$PATH:/opt/bbprserv/bin
|
||||
|
||||
RUN mkdir -p /var/lib/bbprserv
|
||||
|
||||
ENV DBFILE=/var/lib/bbprserv/prserv.sqlite3 \
|
||||
LOGFILE=/var/lib/bbprserv/prserv.log \
|
||||
LOGLEVEL=debug \
|
||||
HOST=0.0.0.0 \
|
||||
PORT=8585 \
|
||||
DBMODE=""
|
||||
|
||||
ENTRYPOINT [ "/bin/sh", "-c", \
|
||||
"bitbake-prserv \
|
||||
--file=$DBFILE \
|
||||
--log=$LOGFILE \
|
||||
--loglevel=$LOGLEVEL \
|
||||
--start \
|
||||
--host=$HOST \
|
||||
--port=$PORT \
|
||||
$DBMODE \
|
||||
&& tail -f $LOGFILE"]
|
||||
@@ -8,7 +8,7 @@ Manual Organization
|
||||
|
||||
Folders exist for individual manuals as follows:
|
||||
|
||||
* bitbake-user-manual --- The BitBake User Manual
|
||||
* bitbake-user-manual - The BitBake User Manual
|
||||
|
||||
Each folder is self-contained regarding content and figures.
|
||||
|
||||
|
||||
@@ -79,8 +79,8 @@ directives.
|
||||
Prior to parsing configuration files, BitBake looks at certain
|
||||
variables, including:
|
||||
|
||||
- :term:`BB_ENV_PASSTHROUGH`
|
||||
- :term:`BB_ENV_PASSTHROUGH_ADDITIONS`
|
||||
- :term:`BB_ENV_WHITELIST`
|
||||
- :term:`BB_ENV_EXTRAWHITE`
|
||||
- :term:`BB_PRESERVE_ENV`
|
||||
- :term:`BB_ORIGENV`
|
||||
- :term:`BITBAKE_UI`
|
||||
@@ -228,7 +228,7 @@ and then reload it.
|
||||
Where possible, subsequent BitBake commands reuse this cache of recipe
|
||||
information. The validity of this cache is determined by first computing
|
||||
a checksum of the base configuration data (see
|
||||
:term:`BB_HASHCONFIG_IGNORE_VARS`) and
|
||||
:term:`BB_HASHCONFIG_WHITELIST`) and
|
||||
then checking if the checksum matches. If that checksum matches what is
|
||||
in the cache and the recipe and class files have not changed, BitBake is
|
||||
able to use the cache. BitBake then reloads the cached information about
|
||||
@@ -435,7 +435,7 @@ BitBake writes a shell script to
|
||||
executes the script. The generated shell script contains all the
|
||||
exported variables, and the shell functions with all variables expanded.
|
||||
Output from the shell script goes to the file
|
||||
``${``\ :term:`T`\ ``}/log.do_taskname.pid``. Looking at the expanded shell functions in
|
||||
``${T}/log.do_taskname.pid``. Looking at the expanded shell functions in
|
||||
the run file and the output in the log files is a useful debugging
|
||||
technique.
|
||||
|
||||
@@ -477,7 +477,7 @@ changes because it should not affect the output for target packages. The
|
||||
simplistic approach for excluding the working directory is to set it to
|
||||
some fixed value and create the checksum for the "run" script. BitBake
|
||||
goes one step better and uses the
|
||||
:term:`BB_BASEHASH_IGNORE_VARS` variable
|
||||
:term:`BB_HASHBASE_WHITELIST` variable
|
||||
to define a list of variables that should never be included when
|
||||
generating the signatures.
|
||||
|
||||
@@ -523,7 +523,7 @@ it cannot figure out dependencies.
|
||||
Thus far, this section has limited discussion to the direct inputs into
|
||||
a task. Information based on direct inputs is referred to as the
|
||||
"basehash" in the code. However, there is still the question of a task's
|
||||
indirect inputs --- the things that were already built and present in the
|
||||
indirect inputs - the things that were already built and present in the
|
||||
build directory. The checksum (or signature) for a particular task needs
|
||||
to add the hashes of all the tasks on which the particular task depends.
|
||||
Choosing which dependencies to add is a policy decision. However, the
|
||||
@@ -534,11 +534,11 @@ At the code level, there are a variety of ways both the basehash and the
|
||||
dependent task hashes can be influenced. Within the BitBake
|
||||
configuration file, we can give BitBake some extra information to help
|
||||
it construct the basehash. The following statement effectively results
|
||||
in a list of global variable dependency excludes --- variables never
|
||||
in a list of global variable dependency excludes - variables never
|
||||
included in any checksum. This example uses variables from OpenEmbedded
|
||||
to help illustrate the concept::
|
||||
|
||||
BB_BASEHASH_IGNORE_VARS ?= "TMPDIR FILE PATH PWD BB_TASKHASH BBPATH DL_DIR \
|
||||
BB_HASHBASE_WHITELIST ?= "TMPDIR FILE PATH PWD BB_TASKHASH BBPATH DL_DIR \
|
||||
SSTATE_DIR THISDIR FILESEXTRAPATHS FILE_DIRNAME HOME LOGNAME SHELL \
|
||||
USER FILESPATH STAGING_DIR_HOST STAGING_DIR_TARGET COREBASE PRSERV_HOST \
|
||||
PRSERV_DUMPDIR PRSERV_DUMPFILE PRSERV_LOCKDOWN PARALLEL_MAKE \
|
||||
|
||||
@@ -84,18 +84,18 @@ fetcher does know how to use HTTP as a transport.
|
||||
Here are some examples that show commonly used mirror definitions::
|
||||
|
||||
PREMIRRORS ?= "\
|
||||
bzr://.*/.\* http://somemirror.org/sources/ \
|
||||
cvs://.*/.\* http://somemirror.org/sources/ \
|
||||
git://.*/.\* http://somemirror.org/sources/ \
|
||||
hg://.*/.\* http://somemirror.org/sources/ \
|
||||
osc://.*/.\* http://somemirror.org/sources/ \
|
||||
p4://.*/.\* http://somemirror.org/sources/ \
|
||||
svn://.*/.\* http://somemirror.org/sources/"
|
||||
bzr://.*/.\* http://somemirror.org/sources/ \\n \
|
||||
cvs://.*/.\* http://somemirror.org/sources/ \\n \
|
||||
git://.*/.\* http://somemirror.org/sources/ \\n \
|
||||
hg://.*/.\* http://somemirror.org/sources/ \\n \
|
||||
osc://.*/.\* http://somemirror.org/sources/ \\n \
|
||||
p4://.*/.\* http://somemirror.org/sources/ \\n \
|
||||
svn://.*/.\* http://somemirror.org/sources/ \\n"
|
||||
|
||||
MIRRORS =+ "\
|
||||
ftp://.*/.\* http://somemirror.org/sources/ \
|
||||
http://.*/.\* http://somemirror.org/sources/ \
|
||||
https://.*/.\* http://somemirror.org/sources/"
|
||||
ftp://.*/.\* http://somemirror.org/sources/ \\n \
|
||||
http://.*/.\* http://somemirror.org/sources/ \\n \
|
||||
https://.*/.\* http://somemirror.org/sources/ \\n"
|
||||
|
||||
It is useful to note that BitBake
|
||||
supports cross-URLs. It is possible to mirror a Git repository on an
|
||||
@@ -167,9 +167,6 @@ govern the behavior of the unpack stage:
|
||||
- *dos:* Applies to ``.zip`` and ``.jar`` files and specifies whether
|
||||
to use DOS line ending conversion on text files.
|
||||
|
||||
- *striplevel:* Strip specified number of leading components (levels)
|
||||
from file names on extraction
|
||||
|
||||
- *subdir:* Unpacks the specific URL to the specified subdirectory
|
||||
within the root directory.
|
||||
|
||||
@@ -229,11 +226,6 @@ downloaded file is useful for avoiding collisions in
|
||||
:term:`DL_DIR` when dealing with multiple files that
|
||||
have the same name.
|
||||
|
||||
If a username and password are specified in the ``SRC_URI``, a Basic
|
||||
Authorization header will be added to each request, including across redirects.
|
||||
To instead limit the Authorization header to the first request, add
|
||||
"redirectauth=0" to the list of parameters.
|
||||
|
||||
Some example URLs are as follows::
|
||||
|
||||
SRC_URI = "http://oe.handhelds.org/not_there.aac"
|
||||
@@ -396,19 +388,6 @@ This fetcher supports the following parameters:
|
||||
protocol is "file". You can also use "http", "https", "ssh" and
|
||||
"rsync".
|
||||
|
||||
.. note::
|
||||
|
||||
When ``protocol`` is "ssh", the URL expected in :term:`SRC_URI` differs
|
||||
from the one that is typically passed to ``git clone`` command and provided
|
||||
by the Git server to fetch from. For example, the URL returned by GitLab
|
||||
server for ``mesa`` when cloning over SSH is
|
||||
``git@gitlab.freedesktop.org:mesa/mesa.git``, however the expected URL in
|
||||
:term:`SRC_URI` is the following::
|
||||
|
||||
SRC_URI = "git://git@gitlab.freedesktop.org/mesa/mesa.git;branch=main;protocol=ssh;..."
|
||||
|
||||
Note the ``:`` character changed for a ``/`` before the path to the project.
|
||||
|
||||
- *"nocheckout":* Tells the fetcher to not checkout source code when
|
||||
unpacking when set to "1". Set this option for the URL where there is
|
||||
a custom routine to checkout code. The default is "0".
|
||||
@@ -432,9 +411,9 @@ This fetcher supports the following parameters:
|
||||
raw Git metadata is provided. This parameter implies the "nocheckout"
|
||||
parameter as well.
|
||||
|
||||
- *"branch":* The branch(es) of the Git tree to clone. Unless
|
||||
"nobranch" is set to "1", this is a mandatory parameter. The number of
|
||||
branch parameters must match the number of name parameters.
|
||||
- *"branch":* The branch(es) of the Git tree to clone. If unset, this
|
||||
is assumed to be "master". The number of branch parameters much match
|
||||
the number of name parameters.
|
||||
|
||||
- *"rev":* The revision to use for the checkout. The default is
|
||||
"master".
|
||||
@@ -457,9 +436,8 @@ This fetcher supports the following parameters:
|
||||
|
||||
Here are some example URLs::
|
||||
|
||||
SRC_URI = "git://github.com/fronteed/icheck.git;protocol=https;branch=${PV};tag=${PV}"
|
||||
SRC_URI = "git://github.com/asciidoc/asciidoc-py;protocol=https;branch=main"
|
||||
SRC_URI = "git://git@gitlab.freedesktop.org/mesa/mesa.git;branch=main;protocol=ssh;..."
|
||||
SRC_URI = "git://git.oe.handhelds.org/git/vip.git;tag=version-1"
|
||||
SRC_URI = "git://git.oe.handhelds.org/git/vip.git;protocol=http"
|
||||
|
||||
.. note::
|
||||
|
||||
@@ -688,98 +666,6 @@ Here is an example URL::
|
||||
|
||||
It can also be used when setting mirrors definitions using the :term:`PREMIRRORS` variable.
|
||||
|
||||
.. _crate-fetcher:
|
||||
|
||||
Crate Fetcher (``crate://``)
|
||||
----------------------------
|
||||
|
||||
This submodule fetches code for
|
||||
`Rust language "crates" <https://doc.rust-lang.org/reference/glossary.html?highlight=crate#crate>`__
|
||||
corresponding to Rust libraries and programs to compile. Such crates are typically shared
|
||||
on https://crates.io/ but this fetcher supports other crate registries too.
|
||||
|
||||
The format for the :term:`SRC_URI` setting must be::
|
||||
|
||||
SRC_URI = "crate://REGISTRY/NAME/VERSION"
|
||||
|
||||
Here is an example URL::
|
||||
|
||||
SRC_URI = "crate://crates.io/glob/0.2.11"
|
||||
|
||||
.. _npm-fetcher:
|
||||
|
||||
NPM Fetcher (``npm://``)
|
||||
------------------------
|
||||
|
||||
This submodule fetches source code from an
|
||||
`NPM <https://en.wikipedia.org/wiki/Npm_(software)>`__
|
||||
Javascript package registry.
|
||||
|
||||
The format for the :term:`SRC_URI` setting must be::
|
||||
|
||||
SRC_URI = "npm://some.registry.url;ParameterA=xxx;ParameterB=xxx;..."
|
||||
|
||||
This fetcher supports the following parameters:
|
||||
|
||||
- *"package":* The NPM package name. This is a mandatory parameter.
|
||||
|
||||
- *"version":* The NPM package version. This is a mandatory parameter.
|
||||
|
||||
- *"downloadfilename":* Specifies the filename used when storing the downloaded file.
|
||||
|
||||
- *"destsuffix":* Specifies the directory to use to unpack the package (default: ``npm``).
|
||||
|
||||
Note that NPM fetcher only fetches the package source itself. The dependencies
|
||||
can be fetched through the `npmsw-fetcher`_.
|
||||
|
||||
Here is an example URL with both fetchers::
|
||||
|
||||
SRC_URI = " \
|
||||
npm://registry.npmjs.org/;package=cute-files;version=${PV} \
|
||||
npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \
|
||||
"
|
||||
|
||||
See :yocto_docs:`Creating Node Package Manager (NPM) Packages
|
||||
</dev-manual/common-tasks.html#creating-node-package-manager-npm-packages>`
|
||||
in the Yocto Project manual for details about using
|
||||
:yocto_docs:`devtool <https://docs.yoctoproject.org/ref-manual/devtool-reference.html>`
|
||||
to automatically create a recipe from an NPM URL.
|
||||
|
||||
.. _npmsw-fetcher:
|
||||
|
||||
NPM shrinkwrap Fetcher (``npmsw://``)
|
||||
-------------------------------------
|
||||
|
||||
This submodule fetches source code from an
|
||||
`NPM shrinkwrap <https://docs.npmjs.com/cli/v8/commands/npm-shrinkwrap>`__
|
||||
description file, which lists the dependencies
|
||||
of an NPM package while locking their versions.
|
||||
|
||||
The format for the :term:`SRC_URI` setting must be::
|
||||
|
||||
SRC_URI = "npmsw://some.registry.url;ParameterA=xxx;ParameterB=xxx;..."
|
||||
|
||||
This fetcher supports the following parameters:
|
||||
|
||||
- *"dev":* Set this parameter to ``1`` to install "devDependencies".
|
||||
|
||||
- *"destsuffix":* Specifies the directory to use to unpack the dependencies
|
||||
(``${S}`` by default).
|
||||
|
||||
Note that the shrinkwrap file can also be provided by the recipe for
|
||||
the package which has such dependencies, for example::
|
||||
|
||||
SRC_URI = " \
|
||||
npm://registry.npmjs.org/;package=cute-files;version=${PV} \
|
||||
npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \
|
||||
"
|
||||
|
||||
Such a file can automatically be generated using
|
||||
:yocto_docs:`devtool <https://docs.yoctoproject.org/ref-manual/devtool-reference.html>`
|
||||
as described in the :yocto_docs:`Creating Node Package Manager (NPM) Packages
|
||||
</dev-manual/common-tasks.html#creating-node-package-manager-npm-packages>`
|
||||
section of the Yocto Project.
|
||||
|
||||
Other Fetchers
|
||||
--------------
|
||||
|
||||
@@ -789,6 +675,8 @@ Fetch submodules also exist for the following:
|
||||
|
||||
- Mercurial (``hg://``)
|
||||
|
||||
- npm (``npm://``)
|
||||
|
||||
- OSC (``osc://``)
|
||||
|
||||
- Secure FTP (``sftp://``)
|
||||
|
||||
@@ -61,9 +61,10 @@ member Chris Larson split the project into two distinct pieces:
|
||||
|
||||
Today, BitBake is the primary basis of the
|
||||
`OpenEmbedded <https://www.openembedded.org/>`__ project, which is being
|
||||
used to build and maintain Linux distributions such as the `Poky
|
||||
Reference Distribution <https://www.yoctoproject.org/software-item/poky/>`__,
|
||||
developed under the umbrella of the `Yocto Project <https://www.yoctoproject.org>`__.
|
||||
used to build and maintain Linux distributions such as the `Angstrom
|
||||
Distribution <http://www.angstrom-distribution.org/>`__, and which is
|
||||
also being used as the build tool for Linux projects such as the `Yocto
|
||||
Project <https://www.yoctoproject.org>`__.
|
||||
|
||||
Prior to BitBake, no other build tool adequately met the needs of an
|
||||
aspiring embedded Linux distribution. All of the build systems used by
|
||||
@@ -536,7 +537,7 @@ current working directory:
|
||||
- ``pn-buildlist``: Shows a simple list of targets that are to be
|
||||
built.
|
||||
|
||||
To stop depending on common depends, use the ``-I`` depend option and
|
||||
To stop depending on common depends, use the "-I" depend option and
|
||||
BitBake omits them from the graph. Leaving this information out can
|
||||
produce more readable graphs. This way, you can remove from the graph
|
||||
:term:`DEPENDS` from inherited classes such as ``base.bbclass``.
|
||||
|
||||
@@ -104,15 +104,15 @@ Line Joining
|
||||
|
||||
Outside of :ref:`functions <bitbake-user-manual/bitbake-user-manual-metadata:functions>`,
|
||||
BitBake joins any line ending in
|
||||
a backslash character ("\\") with the following line before parsing
|
||||
statements. The most common use for the "\\" character is to split
|
||||
a backslash character ("\\") with the following line before parsing
|
||||
statements. The most common use for the "\\" character is to split
|
||||
variable assignments over multiple lines, as in the following example::
|
||||
|
||||
FOO = "bar \
|
||||
baz \
|
||||
qaz"
|
||||
|
||||
Both the "\\" character and the newline
|
||||
Both the "\\" character and the newline
|
||||
character that follow it are removed when joining lines. Thus, no
|
||||
newline characters end up in the value of ``FOO``.
|
||||
|
||||
@@ -125,7 +125,7 @@ Consider this additional example where the two assignments both assign
|
||||
|
||||
.. note::
|
||||
|
||||
BitBake does not interpret escape sequences like "\\n" in variable
|
||||
BitBake does not interpret escape sequences like "\\n" in variable
|
||||
values. For these to have an effect, the value must be passed to some
|
||||
utility that interprets escape sequences, such as
|
||||
``printf`` or ``echo -n``.
|
||||
@@ -159,7 +159,7 @@ behavior::
|
||||
C = "qux"
|
||||
*At this point, ${A} equals "qux bar baz"*
|
||||
B = "norf"
|
||||
*At this point, ${A} equals "norf baz"*
|
||||
*At this point, ${A} equals "norf baz"*
|
||||
|
||||
Contrast this behavior with the
|
||||
:ref:`bitbake-user-manual/bitbake-user-manual-metadata:immediate variable
|
||||
@@ -195,45 +195,22 @@ value. However, if ``A`` is not set, the variable is set to "aval".
|
||||
Setting a weak default value (??=)
|
||||
----------------------------------
|
||||
|
||||
The weak default value of a variable is the value which that variable
|
||||
will expand to if no value has been assigned to it via any of the other
|
||||
assignment operators. The "??=" operator takes effect immediately, replacing
|
||||
any previously defined weak default value. Here is an example::
|
||||
It is possible to use a "weaker" assignment than in the previous section
|
||||
by using the "??=" operator. This assignment behaves identically to "?="
|
||||
except that the assignment is made at the end of the parsing process
|
||||
rather than immediately. Consequently, when multiple "??=" assignments
|
||||
exist, the last one is used. Also, any "=" or "?=" assignment will
|
||||
override the value set with "??=". Here is an example::
|
||||
|
||||
W ??= "x"
|
||||
A := "${W}" # Immediate variable expansion
|
||||
W ??= "y"
|
||||
B := "${W}" # Immediate variable expansion
|
||||
W ??= "z"
|
||||
C = "${W}"
|
||||
W ?= "i"
|
||||
A ??= "somevalue"
|
||||
A ??= "someothervalue"
|
||||
|
||||
After parsing we will have::
|
||||
If ``A`` is set before the above statements are
|
||||
parsed, the variable retains its value. If ``A`` is not set, the
|
||||
variable is set to "someothervalue".
|
||||
|
||||
A = "x"
|
||||
B = "y"
|
||||
C = "i"
|
||||
W = "i"
|
||||
|
||||
Appending and prepending non-override style will not substitute the weak
|
||||
default value, which means that after parsing::
|
||||
|
||||
W ??= "x"
|
||||
W += "y"
|
||||
|
||||
we will have::
|
||||
|
||||
W = " y"
|
||||
|
||||
On the other hand, override-style appends/prepends/removes are applied after
|
||||
any active weak default value has been substituted::
|
||||
|
||||
W ??= "x"
|
||||
W:append = "y"
|
||||
|
||||
After parsing we will have::
|
||||
|
||||
W = "xy"
|
||||
Again, this assignment is a "lazy" or "weak" assignment because it does
|
||||
not occur until the end of the parsing process.
|
||||
|
||||
Immediate variable expansion (:=)
|
||||
---------------------------------
|
||||
@@ -533,8 +510,8 @@ variable.
|
||||
|
||||
.. note::
|
||||
|
||||
Overrides can only use lower-case characters, digits and dashes.
|
||||
In particular, colons are not permitted in override names as they are used to
|
||||
Overrides can only use lower-case characters. Additionally,
|
||||
underscores are not permitted in override names as they are used to
|
||||
separate overrides from each other and from the variable name.
|
||||
|
||||
- *Selecting a Variable:* The :term:`OVERRIDES` variable is a
|
||||
@@ -546,14 +523,14 @@ variable.
|
||||
|
||||
OVERRIDES = "architecture:os:machine"
|
||||
TEST = "default"
|
||||
TEST:os = "osspecific"
|
||||
TEST:nooverride = "othercondvalue"
|
||||
TEST_os = "osspecific"
|
||||
TEST_nooverride = "othercondvalue"
|
||||
|
||||
In this example, the :term:`OVERRIDES`
|
||||
variable lists three overrides: "architecture", "os", and "machine".
|
||||
The variable ``TEST`` by itself has a default value of "default". You
|
||||
select the os-specific version of the ``TEST`` variable by appending
|
||||
the "os" override to the variable (i.e. ``TEST:os``).
|
||||
the "os" override to the variable (i.e. ``TEST_os``).
|
||||
|
||||
To better understand this, consider a practical example that assumes
|
||||
an OpenEmbedded metadata-based Linux kernel recipe file. The
|
||||
@@ -590,7 +567,7 @@ variable.
|
||||
- *Setting a Variable for a Single Task:* BitBake supports setting a
|
||||
variable just for the duration of a single task. Here is an example::
|
||||
|
||||
FOO:task-configure = "val 1"
|
||||
FOO_task-configure = "val 1"
|
||||
FOO:task-compile = "val 2"
|
||||
|
||||
In the
|
||||
@@ -608,16 +585,6 @@ variable.
|
||||
|
||||
EXTRA_OEMAKE:prepend:task-compile = "${PARALLEL_MAKE} "
|
||||
|
||||
.. note::
|
||||
|
||||
Before BitBake 1.52 (Honister 3.4), the syntax for :term:`OVERRIDES`
|
||||
used ``_`` instead of ``:``, so you will still find a lot of documentation
|
||||
using ``_append``, ``_prepend``, and ``_remove``, for example.
|
||||
|
||||
For details, see the
|
||||
:yocto_docs:`Overrides Syntax Changes </migration-guides/migration-3.4.html#override-syntax-changes>`
|
||||
section in the Yocto Project manual migration notes.
|
||||
|
||||
Key Expansion
|
||||
-------------
|
||||
|
||||
@@ -927,7 +894,7 @@ Regardless of the type of function, you can only define them in class
|
||||
Shell Functions
|
||||
---------------
|
||||
|
||||
Functions written in shell script are executed either directly as
|
||||
Functions written in shell script are executed either directly as
|
||||
functions, tasks, or both. They can also be called by other shell
|
||||
functions. Here is an example shell function definition::
|
||||
|
||||
@@ -977,7 +944,7 @@ Running ``do_foo`` prints the following::
|
||||
Overrides and override-style operators can be applied to any shell
|
||||
function, not just :ref:`tasks <bitbake-user-manual/bitbake-user-manual-metadata:tasks>`.
|
||||
|
||||
You can use the ``bitbake -e recipename`` command to view the final
|
||||
You can use the ``bitbake -e recipename`` command to view the final
|
||||
assembled function after all overrides have been applied.
|
||||
|
||||
BitBake-Style Python Functions
|
||||
@@ -1029,7 +996,7 @@ Running ``do_foo`` prints the following::
|
||||
recipename do_foo: second
|
||||
recipename do_foo: third
|
||||
|
||||
You can use the ``bitbake -e recipename`` command to view
|
||||
You can use the ``bitbake -e recipename`` command to view
|
||||
the final assembled function after all overrides have been applied.
|
||||
|
||||
Python Functions
|
||||
@@ -1376,8 +1343,8 @@ the build machine cannot influence the build.
|
||||
.. note::
|
||||
|
||||
By default, BitBake cleans the environment to include only those
|
||||
things exported or listed in its passthrough list to ensure that the
|
||||
build environment is reproducible and consistent. You can prevent this
|
||||
things exported or listed in its whitelist to ensure that the build
|
||||
environment is reproducible and consistent. You can prevent this
|
||||
"cleaning" by setting the :term:`BB_PRESERVE_ENV` variable.
|
||||
|
||||
Consequently, if you do want something to get passed into the build task
|
||||
@@ -1385,14 +1352,14 @@ environment, you must take these two steps:
|
||||
|
||||
#. Tell BitBake to load what you want from the environment into the
|
||||
datastore. You can do so through the
|
||||
:term:`BB_ENV_PASSTHROUGH` and
|
||||
:term:`BB_ENV_PASSTHROUGH_ADDITIONS` variables. For
|
||||
:term:`BB_ENV_WHITELIST` and
|
||||
:term:`BB_ENV_EXTRAWHITE` variables. For
|
||||
example, assume you want to prevent the build system from accessing
|
||||
your ``$HOME/.ccache`` directory. The following command adds the
|
||||
the environment variable ``CCACHE_DIR`` to BitBake's passthrough
|
||||
list to allow that variable into the datastore::
|
||||
your ``$HOME/.ccache`` directory. The following command "whitelists"
|
||||
the environment variable ``CCACHE_DIR`` causing BitBake to allow that
|
||||
variable into the datastore::
|
||||
|
||||
export BB_ENV_PASSTHROUGH_ADDITIONS="$BB_ENV_PASSTHROUGH_ADDITIONS CCACHE_DIR"
|
||||
export BB_ENV_EXTRAWHITE="$BB_ENV_EXTRAWHITE CCACHE_DIR"
|
||||
|
||||
#. Tell BitBake to export what you have loaded into the datastore to the
|
||||
task environment of every running task. Loading something from the
|
||||
@@ -1409,7 +1376,7 @@ environment, you must take these two steps:
|
||||
A side effect of the previous steps is that BitBake records the
|
||||
variable as a dependency of the build process in things like the
|
||||
setscene checksums. If doing so results in unnecessary rebuilds of
|
||||
tasks, you can also flag the variable so that the setscene code
|
||||
tasks, you can whitelist the variable so that the setscene code
|
||||
ignores the dependency when it creates checksums.
|
||||
|
||||
Sometimes, it is useful to be able to obtain information from the
|
||||
@@ -1681,8 +1648,8 @@ user interfaces:
|
||||
|
||||
.. _variants-class-extension-mechanism:
|
||||
|
||||
Variants --- Class Extension Mechanism
|
||||
======================================
|
||||
Variants - Class Extension Mechanism
|
||||
====================================
|
||||
|
||||
BitBake supports multiple incarnations of a recipe file via the
|
||||
:term:`BBCLASSEXTEND` variable.
|
||||
@@ -1922,15 +1889,6 @@ looking at the source code of the ``bb`` module, which is in
|
||||
the commonly used functions ``bb.utils.contains()`` and
|
||||
``bb.utils.mkdirhier()``, which come with docstrings.
|
||||
|
||||
Testing and Debugging BitBake Python code
|
||||
-----------------------------------------
|
||||
|
||||
The OpenEmbedded build system implements a convenient ``pydevshell`` target which
|
||||
you can use to access the BitBake datastore and experiment with your own Python
|
||||
code. See :yocto_docs:`Using a Python Development Shell
|
||||
</dev-manual/common-tasks.html#using-a-python-development-shell>` in the Yocto
|
||||
Project manual for details.
|
||||
|
||||
Task Checksums and Setscene
|
||||
===========================
|
||||
|
||||
@@ -1963,6 +1921,12 @@ The following list describes related variables:
|
||||
Specifies a function BitBake calls that determines whether BitBake
|
||||
requires a setscene dependency to be met.
|
||||
|
||||
- :term:`BB_STAMP_POLICY`: Defines the mode
|
||||
for comparing timestamps of stamp files.
|
||||
|
||||
- :term:`BB_STAMP_WHITELIST`: Lists stamp
|
||||
files that are looked at when the stamp policy is "whitelist".
|
||||
|
||||
- :term:`BB_TASKHASH`: Within an executing task,
|
||||
this variable holds the hash of the task as returned by the currently
|
||||
enabled signature generator.
|
||||
|
||||
@@ -93,13 +93,6 @@ overview of their function and contents.
|
||||
fetcher does not attempt to use the host listed in :term:`SRC_URI` after
|
||||
a successful fetch from the :term:`PREMIRRORS` occurs.
|
||||
|
||||
:term:`BB_BASEHASH_IGNORE_VARS`
|
||||
Lists variables that are excluded from checksum and dependency data.
|
||||
Variables that are excluded can therefore change without affecting
|
||||
the checksum mechanism. A common example would be the variable for
|
||||
the path of the build. BitBake's output should not (and usually does
|
||||
not) depend on the directory in which it was built.
|
||||
|
||||
:term:`BB_CHECK_SSL_CERTS`
|
||||
Specifies if SSL certificates should be checked when fetching. The default
|
||||
value is ``1`` and certificates are not checked if the value is set to ``0``.
|
||||
@@ -145,7 +138,7 @@ overview of their function and contents.
|
||||
where:
|
||||
|
||||
<action> is:
|
||||
HALT: Immediately halt the build when
|
||||
ABORT: Immediately abort the build when
|
||||
a threshold is broken.
|
||||
STOPTASKS: Stop the build after the currently
|
||||
executing tasks have finished when
|
||||
@@ -176,13 +169,13 @@ overview of their function and contents.
|
||||
|
||||
Here are some examples::
|
||||
|
||||
BB_DISKMON_DIRS = "HALT,${TMPDIR},1G,100K WARN,${SSTATE_DIR},1G,100K"
|
||||
BB_DISKMON_DIRS = "ABORT,${TMPDIR},1G,100K WARN,${SSTATE_DIR},1G,100K"
|
||||
BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},1G"
|
||||
BB_DISKMON_DIRS = "HALT,${TMPDIR},,100K"
|
||||
BB_DISKMON_DIRS = "ABORT,${TMPDIR},,100K"
|
||||
|
||||
The first example works only if you also set the
|
||||
:term:`BB_DISKMON_WARNINTERVAL`
|
||||
variable. This example causes the build system to immediately halt
|
||||
variable. This example causes the build system to immediately abort
|
||||
when either the disk space in ``${TMPDIR}`` drops below 1 Gbyte or
|
||||
the available free inodes drops below 100 Kbytes. Because two
|
||||
directories are provided with the variable, the build system also
|
||||
@@ -196,7 +189,7 @@ overview of their function and contents.
|
||||
directory drops below 1 Gbyte. No disk monitoring occurs for the free
|
||||
inodes in this case.
|
||||
|
||||
The final example immediately halts the build when the number of
|
||||
The final example immediately aborts the build when the number of
|
||||
free inodes in the ``${TMPDIR}`` directory drops below 100 Kbytes. No
|
||||
disk space monitoring for the directory itself occurs in this case.
|
||||
|
||||
@@ -243,23 +236,23 @@ overview of their function and contents.
|
||||
based on the interval occur each time a respective interval is
|
||||
reached beyond the initial warning (i.e. 1 Gbytes and 100 Kbytes).
|
||||
|
||||
:term:`BB_ENV_PASSTHROUGH`
|
||||
Specifies the internal list of variables to allow through from
|
||||
the external environment into BitBake's datastore. If the value of
|
||||
this variable is not specified (which is the default), the following
|
||||
list is used: :term:`BBPATH`, :term:`BB_PRESERVE_ENV`,
|
||||
:term:`BB_ENV_PASSTHROUGH`, and :term:`BB_ENV_PASSTHROUGH_ADDITIONS`.
|
||||
:term:`BB_ENV_EXTRAWHITE`
|
||||
Specifies an additional set of variables to allow through (whitelist)
|
||||
from the external environment into BitBake's datastore. This list of
|
||||
variables are on top of the internal list set in
|
||||
:term:`BB_ENV_WHITELIST`.
|
||||
|
||||
.. note::
|
||||
|
||||
You must set this variable in the external environment in order
|
||||
for it to work.
|
||||
|
||||
:term:`BB_ENV_PASSTHROUGH_ADDITIONS`
|
||||
Specifies an additional set of variables to allow through from the
|
||||
external environment into BitBake's datastore. This list of variables
|
||||
are on top of the internal list set in
|
||||
:term:`BB_ENV_PASSTHROUGH`.
|
||||
:term:`BB_ENV_WHITELIST`
|
||||
Specifies the internal whitelist of variables to allow through from
|
||||
the external environment into BitBake's datastore. If the value of
|
||||
this variable is not specified (which is the default), the following
|
||||
list is used: :term:`BBPATH`, :term:`BB_PRESERVE_ENV`,
|
||||
:term:`BB_ENV_WHITELIST`, and :term:`BB_ENV_EXTRAWHITE`.
|
||||
|
||||
.. note::
|
||||
|
||||
@@ -288,61 +281,12 @@ overview of their function and contents.
|
||||
|
||||
BB_GENERATE_MIRROR_TARBALLS = "1"
|
||||
|
||||
:term:`BB_GENERATE_SHALLOW_TARBALLS`
|
||||
Setting this variable to "1" when :term:`BB_GIT_SHALLOW` is also set to
|
||||
"1" causes bitbake to generate shallow mirror tarballs when fetching git
|
||||
repositories. The number of commits included in the shallow mirror
|
||||
tarballs is controlled by :term:`BB_GIT_SHALLOW_DEPTH`.
|
||||
|
||||
If both :term:`BB_GIT_SHALLOW` and :term:`BB_GENERATE_MIRROR_TARBALLS` are
|
||||
enabled, bitbake will generate shallow mirror tarballs by default for git
|
||||
repositories. This separate variable exists so that shallow tarball
|
||||
generation can be enabled without needing to also enable normal mirror
|
||||
generation if it is not desired.
|
||||
|
||||
For example usage, see :term:`BB_GIT_SHALLOW`.
|
||||
|
||||
:term:`BB_GIT_SHALLOW`
|
||||
Setting this variable to "1" enables the support for fetching, using and
|
||||
generating mirror tarballs of `shallow git repositories <https://riptutorial.com/git/example/4584/shallow-clone>`_.
|
||||
The external `git-make-shallow <https://git.openembedded.org/bitbake/tree/bin/git-make-shallow>`_
|
||||
script is used for shallow mirror tarball creation.
|
||||
|
||||
When :term:`BB_GIT_SHALLOW` is enabled, bitbake will attempt to fetch a shallow
|
||||
mirror tarball. If the shallow mirror tarball cannot be fetched, it will
|
||||
try to fetch the full mirror tarball and use that.
|
||||
|
||||
When a mirror tarball is not available, a full git clone will be performed
|
||||
regardless of whether this variable is set or not. Support for shallow
|
||||
clones is not currently implemented as git does not directly support
|
||||
shallow cloning a particular git commit hash (it only supports cloning
|
||||
from a tag or branch reference).
|
||||
|
||||
See also :term:`BB_GIT_SHALLOW_DEPTH` and
|
||||
:term:`BB_GENERATE_SHALLOW_TARBALLS`.
|
||||
|
||||
Example usage::
|
||||
|
||||
BB_GIT_SHALLOW ?= "1"
|
||||
|
||||
# Keep only the top commit
|
||||
BB_GIT_SHALLOW_DEPTH ?= "1"
|
||||
|
||||
# This defaults to enabled if both BB_GIT_SHALLOW and
|
||||
# BB_GENERATE_MIRROR_TARBALLS are enabled
|
||||
BB_GENERATE_SHALLOW_TARBALLS ?= "1"
|
||||
|
||||
:term:`BB_GIT_SHALLOW_DEPTH`
|
||||
When used with :term:`BB_GENERATE_SHALLOW_TARBALLS`, this variable sets
|
||||
the number of commits to include in generated shallow mirror tarballs.
|
||||
With a depth of 1, only the commit referenced in :term:`SRCREV` is
|
||||
included in the shallow mirror tarball. Increasing the depth includes
|
||||
additional parent commits, working back through the commit history.
|
||||
|
||||
If this variable is unset, bitbake will default to a depth of 1 when
|
||||
generating shallow mirror tarballs.
|
||||
|
||||
For example usage, see :term:`BB_GIT_SHALLOW`.
|
||||
:term:`BB_HASHBASE_WHITELIST`
|
||||
Lists variables that are excluded from checksum and dependency data.
|
||||
Variables that are excluded can therefore change without affecting
|
||||
the checksum mechanism. A common example would be the variable for
|
||||
the path of the build. BitBake's output should not (and usually does
|
||||
not) depend on the directory in which it was built.
|
||||
|
||||
:term:`BB_HASHCHECK_FUNCTION`
|
||||
Specifies the name of the function to call during the "setscene" part
|
||||
@@ -358,7 +302,7 @@ overview of their function and contents.
|
||||
However, the more accurate the data returned, the more efficient the
|
||||
build will be.
|
||||
|
||||
:term:`BB_HASHCONFIG_IGNORE_VARS`
|
||||
:term:`BB_HASHCONFIG_WHITELIST`
|
||||
Lists variables that are excluded from base configuration checksum,
|
||||
which is used to determine if the cache can be reused.
|
||||
|
||||
@@ -374,35 +318,12 @@ overview of their function and contents.
|
||||
Specifies the Hash Equivalence server to use.
|
||||
|
||||
If set to ``auto``, BitBake automatically starts its own server
|
||||
over a UNIX domain socket. An option is to connect this server
|
||||
to an upstream one, by setting :term:`BB_HASHSERVE_UPSTREAM`.
|
||||
over a UNIX domain socket.
|
||||
|
||||
If set to ``unix://path``, BitBake will connect to an existing
|
||||
hash server available over a UNIX domain socket.
|
||||
|
||||
If set to ``host:port``, BitBake will connect to a remote server on the
|
||||
If set to ``host:port``, BitBake will use a remote server on the
|
||||
specified host. This allows multiple clients to share the same
|
||||
hash equivalence data.
|
||||
|
||||
The remote server can be started manually through
|
||||
the ``bin/bitbake-hashserv`` script provided by BitBake,
|
||||
which supports UNIX domain sockets too. This script also allows
|
||||
to start the server in read-only mode, to avoid accepting
|
||||
equivalences that correspond to Share State caches that are
|
||||
only available on specific clients.
|
||||
|
||||
:term:`BB_HASHSERVE_UPSTREAM`
|
||||
Specifies an upstream Hash Equivalence server.
|
||||
|
||||
This optional setting is only useful when a local Hash Equivalence
|
||||
server is started (setting :term:`BB_HASHSERVE` to ``auto``),
|
||||
and you wish the local server to query an upstream server for
|
||||
Hash Equivalence data.
|
||||
|
||||
Example usage::
|
||||
|
||||
BB_HASHSERVE_UPSTREAM = "hashserv.yocto.io:8687"
|
||||
|
||||
:term:`BB_INVALIDCONF`
|
||||
Used in combination with the ``ConfigParsed`` event to trigger
|
||||
re-parsing the base metadata (i.e. all the recipes). The
|
||||
@@ -426,19 +347,6 @@ overview of their function and contents.
|
||||
If you want to force log files to take a specific name, you can set this
|
||||
variable in a configuration file.
|
||||
|
||||
:term:`BB_MULTI_PROVIDER_ALLOWED`
|
||||
Allows you to suppress BitBake warnings caused when building two
|
||||
separate recipes that provide the same output.
|
||||
|
||||
BitBake normally issues a warning when building two different recipes
|
||||
where each provides the same output. This scenario is usually
|
||||
something the user does not want. However, cases do exist where it
|
||||
makes sense, particularly in the ``virtual/*`` namespace. You can use
|
||||
this variable to suppress BitBake's warnings.
|
||||
|
||||
To use the variable, list provider names (e.g. recipe names,
|
||||
``virtual/kernel``, and so forth).
|
||||
|
||||
:term:`BB_NICE_LEVEL`
|
||||
Allows BitBake to run at a specific priority (i.e. nice level).
|
||||
System permissions usually mean that BitBake can reduce its priority
|
||||
@@ -465,9 +373,8 @@ overview of their function and contents.
|
||||
|
||||
:term:`BB_ORIGENV`
|
||||
Contains a copy of the original external environment in which BitBake
|
||||
was run. The copy is taken before any variable values configured to
|
||||
pass through from the external environment are filtered into BitBake's
|
||||
datastore.
|
||||
was run. The copy is taken before any whitelisted variable values are
|
||||
filtered into BitBake's datastore.
|
||||
|
||||
.. note::
|
||||
|
||||
@@ -475,39 +382,14 @@ overview of their function and contents.
|
||||
queried using the normal datastore operations.
|
||||
|
||||
:term:`BB_PRESERVE_ENV`
|
||||
Disables environment filtering and instead allows all variables through
|
||||
from the external environment into BitBake's datastore.
|
||||
Disables whitelisting and instead allows all variables through from
|
||||
the external environment into BitBake's datastore.
|
||||
|
||||
.. note::
|
||||
|
||||
You must set this variable in the external environment in order
|
||||
for it to work.
|
||||
|
||||
:term:`BB_PRESSURE_MAX_CPU`
|
||||
The threshold for maximum CPU pressure before BitBake prevents the
|
||||
scheduling of new tasks. Once the :term:`BB_PRESSURE_MAX_CPU` threshold
|
||||
is exceeded, new tasks are not started until the pressure subsides to
|
||||
below the threshold. If :term:`BB_PRESSURE_MAX_CPU` is not set, CPU
|
||||
pressure is not monitored. A threshold can be set in ``conf/local.conf``
|
||||
as::
|
||||
|
||||
BB_PRESSURE_MAX_CPU = "500"
|
||||
|
||||
:term:`BB_PRESSURE_MAX_IO`
|
||||
The threshold for maximum IO pressure experienced before BitBake
|
||||
prevents the scheduling of new tasks. The IO pressure is regulated in the
|
||||
same way as :term:`BB_PRESSURE_MAX_CPU`. At this point in time,
|
||||
experiments show that IO pressure tends to be short-lived and regulating
|
||||
just the CPU can help to reduce it.
|
||||
|
||||
:term:`BB_PRESSURE_MAX_MEMORY`
|
||||
The threshold for maximum memory pressure experienced before BitBake
|
||||
prevents the scheduling of new tasks. The memory pressure is regulated in
|
||||
the same way as :term:`BB_PRESSURE_MAX_CPU`. Note that any memory
|
||||
pressure indicates that a system is being pushed beyond its capacity. At
|
||||
this point in time, experiments show that memory pressure tends to be
|
||||
short-lived and regulating just the CPU can help to reduce it.
|
||||
|
||||
:term:`BB_RUNFMT`
|
||||
Specifies the name of the executable script files (i.e. run files)
|
||||
saved into ``${``\ :term:`T`\ ``}``. By default, the
|
||||
@@ -528,14 +410,14 @@ overview of their function and contents.
|
||||
Selects the name of the scheduler to use for the scheduling of
|
||||
BitBake tasks. Three options exist:
|
||||
|
||||
- *basic* --- the basic framework from which everything derives. Using
|
||||
- *basic* - The basic framework from which everything derives. Using
|
||||
this option causes tasks to be ordered numerically as they are
|
||||
parsed.
|
||||
|
||||
- *speed* --- executes tasks first that have more tasks depending on
|
||||
- *speed* - Executes tasks first that have more tasks depending on
|
||||
them. The "speed" option is the default.
|
||||
|
||||
- *completion* --- causes the scheduler to try to complete a given
|
||||
- *completion* - Causes the scheduler to try to complete a given
|
||||
recipe once its build has started.
|
||||
|
||||
:term:`BB_SCHEDULERS`
|
||||
@@ -582,12 +464,35 @@ overview of their function and contents.
|
||||
|
||||
The variable can be set using one of two policies:
|
||||
|
||||
- *cache* --- retains the value the system obtained previously rather
|
||||
- *cache* - Retains the value the system obtained previously rather
|
||||
than querying the source control system each time.
|
||||
|
||||
- *clear* --- queries the source controls system every time. With this
|
||||
- *clear* - Queries the source controls system every time. With this
|
||||
policy, there is no cache. The "clear" policy is the default.
|
||||
|
||||
:term:`BB_STAMP_POLICY`
|
||||
Defines the mode used for how timestamps of stamp files are compared.
|
||||
You can set the variable to one of the following modes:
|
||||
|
||||
- *perfile* - Timestamp comparisons are only made between timestamps
|
||||
of a specific recipe. This is the default mode.
|
||||
|
||||
- *full* - Timestamp comparisons are made for all dependencies.
|
||||
|
||||
- *whitelist* - Identical to "full" mode except timestamp
|
||||
comparisons are made for recipes listed in the
|
||||
:term:`BB_STAMP_WHITELIST` variable.
|
||||
|
||||
.. note::
|
||||
|
||||
Stamp policies are largely obsolete with the introduction of
|
||||
setscene tasks.
|
||||
|
||||
:term:`BB_STAMP_WHITELIST`
|
||||
Lists files whose stamp file timestamps are compared when the stamp
|
||||
policy mode is set to "whitelist". For information on stamp policies,
|
||||
see the :term:`BB_STAMP_POLICY` variable.
|
||||
|
||||
:term:`BB_STRICT_CHECKSUM`
|
||||
Sets a more strict checksum mechanism for non-local URLs. Setting
|
||||
this variable to a value causes BitBake to report an error if it
|
||||
@@ -711,7 +616,7 @@ overview of their function and contents.
|
||||
This variable is useful in situations where the same recipe appears
|
||||
in more than one layer. Setting this variable allows you to
|
||||
prioritize a layer against other layers that contain the same recipe
|
||||
--- effectively letting you control the precedence for the multiple
|
||||
- effectively letting you control the precedence for the multiple
|
||||
layers. The precedence established through this variable stands
|
||||
regardless of a recipe's version (:term:`PV` variable).
|
||||
For example, a layer that has a recipe with a higher :term:`PV` value but
|
||||
@@ -773,7 +678,7 @@ overview of their function and contents.
|
||||
"
|
||||
|
||||
This next example shows an error message that occurs because invalid
|
||||
entries are found, which cause parsing to fail::
|
||||
entries are found, which cause parsing to abort::
|
||||
|
||||
ERROR: BBFILES_DYNAMIC entries must be of the form {!}<collection name>:<filename pattern>, not:
|
||||
/work/my-layer/bbappends/meta-security-isafw/*/*/*.bbappend
|
||||
@@ -1092,6 +997,19 @@ overview of their function and contents.
|
||||
upstream source, and then locations specified by :term:`MIRRORS` in that
|
||||
order.
|
||||
|
||||
:term:`MULTI_PROVIDER_WHITELIST`
|
||||
Allows you to suppress BitBake warnings caused when building two
|
||||
separate recipes that provide the same output.
|
||||
|
||||
BitBake normally issues a warning when building two different recipes
|
||||
where each provides the same output. This scenario is usually
|
||||
something the user does not want. However, cases do exist where it
|
||||
makes sense, particularly in the ``virtual/*`` namespace. You can use
|
||||
this variable to suppress BitBake's warnings.
|
||||
|
||||
To use the variable, list provider names (e.g. recipe names,
|
||||
``virtual/kernel``, and so forth).
|
||||
|
||||
:term:`OVERRIDES`
|
||||
BitBake uses :term:`OVERRIDES` to control what variables are overridden
|
||||
after BitBake parses recipes and configuration files.
|
||||
@@ -1205,10 +1123,10 @@ overview of their function and contents.
|
||||
your configuration::
|
||||
|
||||
PREMIRRORS:prepend = "\
|
||||
git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
http://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
https://.*/.* http://downloads.yoctoproject.org/mirror/sources/"
|
||||
git://.*/.* http://www.yoctoproject.org/sources/ \n \
|
||||
ftp://.*/.* http://www.yoctoproject.org/sources/ \n \
|
||||
http://.*/.* http://www.yoctoproject.org/sources/ \n \
|
||||
https://.*/.* http://www.yoctoproject.org/sources/ \n"
|
||||
|
||||
These changes cause the build system to intercept Git, FTP, HTTP, and
|
||||
HTTPS requests and direct them to the ``http://`` sources mirror. You can
|
||||
@@ -1356,106 +1274,70 @@ overview of their function and contents.
|
||||
The section in which packages should be categorized.
|
||||
|
||||
:term:`SRC_URI`
|
||||
The list of source files --- local or remote. This variable tells
|
||||
The list of source files - local or remote. This variable tells
|
||||
BitBake which bits to pull for the build and how to pull them. For
|
||||
example, if the recipe or append file needs to fetch a single tarball
|
||||
from the Internet, the recipe or append file uses a :term:`SRC_URI`
|
||||
entry that specifies that tarball. On the other hand, if the recipe or
|
||||
append file needs to fetch a tarball, apply two patches, and include
|
||||
a custom file, the recipe or append file needs an :term:`SRC_URI`
|
||||
variable that specifies all those sources.
|
||||
from the Internet, the recipe or append file uses a :term:`SRC_URI` entry
|
||||
that specifies that tarball. On the other hand, if the recipe or
|
||||
append file needs to fetch a tarball and include a custom file, the
|
||||
recipe or append file needs an :term:`SRC_URI` variable that specifies
|
||||
all those sources.
|
||||
|
||||
The following list explains the available URI protocols. URI
|
||||
protocols are highly dependent on particular BitBake Fetcher
|
||||
submodules. Depending on the fetcher BitBake uses, various URL
|
||||
parameters are employed. For specifics on the supported Fetchers, see
|
||||
the :ref:`bitbake-user-manual/bitbake-user-manual-fetching:fetchers`
|
||||
section.
|
||||
The following list explains the available URI protocols:
|
||||
|
||||
- ``az://``: Fetches files from an Azure Storage account using HTTPS.
|
||||
- ``file://`` : Fetches files, which are usually files shipped
|
||||
with the metadata, from the local machine. The path is relative to
|
||||
the :term:`FILESPATH` variable.
|
||||
|
||||
- ``bzr://``: Fetches files from a Bazaar revision control
|
||||
- ``bzr://`` : Fetches files from a Bazaar revision control
|
||||
repository.
|
||||
|
||||
- ``ccrc://``: Fetches files from a ClearCase repository.
|
||||
|
||||
- ``cvs://``: Fetches files from a CVS revision control
|
||||
- ``git://`` : Fetches files from a Git revision control
|
||||
repository.
|
||||
|
||||
- ``file://``: Fetches files, which are usually files shipped
|
||||
with the Metadata, from the local machine.
|
||||
The path is relative to the :term:`FILESPATH`
|
||||
variable. Thus, the build system searches, in order, from the
|
||||
following directories, which are assumed to be a subdirectories of
|
||||
the directory in which the recipe file (``.bb``) or append file
|
||||
(``.bbappend``) resides:
|
||||
|
||||
- ``${BPN}``: the base recipe name without any special suffix
|
||||
or version numbers.
|
||||
|
||||
- ``${BP}`` - ``${BPN}-${PV}``: the base recipe name and
|
||||
version but without any special package name suffix.
|
||||
|
||||
- ``files``: files within a directory, which is named ``files``
|
||||
and is also alongside the recipe or append file.
|
||||
|
||||
- ``ftp://``: Fetches files from the Internet using FTP.
|
||||
|
||||
- ``git://``: Fetches files from a Git revision control
|
||||
repository.
|
||||
|
||||
- ``gitsm://``: Fetches submodules from a Git revision control
|
||||
repository.
|
||||
|
||||
- ``hg://``: Fetches files from a Mercurial (``hg``) revision
|
||||
control repository.
|
||||
|
||||
- ``http://``: Fetches files from the Internet using HTTP.
|
||||
|
||||
- ``https://``: Fetches files from the Internet using HTTPS.
|
||||
|
||||
- ``npm://``: Fetches JavaScript modules from a registry.
|
||||
|
||||
- ``osc://``: Fetches files from an OSC (OpenSUSE Build service)
|
||||
- ``osc://`` : Fetches files from an OSC (OpenSUSE Build service)
|
||||
revision control repository.
|
||||
|
||||
- ``p4://``: Fetches files from a Perforce (``p4``) revision
|
||||
- ``repo://`` : Fetches files from a repo (Git) repository.
|
||||
|
||||
- ``http://`` : Fetches files from the Internet using HTTP.
|
||||
|
||||
- ``https://`` : Fetches files from the Internet using HTTPS.
|
||||
|
||||
- ``ftp://`` : Fetches files from the Internet using FTP.
|
||||
|
||||
- ``cvs://`` : Fetches files from a CVS revision control
|
||||
repository.
|
||||
|
||||
- ``hg://`` : Fetches files from a Mercurial (``hg``) revision
|
||||
control repository.
|
||||
|
||||
- ``repo://``: Fetches files from a repo (Git) repository.
|
||||
|
||||
- ``ssh://``: Fetches files from a secure shell.
|
||||
|
||||
- ``svn://``: Fetches files from a Subversion (``svn``) revision
|
||||
- ``p4://`` : Fetches files from a Perforce (``p4``) revision
|
||||
control repository.
|
||||
|
||||
- ``ssh://`` : Fetches files from a secure shell.
|
||||
|
||||
- ``svn://`` : Fetches files from a Subversion (``svn``) revision
|
||||
control repository.
|
||||
|
||||
- ``az://`` : Fetches files from an Azure Storage account using HTTPS.
|
||||
|
||||
Here are some additional options worth mentioning:
|
||||
|
||||
- ``downloadfilename``: Specifies the filename used when storing
|
||||
the downloaded file.
|
||||
- ``unpack`` : Controls whether or not to unpack the file if it is
|
||||
an archive. The default action is to unpack the file.
|
||||
|
||||
- ``name``: Specifies a name to be used for association with
|
||||
:term:`SRC_URI` checksums or :term:`SRCREV` when you have more than one
|
||||
file or git repository specified in :term:`SRC_URI`. For example::
|
||||
|
||||
SRC_URI = "git://example.com/foo.git;branch=main;name=first \
|
||||
git://example.com/bar.git;branch=main;name=second \
|
||||
http://example.com/file.tar.gz;name=third"
|
||||
|
||||
SRCREV_first = "f1d2d2f924e986ac86fdf7b36c94bcdf32beec15"
|
||||
SRCREV_second = "e242ed3bffccdf271b7fbaf34ed72d089537b42f"
|
||||
SRC_URI[third.sha256sum] = "13550350a8681c84c861aac2e5b440161c2b33a3e4f302ac680ca5b686de48de"
|
||||
|
||||
- ``subdir``: Places the file (or extracts its contents) into the
|
||||
- ``subdir`` : Places the file (or extracts its contents) into the
|
||||
specified subdirectory. This option is useful for unusual tarballs
|
||||
or other archives that do not have their files already in a
|
||||
subdirectory within the archive.
|
||||
|
||||
- ``subpath``: Limits the checkout to a specific subpath of the
|
||||
tree when using the Git fetcher is used.
|
||||
- ``name`` : Specifies a name to be used for association with
|
||||
:term:`SRC_URI` checksums when you have more than one file specified
|
||||
in :term:`SRC_URI`.
|
||||
|
||||
- ``unpack``: Controls whether or not to unpack the file if it is
|
||||
an archive. The default action is to unpack the file.
|
||||
- ``downloadfilename`` : Specifies the filename used when storing
|
||||
the downloaded file.
|
||||
|
||||
:term:`SRCDATE`
|
||||
The date of the source code used to build the package. This variable
|
||||
|
||||
@@ -1,74 +1,32 @@
|
||||
.. SPDX-License-Identifier: CC-BY-2.5
|
||||
|
||||
===========================
|
||||
Supported Release Manuals
|
||||
===========================
|
||||
|
||||
******************************
|
||||
Release Series 3.4 (honister)
|
||||
******************************
|
||||
|
||||
- :yocto_docs:`3.4 BitBake User Manual </3.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.4.1 BitBake User Manual </3.4.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.4.2 BitBake User Manual </3.4.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
******************************
|
||||
Release Series 3.3 (hardknott)
|
||||
******************************
|
||||
|
||||
- :yocto_docs:`3.3 BitBake User Manual </3.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.3.1 BitBake User Manual </3.3.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.3.2 BitBake User Manual </3.3.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.3.3 BitBake User Manual </3.3.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.3.4 BitBake User Manual </3.3.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.3.5 BitBake User Manual </3.3.5/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
=========================
|
||||
Current Release Manuals
|
||||
=========================
|
||||
|
||||
****************************
|
||||
Release Series 3.1 (dunfell)
|
||||
3.1 'dunfell' Release Series
|
||||
****************************
|
||||
|
||||
- :yocto_docs:`3.1 BitBake User Manual </3.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.1 BitBake User Manual </3.1.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.2 BitBake User Manual </3.1.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.3 BitBake User Manual </3.1.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.4 BitBake User Manual </3.1.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.5 BitBake User Manual </3.1.5/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.6 BitBake User Manual </3.1.6/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.7 BitBake User Manual </3.1.7/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.8 BitBake User Manual </3.1.8/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.9 BitBake User Manual </3.1.9/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.10 BitBake User Manual </3.1.10/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.11 BitBake User Manual </3.1.11/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.12 BitBake User Manual </3.1.12/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.13 BitBake User Manual </3.1.13/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.14 BitBake User Manual </3.1.14/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
==========================
|
||||
Outdated Release Manuals
|
||||
Previous Release Manuals
|
||||
==========================
|
||||
|
||||
*******************************
|
||||
Release Series 3.2 (gatesgarth)
|
||||
*******************************
|
||||
|
||||
- :yocto_docs:`3.2 BitBake User Manual </3.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.2.1 BitBake User Manual </3.2.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.2.2 BitBake User Manual </3.2.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.2.3 BitBake User Manual </3.2.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.2.4 BitBake User Manual </3.2.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
*************************
|
||||
Release Series 3.0 (zeus)
|
||||
3.0 'zeus' Release Series
|
||||
*************************
|
||||
|
||||
- :yocto_docs:`3.0 BitBake User Manual </3.0/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.0.1 BitBake User Manual </3.0.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.0.2 BitBake User Manual </3.0.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.0.3 BitBake User Manual </3.0.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.0.4 BitBake User Manual </3.0.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
****************************
|
||||
Release Series 2.7 (warrior)
|
||||
2.7 'warrior' Release Series
|
||||
****************************
|
||||
|
||||
- :yocto_docs:`2.7 BitBake User Manual </2.7/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
@@ -78,7 +36,7 @@ Release Series 2.7 (warrior)
|
||||
- :yocto_docs:`2.7.4 BitBake User Manual </2.7.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
*************************
|
||||
Release Series 2.6 (thud)
|
||||
2.6 'thud' Release Series
|
||||
*************************
|
||||
|
||||
- :yocto_docs:`2.6 BitBake User Manual </2.6/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
@@ -88,16 +46,16 @@ Release Series 2.6 (thud)
|
||||
- :yocto_docs:`2.6.4 BitBake User Manual </2.6.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
*************************
|
||||
Release Series 2.5 (sumo)
|
||||
2.5 'sumo' Release Series
|
||||
*************************
|
||||
|
||||
- :yocto_docs:`2.5 Documentation </2.5>`
|
||||
- :yocto_docs:`2.5.1 Documentation </2.5.1>`
|
||||
- :yocto_docs:`2.5.2 Documentation </2.5.2>`
|
||||
- :yocto_docs:`2.5.3 Documentation </2.5.3>`
|
||||
- :yocto_docs:`2.5 BitBake User Manual </2.5/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`2.5.1 BitBake User Manual </2.5.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`2.5.2 BitBake User Manual </2.5.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`2.5.3 BitBake User Manual </2.5.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
**************************
|
||||
Release Series 2.4 (rocko)
|
||||
2.4 'rocko' Release Series
|
||||
**************************
|
||||
|
||||
- :yocto_docs:`2.4 BitBake User Manual </2.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
@@ -107,7 +65,7 @@ Release Series 2.4 (rocko)
|
||||
- :yocto_docs:`2.4.4 BitBake User Manual </2.4.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
*************************
|
||||
Release Series 2.3 (pyro)
|
||||
2.3 'pyro' Release Series
|
||||
*************************
|
||||
|
||||
- :yocto_docs:`2.3 BitBake User Manual </2.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
@@ -117,7 +75,7 @@ Release Series 2.3 (pyro)
|
||||
- :yocto_docs:`2.3.4 BitBake User Manual </2.3.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
**************************
|
||||
Release Series 2.2 (morty)
|
||||
2.2 'morty' Release Series
|
||||
**************************
|
||||
|
||||
- :yocto_docs:`2.2 BitBake User Manual </2.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
@@ -126,7 +84,7 @@ Release Series 2.2 (morty)
|
||||
- :yocto_docs:`2.2.3 BitBake User Manual </2.2.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
****************************
|
||||
Release Series 2.1 (krogoth)
|
||||
2.1 'krogoth' Release Series
|
||||
****************************
|
||||
|
||||
- :yocto_docs:`2.1 BitBake User Manual </2.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
@@ -135,7 +93,7 @@ Release Series 2.1 (krogoth)
|
||||
- :yocto_docs:`2.1.3 BitBake User Manual </2.1.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
***************************
|
||||
Release Series 2.0 (jethro)
|
||||
2.0 'jethro' Release Series
|
||||
***************************
|
||||
|
||||
- :yocto_docs:`1.9 BitBake User Manual </1.9/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
@@ -145,7 +103,7 @@ Release Series 2.0 (jethro)
|
||||
- :yocto_docs:`2.0.3 BitBake User Manual </2.0.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
*************************
|
||||
Release Series 1.8 (fido)
|
||||
1.8 'fido' Release Series
|
||||
*************************
|
||||
|
||||
- :yocto_docs:`1.8 BitBake User Manual </1.8/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
@@ -153,7 +111,7 @@ Release Series 1.8 (fido)
|
||||
- :yocto_docs:`1.8.2 BitBake User Manual </1.8.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
**************************
|
||||
Release Series 1.7 (dizzy)
|
||||
1.7 'dizzy' Release Series
|
||||
**************************
|
||||
|
||||
- :yocto_docs:`1.7 BitBake User Manual </1.7/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
@@ -162,7 +120,7 @@ Release Series 1.7 (dizzy)
|
||||
- :yocto_docs:`1.7.3 BitBake User Manual </1.7.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
**************************
|
||||
Release Series 1.6 (daisy)
|
||||
1.6 'daisy' Release Series
|
||||
**************************
|
||||
|
||||
- :yocto_docs:`1.6 BitBake User Manual </1.6/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
@@ -3,8 +3,6 @@
|
||||
#
|
||||
# Copyright (C) 2006 Tim Ansell
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
# Please Note:
|
||||
# Be careful when using mutable types (ie Dict and Lists) - operations involving these are SLOW.
|
||||
# Assign a file to __warn__ to get warnings about slow operations.
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
__version__ = "2.2.0"
|
||||
__version__ = "1.52.0"
|
||||
|
||||
import sys
|
||||
if sys.version_info < (3, 6, 0):
|
||||
@@ -60,10 +60,6 @@ class BBLoggerMixin(object):
|
||||
return
|
||||
if loglevel < bb.msg.loggerDefaultLogLevel:
|
||||
return
|
||||
|
||||
if not isinstance(level, int) or not isinstance(msg, str):
|
||||
mainlogger.warning("Invalid arguments in bbdebug: %s" % repr((level, msg,) + args))
|
||||
|
||||
return self.log(loglevel, msg, *args, **kwargs)
|
||||
|
||||
def plain(self, msg, *args, **kwargs):
|
||||
@@ -75,13 +71,6 @@ class BBLoggerMixin(object):
|
||||
def verbnote(self, msg, *args, **kwargs):
|
||||
return self.log(logging.INFO + 2, msg, *args, **kwargs)
|
||||
|
||||
def warnonce(self, msg, *args, **kwargs):
|
||||
return self.log(logging.WARNING - 1, msg, *args, **kwargs)
|
||||
|
||||
def erroronce(self, msg, *args, **kwargs):
|
||||
return self.log(logging.ERROR - 1, msg, *args, **kwargs)
|
||||
|
||||
|
||||
Logger = logging.getLoggerClass()
|
||||
class BBLogger(Logger, BBLoggerMixin):
|
||||
def __init__(self, name, *args, **kwargs):
|
||||
@@ -168,15 +157,9 @@ def verbnote(*args):
|
||||
def warn(*args):
|
||||
mainlogger.warning(''.join(args))
|
||||
|
||||
def warnonce(*args):
|
||||
mainlogger.warnonce(''.join(args))
|
||||
|
||||
def error(*args, **kwargs):
|
||||
mainlogger.error(''.join(args), extra=kwargs)
|
||||
|
||||
def erroronce(*args):
|
||||
mainlogger.erroronce(''.join(args))
|
||||
|
||||
def fatal(*args, **kwargs):
|
||||
mainlogger.critical(''.join(args), extra=kwargs)
|
||||
raise BBHandledException()
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -30,4 +28,4 @@ def chunkify(msg, max_chunk):
|
||||
|
||||
|
||||
from .client import AsyncClient, Client
|
||||
from .serv import AsyncServer, AsyncServerConnection, ClientError, ServerError
|
||||
from .serv import AsyncServer, AsyncServerConnection
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -31,17 +29,7 @@ class AsyncClient(object):
|
||||
|
||||
async def connect_unix(self, path):
|
||||
async def connect_sock():
|
||||
# AF_UNIX has path length issues so chdir here to workaround
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
os.chdir(os.path.dirname(path))
|
||||
# The socket must be opened synchronously so that CWD doesn't get
|
||||
# changed out from underneath us so we pass as a sock into asyncio
|
||||
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
|
||||
sock.connect(os.path.basename(path))
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
return await asyncio.open_unix_connection(sock=sock)
|
||||
return await asyncio.open_unix_connection(path)
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
@@ -160,8 +148,14 @@ class Client(object):
|
||||
setattr(self, m, self._get_downcall_wrapper(downcall))
|
||||
|
||||
def connect_unix(self, path):
|
||||
self.loop.run_until_complete(self.client.connect_unix(path))
|
||||
self.loop.run_until_complete(self.client.connect())
|
||||
# AF_UNIX has path length issues so chdir here to workaround
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
os.chdir(os.path.dirname(path))
|
||||
self.loop.run_until_complete(self.client.connect_unix(os.path.basename(path)))
|
||||
self.loop.run_until_complete(self.client.connect())
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
|
||||
@property
|
||||
def max_chunk(self):
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -153,13 +151,6 @@ class AsyncServer(object):
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_QUICKACK, 1)
|
||||
|
||||
# Enable keep alives. This prevents broken client connections
|
||||
# from persisting on the server for long periods of time.
|
||||
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 30)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 4)
|
||||
|
||||
name = self.server.sockets[0].getsockname()
|
||||
if self.server.sockets[0].family == socket.AF_INET6:
|
||||
self.address = "[%s]:%d" % (name[0], name[1])
|
||||
|
||||
@@ -20,7 +20,6 @@ import itertools
|
||||
import time
|
||||
import re
|
||||
import stat
|
||||
import datetime
|
||||
import bb
|
||||
import bb.msg
|
||||
import bb.process
|
||||
@@ -570,6 +569,7 @@ exit $ret
|
||||
def _task_data(fn, task, d):
|
||||
localdata = bb.data.createCopy(d)
|
||||
localdata.setVar('BB_FILENAME', fn)
|
||||
localdata.setVar('BB_CURRENTTASK', task[3:])
|
||||
localdata.setVar('OVERRIDES', 'task-%s:%s' %
|
||||
(task[3:].replace('_', '-'), d.getVar('OVERRIDES', False)))
|
||||
localdata.finalize()
|
||||
@@ -583,7 +583,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
running it with its own local metadata, and with some useful variables set.
|
||||
"""
|
||||
if not d.getVarFlag(task, 'task', False):
|
||||
event.fire(TaskInvalid(task, fn, d), d)
|
||||
event.fire(TaskInvalid(task, d), d)
|
||||
logger.error("No such task: %s" % task)
|
||||
return 1
|
||||
|
||||
@@ -619,8 +619,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
logorder = os.path.join(tempdir, 'log.task_order')
|
||||
try:
|
||||
with open(logorder, 'a') as logorderfile:
|
||||
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S.%f")
|
||||
logorderfile.write('{0} {1} ({2}): {3}\n'.format(timestamp, task, os.getpid(), logbase))
|
||||
logorderfile.write('{0} ({1}): {2}\n'.format(task, os.getpid(), logbase))
|
||||
except OSError:
|
||||
logger.exception("Opening log file '%s'", logorder)
|
||||
pass
|
||||
@@ -716,23 +715,19 @@ def _exec_task(fn, task, d, quieterr):
|
||||
logger.debug2("Zero size logfn %s, removing", logfn)
|
||||
bb.utils.remove(logfn)
|
||||
bb.utils.remove(loglink)
|
||||
except bb.BBHandledException:
|
||||
event.fire(TaskFailed(task, fn, logfn, localdata, True), localdata)
|
||||
return 1
|
||||
except (Exception, SystemExit) as exc:
|
||||
handled = False
|
||||
if isinstance(exc, bb.BBHandledException):
|
||||
handled = True
|
||||
|
||||
if quieterr:
|
||||
if not handled:
|
||||
logger.warning(repr(exc))
|
||||
event.fire(TaskFailedSilent(task, fn, logfn, localdata), localdata)
|
||||
else:
|
||||
errprinted = errchk.triggered
|
||||
# If the output is already on stdout, we've printed the information in the
|
||||
# logs once already so don't duplicate
|
||||
if verboseStdoutLogging or handled:
|
||||
if verboseStdoutLogging:
|
||||
errprinted = True
|
||||
if not handled:
|
||||
logger.error(repr(exc))
|
||||
logger.error(repr(exc))
|
||||
event.fire(TaskFailed(task, fn, logfn, localdata, errprinted), localdata)
|
||||
return 1
|
||||
|
||||
@@ -837,7 +832,11 @@ def stamp_cleanmask_internal(taskname, d, file_name):
|
||||
|
||||
return [cleanmask, cleanmask.replace(taskflagname, taskflagname + "_setscene")]
|
||||
|
||||
def clean_stamp(task, d, file_name = None):
|
||||
def make_stamp(task, d, file_name = None):
|
||||
"""
|
||||
Creates/updates a stamp for a given task
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
cleanmask = stamp_cleanmask_internal(task, d, file_name)
|
||||
for mask in cleanmask:
|
||||
for name in glob.glob(mask):
|
||||
@@ -848,14 +847,6 @@ def clean_stamp(task, d, file_name = None):
|
||||
if name.endswith('.taint'):
|
||||
continue
|
||||
os.unlink(name)
|
||||
return
|
||||
|
||||
def make_stamp(task, d, file_name = None):
|
||||
"""
|
||||
Creates/updates a stamp for a given task
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
clean_stamp(task, d, file_name)
|
||||
|
||||
stamp = stamp_internal(task, d, file_name)
|
||||
# Remove the file and recreate to force timestamp
|
||||
|
||||
@@ -24,7 +24,6 @@ from collections.abc import Mapping
|
||||
import bb.utils
|
||||
from bb import PrefixLoggerAdapter
|
||||
import re
|
||||
import shutil
|
||||
|
||||
logger = logging.getLogger("BitBake.Cache")
|
||||
|
||||
@@ -285,15 +284,36 @@ def parse_recipe(bb_data, bbfile, appends, mc=''):
|
||||
Parse a recipe
|
||||
"""
|
||||
|
||||
chdir_back = False
|
||||
|
||||
bb_data.setVar("__BBMULTICONFIG", mc)
|
||||
|
||||
# expand tmpdir to include this topdir
|
||||
bb_data.setVar('TMPDIR', bb_data.getVar('TMPDIR') or "")
|
||||
bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
|
||||
oldpath = os.path.abspath(os.getcwd())
|
||||
bb.parse.cached_mtime_noerror(bbfile_loc)
|
||||
|
||||
if appends:
|
||||
bb_data.setVar('__BBAPPEND', " ".join(appends))
|
||||
bb_data = bb.parse.handle(bbfile, bb_data)
|
||||
return bb_data
|
||||
# The ConfHandler first looks if there is a TOPDIR and if not
|
||||
# then it would call getcwd().
|
||||
# Previously, we chdir()ed to bbfile_loc, called the handler
|
||||
# and finally chdir()ed back, a couple of thousand times. We now
|
||||
# just fill in TOPDIR to point to bbfile_loc if there is no TOPDIR yet.
|
||||
if not bb_data.getVar('TOPDIR', False):
|
||||
chdir_back = True
|
||||
bb_data.setVar('TOPDIR', bbfile_loc)
|
||||
try:
|
||||
if appends:
|
||||
bb_data.setVar('__BBAPPEND', " ".join(appends))
|
||||
bb_data = bb.parse.handle(bbfile, bb_data)
|
||||
if chdir_back:
|
||||
os.chdir(oldpath)
|
||||
return bb_data
|
||||
except:
|
||||
if chdir_back:
|
||||
os.chdir(oldpath)
|
||||
raise
|
||||
|
||||
|
||||
|
||||
class NoCache(object):
|
||||
@@ -620,7 +640,7 @@ class Cache(NoCache):
|
||||
for f in flist:
|
||||
if not f:
|
||||
continue
|
||||
f, exist = f.rsplit(":", 1)
|
||||
f, exist = f.split(":")
|
||||
if (exist == "True" and not os.path.exists(f)) or (exist == "False" and os.path.exists(f)):
|
||||
self.logger.debug2("%s's file checksum list file %s changed",
|
||||
fn, f)
|
||||
@@ -999,11 +1019,3 @@ class SimpleCache(object):
|
||||
p.dump([data, self.cacheversion])
|
||||
|
||||
bb.utils.unlockfile(glf)
|
||||
|
||||
def copyfile(self, target):
|
||||
if not self.cachefile:
|
||||
return
|
||||
|
||||
glf = bb.utils.lockfile(self.cachefile + ".lock")
|
||||
shutil.copy(self.cachefile, target)
|
||||
bb.utils.unlockfile(glf)
|
||||
|
||||
@@ -11,13 +11,10 @@ import os
|
||||
import stat
|
||||
import bb.utils
|
||||
import logging
|
||||
import re
|
||||
from bb.cache import MultiProcessCache
|
||||
|
||||
logger = logging.getLogger("BitBake.Cache")
|
||||
|
||||
filelist_regex = re.compile(r'(?:(?<=:True)|(?<=:False))\s+')
|
||||
|
||||
# mtime cache (non-persistent)
|
||||
# based upon the assumption that files do not change during bitbake run
|
||||
class FileMtimeCache(object):
|
||||
@@ -53,7 +50,6 @@ class FileChecksumCache(MultiProcessCache):
|
||||
MultiProcessCache.__init__(self)
|
||||
|
||||
def get_checksum(self, f):
|
||||
f = os.path.normpath(f)
|
||||
entry = self.cachedata[0].get(f)
|
||||
cmtime = self.mtime_cache.cached_mtime(f)
|
||||
if entry:
|
||||
@@ -88,36 +84,22 @@ class FileChecksumCache(MultiProcessCache):
|
||||
return None
|
||||
return checksum
|
||||
|
||||
#
|
||||
# Changing the format of file-checksums is problematic as both OE and Bitbake have
|
||||
# knowledge of them. We need to encode a new piece of data, the portion of the path
|
||||
# we care about from a checksum perspective. This means that files that change subdirectory
|
||||
# are tracked by the task hashes. To do this, we do something horrible and put a "/./" into
|
||||
# the path. The filesystem handles it but it gives us a marker to know which subsection
|
||||
# of the path to cache.
|
||||
#
|
||||
def checksum_dir(pth):
|
||||
# Handle directories recursively
|
||||
if pth == "/":
|
||||
bb.fatal("Refusing to checksum /")
|
||||
pth = pth.rstrip("/")
|
||||
dirchecksums = []
|
||||
for root, dirs, files in os.walk(pth, topdown=True):
|
||||
[dirs.remove(d) for d in list(dirs) if d in localdirsexclude]
|
||||
for name in files:
|
||||
fullpth = os.path.join(root, name).replace(pth, os.path.join(pth, "."))
|
||||
fullpth = os.path.join(root, name)
|
||||
checksum = checksum_file(fullpth)
|
||||
if checksum:
|
||||
dirchecksums.append((fullpth, checksum))
|
||||
return dirchecksums
|
||||
|
||||
checksums = []
|
||||
for pth in filelist_regex.split(filelist):
|
||||
if not pth:
|
||||
continue
|
||||
pth = pth.strip()
|
||||
if not pth:
|
||||
continue
|
||||
for pth in filelist.split():
|
||||
exist = pth.split(":")[1]
|
||||
if exist == "False":
|
||||
continue
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -197,10 +195,6 @@ class BufferedLogger(Logger):
|
||||
self.target.handle(record)
|
||||
self.buffer = []
|
||||
|
||||
class DummyLogger():
|
||||
def flush(self):
|
||||
return
|
||||
|
||||
class PythonParser():
|
||||
getvars = (".getVar", ".appendVar", ".prependVar", "oe.utils.conditional")
|
||||
getvarflags = (".getVarFlag", ".appendVarFlag", ".prependVarFlag")
|
||||
@@ -282,9 +276,7 @@ class PythonParser():
|
||||
self.contains = {}
|
||||
self.execs = set()
|
||||
self.references = set()
|
||||
self._log = log
|
||||
# Defer init as expensive
|
||||
self.log = DummyLogger()
|
||||
self.log = BufferedLogger('BitBake.Data.PythonParser', logging.DEBUG, log)
|
||||
|
||||
self.unhandled_message = "in call of %s, argument '%s' is not a string literal"
|
||||
self.unhandled_message = "while parsing %s, %s" % (name, self.unhandled_message)
|
||||
@@ -311,9 +303,6 @@ class PythonParser():
|
||||
self.contains[i] = set(codeparsercache.pythoncacheextras[h].contains[i])
|
||||
return
|
||||
|
||||
# Need to parse so take the hit on the real log buffer
|
||||
self.log = BufferedLogger('BitBake.Data.PythonParser', logging.DEBUG, self._log)
|
||||
|
||||
# We can't add to the linenumbers for compile, we can pad to the correct number of blank lines though
|
||||
node = "\n" * int(lineno) + node
|
||||
code = compile(check_indent(str(node)), filename, "exec",
|
||||
@@ -332,11 +321,7 @@ class ShellParser():
|
||||
self.funcdefs = set()
|
||||
self.allexecs = set()
|
||||
self.execs = set()
|
||||
self._name = name
|
||||
self._log = log
|
||||
# Defer init as expensive
|
||||
self.log = DummyLogger()
|
||||
|
||||
self.log = BufferedLogger('BitBake.Data.%s' % name, logging.DEBUG, log)
|
||||
self.unhandled_template = "unable to handle non-literal command '%s'"
|
||||
self.unhandled_template = "while parsing %s, %s" % (name, self.unhandled_template)
|
||||
|
||||
@@ -355,9 +340,6 @@ class ShellParser():
|
||||
self.execs = set(codeparsercache.shellcacheextras[h].execs)
|
||||
return self.execs
|
||||
|
||||
# Need to parse so take the hit on the real log buffer
|
||||
self.log = BufferedLogger('BitBake.Data.%s' % self._name, logging.DEBUG, self._log)
|
||||
|
||||
self._parse_shell(value)
|
||||
self.execs = set(cmd for cmd in self.allexecs if cmd not in self.funcdefs)
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
# Helper library to implement streaming compression and decompression using an
|
||||
@@ -51,7 +49,7 @@ def open_wrap(
|
||||
raise ValueError("Argument 'newline' not supported in binary mode")
|
||||
|
||||
file_mode = mode.replace("t", "")
|
||||
if isinstance(filename, (str, bytes, os.PathLike, int)):
|
||||
if isinstance(filename, (str, bytes, os.PathLike)):
|
||||
binary_file = cls(filename, file_mode, **kwargs)
|
||||
elif hasattr(filename, "read") or hasattr(filename, "write"):
|
||||
binary_file = cls(None, file_mode, fileobj=filename, **kwargs)
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -13,6 +13,7 @@ import sys, os, glob, os.path, re, time
|
||||
import itertools
|
||||
import logging
|
||||
import multiprocessing
|
||||
import sre_constants
|
||||
import threading
|
||||
from io import StringIO, UnsupportedOperation
|
||||
from contextlib import closing
|
||||
@@ -158,9 +159,6 @@ class BBCooker:
|
||||
for f in featureSet:
|
||||
self.featureset.setFeature(f)
|
||||
|
||||
self.orig_syspath = sys.path.copy()
|
||||
self.orig_sysmodules = [*sys.modules]
|
||||
|
||||
self.configuration = bb.cookerdata.CookerConfiguration()
|
||||
|
||||
self.idleCallBackRegister = idleCallBackRegister
|
||||
@@ -168,15 +166,27 @@ class BBCooker:
|
||||
bb.debug(1, "BBCooker starting %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
|
||||
self.configwatcher = None
|
||||
self.confignotifier = None
|
||||
self.configwatcher = pyinotify.WatchManager()
|
||||
bb.debug(1, "BBCooker pyinotify1 %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
|
||||
self.configwatcher.bbseen = set()
|
||||
self.configwatcher.bbwatchedfiles = set()
|
||||
self.confignotifier = pyinotify.Notifier(self.configwatcher, self.config_notifications)
|
||||
bb.debug(1, "BBCooker pyinotify2 %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
self.watchmask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_CREATE | pyinotify.IN_DELETE | \
|
||||
pyinotify.IN_DELETE_SELF | pyinotify.IN_MODIFY | pyinotify.IN_MOVE_SELF | \
|
||||
pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO
|
||||
self.watcher = pyinotify.WatchManager()
|
||||
bb.debug(1, "BBCooker pyinotify3 %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
self.watcher.bbseen = set()
|
||||
self.watcher.bbwatchedfiles = set()
|
||||
self.notifier = pyinotify.Notifier(self.watcher, self.notifications)
|
||||
|
||||
self.watcher = None
|
||||
self.notifier = None
|
||||
bb.debug(1, "BBCooker pyinotify complete %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
|
||||
# If being called by something like tinfoil, we need to clean cached data
|
||||
# which may now be invalid
|
||||
@@ -189,7 +199,7 @@ class BBCooker:
|
||||
|
||||
self.inotify_modified_files = []
|
||||
|
||||
def _process_inotify_updates(server, cooker, halt):
|
||||
def _process_inotify_updates(server, cooker, abort):
|
||||
cooker.process_inotify_updates()
|
||||
return 1.0
|
||||
|
||||
@@ -227,29 +237,9 @@ class BBCooker:
|
||||
sys.stdout.flush()
|
||||
self.handlePRServ()
|
||||
|
||||
def setupConfigWatcher(self):
|
||||
if self.configwatcher:
|
||||
self.configwatcher.close()
|
||||
self.confignotifier = None
|
||||
self.configwatcher = None
|
||||
self.configwatcher = pyinotify.WatchManager()
|
||||
self.configwatcher.bbseen = set()
|
||||
self.configwatcher.bbwatchedfiles = set()
|
||||
self.confignotifier = pyinotify.Notifier(self.configwatcher, self.config_notifications)
|
||||
|
||||
def setupParserWatcher(self):
|
||||
if self.watcher:
|
||||
self.watcher.close()
|
||||
self.notifier = None
|
||||
self.watcher = None
|
||||
self.watcher = pyinotify.WatchManager()
|
||||
self.watcher.bbseen = set()
|
||||
self.watcher.bbwatchedfiles = set()
|
||||
self.notifier = pyinotify.Notifier(self.watcher, self.notifications)
|
||||
|
||||
def process_inotify_updates(self):
|
||||
for n in [self.confignotifier, self.notifier]:
|
||||
if n and n.check_events(timeout=0):
|
||||
if n.check_events(timeout=0):
|
||||
# read notified events and enqeue them
|
||||
n.read_events()
|
||||
n.process_events()
|
||||
@@ -263,12 +253,6 @@ class BBCooker:
|
||||
return
|
||||
if not event.pathname in self.configwatcher.bbwatchedfiles:
|
||||
return
|
||||
if "IN_ISDIR" in event.maskname:
|
||||
if "IN_CREATE" in event.maskname or "IN_DELETE" in event.maskname:
|
||||
if event.pathname in self.configwatcher.bbseen:
|
||||
self.configwatcher.bbseen.remove(event.pathname)
|
||||
# Could remove all entries starting with the directory but for now...
|
||||
bb.parse.clear_cache()
|
||||
if not event.pathname in self.inotify_modified_files:
|
||||
self.inotify_modified_files.append(event.pathname)
|
||||
self.baseconfig_valid = False
|
||||
@@ -282,12 +266,6 @@ class BBCooker:
|
||||
if event.pathname.endswith("bitbake-cookerdaemon.log") \
|
||||
or event.pathname.endswith("bitbake.lock"):
|
||||
return
|
||||
if "IN_ISDIR" in event.maskname:
|
||||
if "IN_CREATE" in event.maskname or "IN_DELETE" in event.maskname:
|
||||
if event.pathname in self.watcher.bbseen:
|
||||
self.watcher.bbseen.remove(event.pathname)
|
||||
# Could remove all entries starting with the directory but for now...
|
||||
bb.parse.clear_cache()
|
||||
if not event.pathname in self.inotify_modified_files:
|
||||
self.inotify_modified_files.append(event.pathname)
|
||||
self.parsecache_valid = False
|
||||
@@ -352,13 +330,6 @@ class BBCooker:
|
||||
self.state = state.initial
|
||||
self.caches_array = []
|
||||
|
||||
sys.path = self.orig_syspath.copy()
|
||||
for mod in [*sys.modules]:
|
||||
if mod not in self.orig_sysmodules:
|
||||
del sys.modules[mod]
|
||||
|
||||
self.setupConfigWatcher()
|
||||
|
||||
# Need to preserve BB_CONSOLELOG over resets
|
||||
consolelog = None
|
||||
if hasattr(self, "data"):
|
||||
@@ -402,7 +373,6 @@ class BBCooker:
|
||||
for mc in self.databuilder.mcdata.values():
|
||||
mc.renameVar("__depends", "__base_depends")
|
||||
self.add_filewatch(mc.getVar("__base_depends", False), self.configwatcher)
|
||||
mc.setVar("__bbclasstype", "recipe")
|
||||
|
||||
self.baseconfig_valid = True
|
||||
self.parsecache_valid = False
|
||||
@@ -425,7 +395,7 @@ class BBCooker:
|
||||
sock = socket.create_connection(upstream.split(":"), 5)
|
||||
sock.close()
|
||||
except socket.error as e:
|
||||
bb.warn("BB_HASHSERVE_UPSTREAM is not valid, unable to connect hash equivalence server at '%s': %s"
|
||||
bb.warn("BB_HASHSERVE_UPSTREAM is not valid, unable to connect hash equivalence server at '%s': %s"
|
||||
% (upstream, repr(e)))
|
||||
|
||||
self.hashservaddr = "unix://%s/hashserve.sock" % self.data.getVar("TOPDIR")
|
||||
@@ -535,7 +505,7 @@ class BBCooker:
|
||||
logger.debug("Base environment change, triggering reparse")
|
||||
self.reset()
|
||||
|
||||
def runCommands(self, server, data, halt):
|
||||
def runCommands(self, server, data, abort):
|
||||
"""
|
||||
Run any queued asynchronous command
|
||||
This is done by the idle handler so it runs in true context rather than
|
||||
@@ -585,8 +555,6 @@ class BBCooker:
|
||||
if not orig_tracking:
|
||||
self.enableDataTracking()
|
||||
self.reset()
|
||||
# reset() resets to the UI requested value so we have to redo this
|
||||
self.enableDataTracking()
|
||||
|
||||
def mc_base(p):
|
||||
if p.startswith('mc:'):
|
||||
@@ -610,7 +578,7 @@ class BBCooker:
|
||||
if pkgs_to_build[0] in set(ignore.split()):
|
||||
bb.fatal("%s is in ASSUME_PROVIDED" % pkgs_to_build[0])
|
||||
|
||||
taskdata, runlist = self.buildTaskData(pkgs_to_build, None, self.configuration.halt, allowincomplete=True)
|
||||
taskdata, runlist = self.buildTaskData(pkgs_to_build, None, self.configuration.abort, allowincomplete=True)
|
||||
|
||||
mc = runlist[0][0]
|
||||
fn = runlist[0][3]
|
||||
@@ -639,7 +607,7 @@ class BBCooker:
|
||||
data.emit_env(env, envdata, True)
|
||||
logger.plain(env.getvalue())
|
||||
|
||||
# emit the metadata which isn't valid shell
|
||||
# emit the metadata which isnt valid shell
|
||||
for e in sorted(envdata.keys()):
|
||||
if envdata.getVarFlag(e, 'func', False) and envdata.getVarFlag(e, 'python', False):
|
||||
logger.plain("\npython %s () {\n%s}\n", e, envdata.getVar(e, False))
|
||||
@@ -648,7 +616,7 @@ class BBCooker:
|
||||
self.disableDataTracking()
|
||||
self.reset()
|
||||
|
||||
def buildTaskData(self, pkgs_to_build, task, halt, allowincomplete=False):
|
||||
def buildTaskData(self, pkgs_to_build, task, abort, allowincomplete=False):
|
||||
"""
|
||||
Prepare a runqueue and taskdata object for iteration over pkgs_to_build
|
||||
"""
|
||||
@@ -695,7 +663,7 @@ class BBCooker:
|
||||
localdata = {}
|
||||
|
||||
for mc in self.multiconfigs:
|
||||
taskdata[mc] = bb.taskdata.TaskData(halt, skiplist=self.skiplist, allowincomplete=allowincomplete)
|
||||
taskdata[mc] = bb.taskdata.TaskData(abort, skiplist=self.skiplist, allowincomplete=allowincomplete)
|
||||
localdata[mc] = data.createCopy(self.databuilder.mcdata[mc])
|
||||
bb.data.expandKeys(localdata[mc])
|
||||
|
||||
@@ -744,18 +712,19 @@ class BBCooker:
|
||||
taskdata[mc].add_unresolved(localdata[mc], self.recipecaches[mc])
|
||||
mcdeps |= set(taskdata[mc].get_mcdepends())
|
||||
new = False
|
||||
for k in mcdeps:
|
||||
if k in seen:
|
||||
continue
|
||||
l = k.split(':')
|
||||
depmc = l[2]
|
||||
if depmc not in self.multiconfigs:
|
||||
bb.fatal("Multiconfig dependency %s depends on nonexistent multiconfig configuration named configuration %s" % (k,depmc))
|
||||
else:
|
||||
logger.debug("Adding providers for multiconfig dependency %s" % l[3])
|
||||
taskdata[depmc].add_provider(localdata[depmc], self.recipecaches[depmc], l[3])
|
||||
seen.add(k)
|
||||
new = True
|
||||
for mc in self.multiconfigs:
|
||||
for k in mcdeps:
|
||||
if k in seen:
|
||||
continue
|
||||
l = k.split(':')
|
||||
depmc = l[2]
|
||||
if depmc not in self.multiconfigs:
|
||||
bb.fatal("Multiconfig dependency %s depends on nonexistent multiconfig configuration named configuration %s" % (k,depmc))
|
||||
else:
|
||||
logger.debug("Adding providers for multiconfig dependency %s" % l[3])
|
||||
taskdata[depmc].add_provider(localdata[depmc], self.recipecaches[depmc], l[3])
|
||||
seen.add(k)
|
||||
new = True
|
||||
|
||||
for mc in self.multiconfigs:
|
||||
taskdata[mc].add_unresolved(localdata[mc], self.recipecaches[mc])
|
||||
@@ -768,7 +737,7 @@ class BBCooker:
|
||||
Prepare a runqueue and taskdata object for iteration over pkgs_to_build
|
||||
"""
|
||||
|
||||
# We set halt to False here to prevent unbuildable targets raising
|
||||
# We set abort to False here to prevent unbuildable targets raising
|
||||
# an exception when we're just generating data
|
||||
taskdata, runlist = self.buildTaskData(pkgs_to_build, task, False, allowincomplete=True)
|
||||
|
||||
@@ -1277,15 +1246,15 @@ class BBCooker:
|
||||
except bb.utils.VersionStringException as vse:
|
||||
bb.fatal('Error parsing LAYERRECOMMENDS_%s: %s' % (c, str(vse)))
|
||||
if not res:
|
||||
parselog.debug3("Layer '%s' recommends version %s of layer '%s', but version %s is currently enabled in your configuration. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec, layerver)
|
||||
parselog.debug(3,"Layer '%s' recommends version %s of layer '%s', but version %s is currently enabled in your configuration. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec, layerver)
|
||||
continue
|
||||
else:
|
||||
parselog.debug3("Layer '%s' recommends version %s of layer '%s', which exists in your configuration but does not specify a version. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec)
|
||||
parselog.debug(3,"Layer '%s' recommends version %s of layer '%s', which exists in your configuration but does not specify a version. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec)
|
||||
continue
|
||||
parselog.debug3("Layer '%s' recommends layer '%s', so we are adding it", c, rec)
|
||||
parselog.debug(3,"Layer '%s' recommends layer '%s', so we are adding it", c, rec)
|
||||
collection_depends[c].append(rec)
|
||||
else:
|
||||
parselog.debug3("Layer '%s' recommends layer '%s', but this layer is not enabled in your configuration", c, rec)
|
||||
parselog.debug(3,"Layer '%s' recommends layer '%s', but this layer is not enabled in your configuration", c, rec)
|
||||
|
||||
# Recursively work out collection priorities based on dependencies
|
||||
def calc_layer_priority(collection):
|
||||
@@ -1297,7 +1266,7 @@ class BBCooker:
|
||||
if depprio > max_depprio:
|
||||
max_depprio = depprio
|
||||
max_depprio += 1
|
||||
parselog.debug("Calculated priority of layer %s as %d", collection, max_depprio)
|
||||
parselog.debug(1, "Calculated priority of layer %s as %d", collection, max_depprio)
|
||||
collection_priorities[collection] = max_depprio
|
||||
|
||||
# Calculate all layer priorities using calc_layer_priority and store in bbfile_config_priorities
|
||||
@@ -1309,7 +1278,7 @@ class BBCooker:
|
||||
errors = True
|
||||
continue
|
||||
elif regex == "":
|
||||
parselog.debug("BBFILE_PATTERN_%s is empty" % c)
|
||||
parselog.debug(1, "BBFILE_PATTERN_%s is empty" % c)
|
||||
cre = re.compile('^NULL$')
|
||||
errors = False
|
||||
else:
|
||||
@@ -1456,7 +1425,7 @@ class BBCooker:
|
||||
|
||||
# Setup taskdata structure
|
||||
taskdata = {}
|
||||
taskdata[mc] = bb.taskdata.TaskData(self.configuration.halt)
|
||||
taskdata[mc] = bb.taskdata.TaskData(self.configuration.abort)
|
||||
taskdata[mc].add_provider(self.databuilder.mcdata[mc], self.recipecaches[mc], item)
|
||||
|
||||
if quietlog:
|
||||
@@ -1472,11 +1441,11 @@ class BBCooker:
|
||||
|
||||
rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
|
||||
|
||||
def buildFileIdle(server, rq, halt):
|
||||
def buildFileIdle(server, rq, abort):
|
||||
|
||||
msg = None
|
||||
interrupted = 0
|
||||
if halt or self.state == state.forceshutdown:
|
||||
if abort or self.state == state.forceshutdown:
|
||||
rq.finish_runqueue(True)
|
||||
msg = "Forced shutdown"
|
||||
interrupted = 2
|
||||
@@ -1518,10 +1487,10 @@ class BBCooker:
|
||||
Attempt to build the targets specified
|
||||
"""
|
||||
|
||||
def buildTargetsIdle(server, rq, halt):
|
||||
def buildTargetsIdle(server, rq, abort):
|
||||
msg = None
|
||||
interrupted = 0
|
||||
if halt or self.state == state.forceshutdown:
|
||||
if abort or self.state == state.forceshutdown:
|
||||
rq.finish_runqueue(True)
|
||||
msg = "Forced shutdown"
|
||||
interrupted = 2
|
||||
@@ -1564,7 +1533,7 @@ class BBCooker:
|
||||
|
||||
bb.event.fire(bb.event.BuildInit(packages), self.data)
|
||||
|
||||
taskdata, runlist = self.buildTaskData(targets, task, self.configuration.halt)
|
||||
taskdata, runlist = self.buildTaskData(targets, task, self.configuration.abort)
|
||||
|
||||
buildname = self.data.getVar("BUILDNAME", False)
|
||||
|
||||
@@ -1632,7 +1601,7 @@ class BBCooker:
|
||||
|
||||
if self.state in (state.shutdown, state.forceshutdown, state.error):
|
||||
if hasattr(self.parser, 'shutdown'):
|
||||
self.parser.shutdown(clean=False)
|
||||
self.parser.shutdown(clean=False, force = True)
|
||||
self.parser.final_cleanup()
|
||||
raise bb.BBHandledException()
|
||||
|
||||
@@ -1640,8 +1609,6 @@ class BBCooker:
|
||||
self.updateCacheSync()
|
||||
|
||||
if self.state != state.parsing and not self.parsecache_valid:
|
||||
self.setupParserWatcher()
|
||||
|
||||
bb.parse.siggen.reset(self.data)
|
||||
self.parseConfiguration ()
|
||||
if CookerFeatures.SEND_SANITYEVENTS in self.featureset:
|
||||
@@ -1678,7 +1645,7 @@ class BBCooker:
|
||||
self.state = state.parsing
|
||||
|
||||
if not self.parser.parse_next():
|
||||
collectlog.debug("parsing complete")
|
||||
collectlog.debug(1, "parsing complete")
|
||||
if self.parser.error:
|
||||
raise bb.BBHandledException()
|
||||
self.show_appends_with_no_recipes()
|
||||
@@ -1701,7 +1668,7 @@ class BBCooker:
|
||||
# Return a copy, don't modify the original
|
||||
pkgs_to_build = pkgs_to_build[:]
|
||||
|
||||
if not pkgs_to_build:
|
||||
if len(pkgs_to_build) == 0:
|
||||
raise NothingToBuild
|
||||
|
||||
ignore = (self.data.getVar("ASSUME_PROVIDED") or "").split()
|
||||
@@ -1723,7 +1690,7 @@ class BBCooker:
|
||||
|
||||
if 'universe' in pkgs_to_build:
|
||||
parselog.verbnote("The \"universe\" target is only intended for testing and may produce errors.")
|
||||
parselog.debug("collating packages for \"universe\"")
|
||||
parselog.debug(1, "collating packages for \"universe\"")
|
||||
pkgs_to_build.remove('universe')
|
||||
for mc in self.multiconfigs:
|
||||
for t in self.recipecaches[mc].universe_target:
|
||||
@@ -1748,8 +1715,6 @@ class BBCooker:
|
||||
def post_serve(self):
|
||||
self.shutdown(force=True)
|
||||
prserv.serv.auto_shutdown()
|
||||
if hasattr(bb.parse, "siggen"):
|
||||
bb.parse.siggen.exit()
|
||||
if self.hashserv:
|
||||
self.hashserv.process.terminate()
|
||||
self.hashserv.process.join()
|
||||
@@ -1763,15 +1728,13 @@ class BBCooker:
|
||||
self.state = state.shutdown
|
||||
|
||||
if self.parser:
|
||||
self.parser.shutdown(clean=not force)
|
||||
self.parser.shutdown(clean=not force, force=force)
|
||||
self.parser.final_cleanup()
|
||||
|
||||
def finishcommand(self):
|
||||
self.state = state.initial
|
||||
|
||||
def reset(self):
|
||||
if hasattr(bb.parse, "siggen"):
|
||||
bb.parse.siggen.exit()
|
||||
self.initConfigurationData()
|
||||
self.handlePRServ()
|
||||
|
||||
@@ -1800,7 +1763,7 @@ class CookerCollectFiles(object):
|
||||
def __init__(self, priorities, mc=''):
|
||||
self.mc = mc
|
||||
self.bbappends = []
|
||||
# Priorities is a list of tuples, with the second element as the pattern.
|
||||
# Priorities is a list of tupples, with the second element as the pattern.
|
||||
# We need to sort the list with the longest pattern first, and so on to
|
||||
# the shortest. This allows nested layers to be properly evaluated.
|
||||
self.bbfile_config_priorities = sorted(priorities, key=lambda tup: tup[1], reverse=True)
|
||||
@@ -1836,7 +1799,7 @@ class CookerCollectFiles(object):
|
||||
"""Collect all available .bb build files"""
|
||||
masked = 0
|
||||
|
||||
collectlog.debug("collecting .bb files")
|
||||
collectlog.debug(1, "collecting .bb files")
|
||||
|
||||
files = (config.getVar( "BBFILES") or "").split()
|
||||
|
||||
@@ -1844,10 +1807,10 @@ class CookerCollectFiles(object):
|
||||
files.sort( key=lambda fileitem: self.calc_bbfile_priority(fileitem)[0] )
|
||||
config.setVar("BBFILES_PRIORITIZED", " ".join(files))
|
||||
|
||||
if not files:
|
||||
if not len(files):
|
||||
files = self.get_bbfiles()
|
||||
|
||||
if not files:
|
||||
if not len(files):
|
||||
collectlog.error("no recipe files to build, check your BBPATH and BBFILES?")
|
||||
bb.event.fire(CookerExit(), eventdata)
|
||||
|
||||
@@ -1907,7 +1870,7 @@ class CookerCollectFiles(object):
|
||||
try:
|
||||
re.compile(mask)
|
||||
bbmasks.append(mask)
|
||||
except re.error:
|
||||
except sre_constants.error:
|
||||
collectlog.critical("BBMASK contains an invalid regular expression, ignoring: %s" % mask)
|
||||
|
||||
# Then validate the combined regular expressions. This should never
|
||||
@@ -1915,7 +1878,7 @@ class CookerCollectFiles(object):
|
||||
bbmask = "|".join(bbmasks)
|
||||
try:
|
||||
bbmask_compiled = re.compile(bbmask)
|
||||
except re.error:
|
||||
except sre_constants.error:
|
||||
collectlog.critical("BBMASK is not a valid regular expression, ignoring: %s" % bbmask)
|
||||
bbmask = None
|
||||
|
||||
@@ -1923,7 +1886,7 @@ class CookerCollectFiles(object):
|
||||
bbappend = []
|
||||
for f in newfiles:
|
||||
if bbmask and bbmask_compiled.search(f):
|
||||
collectlog.debug("skipping masked file %s", f)
|
||||
collectlog.debug(1, "skipping masked file %s", f)
|
||||
masked += 1
|
||||
continue
|
||||
if f.endswith('.bb'):
|
||||
@@ -1931,7 +1894,7 @@ class CookerCollectFiles(object):
|
||||
elif f.endswith('.bbappend'):
|
||||
bbappend.append(f)
|
||||
else:
|
||||
collectlog.debug("skipping %s: unknown file extension", f)
|
||||
collectlog.debug(1, "skipping %s: unknown file extension", f)
|
||||
|
||||
# Build a list of .bbappend files for each .bb file
|
||||
for f in bbappend:
|
||||
@@ -2033,30 +1996,15 @@ class ParsingFailure(Exception):
|
||||
Exception.__init__(self, realexception, recipe)
|
||||
|
||||
class Parser(multiprocessing.Process):
|
||||
def __init__(self, jobs, results, quit, profile):
|
||||
def __init__(self, jobs, results, quit, init, profile):
|
||||
self.jobs = jobs
|
||||
self.results = results
|
||||
self.quit = quit
|
||||
self.init = init
|
||||
multiprocessing.Process.__init__(self)
|
||||
self.context = bb.utils.get_context().copy()
|
||||
self.handlers = bb.event.get_class_handlers().copy()
|
||||
self.profile = profile
|
||||
self.queue_signals = False
|
||||
self.signal_received = []
|
||||
self.signal_threadlock = threading.Lock()
|
||||
|
||||
def catch_sig(self, signum, frame):
|
||||
if self.queue_signals:
|
||||
self.signal_received.append(signum)
|
||||
else:
|
||||
self.handle_sig(signum, frame)
|
||||
|
||||
def handle_sig(self, signum, frame):
|
||||
if signum == signal.SIGTERM:
|
||||
signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
||||
os.kill(os.getpid(), signal.SIGTERM)
|
||||
elif signum == signal.SIGINT:
|
||||
signal.default_int_handler(signum, frame)
|
||||
|
||||
def run(self):
|
||||
|
||||
@@ -2076,48 +2024,36 @@ class Parser(multiprocessing.Process):
|
||||
prof.dump_stats(logfile)
|
||||
|
||||
def realrun(self):
|
||||
# Signal handling here is hard. We must not terminate any process or thread holding the write
|
||||
# lock for the event stream as it will not be released, ever, and things will hang.
|
||||
# Python handles signals in the main thread/process but they can be raised from any thread and
|
||||
# we want to defer processing of any SIGTERM/SIGINT signal until we're outside the critical section
|
||||
# and don't hold the lock (see server/process.py). We therefore always catch the signals (so any
|
||||
# new thread should also do so) and we defer handling but we handle with the local thread lock
|
||||
# held (a threading lock, not a multiprocessing one) so that no other thread in the process
|
||||
# can be in the critical section.
|
||||
signal.signal(signal.SIGTERM, self.catch_sig)
|
||||
signal.signal(signal.SIGHUP, signal.SIG_DFL)
|
||||
signal.signal(signal.SIGINT, self.catch_sig)
|
||||
bb.utils.set_process_name(multiprocessing.current_process().name)
|
||||
multiprocessing.util.Finalize(None, bb.codeparser.parser_cache_save, exitpriority=1)
|
||||
multiprocessing.util.Finalize(None, bb.fetch.fetcher_parse_save, exitpriority=1)
|
||||
if self.init:
|
||||
self.init()
|
||||
|
||||
pending = []
|
||||
try:
|
||||
while True:
|
||||
try:
|
||||
self.quit.get_nowait()
|
||||
except queue.Empty:
|
||||
pass
|
||||
else:
|
||||
break
|
||||
while True:
|
||||
try:
|
||||
self.quit.get_nowait()
|
||||
except queue.Empty:
|
||||
pass
|
||||
else:
|
||||
self.results.close()
|
||||
self.results.join_thread()
|
||||
break
|
||||
|
||||
if pending:
|
||||
result = pending.pop()
|
||||
else:
|
||||
try:
|
||||
job = self.jobs.pop()
|
||||
except IndexError:
|
||||
break
|
||||
result = self.parse(*job)
|
||||
# Clear the siggen cache after parsing to control memory usage, its huge
|
||||
bb.parse.siggen.postparsing_clean_cache()
|
||||
if pending:
|
||||
result = pending.pop()
|
||||
else:
|
||||
try:
|
||||
self.results.put(result, timeout=0.25)
|
||||
except queue.Full:
|
||||
pending.append(result)
|
||||
finally:
|
||||
self.results.close()
|
||||
self.results.join_thread()
|
||||
job = self.jobs.pop()
|
||||
except IndexError:
|
||||
self.results.close()
|
||||
self.results.join_thread()
|
||||
break
|
||||
result = self.parse(*job)
|
||||
# Clear the siggen cache after parsing to control memory usage, its huge
|
||||
bb.parse.siggen.postparsing_clean_cache()
|
||||
try:
|
||||
self.results.put(result, timeout=0.25)
|
||||
except queue.Full:
|
||||
pending.append(result)
|
||||
|
||||
def parse(self, mc, cache, filename, appends):
|
||||
try:
|
||||
@@ -2138,12 +2074,12 @@ class Parser(multiprocessing.Process):
|
||||
tb = sys.exc_info()[2]
|
||||
exc.recipe = filename
|
||||
exc.traceback = list(bb.exceptions.extract_traceback(tb, context=3))
|
||||
return True, None, exc
|
||||
return True, exc
|
||||
# Need to turn BaseExceptions into Exceptions here so we gracefully shutdown
|
||||
# and for example a worker thread doesn't just exit on its own in response to
|
||||
# a SystemExit event for example.
|
||||
except BaseException as exc:
|
||||
return True, None, ParsingFailure(exc, filename)
|
||||
return True, ParsingFailure(exc, filename)
|
||||
finally:
|
||||
bb.event.LogHandler.filter = origfilter
|
||||
|
||||
@@ -2194,6 +2130,13 @@ class CookerParser(object):
|
||||
self.processes = []
|
||||
if self.toparse:
|
||||
bb.event.fire(bb.event.ParseStarted(self.toparse), self.cfgdata)
|
||||
def init():
|
||||
signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
||||
signal.signal(signal.SIGHUP, signal.SIG_DFL)
|
||||
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
bb.utils.set_process_name(multiprocessing.current_process().name)
|
||||
multiprocessing.util.Finalize(None, bb.codeparser.parser_cache_save, exitpriority=1)
|
||||
multiprocessing.util.Finalize(None, bb.fetch.fetcher_parse_save, exitpriority=1)
|
||||
|
||||
self.parser_quit = multiprocessing.Queue(maxsize=self.num_processes)
|
||||
self.result_queue = multiprocessing.Queue()
|
||||
@@ -2203,14 +2146,14 @@ class CookerParser(object):
|
||||
self.jobs = chunkify(list(self.willparse), self.num_processes)
|
||||
|
||||
for i in range(0, self.num_processes):
|
||||
parser = Parser(self.jobs[i], self.result_queue, self.parser_quit, self.cooker.configuration.profile)
|
||||
parser = Parser(self.jobs[i], self.result_queue, self.parser_quit, init, self.cooker.configuration.profile)
|
||||
parser.start()
|
||||
self.process_names.append(parser.name)
|
||||
self.processes.append(parser)
|
||||
|
||||
self.results = itertools.chain(self.results, self.parse_generator())
|
||||
|
||||
def shutdown(self, clean=True):
|
||||
def shutdown(self, clean=True, force=False):
|
||||
if not self.toparse:
|
||||
return
|
||||
if self.haveshutdown:
|
||||
@@ -2224,8 +2167,6 @@ class CookerParser(object):
|
||||
self.total)
|
||||
|
||||
bb.event.fire(event, self.cfgdata)
|
||||
else:
|
||||
bb.error("Parsing halted due to errors, see error messages above")
|
||||
|
||||
for process in self.processes:
|
||||
self.parser_quit.put(None)
|
||||
@@ -2239,24 +2180,11 @@ class CookerParser(object):
|
||||
break
|
||||
|
||||
for process in self.processes:
|
||||
process.join(0.5)
|
||||
|
||||
for process in self.processes:
|
||||
if process.exitcode is None:
|
||||
os.kill(process.pid, signal.SIGINT)
|
||||
|
||||
for process in self.processes:
|
||||
process.join(0.5)
|
||||
|
||||
for process in self.processes:
|
||||
if process.exitcode is None:
|
||||
if force:
|
||||
process.join(.1)
|
||||
process.terminate()
|
||||
|
||||
for process in self.processes:
|
||||
process.join()
|
||||
# Added in 3.7, cleans up zombies
|
||||
if hasattr(process, "close"):
|
||||
process.close()
|
||||
else:
|
||||
process.join()
|
||||
|
||||
self.parser_quit.close()
|
||||
# Allow data left in the cancel queue to be discarded
|
||||
@@ -2306,10 +2234,14 @@ class CookerParser(object):
|
||||
result = self.result_queue.get(timeout=0.25)
|
||||
except queue.Empty:
|
||||
empty = True
|
||||
yield None, None, None
|
||||
pass
|
||||
else:
|
||||
empty = False
|
||||
yield result
|
||||
value = result[1]
|
||||
if isinstance(value, BaseException):
|
||||
raise value
|
||||
else:
|
||||
yield result
|
||||
|
||||
if not (self.parsed >= self.toparse):
|
||||
raise bb.parse.ParseError("Not all recipes parsed, parser thread killed/died? Exiting.", None)
|
||||
@@ -2320,31 +2252,24 @@ class CookerParser(object):
|
||||
parsed = None
|
||||
try:
|
||||
parsed, mc, result = next(self.results)
|
||||
if isinstance(result, BaseException):
|
||||
# Turn exceptions back into exceptions
|
||||
raise result
|
||||
if parsed is None:
|
||||
# Timeout, loop back through the main loop
|
||||
return True
|
||||
|
||||
except StopIteration:
|
||||
self.shutdown()
|
||||
return False
|
||||
except bb.BBHandledException as exc:
|
||||
self.error += 1
|
||||
logger.debug('Failed to parse recipe: %s' % exc.recipe)
|
||||
self.shutdown(clean=False)
|
||||
logger.error('Failed to parse recipe: %s' % exc.recipe)
|
||||
self.shutdown(clean=False, force=True)
|
||||
return False
|
||||
except ParsingFailure as exc:
|
||||
self.error += 1
|
||||
logger.error('Unable to parse %s: %s' %
|
||||
(exc.recipe, bb.exceptions.to_string(exc.realexception)))
|
||||
self.shutdown(clean=False)
|
||||
self.shutdown(clean=False, force=True)
|
||||
return False
|
||||
except bb.parse.ParseError as exc:
|
||||
self.error += 1
|
||||
logger.error(str(exc))
|
||||
self.shutdown(clean=False)
|
||||
self.shutdown(clean=False, force=True)
|
||||
return False
|
||||
except bb.data_smart.ExpansionError as exc:
|
||||
self.error += 1
|
||||
@@ -2353,7 +2278,7 @@ class CookerParser(object):
|
||||
tb = list(itertools.dropwhile(lambda e: e.filename.startswith(bbdir), exc.traceback))
|
||||
logger.error('ExpansionError during parsing %s', value.recipe,
|
||||
exc_info=(etype, value, tb))
|
||||
self.shutdown(clean=False)
|
||||
self.shutdown(clean=False, force=True)
|
||||
return False
|
||||
except Exception as exc:
|
||||
self.error += 1
|
||||
@@ -2365,7 +2290,7 @@ class CookerParser(object):
|
||||
# Most likely, an exception occurred during raising an exception
|
||||
import traceback
|
||||
logger.error('Exception during parse: %s' % traceback.format_exc())
|
||||
self.shutdown(clean=False)
|
||||
self.shutdown(clean=False, force=True)
|
||||
return False
|
||||
|
||||
self.current += 1
|
||||
|
||||
@@ -57,7 +57,7 @@ class ConfigParameters(object):
|
||||
|
||||
def updateToServer(self, server, environment):
|
||||
options = {}
|
||||
for o in ["halt", "force", "invalidate_stamp",
|
||||
for o in ["abort", "force", "invalidate_stamp",
|
||||
"dry_run", "dump_signatures",
|
||||
"extra_assume_provided", "profile",
|
||||
"prefile", "postfile", "server_timeout",
|
||||
@@ -86,7 +86,7 @@ class ConfigParameters(object):
|
||||
action['msg'] = "Only one target can be used with the --environment option."
|
||||
elif self.options.buildfile and len(self.options.pkgs_to_build) > 0:
|
||||
action['msg'] = "No target should be used with the --environment and --buildfile options."
|
||||
elif self.options.pkgs_to_build:
|
||||
elif len(self.options.pkgs_to_build) > 0:
|
||||
action['action'] = ["showEnvironmentTarget", self.options.pkgs_to_build]
|
||||
else:
|
||||
action['action'] = ["showEnvironment", self.options.buildfile]
|
||||
@@ -124,7 +124,7 @@ class CookerConfiguration(object):
|
||||
self.prefile = []
|
||||
self.postfile = []
|
||||
self.cmd = None
|
||||
self.halt = True
|
||||
self.abort = True
|
||||
self.force = False
|
||||
self.profile = False
|
||||
self.nosetscene = False
|
||||
@@ -210,7 +210,7 @@ def findConfigFile(configfile, data):
|
||||
|
||||
#
|
||||
# We search for a conf/bblayers.conf under an entry in BBPATH or in cwd working
|
||||
# up to /. If that fails, bitbake would fall back to cwd.
|
||||
# up to /. If that fails, we search for a conf/bitbake.conf in BBPATH.
|
||||
#
|
||||
|
||||
def findTopdir():
|
||||
@@ -223,8 +223,11 @@ def findTopdir():
|
||||
layerconf = findConfigFile("bblayers.conf", d)
|
||||
if layerconf:
|
||||
return os.path.dirname(os.path.dirname(layerconf))
|
||||
|
||||
return os.path.abspath(os.getcwd())
|
||||
if bbpath:
|
||||
bitbakeconf = bb.utils.which(bbpath, "conf/bitbake.conf")
|
||||
if bitbakeconf:
|
||||
return os.path.dirname(os.path.dirname(bitbakeconf))
|
||||
return None
|
||||
|
||||
class CookerDataBuilder(object):
|
||||
|
||||
@@ -247,14 +250,10 @@ class CookerDataBuilder(object):
|
||||
self.savedenv = bb.data.init()
|
||||
for k in cookercfg.env:
|
||||
self.savedenv.setVar(k, cookercfg.env[k])
|
||||
if k in bb.data_smart.bitbake_renamed_vars:
|
||||
bb.error('Shell environment variable %s has been renamed to %s' % (k, bb.data_smart.bitbake_renamed_vars[k]))
|
||||
bb.fatal("Exiting to allow enviroment variables to be corrected")
|
||||
|
||||
filtered_keys = bb.utils.approved_variables()
|
||||
bb.data.inheritFromOS(self.basedata, self.savedenv, filtered_keys)
|
||||
self.basedata.setVar("BB_ORIGENV", self.savedenv)
|
||||
self.basedata.setVar("__bbclasstype", "global")
|
||||
|
||||
if worker:
|
||||
self.basedata.setVar("BB_WORKERCONTEXT", "1")
|
||||
@@ -262,12 +261,12 @@ class CookerDataBuilder(object):
|
||||
self.data = self.basedata
|
||||
self.mcdata = {}
|
||||
|
||||
def parseBaseConfiguration(self, worker=False):
|
||||
def parseBaseConfiguration(self):
|
||||
data_hash = hashlib.sha256()
|
||||
try:
|
||||
self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles)
|
||||
|
||||
if self.data.getVar("BB_WORKERCONTEXT", False) is None and not worker:
|
||||
if self.data.getVar("BB_WORKERCONTEXT", False) is None:
|
||||
bb.fetch.fetcher_init(self.data)
|
||||
bb.parse.init_parser(self.data)
|
||||
bb.codeparser.parser_cache_init(self.data)
|
||||
@@ -311,26 +310,6 @@ class CookerDataBuilder(object):
|
||||
logger.exception("Error parsing configuration files")
|
||||
raise bb.BBHandledException()
|
||||
|
||||
|
||||
# Handle obsolete variable names
|
||||
d = self.data
|
||||
renamedvars = d.getVarFlags('BB_RENAMED_VARIABLES') or {}
|
||||
renamedvars.update(bb.data_smart.bitbake_renamed_vars)
|
||||
issues = False
|
||||
for v in renamedvars:
|
||||
if d.getVar(v) != None or d.hasOverrides(v):
|
||||
issues = True
|
||||
loginfo = {}
|
||||
history = d.varhistory.get_variable_refs(v)
|
||||
for h in history:
|
||||
for line in history[h]:
|
||||
loginfo = {'file' : h, 'line' : line}
|
||||
bb.data.data_smart._print_rename_error(v, loginfo, renamedvars)
|
||||
if not history:
|
||||
bb.data.data_smart._print_rename_error(v, loginfo, renamedvars)
|
||||
if issues:
|
||||
raise bb.BBHandledException()
|
||||
|
||||
# Create a copy so we can reset at a later date when UIs disconnect
|
||||
self.origdata = self.data
|
||||
self.data = bb.data.createCopy(self.origdata)
|
||||
@@ -356,7 +335,7 @@ class CookerDataBuilder(object):
|
||||
|
||||
layerconf = self._findLayerConf(data)
|
||||
if layerconf:
|
||||
parselog.debug2("Found bblayers.conf (%s)", layerconf)
|
||||
parselog.debug(2, "Found bblayers.conf (%s)", layerconf)
|
||||
# By definition bblayers.conf is in conf/ of TOPDIR.
|
||||
# We may have been called with cwd somewhere else so reset TOPDIR
|
||||
data.setVar("TOPDIR", os.path.dirname(os.path.dirname(layerconf)))
|
||||
@@ -384,7 +363,7 @@ class CookerDataBuilder(object):
|
||||
raise bb.BBHandledException()
|
||||
|
||||
for layer in layers:
|
||||
parselog.debug2("Adding layer %s", layer)
|
||||
parselog.debug(2, "Adding layer %s", layer)
|
||||
if 'HOME' in approved and '~' in layer:
|
||||
layer = os.path.expanduser(layer)
|
||||
if layer.endswith('/'):
|
||||
@@ -438,9 +417,6 @@ class CookerDataBuilder(object):
|
||||
" invoked bitbake from the wrong directory?")
|
||||
raise SystemExit(msg)
|
||||
|
||||
if not data.getVar("TOPDIR"):
|
||||
data.setVar("TOPDIR", os.path.abspath(os.getcwd()))
|
||||
|
||||
data = parse_config_file(os.path.join("conf", "bitbake.conf"), data)
|
||||
|
||||
# Parse files for loading *after* bitbake.conf and any includes
|
||||
@@ -452,7 +428,7 @@ class CookerDataBuilder(object):
|
||||
for bbclass in bbclasses:
|
||||
data = _inherit(bbclass, data)
|
||||
|
||||
# Normally we only register event handlers at the end of parsing .bb files
|
||||
# Nomally we only register event handlers at the end of parsing .bb files
|
||||
# We register any handlers we've found so far here...
|
||||
for var in data.getVar('__BBHANDLERS', False) or []:
|
||||
handlerfn = data.getVarFlag(var, "filename", False)
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -76,26 +74,26 @@ def createDaemon(function, logfile):
|
||||
with open('/dev/null', 'r') as si:
|
||||
os.dup2(si.fileno(), sys.stdin.fileno())
|
||||
|
||||
with open(logfile, 'a+') as so:
|
||||
try:
|
||||
os.dup2(so.fileno(), sys.stdout.fileno())
|
||||
os.dup2(so.fileno(), sys.stderr.fileno())
|
||||
except io.UnsupportedOperation:
|
||||
sys.stdout = so
|
||||
try:
|
||||
so = open(logfile, 'a+')
|
||||
os.dup2(so.fileno(), sys.stdout.fileno())
|
||||
os.dup2(so.fileno(), sys.stderr.fileno())
|
||||
except io.UnsupportedOperation:
|
||||
sys.stdout = open(logfile, 'a+')
|
||||
|
||||
# Have stdout and stderr be the same so log output matches chronologically
|
||||
# and there aren't two separate buffers
|
||||
sys.stderr = sys.stdout
|
||||
# Have stdout and stderr be the same so log output matches chronologically
|
||||
# and there aren't two seperate buffers
|
||||
sys.stderr = sys.stdout
|
||||
|
||||
try:
|
||||
function()
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
finally:
|
||||
bb.event.print_ui_queue()
|
||||
# os._exit() doesn't flush open files like os.exit() does. Manually flush
|
||||
# stdout and stderr so that any logging output will be seen, particularly
|
||||
# exception tracebacks.
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
os._exit(0)
|
||||
try:
|
||||
function()
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
finally:
|
||||
bb.event.print_ui_queue()
|
||||
# os._exit() doesn't flush open files like os.exit() does. Manually flush
|
||||
# stdout and stderr so that any logging output will be seen, particularly
|
||||
# exception tracebacks.
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
os._exit(0)
|
||||
|
||||
@@ -272,37 +272,34 @@ def update_data(d):
|
||||
"""Performs final steps upon the datastore, including application of overrides"""
|
||||
d.finalize(parent = True)
|
||||
|
||||
def build_dependencies(key, keys, shelldeps, varflagsexcl, ignored_vars, d):
|
||||
def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
|
||||
deps = set()
|
||||
try:
|
||||
if key[-1] == ']':
|
||||
vf = key[:-1].split('[')
|
||||
if vf[1] == "vardepvalueexclude":
|
||||
return deps, ""
|
||||
value, parser = d.getVarFlag(vf[0], vf[1], False, retparser=True)
|
||||
deps |= parser.references
|
||||
deps = deps | (keys & parser.execs)
|
||||
return deps, value
|
||||
varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", "vardepsexclude", "exports", "postfuncs", "prefuncs", "lineno", "filename"]) or {}
|
||||
vardeps = varflags.get("vardeps")
|
||||
exclusions = varflags.get("vardepsexclude", "").split()
|
||||
|
||||
def handle_contains(value, contains, exclusions, d):
|
||||
newvalue = []
|
||||
if value:
|
||||
newvalue.append(str(value))
|
||||
def handle_contains(value, contains, d):
|
||||
newvalue = ""
|
||||
for k in sorted(contains):
|
||||
if k in exclusions or k in ignored_vars:
|
||||
continue
|
||||
l = (d.getVar(k) or "").split()
|
||||
for item in sorted(contains[k]):
|
||||
for word in item.split():
|
||||
if not word in l:
|
||||
newvalue.append("\n%s{%s} = Unset" % (k, item))
|
||||
newvalue += "\n%s{%s} = Unset" % (k, item)
|
||||
break
|
||||
else:
|
||||
newvalue.append("\n%s{%s} = Set" % (k, item))
|
||||
return "".join(newvalue)
|
||||
newvalue += "\n%s{%s} = Set" % (k, item)
|
||||
if not newvalue:
|
||||
return value
|
||||
if not value:
|
||||
return newvalue
|
||||
return value + newvalue
|
||||
|
||||
def handle_remove(value, deps, removes, d):
|
||||
for r in sorted(removes):
|
||||
@@ -321,7 +318,7 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, ignored_vars, d):
|
||||
parser.parse_python(value, filename=varflags.get("filename"), lineno=varflags.get("lineno"))
|
||||
deps = deps | parser.references
|
||||
deps = deps | (keys & parser.execs)
|
||||
value = handle_contains(value, parser.contains, exclusions, d)
|
||||
value = handle_contains(value, parser.contains, d)
|
||||
else:
|
||||
value, parsedvar = d.getVarFlag(key, "_content", False, retparser=True)
|
||||
parser = bb.codeparser.ShellParser(key, logger)
|
||||
@@ -329,7 +326,7 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, ignored_vars, d):
|
||||
deps = deps | shelldeps
|
||||
deps = deps | parsedvar.references
|
||||
deps = deps | (keys & parser.execs) | (keys & parsedvar.execs)
|
||||
value = handle_contains(value, parsedvar.contains, exclusions, d)
|
||||
value = handle_contains(value, parsedvar.contains, d)
|
||||
if hasattr(parsedvar, "removes"):
|
||||
value = handle_remove(value, deps, parsedvar.removes, d)
|
||||
if vardeps is None:
|
||||
@@ -344,7 +341,7 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, ignored_vars, d):
|
||||
value, parser = d.getVarFlag(key, "_content", False, retparser=True)
|
||||
deps |= parser.references
|
||||
deps = deps | (keys & parser.execs)
|
||||
value = handle_contains(value, parser.contains, exclusions, d)
|
||||
value = handle_contains(value, parser.contains, d)
|
||||
if hasattr(parser, "removes"):
|
||||
value = handle_remove(value, deps, parser.removes, d)
|
||||
|
||||
@@ -364,7 +361,7 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, ignored_vars, d):
|
||||
deps |= set(varfdeps)
|
||||
|
||||
deps |= set((vardeps or "").split())
|
||||
deps -= set(exclusions)
|
||||
deps -= set(varflags.get("vardepsexclude", "").split())
|
||||
except bb.parse.SkipRecipe:
|
||||
raise
|
||||
except Exception as e:
|
||||
@@ -374,7 +371,7 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, ignored_vars, d):
|
||||
#bb.note("Variable %s references %s and calls %s" % (key, str(deps), str(execs)))
|
||||
#d.setVarFlag(key, "vardeps", deps)
|
||||
|
||||
def generate_dependencies(d, ignored_vars):
|
||||
def generate_dependencies(d, whitelist):
|
||||
|
||||
keys = set(key for key in d if not key.startswith("__"))
|
||||
shelldeps = set(key for key in d.getVar("__exportlist", False) if d.getVarFlag(key, "export", False) and not d.getVarFlag(key, "unexport", False))
|
||||
@@ -385,22 +382,22 @@ def generate_dependencies(d, ignored_vars):
|
||||
|
||||
tasklist = d.getVar('__BBTASKS', False) or []
|
||||
for task in tasklist:
|
||||
deps[task], values[task] = build_dependencies(task, keys, shelldeps, varflagsexcl, ignored_vars, d)
|
||||
deps[task], values[task] = build_dependencies(task, keys, shelldeps, varflagsexcl, d)
|
||||
newdeps = deps[task]
|
||||
seen = set()
|
||||
while newdeps:
|
||||
nextdeps = newdeps - ignored_vars
|
||||
nextdeps = newdeps - whitelist
|
||||
seen |= nextdeps
|
||||
newdeps = set()
|
||||
for dep in nextdeps:
|
||||
if dep not in deps:
|
||||
deps[dep], values[dep] = build_dependencies(dep, keys, shelldeps, varflagsexcl, ignored_vars, d)
|
||||
deps[dep], values[dep] = build_dependencies(dep, keys, shelldeps, varflagsexcl, d)
|
||||
newdeps |= deps[dep]
|
||||
newdeps -= seen
|
||||
#print "For %s: %s" % (task, str(deps[task]))
|
||||
return tasklist, deps, values
|
||||
|
||||
def generate_dependency_hash(tasklist, gendeps, lookupcache, ignored_vars, fn):
|
||||
def generate_dependency_hash(tasklist, gendeps, lookupcache, whitelist, fn):
|
||||
taskdeps = {}
|
||||
basehash = {}
|
||||
|
||||
@@ -409,11 +406,9 @@ def generate_dependency_hash(tasklist, gendeps, lookupcache, ignored_vars, fn):
|
||||
|
||||
if data is None:
|
||||
bb.error("Task %s from %s seems to be empty?!" % (task, fn))
|
||||
data = []
|
||||
else:
|
||||
data = [data]
|
||||
data = ''
|
||||
|
||||
gendeps[task] -= ignored_vars
|
||||
gendeps[task] -= whitelist
|
||||
newdeps = gendeps[task]
|
||||
seen = set()
|
||||
while newdeps:
|
||||
@@ -421,27 +416,27 @@ def generate_dependency_hash(tasklist, gendeps, lookupcache, ignored_vars, fn):
|
||||
seen |= nextdeps
|
||||
newdeps = set()
|
||||
for dep in nextdeps:
|
||||
if dep in ignored_vars:
|
||||
if dep in whitelist:
|
||||
continue
|
||||
gendeps[dep] -= ignored_vars
|
||||
gendeps[dep] -= whitelist
|
||||
newdeps |= gendeps[dep]
|
||||
newdeps -= seen
|
||||
|
||||
alldeps = sorted(seen)
|
||||
for dep in alldeps:
|
||||
data.append(dep)
|
||||
data = data + dep
|
||||
var = lookupcache[dep]
|
||||
if var is not None:
|
||||
data.append(str(var))
|
||||
data = data + str(var)
|
||||
k = fn + ":" + task
|
||||
basehash[k] = hashlib.sha256("".join(data).encode("utf-8")).hexdigest()
|
||||
basehash[k] = hashlib.sha256(data.encode("utf-8")).hexdigest()
|
||||
taskdeps[task] = alldeps
|
||||
|
||||
return taskdeps, basehash
|
||||
|
||||
def inherits_class(klass, d):
|
||||
val = d.getVar('__inherit_cache', False) or []
|
||||
needle = '/%s.bbclass' % klass
|
||||
needle = os.path.join('classes', '%s.bbclass' % klass)
|
||||
for v in val:
|
||||
if v.endswith(needle):
|
||||
return True
|
||||
|
||||
@@ -33,18 +33,6 @@ __expand_python_regexp__ = re.compile(r"\${@.+?}")
|
||||
__whitespace_split__ = re.compile(r'(\s)')
|
||||
__override_regexp__ = re.compile(r'[a-z0-9]+')
|
||||
|
||||
bitbake_renamed_vars = {
|
||||
"BB_ENV_WHITELIST": "BB_ENV_PASSTHROUGH",
|
||||
"BB_ENV_EXTRAWHITE": "BB_ENV_PASSTHROUGH_ADDITIONS",
|
||||
"BB_HASHBASE_WHITELIST": "BB_BASEHASH_IGNORE_VARS",
|
||||
"BB_HASHCONFIG_WHITELIST": "BB_HASHCONFIG_IGNORE_VARS",
|
||||
"BB_HASHTASK_WHITELIST": "BB_TASKHASH_IGNORE_TASKS",
|
||||
"BB_SETSCENE_ENFORCE_WHITELIST": "BB_SETSCENE_ENFORCE_IGNORE_TASKS",
|
||||
"MULTI_PROVIDER_WHITELIST": "BB_MULTI_PROVIDER_ALLOWED",
|
||||
"BB_STAMP_WHITELIST": "is a deprecated variable and support has been removed",
|
||||
"BB_STAMP_POLICY": "is a deprecated variable and support has been removed",
|
||||
}
|
||||
|
||||
def infer_caller_details(loginfo, parent = False, varval = True):
|
||||
"""Save the caller the trouble of specifying everything."""
|
||||
# Save effort.
|
||||
@@ -152,9 +140,6 @@ class DataContext(dict):
|
||||
self['d'] = metadata
|
||||
|
||||
def __missing__(self, key):
|
||||
# Skip commonly accessed invalid variables
|
||||
if key in ['bb', 'oe', 'int', 'bool', 'time', 'str', 'os']:
|
||||
raise KeyError(key)
|
||||
value = self.metadata.getVar(key)
|
||||
if value is None or self.metadata.getVarFlag(key, 'func', False):
|
||||
raise KeyError(key)
|
||||
@@ -351,16 +336,6 @@ class VariableHistory(object):
|
||||
lines.append(line)
|
||||
return lines
|
||||
|
||||
def get_variable_refs(self, var):
|
||||
"""Return a dict of file/line references"""
|
||||
var_history = self.variable(var)
|
||||
refs = {}
|
||||
for event in var_history:
|
||||
if event['file'] not in refs:
|
||||
refs[event['file']] = []
|
||||
refs[event['file']].append(event['line'])
|
||||
return refs
|
||||
|
||||
def get_variable_items_files(self, var):
|
||||
"""
|
||||
Use variable history to map items added to a list variable and
|
||||
@@ -395,23 +370,6 @@ class VariableHistory(object):
|
||||
else:
|
||||
self.variables[var] = []
|
||||
|
||||
def _print_rename_error(var, loginfo, renamedvars, fullvar=None):
|
||||
info = ""
|
||||
if "file" in loginfo:
|
||||
info = " file: %s" % loginfo["file"]
|
||||
if "line" in loginfo:
|
||||
info += " line: %s" % loginfo["line"]
|
||||
if fullvar and fullvar != var:
|
||||
info += " referenced as: %s" % fullvar
|
||||
if info:
|
||||
info = " (%s)" % info.strip()
|
||||
renameinfo = renamedvars[var]
|
||||
if " " in renameinfo:
|
||||
# A space signals a string to display instead of a rename
|
||||
bb.erroronce('Variable %s %s%s' % (var, renameinfo, info))
|
||||
else:
|
||||
bb.erroronce('Variable %s has been renamed to %s%s' % (var, renameinfo, info))
|
||||
|
||||
class DataSmart(MutableMapping):
|
||||
def __init__(self):
|
||||
self.dict = {}
|
||||
@@ -419,8 +377,6 @@ class DataSmart(MutableMapping):
|
||||
self.inchistory = IncludeHistory()
|
||||
self.varhistory = VariableHistory(self)
|
||||
self._tracking = False
|
||||
self._var_renames = {}
|
||||
self._var_renames.update(bitbake_renamed_vars)
|
||||
|
||||
self.expand_cache = {}
|
||||
|
||||
@@ -532,26 +488,18 @@ class DataSmart(MutableMapping):
|
||||
else:
|
||||
self.initVar(var)
|
||||
|
||||
def hasOverrides(self, var):
|
||||
return var in self.overridedata
|
||||
|
||||
def setVar(self, var, value, **loginfo):
|
||||
#print("var=" + str(var) + " val=" + str(value))
|
||||
|
||||
if not var.startswith("__anon_") and ("_append" in var or "_prepend" in var or "_remove" in var):
|
||||
if "_append" in var or "_prepend" in var or "_remove" in var:
|
||||
info = "%s" % var
|
||||
if "file" in loginfo:
|
||||
info += " file: %s" % loginfo["file"]
|
||||
if "line" in loginfo:
|
||||
info += " line: %s" % loginfo["line"]
|
||||
if "filename" in loginfo:
|
||||
info += " file: %s" % loginfo[filename]
|
||||
if "lineno" in loginfo:
|
||||
info += " line: %s" % loginfo[lineno]
|
||||
bb.fatal("Variable %s contains an operation using the old override syntax. Please convert this layer/metadata before attempting to use with a newer bitbake." % info)
|
||||
|
||||
shortvar = var.split(":", 1)[0]
|
||||
if shortvar in self._var_renames:
|
||||
_print_rename_error(shortvar, loginfo, self._var_renames, fullvar=var)
|
||||
# Mark that we have seen a renamed variable
|
||||
self.setVar("_FAILPARSINGERRORHANDLED", True)
|
||||
|
||||
self.expand_cache = {}
|
||||
parsing=False
|
||||
if 'parsing' in loginfo:
|
||||
@@ -673,11 +621,10 @@ class DataSmart(MutableMapping):
|
||||
self.varhistory.record(**loginfo)
|
||||
self.setVar(newkey, val, ignore=True, parsing=True)
|
||||
|
||||
srcflags = self.getVarFlags(key, False, True) or {}
|
||||
for i in srcflags:
|
||||
if i not in (__setvar_keyword__):
|
||||
for i in (__setvar_keyword__):
|
||||
src = self.getVarFlag(key, i, False)
|
||||
if src is None:
|
||||
continue
|
||||
src = srcflags[i]
|
||||
|
||||
dest = self.getVarFlag(newkey, i, False) or []
|
||||
dest.extend(src)
|
||||
@@ -738,14 +685,6 @@ class DataSmart(MutableMapping):
|
||||
def setVarFlag(self, var, flag, value, **loginfo):
|
||||
self.expand_cache = {}
|
||||
|
||||
if var == "BB_RENAMED_VARIABLES":
|
||||
self._var_renames[flag] = value
|
||||
|
||||
if var in self._var_renames:
|
||||
_print_rename_error(var, loginfo, self._var_renames)
|
||||
# Mark that we have seen a renamed variable
|
||||
self.setVar("_FAILPARSINGERRORHANDLED", True)
|
||||
|
||||
if 'op' not in loginfo:
|
||||
loginfo['op'] = "set"
|
||||
loginfo['flag'] = flag
|
||||
@@ -871,7 +810,7 @@ class DataSmart(MutableMapping):
|
||||
expanded_removes[r] = self.expand(r).split()
|
||||
|
||||
parser.removes = set()
|
||||
val = []
|
||||
val = ""
|
||||
for v in __whitespace_split__.split(parser.value):
|
||||
skip = False
|
||||
for r in removes:
|
||||
@@ -880,8 +819,8 @@ class DataSmart(MutableMapping):
|
||||
skip = True
|
||||
if skip:
|
||||
continue
|
||||
val.append(v)
|
||||
parser.value = "".join(val)
|
||||
val = val + v
|
||||
parser.value = val
|
||||
if expand:
|
||||
value = parser.value
|
||||
|
||||
@@ -985,7 +924,6 @@ class DataSmart(MutableMapping):
|
||||
data.inchistory = self.inchistory.copy()
|
||||
|
||||
data._tracking = self._tracking
|
||||
data._var_renames = self._var_renames
|
||||
|
||||
data.overrides = None
|
||||
data.overridevars = copy.copy(self.overridevars)
|
||||
@@ -1008,7 +946,7 @@ class DataSmart(MutableMapping):
|
||||
value = self.getVar(variable, False)
|
||||
for key in keys:
|
||||
referrervalue = self.getVar(key, False)
|
||||
if referrervalue and isinstance(referrervalue, str) and ref in referrervalue:
|
||||
if referrervalue and ref in referrervalue:
|
||||
self.setVar(key, referrervalue.replace(ref, value))
|
||||
|
||||
def localkeys(self):
|
||||
@@ -1074,10 +1012,10 @@ class DataSmart(MutableMapping):
|
||||
d = self.createCopy()
|
||||
bb.data.expandKeys(d)
|
||||
|
||||
config_ignore_vars = set((d.getVar("BB_HASHCONFIG_IGNORE_VARS") or "").split())
|
||||
config_whitelist = set((d.getVar("BB_HASHCONFIG_WHITELIST") or "").split())
|
||||
keys = set(key for key in iter(d) if not key.startswith("__"))
|
||||
for key in keys:
|
||||
if key in config_ignore_vars:
|
||||
if key in config_whitelist:
|
||||
continue
|
||||
|
||||
value = d.getVar(key, False) or ""
|
||||
|
||||
@@ -40,7 +40,7 @@ class HeartbeatEvent(Event):
|
||||
"""Triggered at regular time intervals of 10 seconds. Other events can fire much more often
|
||||
(runQueueTaskStarted when there are many short tasks) or not at all for long periods
|
||||
of time (again runQueueTaskStarted, when there is just one long-running task), so this
|
||||
event is more suitable for doing some task-independent work occasionally."""
|
||||
event is more suitable for doing some task-independent work occassionally."""
|
||||
def __init__(self, time):
|
||||
Event.__init__(self)
|
||||
self.time = time
|
||||
@@ -132,14 +132,8 @@ def print_ui_queue():
|
||||
if not _uiready:
|
||||
from bb.msg import BBLogFormatter
|
||||
# Flush any existing buffered content
|
||||
try:
|
||||
sys.stdout.flush()
|
||||
except:
|
||||
pass
|
||||
try:
|
||||
sys.stderr.flush()
|
||||
except:
|
||||
pass
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
stdout = logging.StreamHandler(sys.stdout)
|
||||
stderr = logging.StreamHandler(sys.stderr)
|
||||
formatter = BBLogFormatter("%(levelname)s: %(message)s")
|
||||
@@ -492,7 +486,7 @@ class BuildCompleted(BuildBase, OperationCompleted):
|
||||
BuildBase.__init__(self, n, p, failures)
|
||||
|
||||
class DiskFull(Event):
|
||||
"""Disk full case build halted"""
|
||||
"""Disk full case build aborted"""
|
||||
def __init__(self, dev, type, freespace, mountpoint):
|
||||
Event.__init__(self)
|
||||
self._dev = dev
|
||||
@@ -770,7 +764,7 @@ class LogHandler(logging.Handler):
|
||||
class MetadataEvent(Event):
|
||||
"""
|
||||
Generic event that target for OE-Core classes
|
||||
to report information during asynchronous execution
|
||||
to report information during asynchrous execution
|
||||
"""
|
||||
def __init__(self, eventtype, eventdata):
|
||||
Event.__init__(self)
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,57 +0,0 @@
|
||||
There are expectations of users of the fetcher code. This file attempts to document
|
||||
some of the constraints that are present. Some are obvious, some are less so. It is
|
||||
documented in the context of how OE uses it but the API calls are generic.
|
||||
|
||||
a) network access for sources is only expected to happen in the do_fetch task.
|
||||
This is not enforced or tested but is required so that we can:
|
||||
|
||||
i) audit the sources used (i.e. for license/manifest reasons)
|
||||
ii) support offline builds with a suitable cache
|
||||
iii) allow work to continue even with downtime upstream
|
||||
iv) allow for changes upstream in incompatible ways
|
||||
v) allow rebuilding of the software in X years time
|
||||
|
||||
b) network access is not expected in do_unpack task.
|
||||
|
||||
c) you can take DL_DIR and use it as a mirror for offline builds.
|
||||
|
||||
d) access to the network is only made when explicitly configured in recipes
|
||||
(e.g. use of AUTOREV, or use of git tags which change revision).
|
||||
|
||||
e) fetcher output is deterministic (i.e. if you fetch configuration XXX now it
|
||||
will match in future exactly in a clean build with a new DL_DIR).
|
||||
One specific pain point example are git tags. They can be replaced and change
|
||||
so the git fetcher has to resolve them with the network. We use git revisions
|
||||
where possible to avoid this and ensure determinism.
|
||||
|
||||
f) network access is expected to work with the standard linux proxy variables
|
||||
so that access behind firewalls works (the fetcher sets these in the
|
||||
environment but only in the do_fetch tasks).
|
||||
|
||||
g) access during parsing has to be minimal, a "git ls-remote" for an AUTOREV
|
||||
git recipe might be ok but you can't expect to checkout a git tree.
|
||||
|
||||
h) we need to provide revision information during parsing such that a version
|
||||
for the recipe can be constructed.
|
||||
|
||||
i) versions are expected to be able to increase in a way which sorts allowing
|
||||
package feeds to operate (see PR server required for git revisions to sort).
|
||||
|
||||
j) API to query for possible version upgrades of a url is highly desireable to
|
||||
allow our automated upgrage code to function (it is implied this does always
|
||||
have network access).
|
||||
|
||||
k) Where fixes or changes to behaviour in the fetcher are made, we ask that
|
||||
test cases are added (run with "bitbake-selftest bb.tests.fetch"). We do
|
||||
have fairly extensive test coverage of the fetcher as it is the only way
|
||||
to track all of its corner cases, it still doesn't give entire coverage
|
||||
though sadly.
|
||||
|
||||
l) If using tools during parse time, they will have to be in ASSUME_PROVIDED
|
||||
in OE's context as we can't build git-native, then parse a recipe and use
|
||||
git ls-remote.
|
||||
|
||||
Not all fetchers support all features, autorev is optional and doesn't make
|
||||
sense for some. Upgrade detection means different things in different contexts
|
||||
too.
|
||||
|
||||
@@ -113,7 +113,7 @@ class MissingParameterError(BBFetchException):
|
||||
self.args = (missing, url)
|
||||
|
||||
class ParameterError(BBFetchException):
|
||||
"""Exception raised when a url cannot be processed due to invalid parameters."""
|
||||
"""Exception raised when a url cannot be proccessed due to invalid parameters."""
|
||||
def __init__(self, message, url):
|
||||
msg = "URL: '%s' has invalid parameters. %s" % (url, message)
|
||||
self.url = url
|
||||
@@ -182,7 +182,7 @@ class URI(object):
|
||||
Some notes about relative URIs: while it's specified that
|
||||
a URI beginning with <scheme>:// should either be directly
|
||||
followed by a hostname or a /, the old URI handling of the
|
||||
fetch2 library did not conform to this. Therefore, this URI
|
||||
fetch2 library did not comform to this. Therefore, this URI
|
||||
class has some kludges to make sure that URIs are parsed in
|
||||
a way comforming to bitbake's current usage. This URI class
|
||||
supports the following:
|
||||
@@ -199,7 +199,7 @@ class URI(object):
|
||||
file://hostname/absolute/path.diff (would be IETF compliant)
|
||||
|
||||
Note that the last case only applies to a list of
|
||||
explicitly allowed schemes (currently only file://), that requires
|
||||
"whitelisted" schemes (currently only file://), that requires
|
||||
its URIs to not have a network location.
|
||||
"""
|
||||
|
||||
@@ -402,24 +402,24 @@ def encodeurl(decoded):
|
||||
|
||||
if not type:
|
||||
raise MissingParameterError('type', "encoded from the data %s" % str(decoded))
|
||||
url = ['%s://' % type]
|
||||
url = '%s://' % type
|
||||
if user and type != "file":
|
||||
url.append("%s" % user)
|
||||
url += "%s" % user
|
||||
if pswd:
|
||||
url.append(":%s" % pswd)
|
||||
url.append("@")
|
||||
url += ":%s" % pswd
|
||||
url += "@"
|
||||
if host and type != "file":
|
||||
url.append("%s" % host)
|
||||
url += "%s" % host
|
||||
if path:
|
||||
# Standardise path to ensure comparisons work
|
||||
while '//' in path:
|
||||
path = path.replace("//", "/")
|
||||
url.append("%s" % urllib.parse.quote(path))
|
||||
url += "%s" % urllib.parse.quote(path)
|
||||
if p:
|
||||
for parm in p:
|
||||
url.append(";%s=%s" % (parm, p[parm]))
|
||||
url += ";%s=%s" % (parm, p[parm])
|
||||
|
||||
return "".join(url)
|
||||
return url
|
||||
|
||||
def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
|
||||
if not ud.url or not uri_find or not uri_replace:
|
||||
@@ -473,13 +473,10 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
|
||||
basename = os.path.basename(ud.localpath)
|
||||
if basename:
|
||||
uri_basename = os.path.basename(uri_decoded[loc])
|
||||
# Prefix with a slash as a sentinel in case
|
||||
# result_decoded[loc] does not contain one.
|
||||
path = "/" + result_decoded[loc]
|
||||
if uri_basename and basename != uri_basename and path.endswith("/" + uri_basename):
|
||||
result_decoded[loc] = path[1:-len(uri_basename)] + basename
|
||||
elif not path.endswith("/" + basename):
|
||||
result_decoded[loc] = os.path.join(path[1:], basename)
|
||||
if uri_basename and basename != uri_basename and result_decoded[loc].endswith(uri_basename):
|
||||
result_decoded[loc] = result_decoded[loc].replace(uri_basename, basename)
|
||||
elif not result_decoded[loc].endswith(basename):
|
||||
result_decoded[loc] = os.path.join(result_decoded[loc], basename)
|
||||
else:
|
||||
return None
|
||||
result = encodeurl(result_decoded)
|
||||
@@ -545,7 +542,7 @@ def mirror_from_string(data):
|
||||
bb.warn('Invalid mirror data %s, should have paired members.' % data)
|
||||
return list(zip(*[iter(mirrors)]*2))
|
||||
|
||||
def verify_checksum(ud, d, precomputed={}, localpath=None, fatal_nochecksum=True):
|
||||
def verify_checksum(ud, d, precomputed={}):
|
||||
"""
|
||||
verify the MD5 and SHA256 checksum for downloaded src
|
||||
|
||||
@@ -563,16 +560,13 @@ def verify_checksum(ud, d, precomputed={}, localpath=None, fatal_nochecksum=True
|
||||
if ud.ignore_checksums or not ud.method.supports_checksum(ud):
|
||||
return {}
|
||||
|
||||
if localpath is None:
|
||||
localpath = ud.localpath
|
||||
|
||||
def compute_checksum_info(checksum_id):
|
||||
checksum_name = getattr(ud, "%s_name" % checksum_id)
|
||||
|
||||
if checksum_id in precomputed:
|
||||
checksum_data = precomputed[checksum_id]
|
||||
else:
|
||||
checksum_data = getattr(bb.utils, "%s_file" % checksum_id)(localpath)
|
||||
checksum_data = getattr(bb.utils, "%s_file" % checksum_id)(ud.localpath)
|
||||
|
||||
checksum_expected = getattr(ud, "%s_expected" % checksum_id)
|
||||
|
||||
@@ -598,7 +592,7 @@ def verify_checksum(ud, d, precomputed={}, localpath=None, fatal_nochecksum=True
|
||||
checksum_lines = ["SRC_URI[%s] = \"%s\"" % (ci["name"], ci["data"])]
|
||||
|
||||
# If no checksum has been provided
|
||||
if fatal_nochecksum and ud.method.recommends_checksum(ud) and all(ci["expected"] is None for ci in checksum_infos):
|
||||
if ud.method.recommends_checksum(ud) and all(ci["expected"] is None for ci in checksum_infos):
|
||||
messages = []
|
||||
strict = d.getVar("BB_STRICT_CHECKSUM") or "0"
|
||||
|
||||
@@ -630,7 +624,7 @@ def verify_checksum(ud, d, precomputed={}, localpath=None, fatal_nochecksum=True
|
||||
for ci in checksum_infos:
|
||||
if ci["expected"] and ci["expected"] != ci["data"]:
|
||||
messages.append("File: '%s' has %s checksum '%s' when '%s' was " \
|
||||
"expected" % (localpath, ci["id"], ci["data"], ci["expected"]))
|
||||
"expected" % (ud.localpath, ci["id"], ci["data"], ci["expected"]))
|
||||
bad_checksum = ci["data"]
|
||||
|
||||
if bad_checksum:
|
||||
@@ -768,7 +762,6 @@ def get_srcrev(d, method_name='sortable_revision'):
|
||||
that fetcher provides a method with the given name and the same signature as sortable_revision.
|
||||
"""
|
||||
|
||||
d.setVar("__BBSEENSRCREV", "1")
|
||||
recursion = d.getVar("__BBINSRCREV")
|
||||
if recursion:
|
||||
raise FetchError("There are recursive references in fetcher variables, likely through SRC_URI")
|
||||
@@ -781,7 +774,7 @@ def get_srcrev(d, method_name='sortable_revision'):
|
||||
if urldata[u].method.supports_srcrev():
|
||||
scms.append(u)
|
||||
|
||||
if not scms:
|
||||
if len(scms) == 0:
|
||||
raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI")
|
||||
|
||||
if len(scms) == 1 and len(urldata[scms[0]].names) == 1:
|
||||
@@ -842,7 +835,6 @@ FETCH_EXPORT_VARS = ['HOME', 'PATH',
|
||||
'ALL_PROXY', 'all_proxy',
|
||||
'GIT_PROXY_COMMAND',
|
||||
'GIT_SSH',
|
||||
'GIT_SSH_COMMAND',
|
||||
'GIT_SSL_CAINFO',
|
||||
'GIT_SMART_HTTP',
|
||||
'SSH_AUTH_SOCK', 'SSH_AGENT_PID',
|
||||
@@ -855,17 +847,6 @@ FETCH_EXPORT_VARS = ['HOME', 'PATH',
|
||||
'AWS_SECRET_ACCESS_KEY',
|
||||
'AWS_DEFAULT_REGION']
|
||||
|
||||
def get_fetcher_environment(d):
|
||||
newenv = {}
|
||||
origenv = d.getVar("BB_ORIGENV")
|
||||
for name in bb.fetch2.FETCH_EXPORT_VARS:
|
||||
value = d.getVar(name)
|
||||
if not value and origenv:
|
||||
value = origenv.getVar(name)
|
||||
if value:
|
||||
newenv[name] = value
|
||||
return newenv
|
||||
|
||||
def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
|
||||
"""
|
||||
Run cmd returning the command output
|
||||
@@ -980,7 +961,6 @@ def build_mirroruris(origud, mirrors, ld):
|
||||
|
||||
try:
|
||||
newud = FetchData(newuri, ld)
|
||||
newud.ignore_checksums = True
|
||||
newud.setup_localpath(ld)
|
||||
except bb.fetch2.BBFetchException as e:
|
||||
logger.debug("Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
|
||||
@@ -1101,8 +1081,6 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):
|
||||
|
||||
def ensure_symlink(target, link_name):
|
||||
if not os.path.exists(link_name):
|
||||
dirname = os.path.dirname(link_name)
|
||||
bb.utils.mkdirhier(dirname)
|
||||
if os.path.islink(link_name):
|
||||
# Broken symbolic link
|
||||
os.unlink(link_name)
|
||||
@@ -1226,21 +1204,23 @@ def get_checksum_file_list(d):
|
||||
SRC_URI as a space-separated string
|
||||
"""
|
||||
fetch = Fetch([], d, cache = False, localonly = True)
|
||||
|
||||
dl_dir = d.getVar('DL_DIR')
|
||||
filelist = []
|
||||
for u in fetch.urls:
|
||||
ud = fetch.ud[u]
|
||||
|
||||
if ud and isinstance(ud.method, local.Local):
|
||||
found = False
|
||||
paths = ud.method.localpaths(ud, d)
|
||||
for f in paths:
|
||||
pth = ud.decodedurl
|
||||
if os.path.exists(f):
|
||||
found = True
|
||||
if f.startswith(dl_dir):
|
||||
# The local fetcher's behaviour is to return a path under DL_DIR if it couldn't find the file anywhere else
|
||||
if os.path.exists(f):
|
||||
bb.warn("Getting checksum for %s SRC_URI entry %s: file not found except in DL_DIR" % (d.getVar('PN'), os.path.basename(f)))
|
||||
else:
|
||||
bb.warn("Unable to get checksum for %s SRC_URI entry %s: file could not be found" % (d.getVar('PN'), os.path.basename(f)))
|
||||
filelist.append(f + ":" + str(os.path.exists(f)))
|
||||
if not found:
|
||||
bb.fatal(("Unable to get checksum for %s SRC_URI entry %s: file could not be found"
|
||||
"\nThe following paths were searched:"
|
||||
"\n%s") % (d.getVar('PN'), os.path.basename(f), '\n'.join(paths)))
|
||||
|
||||
return " ".join(filelist)
|
||||
|
||||
@@ -1478,33 +1458,30 @@ class FetchMethod(object):
|
||||
cmd = None
|
||||
|
||||
if unpack:
|
||||
tar_cmd = 'tar --extract --no-same-owner'
|
||||
if 'striplevel' in urldata.parm:
|
||||
tar_cmd += ' --strip-components=%s' % urldata.parm['striplevel']
|
||||
if file.endswith('.tar'):
|
||||
cmd = '%s -f %s' % (tar_cmd, file)
|
||||
cmd = 'tar x --no-same-owner -f %s' % file
|
||||
elif file.endswith('.tgz') or file.endswith('.tar.gz') or file.endswith('.tar.Z'):
|
||||
cmd = '%s -z -f %s' % (tar_cmd, file)
|
||||
cmd = 'tar xz --no-same-owner -f %s' % file
|
||||
elif file.endswith('.tbz') or file.endswith('.tbz2') or file.endswith('.tar.bz2'):
|
||||
cmd = 'bzip2 -dc %s | %s -f -' % (file, tar_cmd)
|
||||
cmd = 'bzip2 -dc %s | tar x --no-same-owner -f -' % file
|
||||
elif file.endswith('.gz') or file.endswith('.Z') or file.endswith('.z'):
|
||||
cmd = 'gzip -dc %s > %s' % (file, efile)
|
||||
elif file.endswith('.bz2'):
|
||||
cmd = 'bzip2 -dc %s > %s' % (file, efile)
|
||||
elif file.endswith('.txz') or file.endswith('.tar.xz'):
|
||||
cmd = 'xz -dc %s | %s -f -' % (file, tar_cmd)
|
||||
cmd = 'xz -dc %s | tar x --no-same-owner -f -' % file
|
||||
elif file.endswith('.xz'):
|
||||
cmd = 'xz -dc %s > %s' % (file, efile)
|
||||
elif file.endswith('.tar.lz'):
|
||||
cmd = 'lzip -dc %s | %s -f -' % (file, tar_cmd)
|
||||
cmd = 'lzip -dc %s | tar x --no-same-owner -f -' % file
|
||||
elif file.endswith('.lz'):
|
||||
cmd = 'lzip -dc %s > %s' % (file, efile)
|
||||
elif file.endswith('.tar.7z'):
|
||||
cmd = '7z x -so %s | %s -f -' % (file, tar_cmd)
|
||||
cmd = '7z x -so %s | tar x --no-same-owner -f -' % file
|
||||
elif file.endswith('.7z'):
|
||||
cmd = '7za x -y %s 1>/dev/null' % file
|
||||
elif file.endswith('.tzst') or file.endswith('.tar.zst'):
|
||||
cmd = 'zstd --decompress --stdout %s | %s -f -' % (file, tar_cmd)
|
||||
cmd = 'zstd --decompress --stdout %s | tar x --no-same-owner -f -' % file
|
||||
elif file.endswith('.zst'):
|
||||
cmd = 'zstd --decompress --stdout %s > %s' % (file, efile)
|
||||
elif file.endswith('.zip') or file.endswith('.jar'):
|
||||
@@ -1537,7 +1514,7 @@ class FetchMethod(object):
|
||||
raise UnpackError("Unable to unpack deb/ipk package - does not contain data.tar.* file", urldata.url)
|
||||
else:
|
||||
raise UnpackError("Unable to unpack deb/ipk package - could not list contents", urldata.url)
|
||||
cmd = 'ar x %s %s && %s -p -f %s && rm %s' % (file, datafile, tar_cmd, datafile, datafile)
|
||||
cmd = 'ar x %s %s && tar --no-same-owner -xpf %s && rm %s' % (file, datafile, datafile, datafile)
|
||||
|
||||
# If 'subdir' param exists, create a dir and use it as destination for unpack cmd
|
||||
if 'subdir' in urldata.parm:
|
||||
@@ -1663,7 +1640,7 @@ class Fetch(object):
|
||||
if localonly and cache:
|
||||
raise Exception("bb.fetch2.Fetch.__init__: cannot set cache and localonly at same time")
|
||||
|
||||
if not urls:
|
||||
if len(urls) == 0:
|
||||
urls = d.getVar("SRC_URI").split()
|
||||
self.urls = urls
|
||||
self.d = d
|
||||
@@ -1752,9 +1729,7 @@ class Fetch(object):
|
||||
self.d.setVar("BB_NO_NETWORK", "1")
|
||||
|
||||
firsterr = None
|
||||
verified_stamp = False
|
||||
if done:
|
||||
verified_stamp = m.verify_donestamp(ud, self.d)
|
||||
verified_stamp = m.verify_donestamp(ud, self.d)
|
||||
if not done and (not verified_stamp or m.need_update(ud, self.d)):
|
||||
try:
|
||||
if not trusted_network(self.d, ud.url):
|
||||
@@ -1813,11 +1788,7 @@ class Fetch(object):
|
||||
|
||||
def checkstatus(self, urls=None):
|
||||
"""
|
||||
Check all URLs exist upstream.
|
||||
|
||||
Returns None if the URLs exist, raises FetchError if the check wasn't
|
||||
successful but there wasn't an error (such as file not found), and
|
||||
raises other exceptions in error cases.
|
||||
Check all urls exist upstream
|
||||
"""
|
||||
|
||||
if not urls:
|
||||
@@ -1962,7 +1933,6 @@ from . import clearcase
|
||||
from . import npm
|
||||
from . import npmsw
|
||||
from . import az
|
||||
from . import crate
|
||||
|
||||
methods.append(local.Local())
|
||||
methods.append(wget.Wget())
|
||||
@@ -1983,4 +1953,3 @@ methods.append(clearcase.ClearCase())
|
||||
methods.append(npm.Npm())
|
||||
methods.append(npmsw.NpmShrinkWrap())
|
||||
methods.append(az.Az())
|
||||
methods.append(crate.Crate())
|
||||
|
||||
@@ -74,9 +74,6 @@ from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import logger
|
||||
|
||||
|
||||
sha1_re = re.compile(r'^[0-9a-f]{40}$')
|
||||
slash_re = re.compile(r"/+")
|
||||
|
||||
class GitProgressHandler(bb.progress.LineFilterProgressHandler):
|
||||
"""Extract progress information from git output"""
|
||||
def __init__(self, d):
|
||||
@@ -149,7 +146,6 @@ class Git(FetchMethod):
|
||||
# github stopped supporting git protocol
|
||||
# https://github.blog/2021-09-01-improving-git-protocol-security-github/#no-more-unauthenticated-git
|
||||
ud.proto = "https"
|
||||
bb.warn("URL: %s uses git protocol which is no longer supported by github. Please change to ;protocol=https in the url." % ud.url)
|
||||
|
||||
if not ud.proto in ('git', 'file', 'ssh', 'http', 'https', 'rsync'):
|
||||
raise bb.fetch2.ParameterError("Invalid protocol type", ud.url)
|
||||
@@ -173,10 +169,7 @@ class Git(FetchMethod):
|
||||
ud.nocheckout = 1
|
||||
|
||||
ud.unresolvedrev = {}
|
||||
branches = ud.parm.get("branch", "").split(',')
|
||||
if branches == [""] and not ud.nobranch:
|
||||
bb.warn("URL: %s does not set any branch parameter. The future default branch used by tools and repositories is uncertain and we will therefore soon require this is set in all git urls." % ud.url)
|
||||
branches = ["master"]
|
||||
branches = ud.parm.get("branch", "master").split(',')
|
||||
if len(branches) != len(ud.names):
|
||||
raise bb.fetch2.ParameterError("The number of name and branch parameters is not balanced", ud.url)
|
||||
|
||||
@@ -243,7 +236,7 @@ class Git(FetchMethod):
|
||||
for name in ud.names:
|
||||
ud.unresolvedrev[name] = 'HEAD'
|
||||
|
||||
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c core.fsyncobjectfiles=0 -c gc.autoDetach=false -c core.pager=cat"
|
||||
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c core.fsyncobjectfiles=0 -c gc.autoDetach=false"
|
||||
|
||||
write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0"
|
||||
ud.write_tarballs = write_tarballs != "0" or ud.rebaseable
|
||||
@@ -252,8 +245,8 @@ class Git(FetchMethod):
|
||||
ud.setup_revisions(d)
|
||||
|
||||
for name in ud.names:
|
||||
# Ensure any revision that doesn't look like a SHA-1 is translated into one
|
||||
if not sha1_re.match(ud.revisions[name] or ''):
|
||||
# Ensure anything that doesn't look like a sha256 checksum/revision is translated into one
|
||||
if not ud.revisions[name] or len(ud.revisions[name]) != 40 or (False in [c in "abcdef0123456789" for c in ud.revisions[name]]):
|
||||
if ud.revisions[name]:
|
||||
ud.unresolvedrev[name] = ud.revisions[name]
|
||||
ud.revisions[name] = self.latest_revision(ud, d, name)
|
||||
@@ -262,10 +255,10 @@ class Git(FetchMethod):
|
||||
if gitsrcname.startswith('.'):
|
||||
gitsrcname = gitsrcname[1:]
|
||||
|
||||
# For a rebaseable git repo, it is necessary to keep a mirror tar ball
|
||||
# per revision, so that even if the revision disappears from the
|
||||
# for rebaseable git repo, it is necessary to keep mirror tar ball
|
||||
# per revision, so that even the revision disappears from the
|
||||
# upstream repo in the future, the mirror will remain intact and still
|
||||
# contain the revision
|
||||
# contains the revision
|
||||
if ud.rebaseable:
|
||||
for name in ud.names:
|
||||
gitsrcname = gitsrcname + '_' + ud.revisions[name]
|
||||
@@ -353,15 +346,10 @@ class Git(FetchMethod):
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and self.need_update(ud, d):
|
||||
ud.localpath = ud.fullshallow
|
||||
return
|
||||
elif os.path.exists(ud.fullmirror) and self.need_update(ud, d):
|
||||
if not os.path.exists(ud.clonedir):
|
||||
bb.utils.mkdirhier(ud.clonedir)
|
||||
runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=ud.clonedir)
|
||||
else:
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
|
||||
runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=tmpdir)
|
||||
fetch_cmd = "LANG=C %s fetch -f --progress %s " % (ud.basecmd, shlex.quote(tmpdir))
|
||||
runfetchcmd(fetch_cmd, d, workdir=ud.clonedir)
|
||||
elif os.path.exists(ud.fullmirror) and not os.path.exists(ud.clonedir):
|
||||
bb.utils.mkdirhier(ud.clonedir)
|
||||
runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=ud.clonedir)
|
||||
|
||||
repourl = self._get_repo_url(ud)
|
||||
|
||||
# If the repo still doesn't exist, fallback to cloning it
|
||||
@@ -407,7 +395,7 @@ class Git(FetchMethod):
|
||||
|
||||
if self._contains_lfs(ud, d, ud.clonedir) and self._need_lfs(ud):
|
||||
# Unpack temporary working copy, use it to run 'git checkout' to force pre-fetching
|
||||
# of all LFS blobs needed at the srcrev.
|
||||
# of all LFS blobs needed at the the srcrev.
|
||||
#
|
||||
# It would be nice to just do this inline here by running 'git-lfs fetch'
|
||||
# on the bare clonedir, but that operation requires a working copy on some
|
||||
@@ -470,10 +458,7 @@ class Git(FetchMethod):
|
||||
|
||||
logger.info("Creating tarball of git repository")
|
||||
with create_atomic(ud.fullmirror) as tfile:
|
||||
mtime = runfetchcmd("git log --all -1 --format=%cD", d,
|
||||
quiet=True, workdir=ud.clonedir)
|
||||
runfetchcmd("tar -czf %s --owner oe:0 --group oe:0 --mtime \"%s\" ."
|
||||
% (tfile, mtime), d, workdir=ud.clonedir)
|
||||
runfetchcmd("tar -czf %s ." % tfile, d, workdir=ud.clonedir)
|
||||
runfetchcmd("touch %s.done" % ud.fullmirror, d)
|
||||
|
||||
def clone_shallow_local(self, ud, dest, d):
|
||||
@@ -535,24 +520,13 @@ class Git(FetchMethod):
|
||||
def unpack(self, ud, destdir, d):
|
||||
""" unpack the downloaded src to destdir"""
|
||||
|
||||
subdir = ud.parm.get("subdir")
|
||||
subpath = ud.parm.get("subpath")
|
||||
readpathspec = ""
|
||||
def_destsuffix = "git/"
|
||||
|
||||
if subpath:
|
||||
readpathspec = ":%s" % subpath
|
||||
def_destsuffix = "%s/" % os.path.basename(subpath.rstrip('/'))
|
||||
|
||||
if subdir:
|
||||
# If 'subdir' param exists, create a dir and use it as destination for unpack cmd
|
||||
if os.path.isabs(subdir):
|
||||
if not os.path.realpath(subdir).startswith(os.path.realpath(destdir)):
|
||||
raise bb.fetch2.UnpackError("subdir argument isn't a subdirectory of unpack root %s" % destdir, ud.url)
|
||||
destdir = subdir
|
||||
else:
|
||||
destdir = os.path.join(destdir, subdir)
|
||||
def_destsuffix = ""
|
||||
subdir = ud.parm.get("subpath", "")
|
||||
if subdir != "":
|
||||
readpathspec = ":%s" % subdir
|
||||
def_destsuffix = "%s/" % os.path.basename(subdir.rstrip('/'))
|
||||
else:
|
||||
readpathspec = ""
|
||||
def_destsuffix = "git/"
|
||||
|
||||
destsuffix = ud.parm.get("destsuffix", def_destsuffix)
|
||||
destdir = ud.destdir = os.path.join(destdir, destsuffix)
|
||||
@@ -567,12 +541,13 @@ class Git(FetchMethod):
|
||||
source_found = False
|
||||
source_error = []
|
||||
|
||||
clonedir_is_up_to_date = not self.clonedir_need_update(ud, d)
|
||||
if clonedir_is_up_to_date:
|
||||
runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, destdir), d)
|
||||
source_found = True
|
||||
else:
|
||||
source_error.append("clone directory not available or not up to date: " + ud.clonedir)
|
||||
if not source_found:
|
||||
clonedir_is_up_to_date = not self.clonedir_need_update(ud, d)
|
||||
if clonedir_is_up_to_date:
|
||||
runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, destdir), d)
|
||||
source_found = True
|
||||
else:
|
||||
source_error.append("clone directory not available or not up to date: " + ud.clonedir)
|
||||
|
||||
if not source_found:
|
||||
if ud.shallow:
|
||||
@@ -598,7 +573,7 @@ class Git(FetchMethod):
|
||||
bb.note("Repository %s has LFS content but it is not being fetched" % (repourl))
|
||||
|
||||
if not ud.nocheckout:
|
||||
if subpath:
|
||||
if subdir != "":
|
||||
runfetchcmd("%s read-tree %s%s" % (ud.basecmd, ud.revisions[ud.names[0]], readpathspec), d,
|
||||
workdir=destdir)
|
||||
runfetchcmd("%s checkout-index -q -f -a" % ud.basecmd, d, workdir=destdir)
|
||||
@@ -704,6 +679,7 @@ class Git(FetchMethod):
|
||||
Return a unique key for the url
|
||||
"""
|
||||
# Collapse adjacent slashes
|
||||
slash_re = re.compile(r"/+")
|
||||
return "git:" + ud.host + slash_re.sub(".", ud.path) + ud.unresolvedrev[name]
|
||||
|
||||
def _lsremote(self, ud, d, search):
|
||||
@@ -736,12 +712,6 @@ class Git(FetchMethod):
|
||||
"""
|
||||
Compute the HEAD revision for the url
|
||||
"""
|
||||
if not d.getVar("__BBSEENSRCREV"):
|
||||
raise bb.fetch2.FetchError("Recipe uses a floating tag/branch '%s' for repo '%s' without a fixed SRCREV yet doesn't call bb.fetch2.get_srcrev() (use SRCPV in PV for OE)." % (ud.unresolvedrev[name], ud.host+ud.path))
|
||||
|
||||
# Ensure we mark as not cached
|
||||
bb.fetch2.get_autorev(d)
|
||||
|
||||
output = self._lsremote(ud, d, "")
|
||||
# Tags of the form ^{} may not work, need to fallback to other form
|
||||
if ud.unresolvedrev[name][:5] == "refs/" or ud.usehead:
|
||||
|
||||
@@ -88,7 +88,7 @@ class GitSM(Git):
|
||||
subrevision[m] = module_hash.split()[2]
|
||||
|
||||
# Convert relative to absolute uri based on parent uri
|
||||
if uris[m].startswith('..') or uris[m].startswith('./'):
|
||||
if uris[m].startswith('..'):
|
||||
newud = copy.copy(ud)
|
||||
newud.path = os.path.realpath(os.path.join(newud.path, uris[m]))
|
||||
uris[m] = Git._get_repo_url(self, newud)
|
||||
@@ -115,9 +115,6 @@ class GitSM(Git):
|
||||
# This has to be a file reference
|
||||
proto = "file"
|
||||
url = "gitsm://" + uris[module]
|
||||
if "{}{}".format(ud.host, ud.path) in url:
|
||||
raise bb.fetch2.FetchError("Submodule refers to the parent repository. This will cause deadlock situation in current version of Bitbake." \
|
||||
"Consider using git fetcher instead.")
|
||||
|
||||
url += ';protocol=%s' % proto
|
||||
url += ";name=%s" % module
|
||||
@@ -166,7 +163,7 @@ class GitSM(Git):
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, need_update_submodule, d)
|
||||
|
||||
if need_update_list:
|
||||
if len(need_update_list) > 0:
|
||||
logger.debug('gitsm: Submodules requiring update: %s' % (' '.join(need_update_list)))
|
||||
return True
|
||||
|
||||
|
||||
@@ -57,6 +57,12 @@ class Local(FetchMethod):
|
||||
logger.debug2("Searching for %s in paths:\n %s" % (path, "\n ".join(filespath.split(":"))))
|
||||
newpath, hist = bb.utils.which(filespath, path, history=True)
|
||||
searched.extend(hist)
|
||||
if not os.path.exists(newpath):
|
||||
dldirfile = os.path.join(d.getVar("DL_DIR"), path)
|
||||
logger.debug2("Defaulting to %s for %s" % (dldirfile, path))
|
||||
bb.utils.mkdirhier(os.path.dirname(dldirfile))
|
||||
searched.append(dldirfile)
|
||||
return searched
|
||||
return searched
|
||||
|
||||
def need_update(self, ud, d):
|
||||
@@ -72,6 +78,8 @@ class Local(FetchMethod):
|
||||
filespath = d.getVar('FILESPATH')
|
||||
if filespath:
|
||||
locations = filespath.split(":")
|
||||
locations.append(d.getVar("DL_DIR"))
|
||||
|
||||
msg = "Unable to find file " + urldata.url + " anywhere. The paths that were searched were:\n " + "\n ".join(locations)
|
||||
raise FetchError(msg)
|
||||
|
||||
|
||||
@@ -52,13 +52,9 @@ def npm_filename(package, version):
|
||||
"""Get the filename of a npm package"""
|
||||
return npm_package(package) + "-" + version + ".tgz"
|
||||
|
||||
def npm_localfile(package, version=None):
|
||||
def npm_localfile(package, version):
|
||||
"""Get the local filename of a npm package"""
|
||||
if version is not None:
|
||||
filename = npm_filename(package, version)
|
||||
else:
|
||||
filename = package
|
||||
return os.path.join("npm2", filename)
|
||||
return os.path.join("npm2", npm_filename(package, version))
|
||||
|
||||
def npm_integrity(integrity):
|
||||
"""
|
||||
@@ -76,19 +72,23 @@ def npm_unpack(tarball, destdir, d):
|
||||
cmd += " --delay-directory-restore"
|
||||
cmd += " --strip-components=1"
|
||||
runfetchcmd(cmd, d, workdir=destdir)
|
||||
runfetchcmd("chmod -R +X '%s'" % (destdir), d, quiet=True, workdir=destdir)
|
||||
runfetchcmd("chmod -R +X %s" % (destdir), d, quiet=True, workdir=destdir)
|
||||
|
||||
class NpmEnvironment(object):
|
||||
"""
|
||||
Using a npm config file seems more reliable than using cli arguments.
|
||||
This class allows to create a controlled environment for npm commands.
|
||||
"""
|
||||
def __init__(self, d, configs=[], npmrc=None):
|
||||
def __init__(self, d, configs=None, npmrc=None):
|
||||
self.d = d
|
||||
|
||||
self.user_config = tempfile.NamedTemporaryFile(mode="w", buffering=1)
|
||||
for key, value in configs:
|
||||
self.user_config.write("%s=%s\n" % (key, value))
|
||||
if configs:
|
||||
self.user_config = tempfile.NamedTemporaryFile(mode="w", buffering=1)
|
||||
self.user_config_name = self.user_config.name
|
||||
for key, value in configs:
|
||||
self.user_config.write("%s=%s\n" % (key, value))
|
||||
else:
|
||||
self.user_config_name = "/dev/null"
|
||||
|
||||
if npmrc:
|
||||
self.global_config_name = npmrc
|
||||
@@ -109,7 +109,7 @@ class NpmEnvironment(object):
|
||||
workdir = tmpdir
|
||||
|
||||
def _run(cmd):
|
||||
cmd = "NPM_CONFIG_USERCONFIG=%s " % (self.user_config.name) + cmd
|
||||
cmd = "NPM_CONFIG_USERCONFIG=%s " % (self.user_config_name) + cmd
|
||||
cmd = "NPM_CONFIG_GLOBALCONFIG=%s " % (self.global_config_name) + cmd
|
||||
return runfetchcmd(cmd, d, workdir=workdir)
|
||||
|
||||
@@ -156,12 +156,12 @@ class Npm(FetchMethod):
|
||||
raise ParameterError("Invalid 'version' parameter", ud.url)
|
||||
|
||||
# Extract the 'registry' part of the url
|
||||
ud.registry = re.sub(r"^npm://", "https://", ud.url.split(";")[0])
|
||||
ud.registry = re.sub(r"^npm://", "http://", ud.url.split(";")[0])
|
||||
|
||||
# Using the 'downloadfilename' parameter as local filename
|
||||
# or the npm package name.
|
||||
if "downloadfilename" in ud.parm:
|
||||
ud.localfile = npm_localfile(d.expand(ud.parm["downloadfilename"]))
|
||||
ud.localfile = d.expand(ud.parm["downloadfilename"])
|
||||
else:
|
||||
ud.localfile = npm_localfile(ud.package, ud.version)
|
||||
|
||||
|
||||
@@ -30,8 +30,6 @@ from bb.fetch2.npm import npm_integrity
|
||||
from bb.fetch2.npm import npm_localfile
|
||||
from bb.fetch2.npm import npm_unpack
|
||||
from bb.utils import is_semver
|
||||
from bb.utils import lockfile
|
||||
from bb.utils import unlockfile
|
||||
|
||||
def foreach_dependencies(shrinkwrap, callback=None, dev=False):
|
||||
"""
|
||||
@@ -88,11 +86,7 @@ class NpmShrinkWrap(FetchMethod):
|
||||
version = params.get("version", None)
|
||||
|
||||
# Handle registry sources
|
||||
if is_semver(version) and integrity:
|
||||
# Handle duplicate dependencies without url
|
||||
if not resolved:
|
||||
return
|
||||
|
||||
if is_semver(version) and resolved and integrity:
|
||||
localfile = npm_localfile(name, version)
|
||||
|
||||
uri = URI(resolved)
|
||||
@@ -117,7 +111,7 @@ class NpmShrinkWrap(FetchMethod):
|
||||
|
||||
# Handle http tarball sources
|
||||
elif version.startswith("http") and integrity:
|
||||
localfile = npm_localfile(os.path.basename(version))
|
||||
localfile = os.path.join("npm2", os.path.basename(version))
|
||||
|
||||
uri = URI(version)
|
||||
uri.params["downloadfilename"] = localfile
|
||||
@@ -131,8 +125,6 @@ class NpmShrinkWrap(FetchMethod):
|
||||
|
||||
# Handle git sources
|
||||
elif version.startswith("git"):
|
||||
if version.startswith("github:"):
|
||||
version = "git+https://github.com/" + version[len("github:"):]
|
||||
regex = re.compile(r"""
|
||||
^
|
||||
git\+
|
||||
@@ -203,9 +195,7 @@ class NpmShrinkWrap(FetchMethod):
|
||||
proxy_ud = ud.proxy.ud[proxy_url]
|
||||
proxy_d = ud.proxy.d
|
||||
proxy_ud.setup_localpath(proxy_d)
|
||||
lf = lockfile(proxy_ud.lockfile)
|
||||
returns.append(handle(proxy_ud.method, proxy_ud, proxy_d))
|
||||
unlockfile(lf)
|
||||
return returns
|
||||
|
||||
def verify_donestamp(self, ud, d):
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
"""
|
||||
@@ -11,7 +9,6 @@ Based on the svn "Fetch" implementation.
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import bb
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
@@ -39,7 +36,6 @@ class Osc(FetchMethod):
|
||||
# Create paths to osc checkouts
|
||||
oscdir = d.getVar("OSCDIR") or (d.getVar("DL_DIR") + "/osc")
|
||||
relpath = self._strip_leading_slashes(ud.path)
|
||||
ud.oscdir = oscdir
|
||||
ud.pkgdir = os.path.join(oscdir, ud.host)
|
||||
ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module)
|
||||
|
||||
@@ -47,13 +43,13 @@ class Osc(FetchMethod):
|
||||
ud.revision = ud.parm['rev']
|
||||
else:
|
||||
pv = d.getVar("PV", False)
|
||||
rev = bb.fetch2.srcrev_internal_helper(ud, d, '')
|
||||
rev = bb.fetch2.srcrev_internal_helper(ud, d)
|
||||
if rev:
|
||||
ud.revision = rev
|
||||
else:
|
||||
ud.revision = ""
|
||||
|
||||
ud.localfile = d.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), relpath.replace('/', '.'), ud.revision))
|
||||
ud.localfile = d.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision))
|
||||
|
||||
def _buildosccommand(self, ud, d, command):
|
||||
"""
|
||||
@@ -63,49 +59,26 @@ class Osc(FetchMethod):
|
||||
|
||||
basecmd = d.getVar("FETCHCMD_osc") or "/usr/bin/env osc"
|
||||
|
||||
proto = ud.parm.get('protocol', 'https')
|
||||
proto = ud.parm.get('protocol', 'ocs')
|
||||
|
||||
options = []
|
||||
|
||||
config = "-c %s" % self.generate_config(ud, d)
|
||||
|
||||
if getattr(ud, 'revision', ''):
|
||||
if ud.revision:
|
||||
options.append("-r %s" % ud.revision)
|
||||
|
||||
coroot = self._strip_leading_slashes(ud.path)
|
||||
|
||||
if command == "fetch":
|
||||
osccmd = "%s %s -A %s://%s co %s/%s %s" % (basecmd, config, proto, ud.host, coroot, ud.module, " ".join(options))
|
||||
osccmd = "%s %s co %s/%s %s" % (basecmd, config, coroot, ud.module, " ".join(options))
|
||||
elif command == "update":
|
||||
osccmd = "%s %s -A %s://%s up %s" % (basecmd, config, proto, ud.host, " ".join(options))
|
||||
elif command == "api_source":
|
||||
osccmd = "%s %s -A %s://%s api source/%s/%s" % (basecmd, config, proto, ud.host, coroot, ud.module)
|
||||
osccmd = "%s %s up %s" % (basecmd, config, " ".join(options))
|
||||
else:
|
||||
raise FetchError("Invalid osc command %s" % command, ud.url)
|
||||
|
||||
return osccmd
|
||||
|
||||
def _latest_revision(self, ud, d, name):
|
||||
"""
|
||||
Fetch latest revision for the given package
|
||||
"""
|
||||
api_source_cmd = self._buildosccommand(ud, d, "api_source")
|
||||
|
||||
output = runfetchcmd(api_source_cmd, d)
|
||||
match = re.match(r'<directory ?.* rev="(\d+)".*>', output)
|
||||
if match is None:
|
||||
raise FetchError("Unable to parse osc response", ud.url)
|
||||
return match.groups()[0]
|
||||
|
||||
def _revision_key(self, ud, d, name):
|
||||
"""
|
||||
Return a unique key for the url
|
||||
"""
|
||||
# Collapse adjacent slashes
|
||||
slash_re = re.compile(r"/+")
|
||||
rev = getattr(ud, 'revision', "latest")
|
||||
return "osc:%s%s.%s.%s" % (ud.host, slash_re.sub(".", ud.path), name, rev)
|
||||
|
||||
def download(self, ud, d):
|
||||
"""
|
||||
Fetch url
|
||||
@@ -113,7 +86,7 @@ class Osc(FetchMethod):
|
||||
|
||||
logger.debug2("Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
|
||||
if os.access(ud.moddir, os.R_OK):
|
||||
if os.access(os.path.join(d.getVar('OSCDIR'), ud.path, ud.module), os.R_OK):
|
||||
oscupdatecmd = self._buildosccommand(ud, d, "update")
|
||||
logger.info("Update "+ ud.url)
|
||||
# update sources there
|
||||
@@ -141,23 +114,20 @@ class Osc(FetchMethod):
|
||||
Generate a .oscrc to be used for this run.
|
||||
"""
|
||||
|
||||
config_path = os.path.join(ud.oscdir, "oscrc")
|
||||
if not os.path.exists(ud.oscdir):
|
||||
bb.utils.mkdirhier(ud.oscdir)
|
||||
|
||||
config_path = os.path.join(d.getVar('OSCDIR'), "oscrc")
|
||||
if (os.path.exists(config_path)):
|
||||
os.remove(config_path)
|
||||
|
||||
f = open(config_path, 'w')
|
||||
proto = ud.parm.get('protocol', 'https')
|
||||
f.write("[general]\n")
|
||||
f.write("apiurl = %s://%s\n" % (proto, ud.host))
|
||||
f.write("apisrv = %s\n" % ud.host)
|
||||
f.write("scheme = http\n")
|
||||
f.write("su-wrapper = su -c\n")
|
||||
f.write("build-root = %s\n" % d.getVar('WORKDIR'))
|
||||
f.write("urllist = %s\n" % d.getVar("OSCURLLIST"))
|
||||
f.write("extra-pkgs = gzip\n")
|
||||
f.write("\n")
|
||||
f.write("[%s://%s]\n" % (proto, ud.host))
|
||||
f.write("[%s]\n" % ud.host)
|
||||
f.write("user = %s\n" % ud.parm["user"])
|
||||
f.write("pass = %s\n" % ud.parm["pswd"])
|
||||
f.close()
|
||||
|
||||
@@ -32,7 +32,6 @@ IETF secsh internet draft:
|
||||
|
||||
import re, os
|
||||
from bb.fetch2 import check_network_access, FetchMethod, ParameterError, runfetchcmd
|
||||
import urllib
|
||||
|
||||
|
||||
__pattern__ = re.compile(r'''
|
||||
@@ -41,9 +40,9 @@ __pattern__ = re.compile(r'''
|
||||
( # Optional username/password block
|
||||
(?P<user>\S+) # username
|
||||
(:(?P<pass>\S+))? # colon followed by the password (optional)
|
||||
)?
|
||||
(?P<cparam>(;[^;]+)*)? # connection parameters block (optional)
|
||||
@
|
||||
)?
|
||||
(?P<host>\S+?) # non-greedy match of the host
|
||||
(:(?P<port>[0-9]+))? # colon followed by the port (optional)
|
||||
/
|
||||
@@ -71,7 +70,6 @@ class SSH(FetchMethod):
|
||||
"git:// prefix with protocol=ssh", urldata.url)
|
||||
m = __pattern__.match(urldata.url)
|
||||
path = m.group('path')
|
||||
path = urllib.parse.unquote(path)
|
||||
host = m.group('host')
|
||||
urldata.localpath = os.path.join(d.getVar('DL_DIR'),
|
||||
os.path.basename(os.path.normpath(path)))
|
||||
@@ -98,11 +96,6 @@ class SSH(FetchMethod):
|
||||
fr += '@%s' % host
|
||||
else:
|
||||
fr = host
|
||||
|
||||
if path[0] != '~':
|
||||
path = '/%s' % path
|
||||
path = urllib.parse.unquote(path)
|
||||
|
||||
fr += ':%s' % path
|
||||
|
||||
cmd = 'scp -B -r %s %s %s/' % (
|
||||
@@ -115,41 +108,3 @@ class SSH(FetchMethod):
|
||||
|
||||
runfetchcmd(cmd, d)
|
||||
|
||||
def checkstatus(self, fetch, urldata, d):
|
||||
"""
|
||||
Check the status of the url
|
||||
"""
|
||||
m = __pattern__.match(urldata.url)
|
||||
path = m.group('path')
|
||||
host = m.group('host')
|
||||
port = m.group('port')
|
||||
user = m.group('user')
|
||||
password = m.group('pass')
|
||||
|
||||
if port:
|
||||
portarg = '-P %s' % port
|
||||
else:
|
||||
portarg = ''
|
||||
|
||||
if user:
|
||||
fr = user
|
||||
if password:
|
||||
fr += ':%s' % password
|
||||
fr += '@%s' % host
|
||||
else:
|
||||
fr = host
|
||||
|
||||
if path[0] != '~':
|
||||
path = '/%s' % path
|
||||
path = urllib.parse.unquote(path)
|
||||
|
||||
cmd = 'ssh -o BatchMode=true %s %s [ -f %s ]' % (
|
||||
portarg,
|
||||
fr,
|
||||
path
|
||||
)
|
||||
|
||||
check_network_access(d, cmd, urldata.url)
|
||||
runfetchcmd(cmd, d)
|
||||
|
||||
return True
|
||||
|
||||
@@ -106,22 +106,13 @@ class Wget(FetchMethod):
|
||||
|
||||
fetchcmd = self.basecmd
|
||||
|
||||
localpath = os.path.join(d.getVar("DL_DIR"), ud.localfile) + ".tmp"
|
||||
bb.utils.mkdirhier(os.path.dirname(localpath))
|
||||
fetchcmd += " -O %s" % shlex.quote(localpath)
|
||||
if 'downloadfilename' in ud.parm:
|
||||
localpath = os.path.join(d.getVar("DL_DIR"), ud.localfile)
|
||||
bb.utils.mkdirhier(os.path.dirname(localpath))
|
||||
fetchcmd += " -O %s" % shlex.quote(localpath)
|
||||
|
||||
if ud.user and ud.pswd:
|
||||
fetchcmd += " --auth-no-challenge"
|
||||
if ud.parm.get("redirectauth", "1") == "1":
|
||||
# An undocumented feature of wget is that if the
|
||||
# username/password are specified on the URI, wget will only
|
||||
# send the Authorization header to the first host and not to
|
||||
# any hosts that it is redirected to. With the increasing
|
||||
# usage of temporary AWS URLs, this difference now matters as
|
||||
# AWS will reject any request that has authentication both in
|
||||
# the query parameters (from the redirect) and in the
|
||||
# Authorization header.
|
||||
fetchcmd += " --user=%s --password=%s" % (ud.user, ud.pswd)
|
||||
fetchcmd += " --user=%s --password=%s --auth-no-challenge" % (ud.user, ud.pswd)
|
||||
|
||||
uri = ud.url.split(";")[0]
|
||||
if os.path.exists(ud.localpath):
|
||||
@@ -132,15 +123,6 @@ class Wget(FetchMethod):
|
||||
|
||||
self._runwget(ud, d, fetchcmd, False)
|
||||
|
||||
# Try and verify any checksum now, meaning if it isn't correct, we don't remove the
|
||||
# original file, which might be a race (imagine two recipes referencing the same
|
||||
# source, one with an incorrect checksum)
|
||||
bb.fetch2.verify_checksum(ud, d, localpath=localpath, fatal_nochecksum=False)
|
||||
|
||||
# Remove the ".tmp" and move the file into position atomically
|
||||
# Our lock prevents multiple writers but mirroring code may grab incomplete files
|
||||
os.rename(localpath, localpath[:-4])
|
||||
|
||||
# Sanity check since wget can pretend it succeed when it didn't
|
||||
# Also, this used to happen if sourceforge sent us to the mirror page
|
||||
if not os.path.exists(ud.localpath):
|
||||
@@ -236,7 +218,7 @@ class Wget(FetchMethod):
|
||||
# We let the request fail and expect it to be
|
||||
# tried once more ("try_again" in check_status()),
|
||||
# with the dead connection removed from the cache.
|
||||
# If it still fails, we give up, which can happen for bad
|
||||
# If it still fails, we give up, which can happend for bad
|
||||
# HTTP proxy settings.
|
||||
fetch.connection_cache.remove_connection(h.host, h.port)
|
||||
raise urllib.error.URLError(err)
|
||||
@@ -323,7 +305,15 @@ class Wget(FetchMethod):
|
||||
# Avoid tramping the environment too much by using bb.utils.environment
|
||||
# to scope the changes to the build_opener request, which is when the
|
||||
# environment lookups happen.
|
||||
newenv = bb.fetch2.get_fetcher_environment(d)
|
||||
newenv = {}
|
||||
for name in bb.fetch2.FETCH_EXPORT_VARS:
|
||||
value = d.getVar(name)
|
||||
if not value:
|
||||
origenv = d.getVar("BB_ORIGENV")
|
||||
if origenv:
|
||||
value = origenv.getVar(name)
|
||||
if value:
|
||||
newenv[name] = value
|
||||
|
||||
with bb.utils.environment(**newenv):
|
||||
import ssl
|
||||
@@ -593,7 +583,7 @@ class Wget(FetchMethod):
|
||||
|
||||
# src.rpm extension was added only for rpm package. Can be removed if the rpm
|
||||
# packaged will always be considered as having to be manually upgraded
|
||||
psuffix_regex = r"(tar\.\w+|tgz|zip|xz|rpm|bz2|orig\.tar\.\w+|src\.tar\.\w+|src\.tgz|svnr\d+\.tar\.\w+|stable\.tar\.\w+|src\.rpm)"
|
||||
psuffix_regex = r"(tar\.gz|tgz|tar\.bz2|zip|xz|tar\.lz|rpm|bz2|orig\.tar\.gz|tar\.xz|src\.tar\.gz|src\.tgz|svnr\d+\.tar\.bz2|stable\.tar\.gz|src\.rpm)"
|
||||
|
||||
# match name, version and archive type of a package
|
||||
package_regex_comp = re.compile(r"(?P<name>%s?\.?v?)(?P<pver>%s)(?P<arch>%s)?[\.-](?P<type>%s$)"
|
||||
|
||||
@@ -127,7 +127,7 @@ def create_bitbake_parser():
|
||||
help="Execute tasks from a specific .bb recipe directly. WARNING: Does "
|
||||
"not handle any dependencies from other recipes.")
|
||||
|
||||
parser.add_option("-k", "--continue", action="store_false", dest="halt", default=True,
|
||||
parser.add_option("-k", "--continue", action="store_false", dest="abort", default=True,
|
||||
help="Continue as much as possible after an error. While the target that "
|
||||
"failed and anything depending on it cannot be built, as much as "
|
||||
"possible will be built before stopping.")
|
||||
|
||||
@@ -76,12 +76,7 @@ def getDiskData(BBDirs):
|
||||
return None
|
||||
|
||||
action = pathSpaceInodeRe.group(1)
|
||||
if action == "ABORT":
|
||||
# Emit a deprecation warning
|
||||
logger.warnonce("The BB_DISKMON_DIRS \"ABORT\" action has been renamed to \"HALT\", update configuration")
|
||||
action = "HALT"
|
||||
|
||||
if action not in ("HALT", "STOPTASKS", "WARN"):
|
||||
if action not in ("ABORT", "STOPTASKS", "WARN"):
|
||||
printErr("Unknown disk space monitor action: %s" % action)
|
||||
return None
|
||||
|
||||
@@ -182,7 +177,7 @@ class diskMonitor:
|
||||
# use them to avoid printing too many warning messages
|
||||
self.preFreeS = {}
|
||||
self.preFreeI = {}
|
||||
# This is for STOPTASKS and HALT, to avoid printing the message
|
||||
# This is for STOPTASKS and ABORT, to avoid printing the message
|
||||
# repeatedly while waiting for the tasks to finish
|
||||
self.checked = {}
|
||||
for k in self.devDict:
|
||||
@@ -224,8 +219,8 @@ class diskMonitor:
|
||||
self.checked[k] = True
|
||||
rq.finish_runqueue(False)
|
||||
bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, path), self.configuration)
|
||||
elif action == "HALT" and not self.checked[k]:
|
||||
logger.error("Immediately halt since the disk space monitor action is \"HALT\"!")
|
||||
elif action == "ABORT" and not self.checked[k]:
|
||||
logger.error("Immediately abort since the disk space monitor action is \"ABORT\"!")
|
||||
self.checked[k] = True
|
||||
rq.finish_runqueue(True)
|
||||
bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, path), self.configuration)
|
||||
@@ -250,8 +245,8 @@ class diskMonitor:
|
||||
self.checked[k] = True
|
||||
rq.finish_runqueue(False)
|
||||
bb.event.fire(bb.event.DiskFull(dev, 'inode', freeInode, path), self.configuration)
|
||||
elif action == "HALT" and not self.checked[k]:
|
||||
logger.error("Immediately halt since the disk space monitor action is \"HALT\"!")
|
||||
elif action == "ABORT" and not self.checked[k]:
|
||||
logger.error("Immediately abort since the disk space monitor action is \"ABORT\"!")
|
||||
self.checked[k] = True
|
||||
rq.finish_runqueue(True)
|
||||
bb.event.fire(bb.event.DiskFull(dev, 'inode', freeInode, path), self.configuration)
|
||||
|
||||
@@ -30,9 +30,7 @@ class BBLogFormatter(logging.Formatter):
|
||||
PLAIN = logging.INFO + 1
|
||||
VERBNOTE = logging.INFO + 2
|
||||
ERROR = logging.ERROR
|
||||
ERRORONCE = logging.ERROR - 1
|
||||
WARNING = logging.WARNING
|
||||
WARNONCE = logging.WARNING - 1
|
||||
CRITICAL = logging.CRITICAL
|
||||
|
||||
levelnames = {
|
||||
@@ -44,9 +42,7 @@ class BBLogFormatter(logging.Formatter):
|
||||
PLAIN : '',
|
||||
VERBNOTE: 'NOTE',
|
||||
WARNING : 'WARNING',
|
||||
WARNONCE : 'WARNING',
|
||||
ERROR : 'ERROR',
|
||||
ERRORONCE : 'ERROR',
|
||||
CRITICAL: 'ERROR',
|
||||
}
|
||||
|
||||
@@ -62,9 +58,7 @@ class BBLogFormatter(logging.Formatter):
|
||||
PLAIN : BASECOLOR,
|
||||
VERBNOTE: BASECOLOR,
|
||||
WARNING : YELLOW,
|
||||
WARNONCE : YELLOW,
|
||||
ERROR : RED,
|
||||
ERRORONCE : RED,
|
||||
CRITICAL: RED,
|
||||
}
|
||||
|
||||
@@ -127,22 +121,6 @@ class BBLogFilter(object):
|
||||
return True
|
||||
return False
|
||||
|
||||
class LogFilterShowOnce(logging.Filter):
|
||||
def __init__(self):
|
||||
self.seen_warnings = set()
|
||||
self.seen_errors = set()
|
||||
|
||||
def filter(self, record):
|
||||
if record.levelno == bb.msg.BBLogFormatter.WARNONCE:
|
||||
if record.msg in self.seen_warnings:
|
||||
return False
|
||||
self.seen_warnings.add(record.msg)
|
||||
if record.levelno == bb.msg.BBLogFormatter.ERRORONCE:
|
||||
if record.msg in self.seen_errors:
|
||||
return False
|
||||
self.seen_errors.add(record.msg)
|
||||
return True
|
||||
|
||||
class LogFilterGEQLevel(logging.Filter):
|
||||
def __init__(self, level):
|
||||
self.strlevel = str(level)
|
||||
@@ -228,7 +206,6 @@ def logger_create(name, output=sys.stderr, level=logging.INFO, preserve_handlers
|
||||
"""Standalone logger creation function"""
|
||||
logger = logging.getLogger(name)
|
||||
console = logging.StreamHandler(output)
|
||||
console.addFilter(bb.msg.LogFilterShowOnce())
|
||||
format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
|
||||
if color == 'always' or (color == 'auto' and output.isatty()):
|
||||
format.enable_color()
|
||||
@@ -316,17 +293,10 @@ def setLoggingConfig(defaultconfig, userconfigfile=None):
|
||||
|
||||
# Convert all level parameters to integers in case users want to use the
|
||||
# bitbake defined level names
|
||||
for name, h in logconfig["handlers"].items():
|
||||
for h in logconfig["handlers"].values():
|
||||
if "level" in h:
|
||||
h["level"] = bb.msg.stringToLevel(h["level"])
|
||||
|
||||
# Every handler needs its own instance of the once filter.
|
||||
once_filter_name = name + ".showonceFilter"
|
||||
logconfig.setdefault("filters", {})[once_filter_name] = {
|
||||
"()": "bb.msg.LogFilterShowOnce",
|
||||
}
|
||||
h.setdefault("filters", []).append(once_filter_name)
|
||||
|
||||
for l in logconfig["loggers"].values():
|
||||
if "level" in l:
|
||||
l["level"] = bb.msg.stringToLevel(l["level"])
|
||||
|
||||
@@ -113,8 +113,6 @@ def init(fn, data):
|
||||
return h['init'](data)
|
||||
|
||||
def init_parser(d):
|
||||
if hasattr(bb.parse, "siggen"):
|
||||
bb.parse.siggen.exit()
|
||||
bb.parse.siggen = bb.siggen.init(d)
|
||||
|
||||
def resolve_file(fn, d):
|
||||
|
||||
@@ -130,10 +130,6 @@ class DataNode(AstNode):
|
||||
else:
|
||||
val = groupd["value"]
|
||||
|
||||
if ":append" in key or ":remove" in key or ":prepend" in key:
|
||||
if op in ["append", "prepend", "postdot", "predot", "ques"]:
|
||||
bb.warn(key + " " + groupd[op] + " is not a recommended operator combination, please replace it.")
|
||||
|
||||
flag = None
|
||||
if 'flag' in groupd and groupd['flag'] is not None:
|
||||
flag = groupd['flag']
|
||||
@@ -223,7 +219,7 @@ class ExportFuncsNode(AstNode):
|
||||
for flag in [ "func", "python" ]:
|
||||
if data.getVarFlag(calledfunc, flag, False):
|
||||
data.setVarFlag(func, flag, data.getVarFlag(calledfunc, flag, False))
|
||||
for flag in ["dirs", "cleandirs", "fakeroot"]:
|
||||
for flag in [ "dirs" ]:
|
||||
if data.getVarFlag(func, flag, False):
|
||||
data.setVarFlag(calledfunc, flag, data.getVarFlag(func, flag, False))
|
||||
data.setVarFlag(func, "filename", "autogenerated")
|
||||
@@ -333,10 +329,6 @@ def runAnonFuncs(d):
|
||||
def finalize(fn, d, variant = None):
|
||||
saved_handlers = bb.event.get_handlers().copy()
|
||||
try:
|
||||
# Found renamed variables. Exit immediately
|
||||
if d.getVar("_FAILPARSINGERRORHANDLED", False) == True:
|
||||
raise bb.BBHandledException()
|
||||
|
||||
for var in d.getVar('__BBHANDLERS', False) or []:
|
||||
# try to add the handler
|
||||
handlerfn = d.getVarFlag(var, "filename", False)
|
||||
|
||||
@@ -44,36 +44,23 @@ def inherit(files, fn, lineno, d):
|
||||
__inherit_cache = d.getVar('__inherit_cache', False) or []
|
||||
files = d.expand(files).split()
|
||||
for file in files:
|
||||
classtype = d.getVar("__bbclasstype", False)
|
||||
origfile = file
|
||||
for t in ["classes-" + classtype, "classes"]:
|
||||
file = origfile
|
||||
if not os.path.isabs(file) and not file.endswith(".bbclass"):
|
||||
file = os.path.join(t, '%s.bbclass' % file)
|
||||
if not os.path.isabs(file) and not file.endswith(".bbclass"):
|
||||
file = os.path.join('classes', '%s.bbclass' % file)
|
||||
|
||||
if not os.path.isabs(file):
|
||||
bbpath = d.getVar("BBPATH")
|
||||
abs_fn, attempts = bb.utils.which(bbpath, file, history=True)
|
||||
for af in attempts:
|
||||
if af != abs_fn:
|
||||
bb.parse.mark_dependency(d, af)
|
||||
if abs_fn:
|
||||
file = abs_fn
|
||||
|
||||
if os.path.exists(file):
|
||||
break
|
||||
|
||||
if not os.path.exists(file):
|
||||
raise ParseError("Could not inherit file %s" % (file), fn, lineno)
|
||||
if not os.path.isabs(file):
|
||||
bbpath = d.getVar("BBPATH")
|
||||
abs_fn, attempts = bb.utils.which(bbpath, file, history=True)
|
||||
for af in attempts:
|
||||
if af != abs_fn:
|
||||
bb.parse.mark_dependency(d, af)
|
||||
if abs_fn:
|
||||
file = abs_fn
|
||||
|
||||
if not file in __inherit_cache:
|
||||
logger.debug("Inheriting %s (from %s:%d)" % (file, fn, lineno))
|
||||
__inherit_cache.append( file )
|
||||
d.setVar('__inherit_cache', __inherit_cache)
|
||||
try:
|
||||
bb.parse.handle(file, d, True)
|
||||
except (IOError, OSError) as exc:
|
||||
raise ParseError("Could not inherit file %s: %s" % (fn, exc.strerror), fn, lineno)
|
||||
include(fn, file, lineno, d, "inherit")
|
||||
__inherit_cache = d.getVar('__inherit_cache', False) or []
|
||||
|
||||
def get_statements(filename, absolute_filename, base_name):
|
||||
@@ -191,10 +178,10 @@ def feeder(lineno, s, fn, root, statements, eof=False):
|
||||
|
||||
if s and s[0] == '#':
|
||||
if len(__residue__) != 0 and __residue__[0][0] != "#":
|
||||
bb.fatal("There is a comment on line %s of file %s:\n'''\n%s\n'''\nwhich is in the middle of a multiline expression. This syntax is invalid, please correct it." % (lineno, fn, s))
|
||||
bb.fatal("There is a comment on line %s of file %s (%s) which is in the middle of a multiline expression.\nBitbake used to ignore these but no longer does so, please fix your metadata as errors are likely as a result of this change." % (lineno, fn, s))
|
||||
|
||||
if len(__residue__) != 0 and __residue__[0][0] == "#" and (not s or s[0] != "#"):
|
||||
bb.fatal("There is a confusing multiline partially commented expression on line %s of file %s:\n%s\nPlease clarify whether this is all a comment or should be parsed." % (lineno - len(__residue__), fn, "\n".join(__residue__)))
|
||||
bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s))
|
||||
|
||||
if s and s[-1] == '\\':
|
||||
__residue__.append(s[:-1])
|
||||
|
||||
@@ -48,7 +48,10 @@ __unset_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
|
||||
__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)\[([a-zA-Z0-9\-_+.]+)\]$" )
|
||||
|
||||
def init(data):
|
||||
return
|
||||
topdir = data.getVar('TOPDIR', False)
|
||||
if not topdir:
|
||||
data.setVar('TOPDIR', os.getcwd())
|
||||
|
||||
|
||||
def supports(fn, d):
|
||||
return fn[-5:] == ".conf"
|
||||
@@ -125,21 +128,16 @@ def handle(fn, data, include):
|
||||
s = f.readline()
|
||||
if not s:
|
||||
break
|
||||
origlineno = lineno
|
||||
origline = s
|
||||
w = s.strip()
|
||||
# skip empty lines
|
||||
if not w:
|
||||
continue
|
||||
s = s.rstrip()
|
||||
while s[-1] == '\\':
|
||||
line = f.readline()
|
||||
origline += line
|
||||
s2 = line.rstrip()
|
||||
s2 = f.readline().rstrip()
|
||||
lineno = lineno + 1
|
||||
if (not s2 or s2 and s2[0] != "#") and s[0] == "#" :
|
||||
bb.fatal("There is a confusing multiline, partially commented expression starting on line %s of file %s:\n%s\nPlease clarify whether this is all a comment or should be parsed." % (origlineno, fn, origline))
|
||||
|
||||
bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s))
|
||||
s = s[:-1] + s2
|
||||
# skip comments
|
||||
if s[0] == '#':
|
||||
@@ -152,6 +150,8 @@ def handle(fn, data, include):
|
||||
if oldfile:
|
||||
data.setVar('FILE', oldfile)
|
||||
|
||||
f.close()
|
||||
|
||||
for f in confFilters:
|
||||
f(fn, data)
|
||||
|
||||
|
||||
@@ -63,7 +63,7 @@ class SQLTable(collections.abc.MutableMapping):
|
||||
"""
|
||||
Decorator that starts a database transaction and creates a database
|
||||
cursor for performing queries. If no exception is thrown, the
|
||||
database results are committed. If an exception occurs, the database
|
||||
database results are commited. If an exception occurs, the database
|
||||
is rolled back. In all cases, the cursor is closed after the
|
||||
function ends.
|
||||
|
||||
@@ -208,7 +208,7 @@ class SQLTable(collections.abc.MutableMapping):
|
||||
|
||||
def __lt__(self, other):
|
||||
if not isinstance(other, Mapping):
|
||||
raise NotImplementedError()
|
||||
raise NotImplemented
|
||||
|
||||
return len(self) < len(other)
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -62,7 +60,7 @@ class Popen(subprocess.Popen):
|
||||
"close_fds": True,
|
||||
"preexec_fn": subprocess_setup,
|
||||
"stdout": subprocess.PIPE,
|
||||
"stderr": subprocess.PIPE,
|
||||
"stderr": subprocess.STDOUT,
|
||||
"stdin": subprocess.PIPE,
|
||||
"shell": False,
|
||||
}
|
||||
@@ -144,7 +142,7 @@ def _logged_communicate(pipe, log, input, extrafiles):
|
||||
while pipe.poll() is None:
|
||||
read_all_pipes(log, rin, outdata, errdata)
|
||||
|
||||
# Process closed, drain all pipes...
|
||||
# Pocess closed, drain all pipes...
|
||||
read_all_pipes(log, rin, outdata, errdata)
|
||||
finally:
|
||||
log.flush()
|
||||
|
||||
@@ -148,7 +148,7 @@ class MultiStageProgressReporter:
|
||||
for tasks made up of python code spread across multiple
|
||||
classes / functions - the progress reporter object can
|
||||
be passed around or stored at the object level and calls
|
||||
to next_stage() and update() made wherever needed.
|
||||
to next_stage() and update() made whereever needed.
|
||||
"""
|
||||
def __init__(self, d, stage_weights, debug=False):
|
||||
"""
|
||||
|
||||
@@ -396,8 +396,8 @@ def getRuntimeProviders(dataCache, rdepend):
|
||||
return rproviders
|
||||
|
||||
# Only search dynamic packages if we can't find anything in other variables
|
||||
for pat_key in dataCache.packages_dynamic:
|
||||
pattern = pat_key.replace(r'+', r"\+")
|
||||
for pattern in dataCache.packages_dynamic:
|
||||
pattern = pattern.replace(r'+', r"\+")
|
||||
if pattern in regexp_cache:
|
||||
regexp = regexp_cache[pattern]
|
||||
else:
|
||||
@@ -408,7 +408,7 @@ def getRuntimeProviders(dataCache, rdepend):
|
||||
raise
|
||||
regexp_cache[pattern] = regexp
|
||||
if regexp.match(rdepend):
|
||||
rproviders += dataCache.packages_dynamic[pat_key]
|
||||
rproviders += dataCache.packages_dynamic[pattern]
|
||||
logger.debug("Assuming %s is a dynamic package, but it may not exist" % rdepend)
|
||||
|
||||
return rproviders
|
||||
|
||||
@@ -24,7 +24,6 @@ import pickle
|
||||
from multiprocessing import Process
|
||||
import shlex
|
||||
import pprint
|
||||
import time
|
||||
|
||||
bblogger = logging.getLogger("BitBake")
|
||||
logger = logging.getLogger("BitBake.RunQueue")
|
||||
@@ -160,55 +159,6 @@ class RunQueueScheduler(object):
|
||||
self.buildable.append(tid)
|
||||
|
||||
self.rev_prio_map = None
|
||||
self.is_pressure_usable()
|
||||
|
||||
def is_pressure_usable(self):
|
||||
"""
|
||||
If monitoring pressure, return True if pressure files can be open and read. For example
|
||||
openSUSE /proc/pressure/* files have readable file permissions but when read the error EOPNOTSUPP (Operation not supported)
|
||||
is returned.
|
||||
"""
|
||||
if self.rq.max_cpu_pressure or self.rq.max_io_pressure or self.rq.max_memory_pressure:
|
||||
try:
|
||||
with open("/proc/pressure/cpu") as cpu_pressure_fds, \
|
||||
open("/proc/pressure/io") as io_pressure_fds, \
|
||||
open("/proc/pressure/memory") as memory_pressure_fds:
|
||||
|
||||
self.prev_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1]
|
||||
self.prev_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1]
|
||||
self.prev_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1]
|
||||
self.prev_pressure_time = time.time()
|
||||
self.check_pressure = True
|
||||
except:
|
||||
bb.note("The /proc/pressure files can't be read. Continuing build without monitoring pressure")
|
||||
self.check_pressure = False
|
||||
else:
|
||||
self.check_pressure = False
|
||||
|
||||
def exceeds_max_pressure(self):
|
||||
"""
|
||||
Monitor the difference in total pressure at least once per second, if
|
||||
BB_PRESSURE_MAX_{CPU|IO|MEMORY} are set, return True if above threshold.
|
||||
"""
|
||||
if self.check_pressure:
|
||||
with open("/proc/pressure/cpu") as cpu_pressure_fds, \
|
||||
open("/proc/pressure/io") as io_pressure_fds, \
|
||||
open("/proc/pressure/memory") as memory_pressure_fds:
|
||||
# extract "total" from /proc/pressure/{cpu|io}
|
||||
curr_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1]
|
||||
curr_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1]
|
||||
curr_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1]
|
||||
exceeds_cpu_pressure = self.rq.max_cpu_pressure and (float(curr_cpu_pressure) - float(self.prev_cpu_pressure)) > self.rq.max_cpu_pressure
|
||||
exceeds_io_pressure = self.rq.max_io_pressure and (float(curr_io_pressure) - float(self.prev_io_pressure)) > self.rq.max_io_pressure
|
||||
exceeds_memory_pressure = self.rq.max_memory_pressure and (float(curr_memory_pressure) - float(self.prev_memory_pressure)) > self.rq.max_memory_pressure
|
||||
now = time.time()
|
||||
if now - self.prev_pressure_time > 1.0:
|
||||
self.prev_cpu_pressure = curr_cpu_pressure
|
||||
self.prev_io_pressure = curr_io_pressure
|
||||
self.prev_memory_pressure = curr_memory_pressure
|
||||
self.prev_pressure_time = now
|
||||
return (exceeds_cpu_pressure or exceeds_io_pressure or exceeds_memory_pressure)
|
||||
return False
|
||||
|
||||
def next_buildable_task(self):
|
||||
"""
|
||||
@@ -222,12 +172,6 @@ class RunQueueScheduler(object):
|
||||
if not buildable:
|
||||
return None
|
||||
|
||||
# Bitbake requires that at least one task be active. Only check for pressure if
|
||||
# this is the case, otherwise the pressure limitation could result in no tasks
|
||||
# being active and no new tasks started thereby, at times, breaking the scheduler.
|
||||
if self.rq.stats.active and self.exceeds_max_pressure():
|
||||
return None
|
||||
|
||||
# Filter out tasks that have a max number of threads that have been exceeded
|
||||
skip_buildable = {}
|
||||
for running in self.rq.runq_running.difference(self.rq.runq_complete):
|
||||
@@ -441,9 +385,10 @@ class RunQueueData:
|
||||
self.rq = rq
|
||||
self.warn_multi_bb = False
|
||||
|
||||
self.multi_provider_allowed = (cfgData.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
|
||||
self.setscene_ignore_tasks = get_setscene_enforce_ignore_tasks(cfgData, targets)
|
||||
self.setscene_ignore_tasks_checked = False
|
||||
self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
|
||||
self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
|
||||
self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData, targets)
|
||||
self.setscenewhitelist_checked = False
|
||||
self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
|
||||
self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()
|
||||
|
||||
@@ -541,7 +486,7 @@ class RunQueueData:
|
||||
msgs.append(" Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
|
||||
msgs.append("\n")
|
||||
if len(valid_chains) > 10:
|
||||
msgs.append("Halted dependency loops search after 10 matches.\n")
|
||||
msgs.append("Aborted dependency loops search after 10 matches.\n")
|
||||
raise TooManyLoops
|
||||
continue
|
||||
scan = False
|
||||
@@ -602,7 +547,7 @@ class RunQueueData:
|
||||
next_points.append(revdep)
|
||||
task_done[revdep] = True
|
||||
endpoints = next_points
|
||||
if not next_points:
|
||||
if len(next_points) == 0:
|
||||
break
|
||||
|
||||
# Circular dependency sanity check
|
||||
@@ -644,7 +589,7 @@ class RunQueueData:
|
||||
|
||||
found = False
|
||||
for mc in self.taskData:
|
||||
if taskData[mc].taskentries:
|
||||
if len(taskData[mc].taskentries) > 0:
|
||||
found = True
|
||||
break
|
||||
if not found:
|
||||
@@ -828,7 +773,7 @@ class RunQueueData:
|
||||
# Find the dependency chain endpoints
|
||||
endpoints = set()
|
||||
for tid in self.runtaskentries:
|
||||
if not deps[tid]:
|
||||
if len(deps[tid]) == 0:
|
||||
endpoints.add(tid)
|
||||
# Iterate the chains collating dependencies
|
||||
while endpoints:
|
||||
@@ -839,11 +784,11 @@ class RunQueueData:
|
||||
cumulativedeps[dep].update(cumulativedeps[tid])
|
||||
if tid in deps[dep]:
|
||||
deps[dep].remove(tid)
|
||||
if not deps[dep]:
|
||||
if len(deps[dep]) == 0:
|
||||
next.add(dep)
|
||||
endpoints = next
|
||||
#for tid in deps:
|
||||
# if deps[tid]:
|
||||
# if len(deps[tid]) != 0:
|
||||
# bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))
|
||||
|
||||
# Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
|
||||
@@ -1011,7 +956,7 @@ class RunQueueData:
|
||||
del self.runtaskentries[tid]
|
||||
|
||||
if self.cooker.configuration.runall:
|
||||
if not self.runtaskentries:
|
||||
if len(self.runtaskentries) == 0:
|
||||
bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
|
||||
|
||||
self.init_progress_reporter.next_stage()
|
||||
@@ -1036,7 +981,7 @@ class RunQueueData:
|
||||
delcount.add(tid)
|
||||
del self.runtaskentries[tid]
|
||||
|
||||
if not self.runtaskentries:
|
||||
if len(self.runtaskentries) == 0:
|
||||
bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))
|
||||
|
||||
#
|
||||
@@ -1044,8 +989,8 @@ class RunQueueData:
|
||||
#
|
||||
|
||||
# Check to make sure we still have tasks to run
|
||||
if not self.runtaskentries:
|
||||
if not taskData[''].halt:
|
||||
if len(self.runtaskentries) == 0:
|
||||
if not taskData[''].abort:
|
||||
bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
|
||||
else:
|
||||
bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
|
||||
@@ -1068,7 +1013,7 @@ class RunQueueData:
|
||||
endpoints = []
|
||||
for tid in self.runtaskentries:
|
||||
revdeps = self.runtaskentries[tid].revdeps
|
||||
if not revdeps:
|
||||
if len(revdeps) == 0:
|
||||
endpoints.append(tid)
|
||||
for dep in revdeps:
|
||||
if dep in self.runtaskentries[tid].depends:
|
||||
@@ -1104,7 +1049,7 @@ class RunQueueData:
|
||||
for prov in prov_list:
|
||||
if len(prov_list[prov]) < 2:
|
||||
continue
|
||||
if prov in self.multi_provider_allowed:
|
||||
if prov in self.multi_provider_whitelist:
|
||||
continue
|
||||
seen_pn = []
|
||||
# If two versions of the same PN are being built its fatal, we don't support it.
|
||||
@@ -1114,12 +1059,12 @@ class RunQueueData:
|
||||
seen_pn.append(pn)
|
||||
else:
|
||||
bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
|
||||
msgs = ["Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))]
|
||||
msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
|
||||
#
|
||||
# Construct a list of things which uniquely depend on each provider
|
||||
# since this may help the user figure out which dependency is triggering this warning
|
||||
#
|
||||
msgs.append("\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from.")
|
||||
msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
|
||||
deplist = {}
|
||||
commondeps = None
|
||||
for provfn in prov_list[prov]:
|
||||
@@ -1139,12 +1084,12 @@ class RunQueueData:
|
||||
commondeps &= deps
|
||||
deplist[provfn] = deps
|
||||
for provfn in deplist:
|
||||
msgs.append("\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps)))
|
||||
msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
|
||||
#
|
||||
# Construct a list of provides and runtime providers for each recipe
|
||||
# (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
|
||||
#
|
||||
msgs.append("\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful.")
|
||||
msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
|
||||
provide_results = {}
|
||||
rprovide_results = {}
|
||||
commonprovs = None
|
||||
@@ -1171,18 +1116,29 @@ class RunQueueData:
|
||||
else:
|
||||
commonrprovs &= rprovides
|
||||
rprovide_results[provfn] = rprovides
|
||||
#msgs.append("\nCommon provides:\n %s" % ("\n ".join(commonprovs)))
|
||||
#msgs.append("\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs)))
|
||||
#msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
|
||||
#msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
|
||||
for provfn in prov_list[prov]:
|
||||
msgs.append("\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs)))
|
||||
msgs.append("\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs)))
|
||||
msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
|
||||
msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
|
||||
|
||||
if self.warn_multi_bb:
|
||||
logger.verbnote("".join(msgs))
|
||||
logger.verbnote(msg)
|
||||
else:
|
||||
logger.error("".join(msgs))
|
||||
logger.error(msg)
|
||||
|
||||
self.init_progress_reporter.next_stage()
|
||||
|
||||
# Create a whitelist usable by the stamp checks
|
||||
self.stampfnwhitelist = {}
|
||||
for mc in self.taskData:
|
||||
self.stampfnwhitelist[mc] = []
|
||||
for entry in self.stampwhitelist.split():
|
||||
if entry not in self.taskData[mc].build_targets:
|
||||
continue
|
||||
fn = self.taskData.build_targets[entry][0]
|
||||
self.stampfnwhitelist[mc].append(fn)
|
||||
|
||||
self.init_progress_reporter.next_stage()
|
||||
|
||||
# Iterate over the task list looking for tasks with a 'setscene' function
|
||||
@@ -1230,9 +1186,9 @@ class RunQueueData:
|
||||
# Iterate over the task list and call into the siggen code
|
||||
dealtwith = set()
|
||||
todeal = set(self.runtaskentries)
|
||||
while todeal:
|
||||
while len(todeal) > 0:
|
||||
for tid in todeal.copy():
|
||||
if not (self.runtaskentries[tid].depends - dealtwith):
|
||||
if len(self.runtaskentries[tid].depends - dealtwith) == 0:
|
||||
dealtwith.add(tid)
|
||||
todeal.remove(tid)
|
||||
self.prepare_task_hash(tid)
|
||||
@@ -1271,6 +1227,7 @@ class RunQueue:
|
||||
self.cfgData = cfgData
|
||||
self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)
|
||||
|
||||
self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
|
||||
self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
|
||||
self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None
|
||||
|
||||
@@ -1399,6 +1356,14 @@ class RunQueue:
|
||||
if taskname is None:
|
||||
taskname = tn
|
||||
|
||||
if self.stamppolicy == "perfile":
|
||||
fulldeptree = False
|
||||
else:
|
||||
fulldeptree = True
|
||||
stampwhitelist = []
|
||||
if self.stamppolicy == "whitelist":
|
||||
stampwhitelist = self.rqdata.stampfnwhitelist[mc]
|
||||
|
||||
stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
|
||||
|
||||
# If the stamp is missing, it's not current
|
||||
@@ -1430,7 +1395,7 @@ class RunQueue:
|
||||
continue
|
||||
if t3 and t3 > t2:
|
||||
continue
|
||||
if fn == fn2:
|
||||
if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
|
||||
if not t2:
|
||||
logger.debug2('Stampfile %s does not exist', stampfile2)
|
||||
iscurrent = False
|
||||
@@ -1480,7 +1445,7 @@ class RunQueue:
|
||||
"""
|
||||
Run the tasks in a queue prepared by rqdata.prepare()
|
||||
Upon failure, optionally try to recover the build using any alternate providers
|
||||
(if the halt on failure configuration option isn't set)
|
||||
(if the abort on failure configuration option isn't set)
|
||||
"""
|
||||
|
||||
retval = True
|
||||
@@ -1533,10 +1498,10 @@ class RunQueue:
|
||||
self.rqexe = RunQueueExecute(self)
|
||||
|
||||
# If we don't have any setscene functions, skip execution
|
||||
if not self.rqdata.runq_setscene_tids:
|
||||
if len(self.rqdata.runq_setscene_tids) == 0:
|
||||
logger.info('No setscene tasks')
|
||||
for tid in self.rqdata.runtaskentries:
|
||||
if not self.rqdata.runtaskentries[tid].depends:
|
||||
if len(self.rqdata.runtaskentries[tid].depends) == 0:
|
||||
self.rqexe.setbuildable(tid)
|
||||
self.rqexe.tasks_notcovered.add(tid)
|
||||
self.rqexe.sqdone = True
|
||||
@@ -1730,7 +1695,7 @@ class RunQueue:
|
||||
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
|
||||
pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
|
||||
h = self.rqdata.runtaskentries[tid].hash
|
||||
matches = bb.siggen.find_siginfo(pn, taskname, [], self.cooker.databuilder.mcdata[mc])
|
||||
matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
|
||||
match = None
|
||||
for m in matches:
|
||||
if h in m:
|
||||
@@ -1755,9 +1720,6 @@ class RunQueueExecute:
|
||||
|
||||
self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
|
||||
self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"
|
||||
self.max_cpu_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_CPU")
|
||||
self.max_io_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_IO")
|
||||
self.max_memory_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_MEMORY")
|
||||
|
||||
self.sq_buildable = set()
|
||||
self.sq_running = set()
|
||||
@@ -1792,29 +1754,6 @@ class RunQueueExecute:
|
||||
if self.number_tasks <= 0:
|
||||
bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)
|
||||
|
||||
lower_limit = 1.0
|
||||
upper_limit = 1000000.0
|
||||
if self.max_cpu_pressure:
|
||||
self.max_cpu_pressure = float(self.max_cpu_pressure)
|
||||
if self.max_cpu_pressure < lower_limit:
|
||||
bb.fatal("Invalid BB_PRESSURE_MAX_CPU %s, minimum value is %s." % (self.max_cpu_pressure, lower_limit))
|
||||
if self.max_cpu_pressure > upper_limit:
|
||||
bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_CPU is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_cpu_pressure))
|
||||
|
||||
if self.max_io_pressure:
|
||||
self.max_io_pressure = float(self.max_io_pressure)
|
||||
if self.max_io_pressure < lower_limit:
|
||||
bb.fatal("Invalid BB_PRESSURE_MAX_IO %s, minimum value is %s." % (self.max_io_pressure, lower_limit))
|
||||
if self.max_io_pressure > upper_limit:
|
||||
bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_IO is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure))
|
||||
|
||||
if self.max_memory_pressure:
|
||||
self.max_memory_pressure = float(self.max_memory_pressure)
|
||||
if self.max_memory_pressure < lower_limit:
|
||||
bb.fatal("Invalid BB_PRESSURE_MAX_MEMORY %s, minimum value is %s." % (self.max_memory_pressure, lower_limit))
|
||||
if self.max_memory_pressure > upper_limit:
|
||||
bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_MEMORY is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure))
|
||||
|
||||
# List of setscene tasks which we've covered
|
||||
self.scenequeue_covered = set()
|
||||
# List of tasks which are covered (including setscene ones)
|
||||
@@ -1839,7 +1778,7 @@ class RunQueueExecute:
|
||||
bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
|
||||
(self.scheduler, ", ".join(obj.name for obj in schedulers)))
|
||||
|
||||
#if self.rqdata.runq_setscene_tids:
|
||||
#if len(self.rqdata.runq_setscene_tids) > 0:
|
||||
self.sqdata = SQData()
|
||||
build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
|
||||
|
||||
@@ -1880,7 +1819,7 @@ class RunQueueExecute:
|
||||
# worker must have died?
|
||||
pass
|
||||
|
||||
if self.failed_tids:
|
||||
if len(self.failed_tids) != 0:
|
||||
self.rq.state = runQueueFailed
|
||||
return
|
||||
|
||||
@@ -1896,7 +1835,7 @@ class RunQueueExecute:
|
||||
self.rq.read_workers()
|
||||
return self.rq.active_fds()
|
||||
|
||||
if self.failed_tids:
|
||||
if len(self.failed_tids) != 0:
|
||||
self.rq.state = runQueueFailed
|
||||
return True
|
||||
|
||||
@@ -1994,7 +1933,7 @@ class RunQueueExecute:
|
||||
self.stats.taskFailed()
|
||||
self.failed_tids.append(task)
|
||||
|
||||
fakeroot_log = []
|
||||
fakeroot_log = ""
|
||||
if fakerootlog and os.path.exists(fakerootlog):
|
||||
with open(fakerootlog) as fakeroot_log_file:
|
||||
fakeroot_failed = False
|
||||
@@ -2004,14 +1943,14 @@ class RunQueueExecute:
|
||||
fakeroot_failed = True
|
||||
if 'doing new pid setup and server start' in line:
|
||||
break
|
||||
fakeroot_log.append(line)
|
||||
fakeroot_log = line + fakeroot_log
|
||||
|
||||
if not fakeroot_failed:
|
||||
fakeroot_log = []
|
||||
fakeroot_log = None
|
||||
|
||||
bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq, fakeroot_log=("".join(fakeroot_log) or None)), self.cfgData)
|
||||
bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq, fakeroot_log=fakeroot_log), self.cfgData)
|
||||
|
||||
if self.rqdata.taskData[''].halt:
|
||||
if self.rqdata.taskData[''].abort:
|
||||
self.rq.state = runQueueCleanUp
|
||||
|
||||
def task_skip(self, task, reason):
|
||||
@@ -2060,7 +1999,7 @@ class RunQueueExecute:
|
||||
if x not in self.tasks_scenequeue_done:
|
||||
logger.error("Task %s was never processed by the setscene code" % x)
|
||||
err = True
|
||||
if not self.rqdata.runtaskentries[x].depends and x not in self.runq_buildable:
|
||||
if len(self.rqdata.runtaskentries[x].depends) == 0 and x not in self.runq_buildable:
|
||||
logger.error("Task %s was never marked as buildable by the setscene code" % x)
|
||||
err = True
|
||||
return err
|
||||
@@ -2084,7 +2023,7 @@ class RunQueueExecute:
|
||||
# Find the next setscene to run
|
||||
for nexttask in self.sorted_setscene_tids:
|
||||
if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
|
||||
if nexttask not in self.sqdata.unskippable and self.sqdata.sq_revdeps[nexttask] and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
|
||||
if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
|
||||
if nexttask not in self.rqdata.target_tids:
|
||||
logger.debug2("Skipping setscene for task %s" % nexttask)
|
||||
self.sq_task_skip(nexttask)
|
||||
@@ -2189,9 +2128,9 @@ class RunQueueExecute:
|
||||
if task is not None:
|
||||
(mc, fn, taskname, taskfn) = split_tid_mcfn(task)
|
||||
|
||||
if self.rqdata.setscene_ignore_tasks is not None:
|
||||
if self.check_setscene_ignore_tasks(task):
|
||||
self.task_fail(task, "setscene ignore_tasks")
|
||||
if self.rqdata.setscenewhitelist is not None:
|
||||
if self.check_setscenewhitelist(task):
|
||||
self.task_fail(task, "setscene whitelist")
|
||||
return True
|
||||
|
||||
if task in self.tasks_covered:
|
||||
@@ -2248,18 +2187,19 @@ class RunQueueExecute:
|
||||
if self.can_start_task():
|
||||
return True
|
||||
|
||||
if self.stats.active > 0 or self.sq_live:
|
||||
if self.stats.active > 0 or len(self.sq_live) > 0:
|
||||
self.rq.read_workers()
|
||||
return self.rq.active_fds()
|
||||
|
||||
# No more tasks can be run. If we have deferred setscene tasks we should run them.
|
||||
if self.sq_deferred:
|
||||
deferred_tid = list(self.sq_deferred.keys())[0]
|
||||
blocking_tid = self.sq_deferred.pop(deferred_tid)
|
||||
logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s blocked by %s" % (deferred_tid, blocking_tid))
|
||||
tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
|
||||
logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid)
|
||||
if tid not in self.runq_complete:
|
||||
self.sq_task_failoutright(tid)
|
||||
return True
|
||||
|
||||
if self.failed_tids:
|
||||
if len(self.failed_tids) != 0:
|
||||
self.rq.state = runQueueFailed
|
||||
return True
|
||||
|
||||
@@ -2338,7 +2278,7 @@ class RunQueueExecute:
|
||||
covered.intersection_update(self.tasks_scenequeue_done)
|
||||
|
||||
for tid in notcovered | covered:
|
||||
if not self.rqdata.runtaskentries[tid].depends:
|
||||
if len(self.rqdata.runtaskentries[tid].depends) == 0:
|
||||
self.setbuildable(tid)
|
||||
elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete):
|
||||
self.setbuildable(tid)
|
||||
@@ -2380,9 +2320,6 @@ class RunQueueExecute:
|
||||
self.rqdata.runtaskentries[hashtid].unihash = unihash
|
||||
bb.parse.siggen.set_unihash(hashtid, unihash)
|
||||
toprocess.add(hashtid)
|
||||
if torehash:
|
||||
# Need to save after set_unihash above
|
||||
bb.parse.siggen.save_unitaskhashes()
|
||||
|
||||
# Work out all tasks which depend upon these
|
||||
total = set()
|
||||
@@ -2400,7 +2337,7 @@ class RunQueueExecute:
|
||||
# Now iterate those tasks in dependency order to regenerate their taskhash/unihash
|
||||
next = set()
|
||||
for p in total:
|
||||
if not self.rqdata.runtaskentries[p].depends:
|
||||
if len(self.rqdata.runtaskentries[p].depends) == 0:
|
||||
next.add(p)
|
||||
elif self.rqdata.runtaskentries[p].depends.isdisjoint(total):
|
||||
next.add(p)
|
||||
@@ -2410,7 +2347,7 @@ class RunQueueExecute:
|
||||
current = next.copy()
|
||||
next = set()
|
||||
for tid in current:
|
||||
if self.rqdata.runtaskentries[p].depends and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
|
||||
if len(self.rqdata.runtaskentries[p].depends) and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
|
||||
continue
|
||||
orighash = self.rqdata.runtaskentries[tid].hash
|
||||
dc = bb.parse.siggen.get_data_caches(self.rqdata.dataCaches, mc_from_tid(tid))
|
||||
@@ -2476,7 +2413,7 @@ class RunQueueExecute:
|
||||
self.tasks_scenequeue_done.remove(tid)
|
||||
for dep in self.sqdata.sq_covered_tasks[tid]:
|
||||
if dep in self.runq_complete and dep not in self.runq_tasksrun:
|
||||
bb.error("Task %s marked as completed but now needing to rerun? Halting build." % dep)
|
||||
bb.error("Task %s marked as completed but now needing to rerun? Aborting build." % dep)
|
||||
self.failed_tids.append(tid)
|
||||
self.rq.state = runQueueCleanUp
|
||||
return
|
||||
@@ -2497,7 +2434,7 @@ class RunQueueExecute:
|
||||
if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
|
||||
if tid not in self.sq_buildable:
|
||||
self.sq_buildable.add(tid)
|
||||
if not self.sqdata.sq_revdeps[tid]:
|
||||
if len(self.sqdata.sq_revdeps[tid]) == 0:
|
||||
self.sq_buildable.add(tid)
|
||||
|
||||
if tid in self.sqdata.outrightfail:
|
||||
@@ -2522,14 +2459,11 @@ class RunQueueExecute:
|
||||
|
||||
if update_tasks:
|
||||
self.sqdone = False
|
||||
for mc in sorted(self.sqdata.multiconfigs):
|
||||
for tid in sorted([t[0] for t in update_tasks]):
|
||||
if mc_from_tid(tid) != mc:
|
||||
continue
|
||||
h = pending_hash_index(tid, self.rqdata)
|
||||
if h in self.sqdata.hashes and tid != self.sqdata.hashes[h]:
|
||||
self.sq_deferred[tid] = self.sqdata.hashes[h]
|
||||
bb.note("Deferring %s after %s" % (tid, self.sqdata.hashes[h]))
|
||||
for tid in [t[0] for t in update_tasks]:
|
||||
h = pending_hash_index(tid, self.rqdata)
|
||||
if h in self.sqdata.hashes and tid != self.sqdata.hashes[h]:
|
||||
self.sq_deferred[tid] = self.sqdata.hashes[h]
|
||||
bb.note("Deferring %s after %s" % (tid, self.sqdata.hashes[h]))
|
||||
update_scenequeue_data([t[0] for t in update_tasks], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
|
||||
|
||||
for (tid, harddepfail, origvalid) in update_tasks:
|
||||
@@ -2588,11 +2522,11 @@ class RunQueueExecute:
|
||||
self.scenequeue_updatecounters(task)
|
||||
|
||||
def sq_check_taskfail(self, task):
|
||||
if self.rqdata.setscene_ignore_tasks is not None:
|
||||
if self.rqdata.setscenewhitelist is not None:
|
||||
realtask = task.split('_setscene')[0]
|
||||
(mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
|
||||
pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
|
||||
if not check_setscene_enforce_ignore_tasks(pn, taskname, self.rqdata.setscene_ignore_tasks):
|
||||
if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
|
||||
logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
|
||||
self.rq.state = runQueueCleanUp
|
||||
|
||||
@@ -2655,8 +2589,8 @@ class RunQueueExecute:
|
||||
#bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
|
||||
return taskdepdata
|
||||
|
||||
def check_setscene_ignore_tasks(self, tid):
|
||||
# Check task that is going to run against the ignore tasks list
|
||||
def check_setscenewhitelist(self, tid):
|
||||
# Check task that is going to run against the whitelist
|
||||
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
|
||||
# Ignore covered tasks
|
||||
if tid in self.tasks_covered:
|
||||
@@ -2670,15 +2604,14 @@ class RunQueueExecute:
|
||||
return False
|
||||
|
||||
pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
|
||||
if not check_setscene_enforce_ignore_tasks(pn, taskname, self.rqdata.setscene_ignore_tasks):
|
||||
if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
|
||||
if tid in self.rqdata.runq_setscene_tids:
|
||||
msg = ['Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)]
|
||||
msg = 'Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)
|
||||
else:
|
||||
msg = ['Task %s.%s attempted to execute unexpectedly' % (pn, taskname)]
|
||||
msg = 'Task %s.%s attempted to execute unexpectedly' % (pn, taskname)
|
||||
for t in self.scenequeue_notcovered:
|
||||
msg.append("\nTask %s, unihash %s, taskhash %s" % (t, self.rqdata.runtaskentries[t].unihash, self.rqdata.runtaskentries[t].hash))
|
||||
msg.append('\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered))
|
||||
logger.error("".join(msg))
|
||||
msg = msg + "\nTask %s, unihash %s, taskhash %s" % (t, self.rqdata.runtaskentries[t].unihash, self.rqdata.runtaskentries[t].hash)
|
||||
logger.error(msg + '\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered))
|
||||
return True
|
||||
return False
|
||||
|
||||
@@ -2717,7 +2650,7 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
|
||||
for tid in rqdata.runtaskentries:
|
||||
sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps)
|
||||
sq_revdeps_squash[tid] = set()
|
||||
if not sq_revdeps[tid] and tid not in rqdata.runq_setscene_tids:
|
||||
if (len(sq_revdeps[tid]) == 0) and tid not in rqdata.runq_setscene_tids:
|
||||
#bb.warn("Added endpoint %s" % (tid))
|
||||
endpoints[tid] = set()
|
||||
|
||||
@@ -2751,15 +2684,16 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
|
||||
sq_revdeps_squash[point] = set()
|
||||
if point in rqdata.runq_setscene_tids:
|
||||
sq_revdeps_squash[point] = tasks
|
||||
tasks = set()
|
||||
continue
|
||||
for dep in rqdata.runtaskentries[point].depends:
|
||||
if point in sq_revdeps[dep]:
|
||||
sq_revdeps[dep].remove(point)
|
||||
if tasks:
|
||||
sq_revdeps_squash[dep] |= tasks
|
||||
if not sq_revdeps[dep] and dep not in rqdata.runq_setscene_tids:
|
||||
if len(sq_revdeps[dep]) == 0 and dep not in rqdata.runq_setscene_tids:
|
||||
newendpoints[dep] = task
|
||||
if newendpoints:
|
||||
if len(newendpoints) != 0:
|
||||
process_endpoints(newendpoints)
|
||||
|
||||
process_endpoints(endpoints)
|
||||
@@ -2771,7 +2705,7 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
|
||||
# Take the build endpoints (no revdeps) and find the sstate tasks they depend upon
|
||||
new = True
|
||||
for tid in rqdata.runtaskentries:
|
||||
if not rqdata.runtaskentries[tid].revdeps:
|
||||
if len(rqdata.runtaskentries[tid].revdeps) == 0:
|
||||
sqdata.unskippable.add(tid)
|
||||
sqdata.unskippable |= sqrq.cantskip
|
||||
while new:
|
||||
@@ -2780,7 +2714,7 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
|
||||
for tid in sorted(orig, reverse=True):
|
||||
if tid in rqdata.runq_setscene_tids:
|
||||
continue
|
||||
if not rqdata.runtaskentries[tid].depends:
|
||||
if len(rqdata.runtaskentries[tid].depends) == 0:
|
||||
# These are tasks which have no setscene tasks in their chain, need to mark as directly buildable
|
||||
sqrq.setbuildable(tid)
|
||||
sqdata.unskippable |= rqdata.runtaskentries[tid].depends
|
||||
@@ -2795,8 +2729,8 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
|
||||
for taskcounter, tid in enumerate(rqdata.runtaskentries):
|
||||
if tid in rqdata.runq_setscene_tids:
|
||||
pass
|
||||
elif sq_revdeps_squash[tid]:
|
||||
bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, halting. Please report this problem.")
|
||||
elif len(sq_revdeps_squash[tid]) != 0:
|
||||
bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
|
||||
else:
|
||||
del sq_revdeps_squash[tid]
|
||||
rqdata.init_progress_reporter.update(taskcounter)
|
||||
@@ -2860,7 +2794,7 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
|
||||
sqdata.multiconfigs = set()
|
||||
for tid in sqdata.sq_revdeps:
|
||||
sqdata.multiconfigs.add(mc_from_tid(tid))
|
||||
if not sqdata.sq_revdeps[tid]:
|
||||
if len(sqdata.sq_revdeps[tid]) == 0:
|
||||
sqrq.sq_buildable.add(tid)
|
||||
|
||||
rqdata.init_progress_reporter.finish()
|
||||
@@ -3114,7 +3048,7 @@ class runQueuePipe():
|
||||
raise
|
||||
end = len(self.queue)
|
||||
found = True
|
||||
while found and self.queue:
|
||||
while found and len(self.queue):
|
||||
found = False
|
||||
index = self.queue.find(b"</event>")
|
||||
while index != -1 and self.queue.startswith(b"<event>"):
|
||||
@@ -3152,16 +3086,16 @@ class runQueuePipe():
|
||||
def close(self):
|
||||
while self.read():
|
||||
continue
|
||||
if self.queue:
|
||||
if len(self.queue) > 0:
|
||||
print("Warning, worker left partial message: %s" % self.queue)
|
||||
self.input.close()
|
||||
|
||||
def get_setscene_enforce_ignore_tasks(d, targets):
|
||||
def get_setscene_enforce_whitelist(d, targets):
|
||||
if d.getVar('BB_SETSCENE_ENFORCE') != '1':
|
||||
return None
|
||||
ignore_tasks = (d.getVar("BB_SETSCENE_ENFORCE_IGNORE_TASKS") or "").split()
|
||||
whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
|
||||
outlist = []
|
||||
for item in ignore_tasks[:]:
|
||||
for item in whitelist[:]:
|
||||
if item.startswith('%:'):
|
||||
for (mc, target, task, fn) in targets:
|
||||
outlist.append(target + ':' + item.split(':')[1])
|
||||
@@ -3169,12 +3103,12 @@ def get_setscene_enforce_ignore_tasks(d, targets):
|
||||
outlist.append(item)
|
||||
return outlist
|
||||
|
||||
def check_setscene_enforce_ignore_tasks(pn, taskname, ignore_tasks):
|
||||
def check_setscene_enforce_whitelist(pn, taskname, whitelist):
|
||||
import fnmatch
|
||||
if ignore_tasks is not None:
|
||||
if whitelist is not None:
|
||||
item = '%s:%s' % (pn, taskname)
|
||||
for ignore_tasks in ignore_tasks:
|
||||
if fnmatch.fnmatch(item, ignore_tasks):
|
||||
for whitelist_item in whitelist:
|
||||
if fnmatch.fnmatch(item, whitelist_item):
|
||||
return True
|
||||
return False
|
||||
return True
|
||||
|
||||
@@ -27,7 +27,6 @@ import re
|
||||
import datetime
|
||||
import pickle
|
||||
import traceback
|
||||
import gc
|
||||
import bb.server.xmlrpcserver
|
||||
from bb import daemonize
|
||||
from multiprocessing import queues
|
||||
@@ -244,6 +243,9 @@ class ProcessServer():
|
||||
|
||||
ready = self.idle_commands(.1, fds)
|
||||
|
||||
if len(threading.enumerate()) != 1:
|
||||
serverlog("More than one thread left?: " + str(threading.enumerate()))
|
||||
|
||||
serverlog("Exiting")
|
||||
# Remove the socket file so we don't get any more connections to avoid races
|
||||
try:
|
||||
@@ -261,9 +263,6 @@ class ProcessServer():
|
||||
|
||||
self.cooker.post_serve()
|
||||
|
||||
if len(threading.enumerate()) != 1:
|
||||
serverlog("More than one thread left?: " + str(threading.enumerate()))
|
||||
|
||||
# Flush logs before we release the lock
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
@@ -327,10 +326,10 @@ class ProcessServer():
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
|
||||
msg = ["Delaying shutdown due to active processes which appear to be holding bitbake.lock"]
|
||||
msg = "Delaying shutdown due to active processes which appear to be holding bitbake.lock"
|
||||
if procs:
|
||||
msg.append(":\n%s" % str(procs.decode("utf-8")))
|
||||
serverlog("".join(msg))
|
||||
msg += ":\n%s" % str(procs.decode("utf-8"))
|
||||
serverlog(msg)
|
||||
|
||||
def idle_commands(self, delay, fds=None):
|
||||
nextsleep = delay
|
||||
@@ -437,7 +436,6 @@ class BitBakeProcessServerConnection(object):
|
||||
self.socket_connection = sock
|
||||
|
||||
def terminate(self):
|
||||
self.events.close()
|
||||
self.socket_connection.close()
|
||||
self.connection.connection.close()
|
||||
self.connection.recv.close()
|
||||
@@ -558,7 +556,7 @@ def execServer(lockfd, readypipeinfd, lockname, sockname, server_timeout, xmlrpc
|
||||
|
||||
server.run()
|
||||
finally:
|
||||
# Flush any messages/errors to the logfile before exit
|
||||
# Flush any ,essages/errors to the logfile before exit
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
|
||||
@@ -663,18 +661,23 @@ class BBUIEventQueue:
|
||||
self.reader = ConnectionReader(readfd)
|
||||
|
||||
self.t = threading.Thread()
|
||||
self.t.daemon = True
|
||||
self.t.run = self.startCallbackHandler
|
||||
self.t.start()
|
||||
|
||||
def getEvent(self):
|
||||
with self.eventQueueLock:
|
||||
if len(self.eventQueue) == 0:
|
||||
return None
|
||||
self.eventQueueLock.acquire()
|
||||
|
||||
item = self.eventQueue.pop(0)
|
||||
if len(self.eventQueue) == 0:
|
||||
self.eventQueueNotify.clear()
|
||||
if len(self.eventQueue) == 0:
|
||||
self.eventQueueLock.release()
|
||||
return None
|
||||
|
||||
item = self.eventQueue.pop(0)
|
||||
|
||||
if len(self.eventQueue) == 0:
|
||||
self.eventQueueNotify.clear()
|
||||
|
||||
self.eventQueueLock.release()
|
||||
return item
|
||||
|
||||
def waitEvent(self, delay):
|
||||
@@ -682,9 +685,10 @@ class BBUIEventQueue:
|
||||
return self.getEvent()
|
||||
|
||||
def queue_event(self, event):
|
||||
with self.eventQueueLock:
|
||||
self.eventQueue.append(event)
|
||||
self.eventQueueNotify.set()
|
||||
self.eventQueueLock.acquire()
|
||||
self.eventQueue.append(event)
|
||||
self.eventQueueNotify.set()
|
||||
self.eventQueueLock.release()
|
||||
|
||||
def send_event(self, event):
|
||||
self.queue_event(pickle.loads(event))
|
||||
@@ -693,17 +697,13 @@ class BBUIEventQueue:
|
||||
bb.utils.set_process_name("UIEventQueue")
|
||||
while True:
|
||||
try:
|
||||
ready = self.reader.wait(0.25)
|
||||
if ready:
|
||||
event = self.reader.get()
|
||||
self.queue_event(event)
|
||||
except (EOFError, OSError, TypeError):
|
||||
self.reader.wait()
|
||||
event = self.reader.get()
|
||||
self.queue_event(event)
|
||||
except EOFError:
|
||||
# Easiest way to exit is to close the file descriptor to cause an exit
|
||||
break
|
||||
|
||||
def close(self):
|
||||
self.reader.close()
|
||||
self.t.join()
|
||||
|
||||
class ConnectionReader(object):
|
||||
|
||||
@@ -737,32 +737,10 @@ class ConnectionWriter(object):
|
||||
# Why bb.event needs this I have no idea
|
||||
self.event = self
|
||||
|
||||
def _send(self, obj):
|
||||
gc.disable()
|
||||
with self.wlock:
|
||||
self.writer.send_bytes(obj)
|
||||
gc.enable()
|
||||
|
||||
def send(self, obj):
|
||||
obj = multiprocessing.reduction.ForkingPickler.dumps(obj)
|
||||
# See notes/code in CookerParser
|
||||
# We must not terminate holding this lock else processes will hang.
|
||||
# For SIGTERM, raising afterwards avoids this.
|
||||
# For SIGINT, we don't want to have written partial data to the pipe.
|
||||
# pthread_sigmask block/unblock would be nice but doesn't work, https://bugs.python.org/issue47139
|
||||
process = multiprocessing.current_process()
|
||||
if process and hasattr(process, "queue_signals"):
|
||||
with process.signal_threadlock:
|
||||
process.queue_signals = True
|
||||
self._send(obj)
|
||||
process.queue_signals = False
|
||||
try:
|
||||
for sig in process.signal_received.pop():
|
||||
process.handle_sig(sig, None)
|
||||
except IndexError:
|
||||
pass
|
||||
else:
|
||||
self._send(obj)
|
||||
with self.wlock:
|
||||
self.writer.send_bytes(obj)
|
||||
|
||||
def fileno(self):
|
||||
return self.writer.fileno()
|
||||
|
||||
@@ -11,7 +11,6 @@ import hashlib
|
||||
import time
|
||||
import inspect
|
||||
from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
|
||||
import bb.server.xmlrpcclient
|
||||
|
||||
import bb
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -13,8 +11,6 @@ import pickle
|
||||
import bb.data
|
||||
import difflib
|
||||
import simplediff
|
||||
import json
|
||||
import bb.compress.zstd
|
||||
from bb.checksum import FileChecksumCache
|
||||
from bb import runqueue
|
||||
import hashserv
|
||||
@@ -23,17 +19,6 @@ import hashserv.client
|
||||
logger = logging.getLogger('BitBake.SigGen')
|
||||
hashequiv_logger = logging.getLogger('BitBake.SigGen.HashEquiv')
|
||||
|
||||
class SetEncoder(json.JSONEncoder):
|
||||
def default(self, obj):
|
||||
if isinstance(obj, set):
|
||||
return dict(_set_object=list(sorted(obj)))
|
||||
return json.JSONEncoder.default(self, obj)
|
||||
|
||||
def SetDecoder(dct):
|
||||
if '_set_object' in dct:
|
||||
return set(dct['_set_object'])
|
||||
return dct
|
||||
|
||||
def init(d):
|
||||
siggens = [obj for obj in globals().values()
|
||||
if type(obj) is type and issubclass(obj, SignatureGenerator)]
|
||||
@@ -42,6 +27,7 @@ def init(d):
|
||||
for sg in siggens:
|
||||
if desired == sg.name:
|
||||
return sg(d)
|
||||
break
|
||||
else:
|
||||
logger.error("Invalid signature generator '%s', using default 'noop'\n"
|
||||
"Available generators: %s", desired,
|
||||
@@ -122,9 +108,6 @@ class SignatureGenerator(object):
|
||||
def save_unitaskhashes(self):
|
||||
return
|
||||
|
||||
def copy_unitaskhashes(self, targetdir):
|
||||
return
|
||||
|
||||
def set_setscene_tasks(self, setscene_tasks):
|
||||
return
|
||||
|
||||
@@ -160,9 +143,6 @@ class SignatureGenerator(object):
|
||||
|
||||
return DataCacheProxy()
|
||||
|
||||
def exit(self):
|
||||
return
|
||||
|
||||
class SignatureGeneratorBasic(SignatureGenerator):
|
||||
"""
|
||||
"""
|
||||
@@ -179,8 +159,8 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
self.gendeps = {}
|
||||
self.lookupcache = {}
|
||||
self.setscenetasks = set()
|
||||
self.basehash_ignore_vars = set((data.getVar("BB_BASEHASH_IGNORE_VARS") or "").split())
|
||||
self.taskhash_ignore_tasks = None
|
||||
self.basewhitelist = set((data.getVar("BB_HASHBASE_WHITELIST") or "").split())
|
||||
self.taskwhitelist = None
|
||||
self.init_rundepcheck(data)
|
||||
checksum_cache_file = data.getVar("BB_HASH_CHECKSUM_CACHE_FILE")
|
||||
if checksum_cache_file:
|
||||
@@ -195,18 +175,18 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
self.tidtopn = {}
|
||||
|
||||
def init_rundepcheck(self, data):
|
||||
self.taskhash_ignore_tasks = data.getVar("BB_TASKHASH_IGNORE_TASKS") or None
|
||||
if self.taskhash_ignore_tasks:
|
||||
self.twl = re.compile(self.taskhash_ignore_tasks)
|
||||
self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST") or None
|
||||
if self.taskwhitelist:
|
||||
self.twl = re.compile(self.taskwhitelist)
|
||||
else:
|
||||
self.twl = None
|
||||
|
||||
def _build_data(self, fn, d):
|
||||
|
||||
ignore_mismatch = ((d.getVar("BB_HASH_IGNORE_MISMATCH") or '') == '1')
|
||||
tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d, self.basehash_ignore_vars)
|
||||
tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d, self.basewhitelist)
|
||||
|
||||
taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, self.basehash_ignore_vars, fn)
|
||||
taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, self.basewhitelist, fn)
|
||||
|
||||
for task in tasklist:
|
||||
tid = fn + ":" + task
|
||||
@@ -260,8 +240,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
|
||||
def rundep_check(self, fn, recipename, task, dep, depname, dataCaches):
|
||||
# Return True if we should keep the dependency, False to drop it
|
||||
# We only manipulate the dependencies for packages not in the ignore
|
||||
# list
|
||||
# We only manipulate the dependencies for packages not in the whitelist
|
||||
if self.twl and not self.twl.search(recipename):
|
||||
# then process the actual dependencies
|
||||
if self.twl.search(depname):
|
||||
@@ -336,8 +315,6 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
|
||||
for (f, cs) in self.file_checksum_values[tid]:
|
||||
if cs:
|
||||
if "/./" in f:
|
||||
data = data + "./" + f.split("/./")[1]
|
||||
data = data + cs
|
||||
|
||||
if tid in self.taints:
|
||||
@@ -363,9 +340,6 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
def save_unitaskhashes(self):
|
||||
self.unihash_cache.save(self.unitaskhashes)
|
||||
|
||||
def copy_unitaskhashes(self, targetdir):
|
||||
self.unihash_cache.copyfile(targetdir)
|
||||
|
||||
def dump_sigtask(self, fn, task, stampbase, runtime):
|
||||
|
||||
tid = fn + ":" + task
|
||||
@@ -383,27 +357,22 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
|
||||
data = {}
|
||||
data['task'] = task
|
||||
data['basehash_ignore_vars'] = self.basehash_ignore_vars
|
||||
data['taskhash_ignore_tasks'] = self.taskhash_ignore_tasks
|
||||
data['basewhitelist'] = self.basewhitelist
|
||||
data['taskwhitelist'] = self.taskwhitelist
|
||||
data['taskdeps'] = self.taskdeps[fn][task]
|
||||
data['basehash'] = self.basehash[tid]
|
||||
data['gendeps'] = {}
|
||||
data['varvals'] = {}
|
||||
data['varvals'][task] = self.lookupcache[fn][task]
|
||||
for dep in self.taskdeps[fn][task]:
|
||||
if dep in self.basehash_ignore_vars:
|
||||
if dep in self.basewhitelist:
|
||||
continue
|
||||
data['gendeps'][dep] = self.gendeps[fn][dep]
|
||||
data['varvals'][dep] = self.lookupcache[fn][dep]
|
||||
|
||||
if runtime and tid in self.taskhash:
|
||||
data['runtaskdeps'] = self.runtaskdeps[tid]
|
||||
data['file_checksum_values'] = []
|
||||
for f,cs in self.file_checksum_values[tid]:
|
||||
if "/./" in f:
|
||||
data['file_checksum_values'].append(("./" + f.split("/./")[1], cs))
|
||||
else:
|
||||
data['file_checksum_values'].append((os.path.basename(f), cs))
|
||||
data['file_checksum_values'] = [(os.path.basename(f), cs) for f,cs in self.file_checksum_values[tid]]
|
||||
data['runtaskhashes'] = {}
|
||||
for dep in data['runtaskdeps']:
|
||||
data['runtaskhashes'][dep] = self.get_unihash(dep)
|
||||
@@ -427,11 +396,11 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[tid], tid))
|
||||
sigfile = sigfile.replace(self.taskhash[tid], computed_taskhash)
|
||||
|
||||
fd, tmpfile = bb.utils.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
|
||||
fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
|
||||
try:
|
||||
with bb.compress.zstd.open(fd, "wt", encoding="utf-8", num_threads=1) as f:
|
||||
json.dump(data, f, sort_keys=True, separators=(",", ":"), cls=SetEncoder)
|
||||
f.flush()
|
||||
with os.fdopen(fd, "wb") as stream:
|
||||
p = pickle.dump(data, stream, -1)
|
||||
stream.flush()
|
||||
os.chmod(tmpfile, 0o664)
|
||||
bb.utils.rename(tmpfile, sigfile)
|
||||
except (OSError, IOError) as err:
|
||||
@@ -499,18 +468,6 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
self._client = hashserv.create_client(self.server)
|
||||
return self._client
|
||||
|
||||
def reset(self, data):
|
||||
if getattr(self, '_client', None) is not None:
|
||||
self._client.close()
|
||||
self._client = None
|
||||
return super().reset(data)
|
||||
|
||||
def exit(self):
|
||||
if getattr(self, '_client', None) is not None:
|
||||
self._client.close()
|
||||
self._client = None
|
||||
return super().exit()
|
||||
|
||||
def get_stampfile_hash(self, tid):
|
||||
if tid in self.taskhash:
|
||||
# If a unique hash is reported, use it as the stampfile hash. This
|
||||
@@ -606,7 +563,7 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
if self.setscenetasks and tid not in self.setscenetasks:
|
||||
return
|
||||
|
||||
# This can happen if locked sigs are in action. Detect and just exit
|
||||
# This can happen if locked sigs are in action. Detect and just abort
|
||||
if taskhash != self.taskhash[tid]:
|
||||
return
|
||||
|
||||
@@ -817,16 +774,6 @@ def clean_basepaths_list(a):
|
||||
b.append(clean_basepath(x))
|
||||
return b
|
||||
|
||||
# Handled renamed fields
|
||||
def handle_renames(data):
|
||||
if 'basewhitelist' in data:
|
||||
data['basehash_ignore_vars'] = data['basewhitelist']
|
||||
del data['basewhitelist']
|
||||
if 'taskwhitelist' in data:
|
||||
data['taskhash_ignore_tasks'] = data['taskwhitelist']
|
||||
del data['taskwhitelist']
|
||||
|
||||
|
||||
def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
output = []
|
||||
|
||||
@@ -847,21 +794,20 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
formatparams.update(values)
|
||||
return formatstr.format(**formatparams)
|
||||
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
with bb.compress.zstd.open(b, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
b_data = json.load(f, object_hook=SetDecoder)
|
||||
with open(a, 'rb') as f:
|
||||
p1 = pickle.Unpickler(f)
|
||||
a_data = p1.load()
|
||||
with open(b, 'rb') as f:
|
||||
p2 = pickle.Unpickler(f)
|
||||
b_data = p2.load()
|
||||
|
||||
for data in [a_data, b_data]:
|
||||
handle_renames(data)
|
||||
|
||||
def dict_diff(a, b, ignored_vars=set()):
|
||||
def dict_diff(a, b, whitelist=set()):
|
||||
sa = set(a.keys())
|
||||
sb = set(b.keys())
|
||||
common = sa & sb
|
||||
changed = set()
|
||||
for i in common:
|
||||
if a[i] != b[i] and i not in ignored_vars:
|
||||
if a[i] != b[i] and i not in whitelist:
|
||||
changed.add(i)
|
||||
added = sb - sa
|
||||
removed = sa - sb
|
||||
@@ -869,11 +815,11 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
|
||||
def file_checksums_diff(a, b):
|
||||
from collections import Counter
|
||||
|
||||
# Convert lists back to tuples
|
||||
a = [(f[0], f[1]) for f in a]
|
||||
b = [(f[0], f[1]) for f in b]
|
||||
|
||||
# Handle old siginfo format
|
||||
if isinstance(a, dict):
|
||||
a = [(os.path.basename(f), cs) for f, cs in a.items()]
|
||||
if isinstance(b, dict):
|
||||
b = [(os.path.basename(f), cs) for f, cs in b.items()]
|
||||
# Compare lists, ensuring we can handle duplicate filenames if they exist
|
||||
removedcount = Counter(a)
|
||||
removedcount.subtract(b)
|
||||
@@ -900,15 +846,15 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
removed = [x[0] for x in removed]
|
||||
return changed, added, removed
|
||||
|
||||
if 'basehash_ignore_vars' in a_data and a_data['basehash_ignore_vars'] != b_data['basehash_ignore_vars']:
|
||||
output.append(color_format("{color_title}basehash_ignore_vars changed{color_default} from '%s' to '%s'") % (a_data['basehash_ignore_vars'], b_data['basehash_ignore_vars']))
|
||||
if a_data['basehash_ignore_vars'] and b_data['basehash_ignore_vars']:
|
||||
output.append("changed items: %s" % a_data['basehash_ignore_vars'].symmetric_difference(b_data['basehash_ignore_vars']))
|
||||
if 'basewhitelist' in a_data and a_data['basewhitelist'] != b_data['basewhitelist']:
|
||||
output.append(color_format("{color_title}basewhitelist changed{color_default} from '%s' to '%s'") % (a_data['basewhitelist'], b_data['basewhitelist']))
|
||||
if a_data['basewhitelist'] and b_data['basewhitelist']:
|
||||
output.append("changed items: %s" % a_data['basewhitelist'].symmetric_difference(b_data['basewhitelist']))
|
||||
|
||||
if 'taskhash_ignore_tasks' in a_data and a_data['taskhash_ignore_tasks'] != b_data['taskhash_ignore_tasks']:
|
||||
output.append(color_format("{color_title}taskhash_ignore_tasks changed{color_default} from '%s' to '%s'") % (a_data['taskhash_ignore_tasks'], b_data['taskhash_ignore_tasks']))
|
||||
if a_data['taskhash_ignore_tasks'] and b_data['taskhash_ignore_tasks']:
|
||||
output.append("changed items: %s" % a_data['taskhash_ignore_tasks'].symmetric_difference(b_data['taskhash_ignore_tasks']))
|
||||
if 'taskwhitelist' in a_data and a_data['taskwhitelist'] != b_data['taskwhitelist']:
|
||||
output.append(color_format("{color_title}taskwhitelist changed{color_default} from '%s' to '%s'") % (a_data['taskwhitelist'], b_data['taskwhitelist']))
|
||||
if a_data['taskwhitelist'] and b_data['taskwhitelist']:
|
||||
output.append("changed items: %s" % a_data['taskwhitelist'].symmetric_difference(b_data['taskwhitelist']))
|
||||
|
||||
if a_data['taskdeps'] != b_data['taskdeps']:
|
||||
output.append(color_format("{color_title}Task dependencies changed{color_default} from:\n%s\nto:\n%s") % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps'])))
|
||||
@@ -916,7 +862,7 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
if a_data['basehash'] != b_data['basehash'] and not collapsed:
|
||||
output.append(color_format("{color_title}basehash changed{color_default} from %s to %s") % (a_data['basehash'], b_data['basehash']))
|
||||
|
||||
changed, added, removed = dict_diff(a_data['gendeps'], b_data['gendeps'], a_data['basehash_ignore_vars'] & b_data['basehash_ignore_vars'])
|
||||
changed, added, removed = dict_diff(a_data['gendeps'], b_data['gendeps'], a_data['basewhitelist'] & b_data['basewhitelist'])
|
||||
if changed:
|
||||
for dep in sorted(changed):
|
||||
output.append(color_format("{color_title}List of dependencies for variable %s changed from '{color_default}%s{color_title}' to '{color_default}%s{color_title}'") % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep]))
|
||||
@@ -956,9 +902,9 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
output.append(color_format("{color_title}Variable {var} value changed from '{color_default}{oldval}{color_title}' to '{color_default}{newval}{color_title}'{color_default}", var=dep, oldval=oldval, newval=newval))
|
||||
|
||||
if not 'file_checksum_values' in a_data:
|
||||
a_data['file_checksum_values'] = []
|
||||
a_data['file_checksum_values'] = {}
|
||||
if not 'file_checksum_values' in b_data:
|
||||
b_data['file_checksum_values'] = []
|
||||
b_data['file_checksum_values'] = {}
|
||||
|
||||
changed, added, removed = file_checksums_diff(a_data['file_checksum_values'], b_data['file_checksum_values'])
|
||||
if changed:
|
||||
@@ -998,8 +944,8 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
|
||||
|
||||
if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data:
|
||||
a = clean_basepaths(a_data['runtaskhashes'])
|
||||
b = clean_basepaths(b_data['runtaskhashes'])
|
||||
a = a_data['runtaskhashes']
|
||||
b = b_data['runtaskhashes']
|
||||
changed, added, removed = dict_diff(a, b)
|
||||
if added:
|
||||
for dep in sorted(added):
|
||||
@@ -1010,7 +956,7 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
#output.append("Dependency on task %s was replaced by %s with same hash" % (dep, bdep))
|
||||
bdep_found = True
|
||||
if not bdep_found:
|
||||
output.append(color_format("{color_title}Dependency on task %s was added{color_default} with hash %s") % (dep, b[dep]))
|
||||
output.append(color_format("{color_title}Dependency on task %s was added{color_default} with hash %s") % (clean_basepath(dep), b[dep]))
|
||||
if removed:
|
||||
for dep in sorted(removed):
|
||||
adep_found = False
|
||||
@@ -1020,11 +966,11 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
#output.append("Dependency on task %s was replaced by %s with same hash" % (adep, dep))
|
||||
adep_found = True
|
||||
if not adep_found:
|
||||
output.append(color_format("{color_title}Dependency on task %s was removed{color_default} with hash %s") % (dep, a[dep]))
|
||||
output.append(color_format("{color_title}Dependency on task %s was removed{color_default} with hash %s") % (clean_basepath(dep), a[dep]))
|
||||
if changed:
|
||||
for dep in sorted(changed):
|
||||
if not collapsed:
|
||||
output.append(color_format("{color_title}Hash for task dependency %s changed{color_default} from %s to %s") % (dep, a[dep], b[dep]))
|
||||
output.append(color_format("{color_title}Hash for dependent task %s changed{color_default} from %s to %s") % (clean_basepath(dep), a[dep], b[dep]))
|
||||
if callable(recursecb):
|
||||
recout = recursecb(dep, a[dep], b[dep])
|
||||
if recout:
|
||||
@@ -1034,7 +980,6 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
# If a dependent hash changed, might as well print the line above and then defer to the changes in
|
||||
# that hash since in all likelyhood, they're the same changes this task also saw.
|
||||
output = [output[-1]] + recout
|
||||
break
|
||||
|
||||
a_taint = a_data.get('taint', None)
|
||||
b_taint = b_data.get('taint', None)
|
||||
@@ -1072,8 +1017,6 @@ def calc_taskhash(sigdata):
|
||||
|
||||
for c in sigdata['file_checksum_values']:
|
||||
if c[1]:
|
||||
if "./" in c[0]:
|
||||
data = data + c[0]
|
||||
data = data + c[1]
|
||||
|
||||
if 'taint' in sigdata:
|
||||
@@ -1088,33 +1031,32 @@ def calc_taskhash(sigdata):
|
||||
def dump_sigfile(a):
|
||||
output = []
|
||||
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
with open(a, 'rb') as f:
|
||||
p1 = pickle.Unpickler(f)
|
||||
a_data = p1.load()
|
||||
|
||||
handle_renames(a_data)
|
||||
output.append("basewhitelist: %s" % (a_data['basewhitelist']))
|
||||
|
||||
output.append("basehash_ignore_vars: %s" % (sorted(a_data['basehash_ignore_vars'])))
|
||||
|
||||
output.append("taskhash_ignore_tasks: %s" % (sorted(a_data['taskhash_ignore_tasks'] or [])))
|
||||
output.append("taskwhitelist: %s" % (a_data['taskwhitelist']))
|
||||
|
||||
output.append("Task dependencies: %s" % (sorted(a_data['taskdeps'])))
|
||||
|
||||
output.append("basehash: %s" % (a_data['basehash']))
|
||||
|
||||
for dep in sorted(a_data['gendeps']):
|
||||
output.append("List of dependencies for variable %s is %s" % (dep, sorted(a_data['gendeps'][dep])))
|
||||
for dep in a_data['gendeps']:
|
||||
output.append("List of dependencies for variable %s is %s" % (dep, a_data['gendeps'][dep]))
|
||||
|
||||
for dep in sorted(a_data['varvals']):
|
||||
for dep in a_data['varvals']:
|
||||
output.append("Variable %s value is %s" % (dep, a_data['varvals'][dep]))
|
||||
|
||||
if 'runtaskdeps' in a_data:
|
||||
output.append("Tasks this task depends on: %s" % (sorted(a_data['runtaskdeps'])))
|
||||
output.append("Tasks this task depends on: %s" % (a_data['runtaskdeps']))
|
||||
|
||||
if 'file_checksum_values' in a_data:
|
||||
output.append("This task depends on the checksums of files: %s" % (sorted(a_data['file_checksum_values'])))
|
||||
output.append("This task depends on the checksums of files: %s" % (a_data['file_checksum_values']))
|
||||
|
||||
if 'runtaskhashes' in a_data:
|
||||
for dep in sorted(a_data['runtaskhashes']):
|
||||
for dep in a_data['runtaskhashes']:
|
||||
output.append("Hash for dependent task %s is %s" % (dep, a_data['runtaskhashes'][dep]))
|
||||
|
||||
if 'taint' in a_data:
|
||||
|
||||
@@ -39,7 +39,7 @@ class TaskData:
|
||||
"""
|
||||
BitBake Task Data implementation
|
||||
"""
|
||||
def __init__(self, halt = True, skiplist = None, allowincomplete = False):
|
||||
def __init__(self, abort = True, skiplist = None, allowincomplete = False):
|
||||
self.build_targets = {}
|
||||
self.run_targets = {}
|
||||
|
||||
@@ -57,7 +57,7 @@ class TaskData:
|
||||
self.failed_rdeps = []
|
||||
self.failed_fns = []
|
||||
|
||||
self.halt = halt
|
||||
self.abort = abort
|
||||
self.allowincomplete = allowincomplete
|
||||
|
||||
self.skiplist = skiplist
|
||||
@@ -328,7 +328,7 @@ class TaskData:
|
||||
try:
|
||||
self.add_provider_internal(cfgData, dataCache, item)
|
||||
except bb.providers.NoProvider:
|
||||
if self.halt:
|
||||
if self.abort:
|
||||
raise
|
||||
self.remove_buildtarget(item)
|
||||
|
||||
@@ -451,12 +451,12 @@ class TaskData:
|
||||
for target in self.build_targets:
|
||||
if fn in self.build_targets[target]:
|
||||
self.build_targets[target].remove(fn)
|
||||
if not self.build_targets[target]:
|
||||
if len(self.build_targets[target]) == 0:
|
||||
self.remove_buildtarget(target, missing_list)
|
||||
for target in self.run_targets:
|
||||
if fn in self.run_targets[target]:
|
||||
self.run_targets[target].remove(fn)
|
||||
if not self.run_targets[target]:
|
||||
if len(self.run_targets[target]) == 0:
|
||||
self.remove_runtarget(target, missing_list)
|
||||
|
||||
def remove_buildtarget(self, target, missing_list=None):
|
||||
@@ -479,7 +479,7 @@ class TaskData:
|
||||
fn = tid.rsplit(":",1)[0]
|
||||
self.fail_fn(fn, missing_list)
|
||||
|
||||
if self.halt and target in self.external_targets:
|
||||
if self.abort and target in self.external_targets:
|
||||
logger.error("Required build target '%s' has no buildable providers.\nMissing or unbuildable dependency chain was: %s", target, missing_list)
|
||||
raise bb.providers.NoProvider(target)
|
||||
|
||||
@@ -516,7 +516,7 @@ class TaskData:
|
||||
self.add_provider_internal(cfgData, dataCache, target)
|
||||
added = added + 1
|
||||
except bb.providers.NoProvider:
|
||||
if self.halt and target in self.external_targets and not self.allowincomplete:
|
||||
if self.abort and target in self.external_targets and not self.allowincomplete:
|
||||
raise
|
||||
if not self.allowincomplete:
|
||||
self.remove_buildtarget(target)
|
||||
|
||||
@@ -318,7 +318,7 @@ d.getVar(a(), False)
|
||||
"filename": "example.bb",
|
||||
})
|
||||
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), self.d)
|
||||
|
||||
self.assertEqual(deps, set(["somevar", "bar", "something", "inexpand", "test", "test2", "a"]))
|
||||
|
||||
@@ -365,7 +365,7 @@ esac
|
||||
self.d.setVarFlags("FOO", {"func": True})
|
||||
self.setEmptyVars(execs)
|
||||
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), self.d)
|
||||
|
||||
self.assertEqual(deps, set(["somevar", "inverted"] + execs))
|
||||
|
||||
@@ -375,7 +375,7 @@ esac
|
||||
self.d.setVar("FOO", "foo=oe_libinstall; eval $foo")
|
||||
self.d.setVarFlag("FOO", "vardeps", "oe_libinstall")
|
||||
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), self.d)
|
||||
|
||||
self.assertEqual(deps, set(["oe_libinstall"]))
|
||||
|
||||
@@ -384,7 +384,7 @@ esac
|
||||
self.d.setVar("FOO", "foo=oe_libinstall; eval $foo")
|
||||
self.d.setVarFlag("FOO", "vardeps", "${@'oe_libinstall'}")
|
||||
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), self.d)
|
||||
|
||||
self.assertEqual(deps, set(["oe_libinstall"]))
|
||||
|
||||
@@ -399,7 +399,7 @@ esac
|
||||
# Check dependencies
|
||||
self.d.setVar('ANOTHERVAR', expr)
|
||||
self.d.setVar('TESTVAR', 'anothervalue testval testval2')
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), self.d)
|
||||
self.assertEqual(sorted(values.splitlines()),
|
||||
sorted([expr,
|
||||
'TESTVAR{anothervalue} = Set',
|
||||
@@ -412,24 +412,6 @@ esac
|
||||
# Check final value
|
||||
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['anothervalue', 'yetanothervalue', 'lastone'])
|
||||
|
||||
def test_contains_vardeps_excluded(self):
|
||||
# Check the ignored_vars option to build_dependencies is handled by contains functionality
|
||||
varval = '${TESTVAR2} ${@bb.utils.filter("TESTVAR", "somevalue anothervalue", d)}'
|
||||
self.d.setVar('ANOTHERVAR', varval)
|
||||
self.d.setVar('TESTVAR', 'anothervalue testval testval2')
|
||||
self.d.setVar('TESTVAR2', 'testval3')
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(["TESTVAR"]), self.d)
|
||||
self.assertEqual(sorted(values.splitlines()), sorted([varval]))
|
||||
self.assertEqual(deps, set(["TESTVAR2"]))
|
||||
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval3', 'anothervalue'])
|
||||
|
||||
# Check the vardepsexclude flag is handled by contains functionality
|
||||
self.d.setVarFlag('ANOTHERVAR', 'vardepsexclude', 'TESTVAR')
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
self.assertEqual(sorted(values.splitlines()), sorted([varval]))
|
||||
self.assertEqual(deps, set(["TESTVAR2"]))
|
||||
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval3', 'anothervalue'])
|
||||
|
||||
#Currently no wildcard support
|
||||
#def test_vardeps_wildcards(self):
|
||||
# self.d.setVar("oe_libinstall", "echo test")
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
#
|
||||
# BitBake Tests for cooker.py
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,59 +0,0 @@
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
|
||||
<html>
|
||||
<head>
|
||||
<title>Index of /debian/pool/main/m/minicom</title>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Index of /debian/pool/main/m/minicom</h1>
|
||||
<table>
|
||||
<tr><th valign="top"><img src="/icons/blank.gif" alt="[ICO]"></th><th><a href="?C=N;O=D">Name</a></th><th><a href="?C=M;O=A">Last modified</a></th><th><a href="?C=S;O=A">Size</a></th></tr>
|
||||
<tr><th colspan="4"><hr></th></tr>
|
||||
<tr><td valign="top"><img src="/icons/back.gif" alt="[PARENTDIR]"></td><td><a href="/debian/pool/main/m/">Parent Directory</a></td><td> </td><td align="right"> - </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1+deb8u1.debian.tar.xz">minicom_2.7-1+deb8u1.debian.tar.xz</a></td><td align="right">2017-04-24 08:22 </td><td align="right"> 14K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1+deb8u1.dsc">minicom_2.7-1+deb8u1.dsc</a></td><td align="right">2017-04-24 08:22 </td><td align="right">1.9K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1+deb8u1_amd64.deb">minicom_2.7-1+deb8u1_amd64.deb</a></td><td align="right">2017-04-25 21:10 </td><td align="right">257K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1+deb8u1_armel.deb">minicom_2.7-1+deb8u1_armel.deb</a></td><td align="right">2017-04-26 00:58 </td><td align="right">246K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1+deb8u1_armhf.deb">minicom_2.7-1+deb8u1_armhf.deb</a></td><td align="right">2017-04-26 00:58 </td><td align="right">245K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1+deb8u1_i386.deb">minicom_2.7-1+deb8u1_i386.deb</a></td><td align="right">2017-04-25 21:41 </td><td align="right">258K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1.1.debian.tar.xz">minicom_2.7-1.1.debian.tar.xz</a></td><td align="right">2017-04-22 09:34 </td><td align="right"> 14K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1.1.dsc">minicom_2.7-1.1.dsc</a></td><td align="right">2017-04-22 09:34 </td><td align="right">1.9K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1.1_amd64.deb">minicom_2.7-1.1_amd64.deb</a></td><td align="right">2017-04-22 15:29 </td><td align="right">261K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1.1_arm64.deb">minicom_2.7-1.1_arm64.deb</a></td><td align="right">2017-04-22 15:29 </td><td align="right">250K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1.1_armel.deb">minicom_2.7-1.1_armel.deb</a></td><td align="right">2017-04-22 15:29 </td><td align="right">255K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1.1_armhf.deb">minicom_2.7-1.1_armhf.deb</a></td><td align="right">2017-04-22 15:29 </td><td align="right">254K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1.1_i386.deb">minicom_2.7-1.1_i386.deb</a></td><td align="right">2017-04-22 15:29 </td><td align="right">266K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1.1_mips.deb">minicom_2.7-1.1_mips.deb</a></td><td align="right">2017-04-22 15:29 </td><td align="right">258K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1.1_mips64el.deb">minicom_2.7-1.1_mips64el.deb</a></td><td align="right">2017-04-22 15:29 </td><td align="right">259K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1.1_mipsel.deb">minicom_2.7-1.1_mipsel.deb</a></td><td align="right">2017-04-22 15:29 </td><td align="right">259K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1.1_ppc64el.deb">minicom_2.7-1.1_ppc64el.deb</a></td><td align="right">2017-04-22 15:29 </td><td align="right">253K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7-1.1_s390x.deb">minicom_2.7-1.1_s390x.deb</a></td><td align="right">2017-04-22 15:29 </td><td align="right">261K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7.1-1+b1_amd64.deb">minicom_2.7.1-1+b1_amd64.deb</a></td><td align="right">2018-05-06 08:14 </td><td align="right">262K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7.1-1+b1_arm64.deb">minicom_2.7.1-1+b1_arm64.deb</a></td><td align="right">2018-05-06 07:58 </td><td align="right">250K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7.1-1+b1_armel.deb">minicom_2.7.1-1+b1_armel.deb</a></td><td align="right">2018-05-06 08:45 </td><td align="right">253K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7.1-1+b1_armhf.deb">minicom_2.7.1-1+b1_armhf.deb</a></td><td align="right">2018-05-06 10:42 </td><td align="right">253K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7.1-1+b1_i386.deb">minicom_2.7.1-1+b1_i386.deb</a></td><td align="right">2018-05-06 08:55 </td><td align="right">266K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7.1-1+b1_mips.deb">minicom_2.7.1-1+b1_mips.deb</a></td><td align="right">2018-05-06 08:14 </td><td align="right">258K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7.1-1+b1_mipsel.deb">minicom_2.7.1-1+b1_mipsel.deb</a></td><td align="right">2018-05-06 12:13 </td><td align="right">259K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7.1-1+b1_ppc64el.deb">minicom_2.7.1-1+b1_ppc64el.deb</a></td><td align="right">2018-05-06 09:10 </td><td align="right">260K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7.1-1+b1_s390x.deb">minicom_2.7.1-1+b1_s390x.deb</a></td><td align="right">2018-05-06 08:14 </td><td align="right">257K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7.1-1+b2_mips64el.deb">minicom_2.7.1-1+b2_mips64el.deb</a></td><td align="right">2018-05-06 09:41 </td><td align="right">260K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7.1-1.debian.tar.xz">minicom_2.7.1-1.debian.tar.xz</a></td><td align="right">2017-08-13 15:40 </td><td align="right"> 14K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.7.1-1.dsc">minicom_2.7.1-1.dsc</a></td><td align="right">2017-08-13 15:40 </td><td align="right">1.8K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/compressed.gif" alt="[ ]"></td><td><a href="minicom_2.7.1.orig.tar.gz">minicom_2.7.1.orig.tar.gz</a></td><td align="right">2017-08-13 15:40 </td><td align="right">855K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/compressed.gif" alt="[ ]"></td><td><a href="minicom_2.7.orig.tar.gz">minicom_2.7.orig.tar.gz</a></td><td align="right">2014-01-01 09:36 </td><td align="right">843K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.8-2.debian.tar.xz">minicom_2.8-2.debian.tar.xz</a></td><td align="right">2021-06-15 03:47 </td><td align="right"> 14K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.8-2.dsc">minicom_2.8-2.dsc</a></td><td align="right">2021-06-15 03:47 </td><td align="right">1.8K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.8-2_amd64.deb">minicom_2.8-2_amd64.deb</a></td><td align="right">2021-06-15 03:58 </td><td align="right">280K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.8-2_arm64.deb">minicom_2.8-2_arm64.deb</a></td><td align="right">2021-06-15 04:13 </td><td align="right">275K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.8-2_armel.deb">minicom_2.8-2_armel.deb</a></td><td align="right">2021-06-15 04:13 </td><td align="right">271K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.8-2_armhf.deb">minicom_2.8-2_armhf.deb</a></td><td align="right">2021-06-15 04:13 </td><td align="right">272K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.8-2_i386.deb">minicom_2.8-2_i386.deb</a></td><td align="right">2021-06-15 04:13 </td><td align="right">285K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.8-2_mips64el.deb">minicom_2.8-2_mips64el.deb</a></td><td align="right">2021-06-15 04:13 </td><td align="right">277K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.8-2_mipsel.deb">minicom_2.8-2_mipsel.deb</a></td><td align="right">2021-06-15 04:13 </td><td align="right">278K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.8-2_ppc64el.deb">minicom_2.8-2_ppc64el.deb</a></td><td align="right">2021-06-15 04:13 </td><td align="right">286K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.8-2_s390x.deb">minicom_2.8-2_s390x.deb</a></td><td align="right">2021-06-15 03:58 </td><td align="right">275K</td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="minicom_2.8.orig.tar.bz2">minicom_2.8.orig.tar.bz2</a></td><td align="right">2021-01-03 12:44 </td><td align="right">598K</td></tr>
|
||||
<tr><th colspan="4"><hr></th></tr>
|
||||
</table>
|
||||
<address>Apache Server at ftp.debian.org Port 80</address>
|
||||
</body></html>
|
||||
File diff suppressed because it is too large
Load Diff
@@ -119,7 +119,7 @@ EXTRA_OECONF:class-target = "b"
|
||||
EXTRA_OECONF:append = " c"
|
||||
"""
|
||||
|
||||
def test_parse_overrides2(self):
|
||||
def test_parse_overrides(self):
|
||||
f = self.parsehelper(self.overridetest2)
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
d.appendVar("EXTRA_OECONF", " d")
|
||||
@@ -164,7 +164,6 @@ python () {
|
||||
# become unset/disappear.
|
||||
#
|
||||
def test_parse_classextend_contamination(self):
|
||||
self.d.setVar("__bbclasstype", "recipe")
|
||||
cls = self.parsehelper(self.classextend_bbclass, suffix=".bbclass")
|
||||
#clsname = os.path.basename(cls.name).replace(".bbclass", "")
|
||||
self.classextend = self.classextend.replace("###CLASS###", cls.name)
|
||||
@@ -195,26 +194,3 @@ deltask ${EMPTYVAR}
|
||||
self.assertTrue('addtask ignored: " do_patch"' in stdout)
|
||||
#self.assertTrue('dependent task do_foo for do_patch does not exist' in stdout)
|
||||
|
||||
broken_multiline_comment = """
|
||||
# First line of comment \\
|
||||
# Second line of comment \\
|
||||
|
||||
"""
|
||||
def test_parse_broken_multiline_comment(self):
|
||||
f = self.parsehelper(self.broken_multiline_comment)
|
||||
with self.assertRaises(bb.BBHandledException):
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
|
||||
comment_in_var = """
|
||||
VAR = " \\
|
||||
SOMEVAL \\
|
||||
# some comment \\
|
||||
SOMEOTHERVAL \\
|
||||
"
|
||||
"""
|
||||
def test_parse_comment_in_var(self):
|
||||
f = self.parsehelper(self.comment_in_var)
|
||||
with self.assertRaises(bb.BBHandledException):
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
|
||||
@@ -12,6 +12,6 @@ STAMP = "${TMPDIR}/stamps/${PN}"
|
||||
T = "${TMPDIR}/workdir/${PN}/temp"
|
||||
BB_NUMBER_THREADS = "4"
|
||||
|
||||
BB_BASEHASH_IGNORE_VARS = "BB_CURRENT_MC BB_HASHSERVE TMPDIR TOPDIR SLOWTASKS SSTATEVALID FILE BB_CURRENTTASK"
|
||||
BB_HASHBASE_WHITELIST = "BB_CURRENT_MC BB_HASHSERVE TMPDIR TOPDIR SLOWTASKS SSTATEVALID FILE"
|
||||
|
||||
include conf/multiconfig/${BB_CURRENT_MC}.conf
|
||||
|
||||
@@ -29,14 +29,13 @@ class RunQueueTests(unittest.TestCase):
|
||||
def run_bitbakecmd(self, cmd, builddir, sstatevalid="", slowtasks="", extraenv=None, cleanup=False):
|
||||
env = os.environ.copy()
|
||||
env["BBPATH"] = os.path.realpath(os.path.join(os.path.dirname(__file__), "runqueue-tests"))
|
||||
env["BB_ENV_PASSTHROUGH_ADDITIONS"] = "SSTATEVALID SLOWTASKS TOPDIR"
|
||||
env["BB_ENV_EXTRAWHITE"] = "SSTATEVALID SLOWTASKS"
|
||||
env["SSTATEVALID"] = sstatevalid
|
||||
env["SLOWTASKS"] = slowtasks
|
||||
env["TOPDIR"] = builddir
|
||||
if extraenv:
|
||||
for k in extraenv:
|
||||
env[k] = extraenv[k]
|
||||
env["BB_ENV_PASSTHROUGH_ADDITIONS"] = env["BB_ENV_PASSTHROUGH_ADDITIONS"] + " " + k
|
||||
env["BB_ENV_EXTRAWHITE"] = env["BB_ENV_EXTRAWHITE"] + " " + k
|
||||
try:
|
||||
output = subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT,universal_newlines=True, cwd=builddir)
|
||||
print(output)
|
||||
@@ -59,8 +58,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
expected = ['a1:' + x for x in self.alltasks]
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
def test_single_setscenevalid(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
cmd = ["bitbake", "a1"]
|
||||
@@ -71,8 +68,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
'a1:populate_sysroot', 'a1:build']
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
def test_intermediate_setscenevalid(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
cmd = ["bitbake", "a1"]
|
||||
@@ -82,8 +77,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
'a1:populate_sysroot_setscene', 'a1:build']
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
def test_intermediate_notcovered(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
cmd = ["bitbake", "a1"]
|
||||
@@ -93,8 +86,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
'a1:package_qa_setscene', 'a1:build', 'a1:populate_sysroot_setscene']
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
def test_all_setscenevalid(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
cmd = ["bitbake", "a1"]
|
||||
@@ -104,8 +95,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
'a1:package_qa_setscene', 'a1:build', 'a1:populate_sysroot_setscene']
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
def test_no_settasks(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
cmd = ["bitbake", "a1", "-c", "patch"]
|
||||
@@ -114,8 +103,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
expected = ['a1:fetch', 'a1:unpack', 'a1:patch']
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
def test_mix_covered_notcovered(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
cmd = ["bitbake", "a1:do_patch", "a1:do_populate_sysroot"]
|
||||
@@ -124,7 +111,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
expected = ['a1:fetch', 'a1:unpack', 'a1:patch', 'a1:populate_sysroot_setscene']
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
# Test targets with intermediate setscene tasks alongside a target with no intermediate setscene tasks
|
||||
def test_mixed_direct_tasks_setscene_tasks(self):
|
||||
@@ -136,8 +122,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
'a1:package_qa_setscene', 'a1:build', 'a1:populate_sysroot_setscene']
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
# This test slows down the execution of do_package_setscene until after other real tasks have
|
||||
# started running which tests for a bug where tasks were being lost from the buildable list of real
|
||||
# tasks if they weren't in tasks_covered or tasks_notcovered
|
||||
@@ -152,14 +136,12 @@ class RunQueueTests(unittest.TestCase):
|
||||
'a1:populate_sysroot', 'a1:build']
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
def test_setscene_ignore_tasks(self):
|
||||
def test_setscenewhitelist(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
cmd = ["bitbake", "a1"]
|
||||
extraenv = {
|
||||
"BB_SETSCENE_ENFORCE" : "1",
|
||||
"BB_SETSCENE_ENFORCE_IGNORE_TASKS" : "a1:do_package_write_rpm a1:do_build"
|
||||
"BB_SETSCENE_ENFORCE_WHITELIST" : "a1:do_package_write_rpm a1:do_build"
|
||||
}
|
||||
sstatevalid = "a1:do_package a1:do_package_qa a1:do_packagedata a1:do_package_write_ipk a1:do_populate_lic a1:do_populate_sysroot"
|
||||
tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv)
|
||||
@@ -167,8 +149,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
'a1:populate_sysroot_setscene', 'a1:package_setscene']
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
# Tests for problems with dependencies between setscene tasks
|
||||
def test_no_setscenevalid_harddeps(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
@@ -182,8 +162,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
'd1:populate_sysroot', 'd1:build']
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
def test_no_setscenevalid_withdeps(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
cmd = ["bitbake", "b1"]
|
||||
@@ -194,8 +172,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
expected.remove('a1:package_qa')
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
def test_single_a1_setscenevalid_withdeps(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
cmd = ["bitbake", "b1"]
|
||||
@@ -206,8 +182,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
'a1:populate_sysroot'] + ['b1:' + x for x in self.alltasks]
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
def test_single_b1_setscenevalid_withdeps(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
cmd = ["bitbake", "b1"]
|
||||
@@ -219,8 +193,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
expected.remove('b1:package')
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
def test_intermediate_setscenevalid_withdeps(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
cmd = ["bitbake", "b1"]
|
||||
@@ -231,8 +203,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
expected.remove('b1:package')
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
def test_all_setscenevalid_withdeps(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
cmd = ["bitbake", "b1"]
|
||||
@@ -243,8 +213,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
'b1:packagedata_setscene', 'b1:package_qa_setscene', 'b1:populate_sysroot_setscene']
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
def test_multiconfig_setscene_optimise(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
extraenv = {
|
||||
@@ -264,8 +232,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
expected.remove(x)
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
def test_multiconfig_bbmask(self):
|
||||
# This test validates that multiconfigs can independently mask off
|
||||
# recipes they do not want with BBMASK. It works by having recipes
|
||||
@@ -282,8 +248,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
cmd = ["bitbake", "mc:mc-1:fails-mc2", "mc:mc_2:fails-mc1"]
|
||||
self.run_bitbakecmd(cmd, tempdir, "", extraenv=extraenv)
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
def test_multiconfig_mcdepends(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
extraenv = {
|
||||
@@ -314,8 +278,7 @@ class RunQueueTests(unittest.TestCase):
|
||||
["mc_2:a1:%s" % t for t in rerun_tasks]
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
@unittest.skipIf(sys.version_info < (3, 5, 0), 'Python 3.5 or later required')
|
||||
def test_hashserv_single(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
extraenv = {
|
||||
@@ -341,6 +304,7 @@ class RunQueueTests(unittest.TestCase):
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
@unittest.skipIf(sys.version_info < (3, 5, 0), 'Python 3.5 or later required')
|
||||
def test_hashserv_double(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
extraenv = {
|
||||
@@ -365,6 +329,7 @@ class RunQueueTests(unittest.TestCase):
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
@unittest.skipIf(sys.version_info < (3, 5, 0), 'Python 3.5 or later required')
|
||||
def test_hashserv_multiple_setscene(self):
|
||||
# Runs e1:do_package_setscene twice
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
@@ -396,6 +361,7 @@ class RunQueueTests(unittest.TestCase):
|
||||
|
||||
def shutdown(self, tempdir):
|
||||
# Wait for the hashserve socket to disappear else we'll see races with the tempdir cleanup
|
||||
while (os.path.exists(tempdir + "/hashserve.sock") or os.path.exists(tempdir + "cache/hashserv.db-wal") or os.path.exists(tempdir + "/bitbake.lock")):
|
||||
while (os.path.exists(tempdir + "/hashserve.sock") or os.path.exists(tempdir + "cache/hashserv.db-wal")):
|
||||
time.sleep(0.5)
|
||||
|
||||
|
||||
|
||||
@@ -448,7 +448,7 @@ class Tinfoil:
|
||||
self.run_actions(config_params)
|
||||
self.recipes_parsed = True
|
||||
|
||||
def run_command(self, command, *params, handle_events=True):
|
||||
def run_command(self, command, *params):
|
||||
"""
|
||||
Run a command on the server (as implemented in bb.command).
|
||||
Note that there are two types of command - synchronous and
|
||||
@@ -468,7 +468,7 @@ class Tinfoil:
|
||||
try:
|
||||
result = self.server_connection.connection.runCommand(commandline)
|
||||
finally:
|
||||
while handle_events:
|
||||
while True:
|
||||
event = self.wait_event()
|
||||
if not event:
|
||||
break
|
||||
@@ -493,7 +493,7 @@ class Tinfoil:
|
||||
Wait for an event from the server for the specified time.
|
||||
A timeout of 0 means don't wait if there are no events in the queue.
|
||||
Returns the next event in the queue or None if the timeout was
|
||||
reached. Note that in order to receive any events you will
|
||||
reached. Note that in order to recieve any events you will
|
||||
first need to set the internal event mask using set_event_mask()
|
||||
(otherwise whatever event mask the UI set up will be in effect).
|
||||
"""
|
||||
@@ -761,7 +761,7 @@ class Tinfoil:
|
||||
if parseprogress:
|
||||
parseprogress.update(event.progress)
|
||||
else:
|
||||
bb.warn("Got ProcessProgress event for something that never started?")
|
||||
bb.warn("Got ProcessProgress event for someting that never started?")
|
||||
continue
|
||||
if isinstance(event, bb.event.ProcessFinished):
|
||||
if self.quiet > 1:
|
||||
|
||||
@@ -45,7 +45,7 @@ from pprint import pformat
|
||||
import logging
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
from django.db import transaction
|
||||
from django.db import transaction, connection
|
||||
|
||||
|
||||
# pylint: disable=invalid-name
|
||||
@@ -227,12 +227,6 @@ class ORMWrapper(object):
|
||||
build.completed_on = timezone.now()
|
||||
build.outcome = outcome
|
||||
build.save()
|
||||
|
||||
# We force a sync point here to force the outcome status commit,
|
||||
# which resolves a race condition with the build completion takedown
|
||||
transaction.set_autocommit(True)
|
||||
transaction.set_autocommit(False)
|
||||
|
||||
signal_runbuilds()
|
||||
|
||||
def update_target_set_license_manifest(self, target, license_manifest_path):
|
||||
@@ -489,14 +483,14 @@ class ORMWrapper(object):
|
||||
|
||||
# we already created the root directory, so ignore any
|
||||
# entry for it
|
||||
if not path:
|
||||
if len(path) == 0:
|
||||
continue
|
||||
|
||||
parent_path = "/".join(path.split("/")[:len(path.split("/")) - 1])
|
||||
if not parent_path:
|
||||
if len(parent_path) == 0:
|
||||
parent_path = "/"
|
||||
parent_obj = self._cached_get(Target_File, target = target_obj, path = parent_path, inodetype = Target_File.ITYPE_DIRECTORY)
|
||||
Target_File.objects.create(
|
||||
tf_obj = Target_File.objects.create(
|
||||
target = target_obj,
|
||||
path = path,
|
||||
size = size,
|
||||
@@ -561,7 +555,7 @@ class ORMWrapper(object):
|
||||
|
||||
parent_obj = Target_File.objects.get(target = target_obj, path = parent_path, inodetype = Target_File.ITYPE_DIRECTORY)
|
||||
|
||||
Target_File.objects.create(
|
||||
tf_obj = Target_File.objects.create(
|
||||
target = target_obj,
|
||||
path = path,
|
||||
size = size,
|
||||
@@ -577,7 +571,7 @@ class ORMWrapper(object):
|
||||
assert isinstance(build_obj, Build)
|
||||
assert isinstance(target_obj, Target)
|
||||
|
||||
errormsg = []
|
||||
errormsg = ""
|
||||
for p in packagedict:
|
||||
# Search name swtiches round the installed name vs package name
|
||||
# by default installed name == package name
|
||||
@@ -639,10 +633,10 @@ class ORMWrapper(object):
|
||||
packagefile_objects.append(Package_File( package = packagedict[p]['object'],
|
||||
path = targetpath,
|
||||
size = targetfilesize))
|
||||
if packagefile_objects:
|
||||
if len(packagefile_objects):
|
||||
Package_File.objects.bulk_create(packagefile_objects)
|
||||
except KeyError as e:
|
||||
errormsg.append(" stpi: Key error, package %s key %s \n" % (p, e))
|
||||
errormsg += " stpi: Key error, package %s key %s \n" % ( p, e )
|
||||
|
||||
# save disk installed size
|
||||
packagedict[p]['object'].installed_size = packagedict[p]['size']
|
||||
@@ -679,13 +673,13 @@ class ORMWrapper(object):
|
||||
logger.warning("Could not add dependency to the package %s "
|
||||
"because %s is an unknown package", p, px)
|
||||
|
||||
if packagedeps_objs:
|
||||
if len(packagedeps_objs) > 0:
|
||||
Package_Dependency.objects.bulk_create(packagedeps_objs)
|
||||
else:
|
||||
logger.info("No package dependencies created")
|
||||
|
||||
if errormsg:
|
||||
logger.warning("buildinfohelper: target_package_info could not identify recipes: \n%s", "".join(errormsg))
|
||||
if len(errormsg) > 0:
|
||||
logger.warning("buildinfohelper: target_package_info could not identify recipes: \n%s", errormsg)
|
||||
|
||||
def save_target_image_file_information(self, target_obj, file_name, file_size):
|
||||
Target_Image_File.objects.create(target=target_obj,
|
||||
@@ -773,7 +767,7 @@ class ORMWrapper(object):
|
||||
packagefile_objects.append(Package_File( package = bp_object,
|
||||
path = path,
|
||||
size = package_info['FILES_INFO'][path] ))
|
||||
if packagefile_objects:
|
||||
if len(packagefile_objects):
|
||||
Package_File.objects.bulk_create(packagefile_objects)
|
||||
|
||||
def _po_byname(p):
|
||||
@@ -815,7 +809,7 @@ class ORMWrapper(object):
|
||||
packagedeps_objs.append(Package_Dependency( package = bp_object,
|
||||
depends_on = _po_byname(p), dep_type = Package_Dependency.TYPE_RCONFLICTS))
|
||||
|
||||
if packagedeps_objs:
|
||||
if len(packagedeps_objs) > 0:
|
||||
Package_Dependency.objects.bulk_create(packagedeps_objs)
|
||||
|
||||
return bp_object
|
||||
@@ -832,7 +826,7 @@ class ORMWrapper(object):
|
||||
desc = vardump[root_var]['doc']
|
||||
if desc is None:
|
||||
desc = ''
|
||||
if desc:
|
||||
if len(desc):
|
||||
HelpText.objects.get_or_create(build=build_obj,
|
||||
area=HelpText.VARIABLE,
|
||||
key=k, text=desc)
|
||||
@@ -852,7 +846,7 @@ class ORMWrapper(object):
|
||||
file_name = vh['file'],
|
||||
line_number = vh['line'],
|
||||
operation = vh['op']))
|
||||
if varhist_objects:
|
||||
if len(varhist_objects):
|
||||
VariableHistory.objects.bulk_create(varhist_objects)
|
||||
|
||||
|
||||
@@ -899,6 +893,9 @@ class BuildInfoHelper(object):
|
||||
self.task_order = 0
|
||||
self.autocommit_step = 1
|
||||
self.server = server
|
||||
# we use manual transactions if the database doesn't autocommit on us
|
||||
if not connection.features.autocommits_when_autocommit_is_off:
|
||||
transaction.set_autocommit(False)
|
||||
self.orm_wrapper = ORMWrapper()
|
||||
self.has_build_history = has_build_history
|
||||
self.tmp_dir = self.server.runCommand(["getVariable", "TMPDIR"])[0]
|
||||
@@ -1062,6 +1059,27 @@ class BuildInfoHelper(object):
|
||||
|
||||
return recipe_info
|
||||
|
||||
def _get_path_information(self, task_object):
|
||||
self._ensure_build()
|
||||
|
||||
assert isinstance(task_object, Task)
|
||||
build_stats_format = "{tmpdir}/buildstats/{buildname}/{package}/"
|
||||
build_stats_path = []
|
||||
|
||||
for t in self.internal_state['targets']:
|
||||
buildname = self.internal_state['build'].build_name
|
||||
pe, pv = task_object.recipe.version.split(":",1)
|
||||
if len(pe) > 0:
|
||||
package = task_object.recipe.name + "-" + pe + "_" + pv
|
||||
else:
|
||||
package = task_object.recipe.name + "-" + pv
|
||||
|
||||
build_stats_path.append(build_stats_format.format(tmpdir=self.tmp_dir,
|
||||
buildname=buildname,
|
||||
package=package))
|
||||
|
||||
return build_stats_path
|
||||
|
||||
|
||||
################################
|
||||
## external available methods to store information
|
||||
@@ -1295,11 +1313,12 @@ class BuildInfoHelper(object):
|
||||
task_information['outcome'] = Task.OUTCOME_FAILED
|
||||
del self.internal_state['taskdata'][identifier]
|
||||
|
||||
# we force a sync point here, to get the progress bar to show
|
||||
if self.autocommit_step % 3 == 0:
|
||||
transaction.set_autocommit(True)
|
||||
transaction.set_autocommit(False)
|
||||
self.autocommit_step += 1
|
||||
if not connection.features.autocommits_when_autocommit_is_off:
|
||||
# we force a sync point here, to get the progress bar to show
|
||||
if self.autocommit_step % 3 == 0:
|
||||
transaction.set_autocommit(True)
|
||||
transaction.set_autocommit(False)
|
||||
self.autocommit_step += 1
|
||||
|
||||
self.orm_wrapper.get_update_task_object(task_information, True) # must exist
|
||||
|
||||
@@ -1385,7 +1404,7 @@ class BuildInfoHelper(object):
|
||||
assert 'pn' in event._depgraph
|
||||
assert 'tdepends' in event._depgraph
|
||||
|
||||
errormsg = []
|
||||
errormsg = ""
|
||||
|
||||
# save layer version priorities
|
||||
if 'layer-priorities' in event._depgraph.keys():
|
||||
@@ -1477,7 +1496,7 @@ class BuildInfoHelper(object):
|
||||
elif dep in self.internal_state['recipes']:
|
||||
dependency = self.internal_state['recipes'][dep]
|
||||
else:
|
||||
errormsg.append(" stpd: KeyError saving recipe dependency for %s, %s \n" % (recipe, dep))
|
||||
errormsg += " stpd: KeyError saving recipe dependency for %s, %s \n" % (recipe, dep)
|
||||
continue
|
||||
recipe_dep = Recipe_Dependency(recipe=target,
|
||||
depends_on=dependency,
|
||||
@@ -1518,8 +1537,8 @@ class BuildInfoHelper(object):
|
||||
taskdeps_objects.append(Task_Dependency( task = target, depends_on = dep ))
|
||||
Task_Dependency.objects.bulk_create(taskdeps_objects)
|
||||
|
||||
if errormsg:
|
||||
logger.warning("buildinfohelper: dependency info not identify recipes: \n%s", "".join(errormsg))
|
||||
if len(errormsg) > 0:
|
||||
logger.warning("buildinfohelper: dependency info not identify recipes: \n%s", errormsg)
|
||||
|
||||
|
||||
def store_build_package_information(self, event):
|
||||
@@ -1599,7 +1618,7 @@ class BuildInfoHelper(object):
|
||||
|
||||
if 'backlog' in self.internal_state:
|
||||
# if we have a backlog of events, do our best to save them here
|
||||
if self.internal_state['backlog']:
|
||||
if len(self.internal_state['backlog']):
|
||||
tempevent = self.internal_state['backlog'].pop()
|
||||
logger.debug("buildinfohelper: Saving stored event %s "
|
||||
% tempevent)
|
||||
@@ -1971,6 +1990,8 @@ class BuildInfoHelper(object):
|
||||
# Do not skip command line build events
|
||||
self.store_log_event(tempevent,False)
|
||||
|
||||
if not connection.features.autocommits_when_autocommit_is_off:
|
||||
transaction.set_autocommit(True)
|
||||
|
||||
# unset the brbe; this is to prevent subsequent command-line builds
|
||||
# being incorrectly attached to the previous Toaster-triggered build;
|
||||
|
||||
@@ -25,7 +25,7 @@ from itertools import groupby
|
||||
|
||||
from bb.ui import uihelper
|
||||
|
||||
featureSet = [bb.cooker.CookerFeatures.SEND_SANITYEVENTS, bb.cooker.CookerFeatures.BASEDATASTORE_TRACKING]
|
||||
featureSet = [bb.cooker.CookerFeatures.SEND_SANITYEVENTS]
|
||||
|
||||
logger = logging.getLogger("BitBake")
|
||||
interactive = sys.stdout.isatty()
|
||||
@@ -228,9 +228,7 @@ class TerminalFilter(object):
|
||||
|
||||
def keepAlive(self, t):
|
||||
if not self.cuu:
|
||||
print("Bitbake still alive (no events for %ds). Active tasks:" % t)
|
||||
for t in self.helper.running_tasks:
|
||||
print(t)
|
||||
print("Bitbake still alive (%ds)" % t)
|
||||
sys.stdout.flush()
|
||||
|
||||
def updateFooter(self):
|
||||
@@ -252,68 +250,58 @@ class TerminalFilter(object):
|
||||
return
|
||||
tasks = []
|
||||
for t in runningpids:
|
||||
start_time = activetasks[t].get("starttime", None)
|
||||
if start_time:
|
||||
msg = "%s - %s (pid %s)" % (activetasks[t]["title"], self.elapsed(currenttime - start_time), activetasks[t]["pid"])
|
||||
else:
|
||||
msg = "%s (pid %s)" % (activetasks[t]["title"], activetasks[t]["pid"])
|
||||
progress = activetasks[t].get("progress", None)
|
||||
if progress is not None:
|
||||
pbar = activetasks[t].get("progressbar", None)
|
||||
rate = activetasks[t].get("rate", None)
|
||||
start_time = activetasks[t].get("starttime", None)
|
||||
if not pbar or pbar.bouncing != (progress < 0):
|
||||
if progress < 0:
|
||||
pbar = BBProgress("0: %s" % msg, 100, widgets=[' ', progressbar.BouncingSlider(), ''], extrapos=3, resize_handler=self.sigwinch_handle)
|
||||
pbar = BBProgress("0: %s (pid %s)" % (activetasks[t]["title"], activetasks[t]["pid"]), 100, widgets=[' ', progressbar.BouncingSlider(), ''], extrapos=3, resize_handler=self.sigwinch_handle)
|
||||
pbar.bouncing = True
|
||||
else:
|
||||
pbar = BBProgress("0: %s" % msg, 100, widgets=[' ', progressbar.Percentage(), ' ', progressbar.Bar(), ''], extrapos=5, resize_handler=self.sigwinch_handle)
|
||||
pbar = BBProgress("0: %s (pid %s)" % (activetasks[t]["title"], activetasks[t]["pid"]), 100, widgets=[' ', progressbar.Percentage(), ' ', progressbar.Bar(), ''], extrapos=5, resize_handler=self.sigwinch_handle)
|
||||
pbar.bouncing = False
|
||||
activetasks[t]["progressbar"] = pbar
|
||||
tasks.append((pbar, msg, progress, rate, start_time))
|
||||
tasks.append((pbar, progress, rate, start_time))
|
||||
else:
|
||||
tasks.append(msg)
|
||||
start_time = activetasks[t].get("starttime", None)
|
||||
if start_time:
|
||||
tasks.append("%s - %s (pid %s)" % (activetasks[t]["title"], self.elapsed(currenttime - start_time), activetasks[t]["pid"]))
|
||||
else:
|
||||
tasks.append("%s (pid %s)" % (activetasks[t]["title"], activetasks[t]["pid"]))
|
||||
|
||||
if self.main.shutdown:
|
||||
content = pluralise("Waiting for %s running task to finish",
|
||||
"Waiting for %s running tasks to finish", len(activetasks))
|
||||
if not self.quiet:
|
||||
content += ':'
|
||||
content = "Waiting for %s running tasks to finish:" % len(activetasks)
|
||||
print(content)
|
||||
else:
|
||||
scene_tasks = "%s of %s" % (self.helper.setscene_current, self.helper.setscene_total)
|
||||
cur_tasks = "%s of %s" % (self.helper.tasknumber_current, self.helper.tasknumber_total)
|
||||
|
||||
content = ''
|
||||
if not self.quiet:
|
||||
msg = "Setscene tasks: %s" % scene_tasks
|
||||
content += msg + "\n"
|
||||
print(msg)
|
||||
|
||||
if self.quiet:
|
||||
msg = "Running tasks (%s, %s)" % (scene_tasks, cur_tasks)
|
||||
content = "Running tasks (%s of %s/%s of %s)" % (self.helper.setscene_current, self.helper.setscene_total, self.helper.tasknumber_current, self.helper.tasknumber_total)
|
||||
elif not len(activetasks):
|
||||
msg = "No currently running tasks (%s)" % cur_tasks
|
||||
content = "No currently running tasks (%s of %s/%s of %s)" % (self.helper.setscene_current, self.helper.setscene_total, self.helper.tasknumber_current, self.helper.tasknumber_total)
|
||||
else:
|
||||
msg = "Currently %2s running tasks (%s)" % (len(activetasks), cur_tasks)
|
||||
content = "Currently %2s running tasks (%s of %s/%s of %s)" % (len(activetasks), self.helper.setscene_current, self.helper.setscene_total, self.helper.tasknumber_current, self.helper.tasknumber_total)
|
||||
maxtask = self.helper.tasknumber_total
|
||||
if not self.main_progress or self.main_progress.maxval != maxtask:
|
||||
widgets = [' ', progressbar.Percentage(), ' ', progressbar.Bar()]
|
||||
self.main_progress = BBProgress("Running tasks", maxtask, widgets=widgets, resize_handler=self.sigwinch_handle)
|
||||
self.main_progress.start(False)
|
||||
self.main_progress.setmessage(msg)
|
||||
progress = max(0, self.helper.tasknumber_current - 1)
|
||||
content += self.main_progress.update(progress)
|
||||
self.main_progress.setmessage(content)
|
||||
progress = self.helper.tasknumber_current - 1
|
||||
if progress < 0:
|
||||
progress = 0
|
||||
content = self.main_progress.update(progress)
|
||||
print('')
|
||||
lines = self.getlines(content)
|
||||
if not self.quiet:
|
||||
for tasknum, task in enumerate(tasks[:(self.rows - 1 - lines)]):
|
||||
lines = 1 + int(len(content) / (self.columns + 1))
|
||||
if self.quiet == 0:
|
||||
for tasknum, task in enumerate(tasks[:(self.rows - 2)]):
|
||||
if isinstance(task, tuple):
|
||||
pbar, msg, progress, rate, start_time = task
|
||||
pbar, progress, rate, start_time = task
|
||||
if not pbar.start_time:
|
||||
pbar.start(False)
|
||||
if start_time:
|
||||
pbar.start_time = start_time
|
||||
pbar.setmessage('%s: %s' % (tasknum, msg))
|
||||
pbar.setmessage('%s:%s' % (tasknum, pbar.msg.split(':', 1)[1]))
|
||||
pbar.setextra(rate)
|
||||
if progress > -1:
|
||||
content = pbar.update(progress)
|
||||
@@ -323,17 +311,11 @@ class TerminalFilter(object):
|
||||
else:
|
||||
content = "%s: %s" % (tasknum, task)
|
||||
print(content)
|
||||
lines = lines + self.getlines(content)
|
||||
lines = lines + 1 + int(len(content) / (self.columns + 1))
|
||||
self.footer_present = lines
|
||||
self.lastpids = runningpids[:]
|
||||
self.lastcount = self.helper.tasknumber_current
|
||||
|
||||
def getlines(self, content):
|
||||
lines = 0
|
||||
for line in content.split("\n"):
|
||||
lines = lines + 1 + int(len(line) / (self.columns + 1))
|
||||
return lines
|
||||
|
||||
def finish(self):
|
||||
if self.stdinbackup:
|
||||
fd = sys.stdin.fileno()
|
||||
@@ -623,8 +605,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
warnings = 0
|
||||
taskfailures = []
|
||||
|
||||
printintervaldelta = 10 * 60 # 10 minutes
|
||||
printinterval = printintervaldelta
|
||||
printinterval = 5000
|
||||
lastprint = time.time()
|
||||
|
||||
termfilter = tf(main, helper, console_handlers, params.options.quiet)
|
||||
@@ -634,7 +615,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
try:
|
||||
if (lastprint + printinterval) <= time.time():
|
||||
termfilter.keepAlive(printinterval)
|
||||
printinterval += printintervaldelta
|
||||
printinterval += 5000
|
||||
event = eventHandler.waitEvent(0)
|
||||
if event is None:
|
||||
if main.shutdown > 1:
|
||||
@@ -665,8 +646,8 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
|
||||
if isinstance(event, logging.LogRecord):
|
||||
lastprint = time.time()
|
||||
printinterval = printintervaldelta
|
||||
if event.levelno >= bb.msg.BBLogFormatter.ERRORONCE:
|
||||
printinterval = 5000
|
||||
if event.levelno >= bb.msg.BBLogFormatter.ERROR:
|
||||
errors = errors + 1
|
||||
return_value = 1
|
||||
elif event.levelno == bb.msg.BBLogFormatter.WARNING:
|
||||
@@ -680,10 +661,10 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
continue
|
||||
|
||||
# Prefix task messages with recipe/task
|
||||
if event.taskpid in helper.pidmap and event.levelno not in [bb.msg.BBLogFormatter.PLAIN, bb.msg.BBLogFormatter.WARNONCE, bb.msg.BBLogFormatter.ERRORONCE]:
|
||||
if event.taskpid in helper.pidmap and event.levelno != bb.msg.BBLogFormatter.PLAIN:
|
||||
taskinfo = helper.running_tasks[helper.pidmap[event.taskpid]]
|
||||
event.msg = taskinfo['title'] + ': ' + event.msg
|
||||
if hasattr(event, 'fn') and event.levelno not in [bb.msg.BBLogFormatter.WARNONCE, bb.msg.BBLogFormatter.ERRORONCE]:
|
||||
if hasattr(event, 'fn'):
|
||||
event.msg = event.fn + ': ' + event.msg
|
||||
logging.getLogger(event.name).handle(event)
|
||||
continue
|
||||
@@ -877,6 +858,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
state_force_shutdown()
|
||||
|
||||
main.shutdown = main.shutdown + 1
|
||||
pass
|
||||
except Exception as e:
|
||||
import traceback
|
||||
sys.stderr.write(traceback.format_exc())
|
||||
@@ -893,11 +875,11 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
for failure in taskfailures:
|
||||
summary += "\n %s" % failure
|
||||
if warnings:
|
||||
summary += pluralise("\nSummary: There was %s WARNING message.",
|
||||
"\nSummary: There were %s WARNING messages.", warnings)
|
||||
summary += pluralise("\nSummary: There was %s WARNING message shown.",
|
||||
"\nSummary: There were %s WARNING messages shown.", warnings)
|
||||
if return_value and errors:
|
||||
summary += pluralise("\nSummary: There was %s ERROR message, returning a non-zero exit code.",
|
||||
"\nSummary: There were %s ERROR messages, returning a non-zero exit code.", errors)
|
||||
summary += pluralise("\nSummary: There was %s ERROR message shown, returning a non-zero exit code.",
|
||||
"\nSummary: There were %s ERROR messages shown, returning a non-zero exit code.", errors)
|
||||
if summary and params.options.quiet == 0:
|
||||
print(summary)
|
||||
|
||||
|
||||
@@ -44,7 +44,7 @@ class BBUIEventQueue:
|
||||
for count_tries in range(5):
|
||||
ret = self.BBServer.registerEventHandler(self.host, self.port)
|
||||
|
||||
if isinstance(ret, collections.abc.Iterable):
|
||||
if isinstance(ret, collections.Iterable):
|
||||
self.EventHandle, error = ret
|
||||
else:
|
||||
self.EventHandle = ret
|
||||
@@ -73,13 +73,13 @@ class BBUIEventQueue:
|
||||
|
||||
self.eventQueueLock.acquire()
|
||||
|
||||
if not self.eventQueue:
|
||||
if len(self.eventQueue) == 0:
|
||||
self.eventQueueLock.release()
|
||||
return None
|
||||
|
||||
item = self.eventQueue.pop(0)
|
||||
|
||||
if not self.eventQueue:
|
||||
if len(self.eventQueue) == 0:
|
||||
self.eventQueueNotify.clear()
|
||||
|
||||
self.eventQueueLock.release()
|
||||
|
||||
@@ -16,8 +16,7 @@ import bb.msg
|
||||
import multiprocessing
|
||||
import fcntl
|
||||
import importlib
|
||||
import importlib.machinery
|
||||
import importlib.util
|
||||
from importlib import machinery
|
||||
import itertools
|
||||
import subprocess
|
||||
import glob
|
||||
@@ -27,11 +26,6 @@ import errno
|
||||
import signal
|
||||
import collections
|
||||
import copy
|
||||
import ctypes
|
||||
import random
|
||||
import socket
|
||||
import struct
|
||||
import tempfile
|
||||
from subprocess import getstatusoutput
|
||||
from contextlib import contextmanager
|
||||
from ctypes import cdll
|
||||
@@ -257,7 +251,7 @@ def explode_dep_versions(s):
|
||||
"""
|
||||
Take an RDEPENDS style string of format:
|
||||
"DEPEND1 (optional version) DEPEND2 (optional version) ..."
|
||||
skip null value and items appeared in dependency string multiple times
|
||||
skip null value and items appeared in dependancy string multiple times
|
||||
and return a dictionary of dependencies and versions.
|
||||
"""
|
||||
r = explode_dep_versions2(s)
|
||||
@@ -385,7 +379,7 @@ def _print_exception(t, value, tb, realfile, text, context):
|
||||
|
||||
error.append("Exception: %s" % ''.join(exception))
|
||||
|
||||
# If the exception is from spawning a task, let's be helpful and display
|
||||
# If the exception is from spwaning a task, let's be helpful and display
|
||||
# the output (which hopefully includes stderr).
|
||||
if isinstance(value, subprocess.CalledProcessError) and value.output:
|
||||
error.append("Subprocess output:")
|
||||
@@ -406,7 +400,7 @@ def better_exec(code, context, text = None, realfile = "<code>", pythonexception
|
||||
code = better_compile(code, realfile, realfile)
|
||||
try:
|
||||
exec(code, get_context(), context)
|
||||
except (bb.BBHandledException, bb.parse.SkipRecipe, bb.data_smart.ExpansionError, bb.process.ExecutionError):
|
||||
except (bb.BBHandledException, bb.parse.SkipRecipe, bb.data_smart.ExpansionError):
|
||||
# Error already shown so passthrough, no need for traceback
|
||||
raise
|
||||
except Exception as e:
|
||||
@@ -433,14 +427,12 @@ def better_eval(source, locals, extraglobals = None):
|
||||
return eval(source, ctx, locals)
|
||||
|
||||
@contextmanager
|
||||
def fileslocked(files, *args, **kwargs):
|
||||
def fileslocked(files):
|
||||
"""Context manager for locking and unlocking file locks."""
|
||||
locks = []
|
||||
if files:
|
||||
for lockfile in files:
|
||||
l = bb.utils.lockfile(lockfile, *args, **kwargs)
|
||||
if l is not None:
|
||||
locks.append(l)
|
||||
locks.append(bb.utils.lockfile(lockfile))
|
||||
|
||||
try:
|
||||
yield
|
||||
@@ -459,16 +451,13 @@ def lockfile(name, shared=False, retry=True, block=False):
|
||||
consider the possibility of sending a signal to the process to break
|
||||
out - at which point you want block=True rather than retry=True.
|
||||
"""
|
||||
basename = os.path.basename(name)
|
||||
if len(basename) > 255:
|
||||
root, ext = os.path.splitext(basename)
|
||||
basename = root[:255 - len(ext)] + ext
|
||||
if len(name) > 255:
|
||||
root, ext = os.path.splitext(name)
|
||||
name = root[:255 - len(ext)] + ext
|
||||
|
||||
dirname = os.path.dirname(name)
|
||||
mkdirhier(dirname)
|
||||
|
||||
name = os.path.join(dirname, basename)
|
||||
|
||||
if not os.access(dirname, os.W_OK):
|
||||
logger.error("Unable to acquire lock '%s', directory is not writable",
|
||||
name)
|
||||
@@ -547,7 +536,7 @@ def md5_file(filename):
|
||||
Return the hex string representation of the MD5 checksum of filename.
|
||||
"""
|
||||
import hashlib
|
||||
return _hasher(hashlib.new('MD5', usedforsecurity=False), filename)
|
||||
return _hasher(hashlib.md5(), filename)
|
||||
|
||||
def sha256_file(filename):
|
||||
"""
|
||||
@@ -598,8 +587,8 @@ def preserved_envvars():
|
||||
v = [
|
||||
'BBPATH',
|
||||
'BB_PRESERVE_ENV',
|
||||
'BB_ENV_PASSTHROUGH',
|
||||
'BB_ENV_PASSTHROUGH_ADDITIONS',
|
||||
'BB_ENV_WHITELIST',
|
||||
'BB_ENV_EXTRAWHITE',
|
||||
]
|
||||
return v + preserved_envvars_exported()
|
||||
|
||||
@@ -630,21 +619,21 @@ def filter_environment(good_vars):
|
||||
|
||||
def approved_variables():
|
||||
"""
|
||||
Determine and return the list of variables which are approved
|
||||
Determine and return the list of whitelisted variables which are approved
|
||||
to remain in the environment.
|
||||
"""
|
||||
if 'BB_PRESERVE_ENV' in os.environ:
|
||||
return os.environ.keys()
|
||||
approved = []
|
||||
if 'BB_ENV_PASSTHROUGH' in os.environ:
|
||||
approved = os.environ['BB_ENV_PASSTHROUGH'].split()
|
||||
approved.extend(['BB_ENV_PASSTHROUGH'])
|
||||
if 'BB_ENV_WHITELIST' in os.environ:
|
||||
approved = os.environ['BB_ENV_WHITELIST'].split()
|
||||
approved.extend(['BB_ENV_WHITELIST'])
|
||||
else:
|
||||
approved = preserved_envvars()
|
||||
if 'BB_ENV_PASSTHROUGH_ADDITIONS' in os.environ:
|
||||
approved.extend(os.environ['BB_ENV_PASSTHROUGH_ADDITIONS'].split())
|
||||
if 'BB_ENV_PASSTHROUGH_ADDITIONS' not in approved:
|
||||
approved.extend(['BB_ENV_PASSTHROUGH_ADDITIONS'])
|
||||
if 'BB_ENV_EXTRAWHITE' in os.environ:
|
||||
approved.extend(os.environ['BB_ENV_EXTRAWHITE'].split())
|
||||
if 'BB_ENV_EXTRAWHITE' not in approved:
|
||||
approved.extend(['BB_ENV_EXTRAWHITE'])
|
||||
return approved
|
||||
|
||||
def clean_environment():
|
||||
@@ -698,8 +687,8 @@ def remove(path, recurse=False, ionice=False):
|
||||
return
|
||||
if recurse:
|
||||
for name in glob.glob(path):
|
||||
if _check_unsafe_delete_path(name):
|
||||
raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % name)
|
||||
if _check_unsafe_delete_path(path):
|
||||
raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % path)
|
||||
# shutil.rmtree(name) would be ideal but its too slow
|
||||
cmd = []
|
||||
if ionice:
|
||||
@@ -757,7 +746,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
|
||||
if not sstat:
|
||||
sstat = os.lstat(src)
|
||||
except Exception as e:
|
||||
logger.warning("movefile: Stating source file failed...", e)
|
||||
print("movefile: Stating source file failed...", e)
|
||||
return None
|
||||
|
||||
destexists = 1
|
||||
@@ -785,7 +774,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
|
||||
os.unlink(src)
|
||||
return os.lstat(dest)
|
||||
except Exception as e:
|
||||
logger.warning("movefile: failed to properly create symlink:", dest, "->", target, e)
|
||||
print("movefile: failed to properly create symlink:", dest, "->", target, e)
|
||||
return None
|
||||
|
||||
renamefailed = 1
|
||||
@@ -802,7 +791,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
|
||||
except Exception as e:
|
||||
if e.errno != errno.EXDEV:
|
||||
# Some random error.
|
||||
logger.warning("movefile: Failed to move", src, "to", dest, e)
|
||||
print("movefile: Failed to move", src, "to", dest, e)
|
||||
return None
|
||||
# Invalid cross-device-link 'bind' mounted or actually Cross-Device
|
||||
|
||||
@@ -814,13 +803,13 @@ def movefile(src, dest, newmtime = None, sstat = None):
|
||||
bb.utils.rename(destpath + "#new", destpath)
|
||||
didcopy = 1
|
||||
except Exception as e:
|
||||
logger.warning('movefile: copy', src, '->', dest, 'failed.', e)
|
||||
print('movefile: copy', src, '->', dest, 'failed.', e)
|
||||
return None
|
||||
else:
|
||||
#we don't yet handle special, so we need to fall back to /bin/mv
|
||||
a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'")
|
||||
if a[0] != 0:
|
||||
logger.warning("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a)
|
||||
print("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a)
|
||||
return None # failure
|
||||
try:
|
||||
if didcopy:
|
||||
@@ -828,7 +817,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
|
||||
os.chmod(destpath, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
|
||||
os.unlink(src)
|
||||
except Exception as e:
|
||||
logger.warning("movefile: Failed to chown/chmod/unlink", dest, e)
|
||||
print("movefile: Failed to chown/chmod/unlink", dest, e)
|
||||
return None
|
||||
|
||||
if newmtime:
|
||||
@@ -1605,74 +1594,6 @@ def set_process_name(name):
|
||||
except:
|
||||
pass
|
||||
|
||||
def enable_loopback_networking():
|
||||
# From bits/ioctls.h
|
||||
SIOCGIFFLAGS = 0x8913
|
||||
SIOCSIFFLAGS = 0x8914
|
||||
SIOCSIFADDR = 0x8916
|
||||
SIOCSIFNETMASK = 0x891C
|
||||
|
||||
# if.h
|
||||
IFF_UP = 0x1
|
||||
IFF_RUNNING = 0x40
|
||||
|
||||
# bits/socket.h
|
||||
AF_INET = 2
|
||||
|
||||
# char ifr_name[IFNAMSIZ=16]
|
||||
ifr_name = struct.pack("@16s", b"lo")
|
||||
def netdev_req(fd, req, data = b""):
|
||||
# Pad and add interface name
|
||||
data = ifr_name + data + (b'\x00' * (16 - len(data)))
|
||||
# Return all data after interface name
|
||||
return fcntl.ioctl(fd, req, data)[16:]
|
||||
|
||||
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) as sock:
|
||||
fd = sock.fileno()
|
||||
|
||||
# struct sockaddr_in ifr_addr { unsigned short family; uint16_t sin_port ; uint32_t in_addr; }
|
||||
req = struct.pack("@H", AF_INET) + struct.pack("=H4B", 0, 127, 0, 0, 1)
|
||||
netdev_req(fd, SIOCSIFADDR, req)
|
||||
|
||||
# short ifr_flags
|
||||
flags = struct.unpack_from('@h', netdev_req(fd, SIOCGIFFLAGS))[0]
|
||||
flags |= IFF_UP | IFF_RUNNING
|
||||
netdev_req(fd, SIOCSIFFLAGS, struct.pack('@h', flags))
|
||||
|
||||
# struct sockaddr_in ifr_netmask
|
||||
req = struct.pack("@H", AF_INET) + struct.pack("=H4B", 0, 255, 0, 0, 0)
|
||||
netdev_req(fd, SIOCSIFNETMASK, req)
|
||||
|
||||
def disable_network(uid=None, gid=None):
|
||||
"""
|
||||
Disable networking in the current process if the kernel supports it, else
|
||||
just return after logging to debug. To do this we need to create a new user
|
||||
namespace, then map back to the original uid/gid.
|
||||
"""
|
||||
libc = ctypes.CDLL('libc.so.6')
|
||||
|
||||
# From sched.h
|
||||
# New user namespace
|
||||
CLONE_NEWUSER = 0x10000000
|
||||
# New network namespace
|
||||
CLONE_NEWNET = 0x40000000
|
||||
|
||||
if uid is None:
|
||||
uid = os.getuid()
|
||||
if gid is None:
|
||||
gid = os.getgid()
|
||||
|
||||
ret = libc.unshare(CLONE_NEWNET | CLONE_NEWUSER)
|
||||
if ret != 0:
|
||||
logger.debug("System doesn't support disabling network without admin privs")
|
||||
return
|
||||
with open("/proc/self/uid_map", "w") as f:
|
||||
f.write("%s %s 1" % (uid, uid))
|
||||
with open("/proc/self/setgroups", "w") as f:
|
||||
f.write("deny")
|
||||
with open("/proc/self/gid_map", "w") as f:
|
||||
f.write("%s %s 1" % (gid, gid))
|
||||
|
||||
def export_proxies(d):
|
||||
""" export common proxies variables from datastore to environment """
|
||||
import os
|
||||
@@ -1699,9 +1620,7 @@ def load_plugins(logger, plugins, pluginpath):
|
||||
logger.debug('Loading plugin %s' % name)
|
||||
spec = importlib.machinery.PathFinder.find_spec(name, path=[pluginpath] )
|
||||
if spec:
|
||||
mod = importlib.util.module_from_spec(spec)
|
||||
spec.loader.exec_module(mod)
|
||||
return mod
|
||||
return spec.loader.load_module()
|
||||
|
||||
logger.debug('Loading plugins from %s...' % pluginpath)
|
||||
|
||||
@@ -1780,40 +1699,5 @@ def environment(**envvars):
|
||||
for var in envvars:
|
||||
if var in backup:
|
||||
os.environ[var] = backup[var]
|
||||
elif var in os.environ:
|
||||
else:
|
||||
del os.environ[var]
|
||||
|
||||
def is_local_uid(uid=''):
|
||||
"""
|
||||
Check whether uid is a local one or not.
|
||||
Can't use pwd module since it gets all UIDs, not local ones only.
|
||||
"""
|
||||
if not uid:
|
||||
uid = os.getuid()
|
||||
with open('/etc/passwd', 'r') as f:
|
||||
for line in f:
|
||||
line_split = line.split(':')
|
||||
if len(line_split) < 3:
|
||||
continue
|
||||
if str(uid) == line_split[2]:
|
||||
return True
|
||||
return False
|
||||
|
||||
def mkstemp(suffix=None, prefix=None, dir=None, text=False):
|
||||
"""
|
||||
Generates a unique filename, independent of time.
|
||||
|
||||
mkstemp() in glibc (at least) generates unique file names based on the
|
||||
current system time. When combined with highly parallel builds, and
|
||||
operating over NFS (e.g. shared sstate/downloads) this can result in
|
||||
conflicts and race conditions.
|
||||
|
||||
This function adds additional entropy to the file name so that a collision
|
||||
is independent of time and thus extremely unlikely.
|
||||
"""
|
||||
entropy = "".join(random.choices("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890", k=20))
|
||||
if prefix:
|
||||
prefix = prefix + entropy
|
||||
else:
|
||||
prefix = tempfile.gettempprefix() + entropy
|
||||
return tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir, text=text)
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -55,7 +53,7 @@ class ActionPlugin(LayerPlugin):
|
||||
except (bb.tinfoil.TinfoilUIException, bb.BBHandledException):
|
||||
# Restore the back up copy of bblayers.conf
|
||||
shutil.copy2(backup, bblayers_conf)
|
||||
bb.fatal("Parse failure with the specified layer added, exiting.")
|
||||
bb.fatal("Parse failure with the specified layer added, aborting.")
|
||||
else:
|
||||
for item in notadded:
|
||||
sys.stderr.write("Specified layer %s is already in BBLAYERS\n" % item)
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -57,12 +55,11 @@ are overlayed will also be listed, with a " (skipped)" suffix.
|
||||
# Check for overlayed .bbclass files
|
||||
classes = collections.defaultdict(list)
|
||||
for layerdir in self.bblayers:
|
||||
for c in ["classes-global", "classes-recipe", "classes"]:
|
||||
classdir = os.path.join(layerdir, c)
|
||||
if os.path.exists(classdir):
|
||||
for classfile in os.listdir(classdir):
|
||||
if os.path.splitext(classfile)[1] == '.bbclass':
|
||||
classes[classfile].append(classdir)
|
||||
classdir = os.path.join(layerdir, 'classes')
|
||||
if os.path.exists(classdir):
|
||||
for classfile in os.listdir(classdir):
|
||||
if os.path.splitext(classfile)[1] == '.bbclass':
|
||||
classes[classfile].append(classdir)
|
||||
|
||||
# Locating classes and other files is a bit more complicated than recipes -
|
||||
# layer priority is not a factor; instead BitBake uses the first matching
|
||||
@@ -125,14 +122,9 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
|
||||
if inherits:
|
||||
bbpath = str(self.tinfoil.config_data.getVar('BBPATH'))
|
||||
for classname in inherits:
|
||||
found = False
|
||||
for c in ["classes-global", "classes-recipe", "classes"]:
|
||||
cfile = c + '/%s.bbclass' % classname
|
||||
if bb.utils.which(bbpath, cfile, history=False):
|
||||
found = True
|
||||
break
|
||||
if not found:
|
||||
logger.error('No class named %s found in BBPATH', classname)
|
||||
classfile = 'classes/%s.bbclass' % classname
|
||||
if not bb.utils.which(bbpath, classfile, history=False):
|
||||
logger.error('No class named %s found in BBPATH', classfile)
|
||||
sys.exit(1)
|
||||
|
||||
pkg_pn = self.tinfoil.cooker.recipecaches[mc].pkg_pn
|
||||
@@ -180,7 +172,7 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
|
||||
logger.plain(" %s %s%s", layer.ljust(20), ver, skipped)
|
||||
|
||||
global_inherit = (self.tinfoil.config_data.getVar('INHERIT') or "").split()
|
||||
cls_re = re.compile('classes.*/')
|
||||
cls_re = re.compile('classes/')
|
||||
|
||||
preffiles = []
|
||||
show_unique_pn = []
|
||||
@@ -413,7 +405,7 @@ NOTE: .bbappend files can impact the dependencies.
|
||||
self.check_cross_depends("RRECOMMENDS", layername, f, best, args.filenames, ignore_layers)
|
||||
|
||||
# The inherit class
|
||||
cls_re = re.compile('classes.*/')
|
||||
cls_re = re.compile('classes/')
|
||||
if f in self.tinfoil.cooker_data.inherits:
|
||||
inherits = self.tinfoil.cooker_data.inherits[f]
|
||||
for cls in inherits:
|
||||
|
||||
@@ -401,12 +401,6 @@ class SourceGenerator(NodeVisitor):
|
||||
def visit_Num(self, node):
|
||||
self.write(repr(node.n))
|
||||
|
||||
def visit_Constant(self, node):
|
||||
# Python 3.8 deprecated visit_Num(), visit_Str(), visit_Bytes(),
|
||||
# visit_NameConstant() and visit_Ellipsis(). They can be removed once we
|
||||
# require 3.8+.
|
||||
self.write(repr(node.value))
|
||||
|
||||
def visit_Tuple(self, node):
|
||||
self.write('(')
|
||||
idx = -1
|
||||
|
||||
@@ -1278,7 +1278,7 @@ class Recipe(LayerIndexItemObj_LayerBranch):
|
||||
filename, filepath, pn, pv, layerbranch,
|
||||
summary="", description="", section="", license="",
|
||||
homepage="", bugtracker="", provides="", bbclassextend="",
|
||||
inherits="", disallowed="", updated=None):
|
||||
inherits="", blacklisted="", updated=None):
|
||||
self.id = id
|
||||
self.filename = filename
|
||||
self.filepath = filepath
|
||||
@@ -1294,7 +1294,7 @@ class Recipe(LayerIndexItemObj_LayerBranch):
|
||||
self.bbclassextend = bbclassextend
|
||||
self.inherits = inherits
|
||||
self.updated = updated or datetime.datetime.today().isoformat()
|
||||
self.disallowed = disallowed
|
||||
self.blacklisted = blacklisted
|
||||
if isinstance(layerbranch, LayerBranch):
|
||||
self.layerbranch = layerbranch
|
||||
else:
|
||||
|
||||
@@ -279,7 +279,7 @@ class CookerPlugin(layerindexlib.plugin.IndexPlugin):
|
||||
summary=pn, description=pn, section='?',
|
||||
license='?', homepage='?', bugtracker='?',
|
||||
provides='?', bbclassextend='?', inherits='?',
|
||||
disallowed='?', layerbranch=depBranchId)
|
||||
blacklisted='?', layerbranch=depBranchId)
|
||||
|
||||
index = addElement("recipes", [recipe], index)
|
||||
|
||||
|
||||
@@ -2797,8 +2797,11 @@ class ParserReflect(object):
|
||||
# Compute a signature over the grammar
|
||||
def signature(self):
|
||||
try:
|
||||
import hashlib
|
||||
sig = hashlib.new('MD5', usedforsecurity=False)
|
||||
from hashlib import md5
|
||||
except ImportError:
|
||||
from md5 import md5
|
||||
try:
|
||||
sig = md5()
|
||||
if self.start:
|
||||
sig.update(self.start.encode('latin-1'))
|
||||
if self.prec:
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -52,6 +52,7 @@ from collections import deque
|
||||
from datetime import datetime, timedelta
|
||||
import time
|
||||
import re
|
||||
import asyncore
|
||||
import glob
|
||||
import locale
|
||||
import subprocess
|
||||
@@ -595,24 +596,14 @@ class _ProcessEvent:
|
||||
@type event: Event object
|
||||
@return: By convention when used from the ProcessEvent class:
|
||||
- Returning False or None (default value) means keep on
|
||||
executing next chained functors (see chain.py example).
|
||||
executing next chained functors (see chain.py example).
|
||||
- Returning True instead means do not execute next
|
||||
processing functions.
|
||||
@rtype: bool
|
||||
@raise ProcessEventError: Event object undispatchable,
|
||||
unknown event.
|
||||
"""
|
||||
stripped_mask = event.mask & ~IN_ISDIR
|
||||
# Bitbake hack - we see event masks of 0x6, i.e., IN_MODIFY & IN_ATTRIB.
|
||||
# The kernel inotify code can set more than one of the bits in the mask,
|
||||
# fsnotify_change() in linux/fsnotify.h is quite clear that IN_ATTRIB,
|
||||
# IN_MODIFY and IN_ACCESS can arrive together.
|
||||
# This breaks the code below which assume only one mask bit is ever
|
||||
# set in an event. We don't care about attrib or access in bitbake so
|
||||
# drop those.
|
||||
if stripped_mask & IN_MODIFY:
|
||||
stripped_mask &= ~(IN_ATTRIB | IN_ACCESS)
|
||||
|
||||
stripped_mask = event.mask - (event.mask & IN_ISDIR)
|
||||
maskname = EventsCodes.ALL_VALUES.get(stripped_mask)
|
||||
if maskname is None:
|
||||
raise ProcessEventError("Unknown mask 0x%08x" % stripped_mask)
|
||||
@@ -1484,6 +1475,35 @@ class ThreadedNotifier(threading.Thread, Notifier):
|
||||
self.loop()
|
||||
|
||||
|
||||
class AsyncNotifier(asyncore.file_dispatcher, Notifier):
|
||||
"""
|
||||
This notifier inherits from asyncore.file_dispatcher in order to be able to
|
||||
use pyinotify along with the asyncore framework.
|
||||
|
||||
"""
|
||||
def __init__(self, watch_manager, default_proc_fun=None, read_freq=0,
|
||||
threshold=0, timeout=None, channel_map=None):
|
||||
"""
|
||||
Initializes the async notifier. The only additional parameter is
|
||||
'channel_map' which is the optional asyncore private map. See
|
||||
Notifier class for the meaning of the others parameters.
|
||||
|
||||
"""
|
||||
Notifier.__init__(self, watch_manager, default_proc_fun, read_freq,
|
||||
threshold, timeout)
|
||||
asyncore.file_dispatcher.__init__(self, self._fd, channel_map)
|
||||
|
||||
def handle_read(self):
|
||||
"""
|
||||
When asyncore tells us we can read from the fd, we proceed processing
|
||||
events. This method can be overridden for handling a notification
|
||||
differently.
|
||||
|
||||
"""
|
||||
self.read_events()
|
||||
self.process_events()
|
||||
|
||||
|
||||
class TornadoAsyncNotifier(Notifier):
|
||||
"""
|
||||
Tornado ioloop adapter.
|
||||
|
||||
@@ -200,7 +200,7 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
localdirpath = os.path.join(localdirname, dirpath)
|
||||
logger.debug("localhostbecontroller: localdirpath expects '%s'" % localdirpath)
|
||||
if not os.path.exists(localdirpath):
|
||||
raise BuildSetupException("Cannot find layer git path '%s' in checked out repository '%s:%s'. Exiting." % (localdirpath, giturl, commit))
|
||||
raise BuildSetupException("Cannot find layer git path '%s' in checked out repository '%s:%s'. Aborting." % (localdirpath, giturl, commit))
|
||||
|
||||
if name != "bitbake":
|
||||
layerlist.append("%03d:%s" % (index,localdirpath.rstrip("/")))
|
||||
@@ -467,7 +467,7 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
logger.debug("localhostbecontroller: waiting for bblock content to appear")
|
||||
time.sleep(1)
|
||||
else:
|
||||
raise BuildSetupException("Cannot find bitbake server lock file '%s'. Exiting." % bblock)
|
||||
raise BuildSetupException("Cannot find bitbake server lock file '%s'. Aborting." % bblock)
|
||||
|
||||
with open(bblock) as fplock:
|
||||
for line in fplock:
|
||||
|
||||
@@ -180,77 +180,6 @@ class Command(BaseCommand):
|
||||
except Exception as e:
|
||||
logger.warning("runbuilds: schedule exception %s" % str(e))
|
||||
|
||||
# Test to see if a build pre-maturely died due to a bitbake crash
|
||||
def check_dead_builds(self):
|
||||
do_cleanup = False
|
||||
try:
|
||||
for br in BuildRequest.objects.filter(state=BuildRequest.REQ_INPROGRESS):
|
||||
# Get the build directory
|
||||
if br.project.builddir:
|
||||
builddir = br.project.builddir
|
||||
else:
|
||||
builddir = '%s-toaster-%d' % (br.environment.builddir,br.project.id)
|
||||
# Check log to see if there is a recent traceback
|
||||
toaster_ui_log = os.path.join(builddir, 'toaster_ui.log')
|
||||
test_file = os.path.join(builddir, '._toaster_check.txt')
|
||||
os.system("tail -n 50 %s > %s" % (os.path.join(builddir, 'toaster_ui.log'),test_file))
|
||||
traceback_text = ''
|
||||
is_traceback = False
|
||||
with open(test_file,'r') as test_file_fd:
|
||||
test_file_tail = test_file_fd.readlines()
|
||||
for line in test_file_tail:
|
||||
if line.startswith('Traceback (most recent call last):'):
|
||||
traceback_text = line
|
||||
is_traceback = True
|
||||
elif line.startswith('NOTE: ToasterUI waiting for events'):
|
||||
# Ignore any traceback before new build start
|
||||
traceback_text = ''
|
||||
is_traceback = False
|
||||
elif line.startswith('Note: Toaster traceback auto-stop'):
|
||||
# Ignore any traceback before this previous traceback catch
|
||||
traceback_text = ''
|
||||
is_traceback = False
|
||||
elif is_traceback:
|
||||
traceback_text += line
|
||||
# Test the results
|
||||
is_stop = False
|
||||
if is_traceback:
|
||||
# Found a traceback
|
||||
errtype = 'Bitbake crash'
|
||||
errmsg = 'Bitbake crash\n' + traceback_text
|
||||
state = BuildRequest.REQ_FAILED
|
||||
# Clean up bitbake files
|
||||
bitbake_lock = os.path.join(builddir, 'bitbake.lock')
|
||||
if os.path.isfile(bitbake_lock):
|
||||
os.remove(bitbake_lock)
|
||||
bitbake_sock = os.path.join(builddir, 'bitbake.sock')
|
||||
if os.path.isfile(bitbake_sock):
|
||||
os.remove(bitbake_sock)
|
||||
if os.path.isfile(test_file):
|
||||
os.remove(test_file)
|
||||
# Add note to ignore this traceback on next check
|
||||
os.system('echo "Note: Toaster traceback auto-stop" >> %s' % toaster_ui_log)
|
||||
is_stop = True
|
||||
# Add more tests here
|
||||
#elif ...
|
||||
# Stop the build request?
|
||||
if is_stop:
|
||||
brerror = BRError(
|
||||
req = br,
|
||||
errtype = errtype,
|
||||
errmsg = errmsg,
|
||||
traceback = traceback_text,
|
||||
)
|
||||
brerror.save()
|
||||
br.state = state
|
||||
br.save()
|
||||
do_cleanup = True
|
||||
# Do cleanup
|
||||
if do_cleanup:
|
||||
self.cleanup()
|
||||
except Exception as e:
|
||||
logger.error("runbuilds: Error in check_dead_builds %s" % e)
|
||||
|
||||
def handle(self, **options):
|
||||
pidfile_path = os.path.join(os.environ.get("BUILDDIR", "."),
|
||||
".runbuilds.pid")
|
||||
@@ -258,18 +187,10 @@ class Command(BaseCommand):
|
||||
with open(pidfile_path, 'w') as pidfile:
|
||||
pidfile.write("%s" % os.getpid())
|
||||
|
||||
# Clean up any stale/failed builds from previous Toaster run
|
||||
self.runbuild()
|
||||
|
||||
signal.signal(signal.SIGUSR1, lambda sig, frame: None)
|
||||
|
||||
while True:
|
||||
sigset = signal.sigtimedwait([signal.SIGUSR1], 5)
|
||||
if sigset:
|
||||
for sig in sigset:
|
||||
# Consume each captured pending event
|
||||
self.runbuild()
|
||||
else:
|
||||
# Check for build exceptions
|
||||
self.check_dead_builds()
|
||||
|
||||
signal.pause()
|
||||
self.runbuild()
|
||||
|
||||
@@ -1,48 +0,0 @@
|
||||
# Generated by Django 3.2.12 on 2022-03-06 03:28
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('bldcontrol', '0007_brlayers_optional_gitinfo'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='brbitbake',
|
||||
name='id',
|
||||
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='brerror',
|
||||
name='id',
|
||||
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='brlayer',
|
||||
name='id',
|
||||
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='brtarget',
|
||||
name='id',
|
||||
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='brvariable',
|
||||
name='id',
|
||||
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='buildenvironment',
|
||||
name='id',
|
||||
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='buildrequest',
|
||||
name='id',
|
||||
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
|
||||
),
|
||||
]
|
||||
@@ -1,7 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,445 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# ex:ts=4:sw=4:sts=4:et
|
||||
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
||||
#
|
||||
# Generate Toaster Fixtures for 'poky.xml' and 'oe-core.xml'
|
||||
#
|
||||
# Copyright (C) 2022 Wind River Systems
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
# Edit the 'current_releases' table for each new release cycle
|
||||
#
|
||||
# Usage: ./get_fixtures all
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
|
||||
verbose = False
|
||||
|
||||
####################################
|
||||
# Releases
|
||||
#
|
||||
# https://wiki.yoctoproject.org/wiki/Releases
|
||||
#
|
||||
# NOTE: for the current releases table, it helps to keep continuing releases
|
||||
# in the same table positions since this minimizes the patch diff for review.
|
||||
# The order of the table does not matter since Toaster presents them sorted.
|
||||
#
|
||||
# Traditionally, the two most current releases are included in addition to the
|
||||
# 'master' branch and the local installation's 'HEAD'.
|
||||
# It is also policy to include all active LTS releases.
|
||||
#
|
||||
|
||||
# [Codename, Yocto Project Version, Release Date, Current Version, Support Level, Poky Version, BitBake branch]
|
||||
current_releases = [
|
||||
# Release slot #1
|
||||
['Kirkstone','3.5','April 2022','','Future - Long Term Support (until Apr. 2024)','27.0','1.54'],
|
||||
# ['Dunfell','3.1','April 2021','3.1.5 (March 2022)','Stable - Support for 13 months (until Apr. 2022)','23.0','1.46'],
|
||||
# Release slot #2 'local'
|
||||
['HEAD','HEAD','','Local Yocto Project','HEAD','','HEAD'],
|
||||
# Release slot #3 'master'
|
||||
['Master','master','','Yocto Project master','master','','master'],
|
||||
# Release slot #4
|
||||
['Honister','3.4','October 2021','3.4.2 (February 2022)','Support for 7 months (until May 2022)','26.0','1.52'],
|
||||
# ['Gatesgarth','3.2','Oct 2020','3.2.4 (May 2021)','EOL','24.0','1.48'],
|
||||
# Optional Release slot #4
|
||||
['Hardknott','3.3','April 2021','3.3.5 (March 2022)','Stable - Support for 13 months (until Apr. 2022)','25.0','1.50'],
|
||||
]
|
||||
|
||||
default_poky_layers = [
|
||||
'openembedded-core',
|
||||
'meta-poky',
|
||||
'meta-yocto-bsp',
|
||||
]
|
||||
|
||||
default_oe_core_layers = [
|
||||
'openembedded-core',
|
||||
]
|
||||
|
||||
####################################
|
||||
# Templates
|
||||
|
||||
prolog_template = '''\
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<django-objects version="1.0">
|
||||
<!-- Set the project default value for DISTRO -->
|
||||
<object model="orm.toastersetting" pk="1">
|
||||
<field type="CharField" name="name">DEFCONF_DISTRO</field>
|
||||
<field type="CharField" name="value">{{distro}}</field>
|
||||
</object>
|
||||
'''
|
||||
|
||||
#<!-- Bitbake versions which correspond to the metadata release -->')
|
||||
bitbakeversion_poky_template = '''\
|
||||
<object model="orm.bitbakeversion" pk="{{bitbake_id}}">
|
||||
<field type="CharField" name="name">{{name}}</field>
|
||||
<field type="CharField" name="giturl">git://git.yoctoproject.org/poky</field>
|
||||
<field type="CharField" name="branch">{{branch}}</field>
|
||||
<field type="CharField" name="dirpath">bitbake</field>
|
||||
</object>
|
||||
'''
|
||||
bitbakeversion_oecore_template = '''\
|
||||
<object model="orm.bitbakeversion" pk="{{bitbake_id}}">
|
||||
<field type="CharField" name="name">{{name}}</field>
|
||||
<field type="CharField" name="giturl">git://git.openembedded.org/bitbake</field>
|
||||
<field type="CharField" name="branch">{{bitbakeversion}}</field>
|
||||
</object>
|
||||
'''
|
||||
|
||||
# <!-- Releases available -->
|
||||
releases_available_template = '''\
|
||||
<object model="orm.release" pk="{{ra_count}}">
|
||||
<field type="CharField" name="name">{{name}}</field>
|
||||
<field type="CharField" name="description">{{description}}</field>
|
||||
<field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">{{ra_count}}</field>
|
||||
<field type="CharField" name="branch_name">{{release}}</field>
|
||||
<field type="TextField" name="helptext">Toaster will run your builds {{help_source}}.</field>
|
||||
</object>
|
||||
'''
|
||||
|
||||
# <!-- Default project layers for each release -->
|
||||
default_layers_template = '''\
|
||||
<object model="orm.releasedefaultlayer" pk="{{rdl_count}}">
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">{{release_id}}</field>
|
||||
<field type="CharField" name="layer_name">{{layer}}</field>
|
||||
</object>
|
||||
'''
|
||||
|
||||
default_layers_preface = '''\
|
||||
<!-- Default layers provided by poky
|
||||
openembedded-core
|
||||
meta-poky
|
||||
meta-yocto-bsp
|
||||
-->
|
||||
'''
|
||||
|
||||
layer_poky_template = '''\
|
||||
<object model="orm.layer" pk="{{layer_id}}">
|
||||
<field type="CharField" name="name">{{layer}}</field>
|
||||
<field type="CharField" name="layer_index_url"></field>
|
||||
<field type="CharField" name="vcs_url">{{vcs_url}}</field>
|
||||
<field type="CharField" name="vcs_web_url">{{vcs_web_url}}</field>
|
||||
<field type="CharField" name="vcs_web_tree_base_url">{{vcs_web_tree_base_url}}</field>
|
||||
<field type="CharField" name="vcs_web_file_base_url">{{vcs_web_file_base_url}}</field>
|
||||
</object>
|
||||
'''
|
||||
|
||||
layer_oe_core_template = '''\
|
||||
<object model="orm.layer" pk="{{layer_id}}">
|
||||
<field type="CharField" name="name">{{layer}}</field>
|
||||
<field type="CharField" name="vcs_url">{{vcs_url}}</field>
|
||||
<field type="CharField" name="vcs_web_url">{{vcs_web_url}}</field>
|
||||
<field type="CharField" name="vcs_web_tree_base_url">{{vcs_web_tree_base_url}}</field>
|
||||
<field type="CharField" name="vcs_web_file_base_url">{{vcs_web_file_base_url}}</field>
|
||||
</object>
|
||||
'''
|
||||
|
||||
layer_version_template = '''\
|
||||
<object model="orm.layer_version" pk="{{lv_count}}">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">{{layer_id}}</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">{{release_id}}</field>
|
||||
<field type="CharField" name="branch">{{branch}}</field>
|
||||
<field type="CharField" name="dirpath">{{dirpath}}</field>
|
||||
</object>
|
||||
'''
|
||||
|
||||
layer_version_HEAD_template = '''\
|
||||
<object model="orm.layer_version" pk="{{lv_count}}">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">{{layer_id}}</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">{{release_id}}</field>
|
||||
<field type="CharField" name="branch">{{branch}}</field>
|
||||
<field type="CharField" name="commit">{{commit}}</field>
|
||||
<field type="CharField" name="dirpath">{{dirpath}}</field>
|
||||
</object>
|
||||
'''
|
||||
|
||||
layer_version_oe_core_template = '''\
|
||||
<object model="orm.layer_version" pk="1">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">1</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">2</field>
|
||||
<field type="CharField" name="local_path">OE-CORE-LAYER-DIR</field>
|
||||
<field type="CharField" name="branch">HEAD</field>
|
||||
<field type="CharField" name="dirpath">meta</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
</object>
|
||||
'''
|
||||
|
||||
epilog_template = '''\
|
||||
</django-objects>
|
||||
'''
|
||||
|
||||
#################################
|
||||
# Helper Routines
|
||||
#
|
||||
|
||||
def print_str(str,fd):
    # Write one string to 'fd'.  A single trailing newline is trimmed
    # first, because print() appends its own and we do not want a blank
    # line in the generated fixture.
    if str.endswith('\n'):
        str = str[:-1]
    print(str,file=fd)
|
||||
|
||||
def print_template(template,params,fd):
    """Expand 'template' into 'fd', substituting each '{{key}}' placeholder
    with params[key].

    Unknown keys are rendered as '?key?' so missing parameters are visible
    in the generated output.  Substituted values may themselves contain
    placeholders; they are expanded too, because the scan restarts from the
    beginning of the line after every replacement.  Empty lines are
    suppressed.
    """
    for line in template.split('\n'):
        p = line.find('{{')
        # BUGFIX: use >= 0 so a placeholder at the very start of a line is
        # substituted ('p > 0' treated position 0 as "not found").
        while p >= 0:
            # BUGFIX: search for the terminator from the placeholder start,
            # so a stray '}}' earlier in the line cannot truncate the key.
            q = line.find('}}', p)
            if q < 0:
                # Unterminated placeholder: leave the rest of the line as-is.
                break
            key = line[p+2:q]
            if key in params:
                line = line[0:p] + params[key] + line[q+2:]
            else:
                line = line[0:p] + '?' + key + '?' + line[q+2:]
            p = line.find('{{')
        if line:
            print(line,file=fd)
|
||||
|
||||
#################################
|
||||
# Generate poky.xml
|
||||
#
|
||||
|
||||
def generate_poky():
    """Generate 'poky.xml', the Toaster fixture file for poky-based setups.

    Walks the module-level 'current_releases' table and emits, in order:
    the bitbake version objects, the release objects, the default layer
    set for each release, and the layer / layer_version objects for every
    default poky layer.  All output goes through the shared template
    expander (print_template / print_str).
    """
    # 'with' guarantees the file is closed even if expansion fails part-way.
    with open('poky.xml','w') as fd:
        params = {}
        params['distro'] = 'poky'
        print_template(prolog_template,params,fd)
        print_str('',fd)

        print_str(' <!-- Bitbake versions which correspond to the metadata release -->',fd)
        for i,release in enumerate(current_releases):
            params = {}
            params['release'] = release[0]
            params['Release'] = release[0]
            params['release_version'] = release[1]
            # BUGFIX: ('HEAD') is just a parenthesized string, so the old
            # test was a substring check; a one-element tuple gives exact
            # name membership.
            if not (params['release'] in ('HEAD',)): # 'master',
                # Release names are stored capitalized; branch/name want
                # the lower-cased form (e.g. 'Honister' -> 'honister').
                params['release'] = params['release'][0].lower() + params['release'][1:]
            params['name'] = params['release']
            params['bitbake_id'] = str(i+1)
            params['branch'] = params['release']
            print_template(bitbakeversion_poky_template,params,fd)
        print_str('',fd)

        print_str('',fd)
        print_str(' <!-- Releases available -->',fd)
        for i,release in enumerate(current_releases):
            params = {}
            params['release'] = release[0]
            params['Release'] = release[0]
            params['release_version'] = release[1]
            # BUGFIX: tuple membership, as above.
            if not (params['release'] in ('HEAD',)): #'master',
                params['release'] = params['release'][0].lower() + params['release'][1:]
            params['h_release'] = '?h={{release}}'
            params['name'] = params['release']
            params['ra_count'] = str(i+1)
            params['branch'] = params['release']

            if 'HEAD' == params['release']:
                params['help_source'] = 'with the version of the Yocto Project you have cloned or downloaded to your computer'
                params['description'] = 'Local Yocto Project'
                params['name'] = 'local'
            else:
                params['help_source'] = 'using the tip of the <a href="https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/{{h_release}}">Yocto Project {{Release}} branch</a>'
                params['description'] = 'Yocto Project {{release_version}} "{{Release}}"'
                if 'master' == params['release']:
                    # master has no '?h=' selector and a fixed description.
                    params['h_release'] = ''
                    params['description'] = 'Yocto Project master'

            print_template(releases_available_template,params,fd)
        print_str('',fd)

        print_str(' <!-- Default project layers for each release -->',fd)
        rdl_count = 1
        for i,release in enumerate(current_releases):
            for j,layer in enumerate(default_poky_layers):
                params = {}
                params['layer'] = layer
                params['release'] = release[0]
                params['Release'] = release[0]
                params['release_version'] = release[1]
                if not (params['release'] in ('master','HEAD')):
                    params['release'] = params['release'][0].lower() + params['release'][1:]
                params['release_id'] = str(i+1)
                params['rdl_count'] = str(rdl_count)
                params['branch'] = params['release']
                print_template(default_layers_template,params,fd)
                rdl_count += 1
        print_str('',fd)

        print_str(default_layers_preface,fd)
        lv_count = 1
        for i,layer in enumerate(default_poky_layers):
            params = {}
            params['layer'] = layer
            params['layer_id'] = str(i+1)
            params['vcs_url'] = 'git://git.yoctoproject.org/poky'
            params['vcs_web_url'] = 'https://git.yoctoproject.org/cgit/cgit.cgi/poky'
            params['vcs_web_tree_base_url'] = 'https://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/%path%?h=%branch%'
            params['vcs_web_file_base_url'] = 'https://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/%path%?h=%branch%'

            if i:
                # Blank separator line between layer stanzas.
                print_str('',fd)
            print_template(layer_poky_template,params,fd)
            # One layer_version object per (layer, release) pair.
            for j,release in enumerate(current_releases):
                params['release'] = release[0]
                params['Release'] = release[0]
                params['release_version'] = release[1]
                if not (params['release'] in ('master','HEAD')):
                    params['release'] = params['release'][0].lower() + params['release'][1:]
                params['release_id'] = str(j+1)
                params['lv_count'] = str(lv_count)
                params['branch'] = params['release']
                params['commit'] = params['release']

                params['dirpath'] = params['layer']
                # BUGFIX: one-element tuple, not a parenthesized string, so
                # only the exact layer name 'openembedded-core' matches
                # (the old substring test also matched e.g. 'core').
                if params['layer'] in ('openembedded-core',): #'openembedded-core',
                    params['dirpath'] = 'meta'

                if 'HEAD' == params['release']:
                    print_template(layer_version_HEAD_template,params,fd)
                else:
                    print_template(layer_version_template,params,fd)
                lv_count += 1

        print_str(epilog_template,fd)
|
||||
|
||||
#################################
|
||||
# Generate oe-core.xml
|
||||
#
|
||||
|
||||
def generate_oe_core():
    """Generate 'oe-core.xml', the Toaster fixture file for bare
    OpenEmbedded-core setups (distro 'nodistro').

    Mirrors generate_poky() but uses the oe-core templates, the
    'default_oe_core_layers' list, and openembedded.org VCS URLs.
    """
    # 'with' guarantees the file is closed even if expansion fails part-way.
    with open('oe-core.xml','w') as fd:
        params = {}
        params['distro'] = 'nodistro'
        print_template(prolog_template,params,fd)
        print_str('',fd)

        print_str(' <!-- Bitbake versions which correspond to the metadata release -->',fd)
        for i,release in enumerate(current_releases):
            params = {}
            params['release'] = release[0]
            params['Release'] = release[0]
            # Column 6 of the release table is the bitbake version string.
            params['bitbakeversion'] = release[6]
            params['release_version'] = release[1]
            # BUGFIX: ('HEAD') is just a parenthesized string, so the old
            # test was a substring check; a one-element tuple gives exact
            # name membership.
            if not (params['release'] in ('HEAD',)): # 'master',
                params['release'] = params['release'][0].lower() + params['release'][1:]
            params['name'] = params['release']
            params['bitbake_id'] = str(i+1)
            params['branch'] = params['release']
            print_template(bitbakeversion_oecore_template,params,fd)
        print_str('',fd)

        print_str(' <!-- Releases available -->',fd)
        for i,release in enumerate(current_releases):
            params = {}
            params['release'] = release[0]
            params['Release'] = release[0]
            params['release_version'] = release[1]
            # BUGFIX: tuple membership, as above.
            if not (params['release'] in ('HEAD',)): #'master',
                params['release'] = params['release'][0].lower() + params['release'][1:]
            params['h_release'] = '?h={{release}}'
            params['name'] = params['release']
            params['ra_count'] = str(i+1)
            params['branch'] = params['release']

            if 'HEAD' == params['release']:
                params['help_source'] = 'with the version of OpenEmbedded that you have cloned or downloaded to your computer'
                params['description'] = 'Local Openembedded'
                params['name'] = 'local'
            else:
                params['help_source'] = 'using the tip of the <a href=\\"https://cgit.openembedded.org/openembedded-core/log/{{h_release}}\\">OpenEmbedded {{Release}}</a> branch'
                params['description'] = 'Openembedded {{Release}}'
                if 'master' == params['release']:
                    # master has no '?h=' selector and a fixed description.
                    params['h_release'] = ''
                    params['description'] = 'OpenEmbedded core master'
                    params['Release'] = params['release']

            print_template(releases_available_template,params,fd)
        print_str('',fd)

        print_str(' <!-- Default layers for each release -->',fd)
        rdl_count = 1
        for i,release in enumerate(current_releases):
            for j,layer in enumerate(default_oe_core_layers):
                params = {}
                params['layer'] = layer
                params['release'] = release[0]
                params['Release'] = release[0]
                params['release_version'] = release[1]
                if not (params['release'] in ('master','HEAD')):
                    params['release'] = params['release'][0].lower() + params['release'][1:]
                params['release_id'] = str(i+1)
                params['rdl_count'] = str(rdl_count)
                params['branch'] = params['release']
                print_template(default_layers_template,params,fd)
                rdl_count += 1
        print_str('',fd)

        print_str('',fd)
        print_str(' <!-- Layer for the Local release -->',fd)
        # NOTE: the original kept an 'lv_count' counter here that was never
        # used -- the oe-core layer_version pk is hard-coded in its
        # template -- so the dead local has been dropped.
        for i,layer in enumerate(default_oe_core_layers):
            params = {}
            params['layer'] = layer
            params['layer_id'] = str(i+1)
            params['vcs_url'] = 'git://git.openembedded.org/openembedded-core'
            params['vcs_web_url'] = 'https://cgit.openembedded.org/openembedded-core'
            params['vcs_web_tree_base_url'] = 'https://cgit.openembedded.org/openembedded-core/tree/%path%?h=%branch%'
            params['vcs_web_file_base_url'] = 'https://cgit.openembedded.org/openembedded-core/tree/%path%?h=%branch%'
            if i:
                # Blank separator line between layer stanzas.
                print_str('',fd)
            print_template(layer_oe_core_template,params,fd)

            print_template(layer_version_oe_core_template,params,fd)
            print_str('',fd)

        print_str(epilog_template,fd)
|
||||
|
||||
#################################
|
||||
# Help
|
||||
#
|
||||
|
||||
def list_releases():
    # Emit a fixed-width table of the known releases: name, release
    # version, bitbake version, and support level (columns 0, 1, 6, 4
    # of each entry in 'current_releases').
    print("Release ReleaseVer BitbakeVer Support Level")
    print("========== =========== ========== ==============================================")
    row_format = "%10s %10s %11s %s"
    for rel in current_releases:
        print(row_format % (rel[0], rel[1], rel[6], rel[4]))
|
||||
|
||||
#################################
|
||||
# main
|
||||
#
|
||||
|
||||
def main(argv):
    """Command-line entry point: parse 'argv' and generate the requested
    fixture file(s), or list the release table.

    Sets the module-level 'verbose' flag from --verbose.
    """
    global verbose

    parser = argparse.ArgumentParser(description='gen_fixtures.py: table generate the fixture files')
    parser.add_argument('--poky', '-p', action='store_const', const='poky', dest='command', help='Generate the poky.xml file')
    parser.add_argument('--oe-core', '-o', action='store_const', const='oe_core', dest='command', help='Generate the oe-core.xml file')
    parser.add_argument('--all', '-a', action='store_const', const='all', dest='command', help='Generate all fixture files')
    parser.add_argument('--list', '-l', action='store_const', const='list', dest='command', help='List the release table')
    parser.add_argument('--verbose', '-v', action='store_true', dest='verbose', help='Enable verbose debugging output')
    # BUGFIX: parse the argv the caller handed us; parse_args() with no
    # argument silently re-read sys.argv and ignored the parameter.
    args = parser.parse_args(argv)

    verbose = args.verbose
    if 'poky' == args.command:
        generate_poky()
    elif 'oe_core' == args.command:
        generate_oe_core()
    elif 'all' == args.command:
        generate_poky()
        generate_oe_core()
    # BUGFIX: removed a second, unreachable "elif 'all' == args.command:"
    # branch that called list_releases() -- it could never run because the
    # identical condition was already handled above.
    elif 'list' == args.command:
        list_releases()
    else:
        print("No command for 'gen_fixtures.py' selected")

if __name__ == '__main__':
    main(sys.argv[1:])
|
||||
@@ -8,9 +8,9 @@
|
||||
|
||||
<!-- Bitbake versions which correspond to the metadata release -->
|
||||
<object model="orm.bitbakeversion" pk="1">
|
||||
<field type="CharField" name="name">kirkstone</field>
|
||||
<field type="CharField" name="name">dunfell</field>
|
||||
<field type="CharField" name="giturl">git://git.openembedded.org/bitbake</field>
|
||||
<field type="CharField" name="branch">1.54</field>
|
||||
<field type="CharField" name="branch">1.46</field>
|
||||
</object>
|
||||
<object model="orm.bitbakeversion" pk="2">
|
||||
<field type="CharField" name="name">HEAD</field>
|
||||
@@ -23,23 +23,18 @@
|
||||
<field type="CharField" name="branch">master</field>
|
||||
</object>
|
||||
<object model="orm.bitbakeversion" pk="4">
|
||||
<field type="CharField" name="name">honister</field>
|
||||
<field type="CharField" name="name">gatesgarth</field>
|
||||
<field type="CharField" name="giturl">git://git.openembedded.org/bitbake</field>
|
||||
<field type="CharField" name="branch">1.52</field>
|
||||
</object>
|
||||
<object model="orm.bitbakeversion" pk="5">
|
||||
<field type="CharField" name="name">hardknott</field>
|
||||
<field type="CharField" name="giturl">git://git.openembedded.org/bitbake</field>
|
||||
<field type="CharField" name="branch">1.50</field>
|
||||
<field type="CharField" name="branch">1.48</field>
|
||||
</object>
|
||||
|
||||
<!-- Releases available -->
|
||||
<object model="orm.release" pk="1">
|
||||
<field type="CharField" name="name">kirkstone</field>
|
||||
<field type="CharField" name="description">Openembedded Kirkstone</field>
|
||||
<field type="CharField" name="name">dunfell</field>
|
||||
<field type="CharField" name="description">Openembedded Dunfell</field>
|
||||
<field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">1</field>
|
||||
<field type="CharField" name="branch_name">kirkstone</field>
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"https://cgit.openembedded.org/openembedded-core/log/?h=kirkstone\">OpenEmbedded Kirkstone</a> branch.</field>
|
||||
<field type="CharField" name="branch_name">dunfell</field>
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"https://cgit.openembedded.org/openembedded-core/log/?h=dunfell\">OpenEmbedded Dunfell</a> branch.</field>
|
||||
</object>
|
||||
<object model="orm.release" pk="2">
|
||||
<field type="CharField" name="name">local</field>
|
||||
@@ -56,18 +51,11 @@
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"https://cgit.openembedded.org/openembedded-core/log/\">OpenEmbedded master</a> branch.</field>
|
||||
</object>
|
||||
<object model="orm.release" pk="4">
|
||||
<field type="CharField" name="name">honister</field>
|
||||
<field type="CharField" name="description">Openembedded Honister</field>
|
||||
<field type="CharField" name="name">gatesgarth</field>
|
||||
<field type="CharField" name="description">Openembedded Gatesgarth</field>
|
||||
<field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">4</field>
|
||||
<field type="CharField" name="branch_name">honister</field>
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"https://cgit.openembedded.org/openembedded-core/log/?h=honister\">OpenEmbedded Honister</a> branch.</field>
|
||||
</object>
|
||||
<object model="orm.release" pk="5">
|
||||
<field type="CharField" name="name">hardknott</field>
|
||||
<field type="CharField" name="description">Openembedded Hardknott</field>
|
||||
<field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">5</field>
|
||||
<field type="CharField" name="branch_name">hardknott</field>
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"https://cgit.openembedded.org/openembedded-core/log/?h=hardknott\">OpenEmbedded Hardknott</a> branch.</field>
|
||||
<field type="CharField" name="branch_name">gatesgarth</field>
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"https://cgit.openembedded.org/openembedded-core/log/?h=gatesgarth\">OpenEmbedded Gatesgarth</a> branch.</field>
|
||||
</object>
|
||||
|
||||
<!-- Default layers for each release -->
|
||||
@@ -87,10 +75,6 @@
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">4</field>
|
||||
<field type="CharField" name="layer_name">openembedded-core</field>
|
||||
</object>
|
||||
<object model="orm.releasedefaultlayer" pk="5">
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">5</field>
|
||||
<field type="CharField" name="layer_name">openembedded-core</field>
|
||||
</object>
|
||||
|
||||
|
||||
<!-- Layer for the Local release -->
|
||||
|
||||
@@ -8,9 +8,9 @@
|
||||
|
||||
<!-- Bitbake versions which correspond to the metadata release -->
|
||||
<object model="orm.bitbakeversion" pk="1">
|
||||
<field type="CharField" name="name">kirkstone</field>
|
||||
<field type="CharField" name="name">dunfell</field>
|
||||
<field type="CharField" name="giturl">git://git.yoctoproject.org/poky</field>
|
||||
<field type="CharField" name="branch">kirkstone</field>
|
||||
<field type="CharField" name="branch">dunfell</field>
|
||||
<field type="CharField" name="dirpath">bitbake</field>
|
||||
</object>
|
||||
<object model="orm.bitbakeversion" pk="2">
|
||||
@@ -26,26 +26,20 @@
|
||||
<field type="CharField" name="dirpath">bitbake</field>
|
||||
</object>
|
||||
<object model="orm.bitbakeversion" pk="4">
|
||||
<field type="CharField" name="name">honister</field>
|
||||
<field type="CharField" name="name">gatesgarth</field>
|
||||
<field type="CharField" name="giturl">git://git.yoctoproject.org/poky</field>
|
||||
<field type="CharField" name="branch">honister</field>
|
||||
<field type="CharField" name="dirpath">bitbake</field>
|
||||
</object>
|
||||
<object model="orm.bitbakeversion" pk="5">
|
||||
<field type="CharField" name="name">hardknott</field>
|
||||
<field type="CharField" name="giturl">git://git.yoctoproject.org/poky</field>
|
||||
<field type="CharField" name="branch">hardknott</field>
|
||||
<field type="CharField" name="branch">gatesgarth</field>
|
||||
<field type="CharField" name="dirpath">bitbake</field>
|
||||
</object>
|
||||
|
||||
|
||||
<!-- Releases available -->
|
||||
<object model="orm.release" pk="1">
|
||||
<field type="CharField" name="name">kirkstone</field>
|
||||
<field type="CharField" name="description">Yocto Project 4.0 "Kirkstone"</field>
|
||||
<field type="CharField" name="name">dunfell</field>
|
||||
<field type="CharField" name="description">Yocto Project 3.1 "Dunfell"</field>
|
||||
<field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">1</field>
|
||||
<field type="CharField" name="branch_name">kirkstone</field>
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=kirkstone">Yocto Project Kirkstone branch</a>.</field>
|
||||
<field type="CharField" name="branch_name">dunfell</field>
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=dunfell">Yocto Project Dunfell branch</a>.</field>
|
||||
</object>
|
||||
<object model="orm.release" pk="2">
|
||||
<field type="CharField" name="name">local</field>
|
||||
@@ -62,18 +56,11 @@
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/">Yocto Project Master branch</a>.</field>
|
||||
</object>
|
||||
<object model="orm.release" pk="4">
|
||||
<field type="CharField" name="name">honister</field>
|
||||
<field type="CharField" name="description">Yocto Project 3.4 "Honister"</field>
|
||||
<field type="CharField" name="name">gatesgarth</field>
|
||||
<field type="CharField" name="description">Yocto Project 3.2 "Gatesgarth"</field>
|
||||
<field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">4</field>
|
||||
<field type="CharField" name="branch_name">honister</field>
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=honister">Yocto Project Honister branch</a>.</field>
|
||||
</object>
|
||||
<object model="orm.release" pk="5">
|
||||
<field type="CharField" name="name">hardknott</field>
|
||||
<field type="CharField" name="description">Yocto Project 3.3 "Hardknott"</field>
|
||||
<field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">5</field>
|
||||
<field type="CharField" name="branch_name">hardknott</field>
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=hardknott">Yocto Project Hardknott branch</a>.</field>
|
||||
<field type="CharField" name="branch_name">gatesgarth</field>
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=gatesgarth">Yocto Project Gatesgarth branch</a>.</field>
|
||||
</object>
|
||||
|
||||
<!-- Default project layers for each release -->
|
||||
@@ -125,18 +112,6 @@
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">4</field>
|
||||
<field type="CharField" name="layer_name">meta-yocto-bsp</field>
|
||||
</object>
|
||||
<object model="orm.releasedefaultlayer" pk="13">
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">5</field>
|
||||
<field type="CharField" name="layer_name">openembedded-core</field>
|
||||
</object>
|
||||
<object model="orm.releasedefaultlayer" pk="14">
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">5</field>
|
||||
<field type="CharField" name="layer_name">meta-poky</field>
|
||||
</object>
|
||||
<object model="orm.releasedefaultlayer" pk="15">
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">5</field>
|
||||
<field type="CharField" name="layer_name">meta-yocto-bsp</field>
|
||||
</object>
|
||||
|
||||
<!-- Default layers provided by poky
|
||||
openembedded-core
|
||||
@@ -155,7 +130,7 @@
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">1</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">1</field>
|
||||
<field type="CharField" name="branch">kirkstone</field>
|
||||
<field type="CharField" name="branch">dunfell</field>
|
||||
<field type="CharField" name="dirpath">meta</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="2">
|
||||
@@ -177,14 +152,7 @@
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">1</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">4</field>
|
||||
<field type="CharField" name="branch">honister</field>
|
||||
<field type="CharField" name="dirpath">meta</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="5">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">1</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">5</field>
|
||||
<field type="CharField" name="branch">hardknott</field>
|
||||
<field type="CharField" name="branch">gatesgarth</field>
|
||||
<field type="CharField" name="dirpath">meta</field>
|
||||
</object>
|
||||
|
||||
@@ -196,14 +164,14 @@
|
||||
<field type="CharField" name="vcs_web_tree_base_url">https://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/%path%?h=%branch%</field>
|
||||
<field type="CharField" name="vcs_web_file_base_url">https://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/%path%?h=%branch%</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="6">
|
||||
<object model="orm.layer_version" pk="5">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">2</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">1</field>
|
||||
<field type="CharField" name="branch">kirkstone</field>
|
||||
<field type="CharField" name="branch">dunfell</field>
|
||||
<field type="CharField" name="dirpath">meta-poky</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="7">
|
||||
<object model="orm.layer_version" pk="6">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">2</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">2</field>
|
||||
@@ -211,25 +179,18 @@
|
||||
<field type="CharField" name="commit">HEAD</field>
|
||||
<field type="CharField" name="dirpath">meta-poky</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="8">
|
||||
<object model="orm.layer_version" pk="7">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">2</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">3</field>
|
||||
<field type="CharField" name="branch">master</field>
|
||||
<field type="CharField" name="dirpath">meta-poky</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="9">
|
||||
<object model="orm.layer_version" pk="8">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">2</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">4</field>
|
||||
<field type="CharField" name="branch">honister</field>
|
||||
<field type="CharField" name="dirpath">meta-poky</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="10">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">2</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">5</field>
|
||||
<field type="CharField" name="branch">hardknott</field>
|
||||
<field type="CharField" name="branch">gatesgarth</field>
|
||||
<field type="CharField" name="dirpath">meta-poky</field>
|
||||
</object>
|
||||
|
||||
@@ -241,14 +202,14 @@
|
||||
<field type="CharField" name="vcs_web_tree_base_url">https://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/%path%?h=%branch%</field>
|
||||
<field type="CharField" name="vcs_web_file_base_url">https://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/%path%?h=%branch%</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="11">
|
||||
<object model="orm.layer_version" pk="9">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">3</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">1</field>
|
||||
<field type="CharField" name="branch">kirkstone</field>
|
||||
<field type="CharField" name="branch">dunfell</field>
|
||||
<field type="CharField" name="dirpath">meta-yocto-bsp</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="12">
|
||||
<object model="orm.layer_version" pk="10">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">3</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">2</field>
|
||||
@@ -256,25 +217,18 @@
|
||||
<field type="CharField" name="commit">HEAD</field>
|
||||
<field type="CharField" name="dirpath">meta-yocto-bsp</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="13">
|
||||
<object model="orm.layer_version" pk="11">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">3</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">3</field>
|
||||
<field type="CharField" name="branch">master</field>
|
||||
<field type="CharField" name="dirpath">meta-yocto-bsp</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="14">
|
||||
<object model="orm.layer_version" pk="12">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">3</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">4</field>
|
||||
<field type="CharField" name="branch">honister</field>
|
||||
<field type="CharField" name="dirpath">meta-yocto-bsp</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="15">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">3</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">5</field>
|
||||
<field type="CharField" name="branch">hardknott</field>
|
||||
<field type="CharField" name="branch">gatesgarth</field>
|
||||
<field type="CharField" name="dirpath">meta-yocto-bsp</field>
|
||||
</object>
|
||||
</django-objects>
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
<field type="CharField" name="value">${TOPDIR}/../sstate-cache</field>
|
||||
</object>
|
||||
<object model="orm.toastersetting" pk="6">
|
||||
<field type="CharField" name="name">DEFCONF_IMAGE_INSTALL:append</field>
|
||||
<field type="CharField" name="name">DEFCONF_IMAGE_INSTALL_append</field>
|
||||
<field type="CharField" name="value"></field>
|
||||
</object>
|
||||
<object model="orm.toastersetting" pk="7">
|
||||
|
||||
@@ -87,13 +87,13 @@ class Command(BaseCommand):
|
||||
|
||||
# update branches; only those that we already have names listed in the
|
||||
# Releases table
|
||||
allowed_branch_names = [rel.branch_name
|
||||
for rel in Release.objects.all()]
|
||||
if len(allowed_branch_names) == 0:
|
||||
whitelist_branch_names = [rel.branch_name
|
||||
for rel in Release.objects.all()]
|
||||
if len(whitelist_branch_names) == 0:
|
||||
raise Exception("Failed to make list of branches to fetch")
|
||||
|
||||
logger.info("Fetching metadata for %s",
|
||||
" ".join(allowed_branch_names))
|
||||
" ".join(whitelist_branch_names))
|
||||
|
||||
# We require a non-empty bb.data, but we can fake it with a dictionary
|
||||
layerindex = layerindexlib.LayerIndex({"DUMMY" : "VALUE"})
|
||||
@@ -101,8 +101,8 @@ class Command(BaseCommand):
|
||||
http_progress = Spinner()
|
||||
http_progress.start()
|
||||
|
||||
if allowed_branch_names:
|
||||
url_branches = ";branch=%s" % ','.join(allowed_branch_names)
|
||||
if whitelist_branch_names:
|
||||
url_branches = ";branch=%s" % ','.join(whitelist_branch_names)
|
||||
else:
|
||||
url_branches = ""
|
||||
layerindex.load_layerindex("%s%s" % (self.apiurl, url_branches))
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user