mirror of https://git.yoctoproject.org/poky
synced 2026-02-02 23:08:43 +01:00

Compare commits: 5.2_M2 ... uninative- (1 commit, a8095c99ab)

.gitignore (vendored, 7 lines changed)
@@ -31,9 +31,4 @@ pull-*/
bitbake/lib/toaster/contrib/tts/backlog.txt
bitbake/lib/toaster/contrib/tts/log/*
bitbake/lib/toaster/contrib/tts/.cache/*
bitbake/lib/bb/tests/runqueue-tests/bitbake-cookerdaemon.log
_toaster_clones/
downloads/
sstate-cache/
toaster.sqlite
.vscode/
bitbake/lib/bb/tests/runqueue-tests/bitbake-cookerdaemon.log
@@ -1,2 +1,2 @@
# Template settings
TEMPLATECONF=${TEMPLATECONF:-meta-poky/conf/templates/default}
TEMPLATECONF=${TEMPLATECONF:-meta-poky/conf}
@@ -41,7 +41,6 @@ Component/Subsystem Maintainers
* devtool: Saul Wold
* eSDK: Saul Wold
* overlayfs: Vyacheslav Yurkov
* Patchtest: Trevor Gamblin

Maintainers needed
------------------
@@ -53,6 +52,8 @@ Maintainers needed
* error reporting system/web UI
* wic
* Patchwork
* Patchtest
* Prelink-cross
* Matchbox
* Sato
* Autobuilder
Makefile (new file, 35 lines)
@@ -0,0 +1,35 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = _build
DESTDIR = final

ifeq ($(shell if which $(SPHINXBUILD) >/dev/null 2>&1; then echo 1; else echo 0; fi),0)
$(error "The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed")
endif

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile.sphinx clean publish

publish: Makefile.sphinx html singlehtml
	rm -rf $(BUILDDIR)/$(DESTDIR)/
	mkdir -p $(BUILDDIR)/$(DESTDIR)/
	cp -r $(BUILDDIR)/html/* $(BUILDDIR)/$(DESTDIR)/
	cp $(BUILDDIR)/singlehtml/index.html $(BUILDDIR)/$(DESTDIR)/singleindex.html
	sed -i -e 's@index.html#@singleindex.html#@g' $(BUILDDIR)/$(DESTDIR)/singleindex.html

clean:
	@rm -rf $(BUILDDIR)

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile.sphinx
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
@@ -16,13 +16,9 @@ which can be found at:
Contributing
------------

Please refer to our contributor guide here: https://docs.yoctoproject.org/dev/contributor-guide/
for full details on how to submit changes.

As a quick guide, patches should be sent to openembedded-core@lists.openembedded.org
The git command to do that would be:

git send-email -M -1 --to openembedded-core@lists.openembedded.org
Please refer to
https://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
for guidelines on how to submit patches.

Mailing list:
SECURITY.md (deleted, 22 lines)
@@ -1,22 +0,0 @@
How to Report a Potential Vulnerability?
========================================

If you would like to report a public issue (for example, one with a released
CVE number), please report it using the
[https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security Security Bugzilla]

If you are dealing with a not-yet released or urgent issue, please send a
message to security AT yoctoproject DOT org, including as many details as
possible: the layer or software module affected, the recipe and its version,
and any example code, if available.

Branches maintained with security fixes
---------------------------------------

See [https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS Stable release and LTS]
for detailed info regarding the policies and maintenance of Stable branches.

The [https://wiki.yoctoproject.org/wiki/Releases Release page] contains a list of all
releases of the Yocto Project. Versions in grey are no longer actively maintained with
security patches, but well-tested patches may still be accepted for them for
significant issues.
@@ -13,24 +13,19 @@ Bitbake plain documentation can be found under the doc directory or its integrat
html version at the Yocto Project website:
https://docs.yoctoproject.org

Bitbake requires Python version 3.8 or newer.

Contributing
------------

Please refer to our contributor guide here: https://docs.yoctoproject.org/contributor-guide/
for full details on how to submit changes.

As a quick guide, patches should be sent to bitbake-devel@lists.openembedded.org
The git command to do that would be:
Please refer to
https://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
for guidelines on how to submit patches; just note that the latter documentation is intended
for OpenEmbedded (and its core), not bitbake patches (bitbake-devel@lists.openembedded.org),
but in general the main guidelines apply. Once the commit(s) have been created, the way to send
the patch is through git-send-email. For example, to send the last commit (HEAD) on the current
branch, type:

git send-email -M -1 --to bitbake-devel@lists.openembedded.org

If you're sending a patch related to the BitBake manual, make sure you copy
the Yocto Project documentation mailing list:

git send-email -M -1 --to bitbake-devel@lists.openembedded.org --cc docs@lists.yoctoproject.org

Mailing list:

https://lists.openembedded.org/g/bitbake-devel
@@ -39,25 +34,10 @@ Source code:

https://git.openembedded.org/bitbake/

Testing
-------
Testing:

Bitbake has a testsuite located in lib/bb/tests/ which aims to try and prevent regressions.
You can run this with "bitbake-selftest". In particular the fetcher is well covered since
it has so many corner cases. The datastore has many tests too. Testing with the testsuite is
recommended before submitting patches, particularly to the fetcher and datastore. We also
appreciate new test cases and may require them for more obscure issues.

To run the tests, "zstd" and "git" must be installed.

The assumption is made that this testsuite is run from an initialized OpenEmbedded build
environment (i.e. `source oe-init-build-env` is used). If this is not the case, run the
testsuite as follows:

export PATH=$(pwd)/bin:$PATH
bin/bitbake-selftest

The testsuite can alternatively be executed using pytest, e.g. obtained from PyPI (in this
case, the PATH is configured automatically):

pytest
@@ -1,24 +0,0 @@
How to Report a Potential Vulnerability?
========================================

If you would like to report a public issue (for example, one with a released
CVE number), please report it using the
[https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security Security Bugzilla].
If you have a patch ready, submit it following the same procedure as any other
patch as described in README.md.

If you are dealing with a not-yet released or urgent issue, please send a
message to security AT yoctoproject DOT org, including as many details as
possible: the layer or software module affected, the recipe and its version,
and any example code, if available.

Branches maintained with security fixes
---------------------------------------

See [https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS Stable release and LTS]
for detailed info regarding the policies and maintenance of Stable branches.

The [https://wiki.yoctoproject.org/wiki/Releases Release page] contains a list of all
releases of the Yocto Project. Versions in grey are no longer actively maintained with
security patches, but well-tested patches may still be accepted for them for
significant issues.
@@ -25,9 +25,10 @@ except RuntimeError as exc:
from bb import cookerdata
from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException

bb.utils.check_system_locale()
if sys.getfilesystemencoding() != "utf-8":
sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")

__version__ = "2.9.1"
__version__ = "2.0.0"

if __name__ == "__main__":
if __version__ != bb.__version__:

@@ -1 +0,0 @@
bitbake-layers
@@ -11,7 +11,6 @@
import os
import sys
import warnings

warnings.simplefilter("default")
import argparse
import logging
@@ -28,7 +27,6 @@ logger = bb.msg.logger_create(myname)

is_dump = myname == 'bitbake-dumpsig'


def find_siginfo(tinfoil, pn, taskname, sigs=None):
result = None
tinfoil.set_event_mask(['bb.event.FindSigInfoResult',
@@ -54,7 +52,6 @@ def find_siginfo(tinfoil, pn, taskname, sigs=None):
sys.exit(2)
return result


def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
""" Find the most recent signature files for the specified PN/task """

@@ -66,23 +63,19 @@ def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
if not sigfiles:
logger.error('No sigdata files found matching %s %s matching either %s or %s' % (pn, taskname, sig1, sig2))
sys.exit(1)
elif sig1 not in sigfiles:
elif not sig1 in sigfiles:
logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig1))
sys.exit(1)
elif sig2 not in sigfiles:
elif not sig2 in sigfiles:
logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig2))
sys.exit(1)

latestfiles = [sigfiles[sig1]['path'], sigfiles[sig2]['path']]
latestfiles = [sigfiles[sig1], sigfiles[sig2]]
else:
sigfiles = find_siginfo(bbhandler, pn, taskname)
latestsigs = sorted(sigfiles.keys(), key=lambda h: sigfiles[h]['time'])[-2:]
if not latestsigs:
filedates = find_siginfo(bbhandler, pn, taskname)
latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-2:]
if not latestfiles:
logger.error('No sigdata files found matching %s %s' % (pn, taskname))
sys.exit(1)
latestfiles = [sigfiles[latestsigs[0]]['path']]
if len(latestsigs) > 1:
latestfiles.append(sigfiles[latestsigs[1]]['path'])

return latestfiles
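Editorial note on the data-structure change in the hunk above: one side of the diff treats the find_siginfo() result as a plain filename-to-timestamp mapping, while the other side expects a dictionary keyed by signature whose values carry 'path' and 'time' fields. A minimal stand-alone sketch of the latter shape, with hypothetical sample data, showing how the two most recent signature files are selected:

    # Hypothetical sample of the signature -> {'path', 'time'} result shape.
    sigfiles = {
        "aa11": {"path": "stamps/foo.do_compile.sigdata.aa11", "time": 100.0},
        "bb22": {"path": "stamps/foo.do_compile.sigdata.bb22", "time": 200.0},
        "cc33": {"path": "stamps/foo.do_compile.sigdata.cc33", "time": 300.0},
    }

    # Pick the two most recent signatures by timestamp, then resolve their
    # paths, mirroring the sorting logic shown in the hunk above.
    latestsigs = sorted(sigfiles.keys(), key=lambda h: sigfiles[h]["time"])[-2:]
    latestfiles = [sigfiles[s]["path"] for s in latestsigs]
    print(latestfiles)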
@@ -95,12 +88,12 @@ def recursecb(key, hash1, hash2):
recout = []
if not hashfiles:
recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
elif hash1 not in hashfiles:
elif not hash1 in hashfiles:
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash1))
elif hash2 not in hashfiles:
elif not hash2 in hashfiles:
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash2))
else:
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1]['path'], hashfiles[hash2]['path'], recursecb, color=color)
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, color=color)
for change in out2:
for line in change.splitlines():
recout.append(' ' + line)
@@ -117,36 +110,36 @@ parser.add_argument('-D', '--debug',

if is_dump:
parser.add_argument("-t", "--task",
help="find the signature data file for the last run of the specified task",
action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
help="find the signature data file for the last run of the specified task",
action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))

parser.add_argument("sigdatafile1",
help="Signature file to dump. Not used when using -t/--task.",
action="store", nargs='?', metavar="sigdatafile")
help="Signature file to dump. Not used when using -t/--task.",
action="store", nargs='?', metavar="sigdatafile")
else:
parser.add_argument('-c', '--color',
help='Colorize the output (where %(metavar)s is %(choices)s)',
choices=['auto', 'always', 'never'], default='auto', metavar='color')
help='Colorize the output (where %(metavar)s is %(choices)s)',
choices=['auto', 'always', 'never'], default='auto', metavar='color')

parser.add_argument('-d', '--dump',
help='Dump the last signature data instead of comparing (equivalent to using bitbake-dumpsig)',
action='store_true')
help='Dump the last signature data instead of comparing (equivalent to using bitbake-dumpsig)',
action='store_true')

parser.add_argument("-t", "--task",
help="find the signature data files for the last two runs of the specified task and compare them",
action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
help="find the signature data files for the last two runs of the specified task and compare them",
action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))

parser.add_argument("-s", "--signature",
help="With -t/--task, specify the signatures to look for instead of taking the last two",
action="store", dest="sigargs", nargs=2, metavar=('fromsig', 'tosig'))
help="With -t/--task, specify the signatures to look for instead of taking the last two",
action="store", dest="sigargs", nargs=2, metavar=('fromsig', 'tosig'))

parser.add_argument("sigdatafile1",
help="First signature file to compare (or signature file to dump, if second not specified). Not used when using -t/--task.",
action="store", nargs='?')
help="First signature file to compare (or signature file to dump, if second not specified). Not used when using -t/--task.",
action="store", nargs='?')

parser.add_argument("sigdatafile2",
help="Second signature file to compare",
action="store", nargs='?')
help="Second signature file to compare",
action="store", nargs='?')

options = parser.parse_args()
if is_dump:
@@ -164,8 +157,7 @@ if options.taskargs:
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(config_only=True)
if not options.dump and options.sigargs:
files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1], options.sigargs[0],
options.sigargs[1])
files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1], options.sigargs[0], options.sigargs[1])
else:
files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1])

@@ -174,8 +166,7 @@ if options.taskargs:
output = bb.siggen.dump_sigfile(files[-1])
else:
if len(files) < 2:
logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (
options.taskargs[0], options.taskargs[1]))
logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (options.taskargs[0], options.taskargs[1]))
sys.exit(1)

# Recurse into signature comparison
@@ -16,7 +16,6 @@ bindir = os.path.dirname(__file__)
topdir = os.path.dirname(bindir)
sys.path[0:0] = [os.path.join(topdir, 'lib')]

import bb.providers
import bb.tinfoil

if __name__ == "__main__":
@@ -26,41 +25,26 @@ if __name__ == "__main__":
parser.add_argument('-u', '--unexpand', help='Do not expand the value (with --value)', action="store_true")
parser.add_argument('-f', '--flag', help='Specify a variable flag to query (with --value)', default=None)
parser.add_argument('--value', help='Only report the value, no history and no variable name', action="store_true")
parser.add_argument('-q', '--quiet', help='Silence bitbake server logging', action="store_true")
parser.add_argument('--ignore-undefined', help='Suppress any errors related to undefined variables', action="store_true")
args = parser.parse_args()

if not args.value:
if args.unexpand:
sys.exit("--unexpand only makes sense with --value")
if args.unexpand and not args.value:
print("--unexpand only makes sense with --value")
sys.exit(1)

if args.flag:
sys.exit("--flag only makes sense with --value")
if args.flag and not args.value:
print("--flag only makes sense with --value")
sys.exit(1)

quiet = args.quiet or args.value
with bb.tinfoil.Tinfoil(tracking=True, setup_logging=not quiet) as tinfoil:
with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
if args.recipe:
tinfoil.prepare(quiet=3 if quiet else 2)
try:
d = tinfoil.parse_recipe(args.recipe)
except bb.providers.NoProvider as e:
sys.exit(str(e))
tinfoil.prepare(quiet=2)
d = tinfoil.parse_recipe(args.recipe)
else:
tinfoil.prepare(quiet=2, config_only=True)
# Expand keys and run anonymous functions to get identical result to
# "bitbake -e"
d = tinfoil.finalizeData()

value = None
d = tinfoil.config_data
if args.flag:
value = d.getVarFlag(args.variable, args.flag, expand=not args.unexpand)
if value is None and not args.ignore_undefined:
sys.exit(f"The flag '{args.flag}' is not defined for variable '{args.variable}'")
else:
value = d.getVar(args.variable, expand=not args.unexpand)
if value is None and not args.ignore_undefined:
sys.exit(f"The variable '{args.variable}' is not defined")
if args.value:
print(str(value if value is not None else ""))
print(str(d.getVarFlag(args.variable, args.flag, expand=(not args.unexpand))))
elif args.value:
print(str(d.getVar(args.variable, expand=(not args.unexpand))))
else:
bb.data.emit_var(args.variable, d=d, all=True)
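Editorial note: the bitbake-getvar hunk above drives bb.tinfoil directly. A minimal sketch of the same idea, using only calls visible in the hunk (Tinfoil, prepare, finalizeData, getVar); it assumes an initialized build environment, the variable name DISTRO is an arbitrary illustrative choice, and error handling is omitted:

    import bb.tinfoil

    # Query a single variable the way the newer bitbake-getvar code path does:
    # config-only parse, then finalize the datastore so anonymous functions
    # have run (matching "bitbake -e" output).
    with bb.tinfoil.Tinfoil(tracking=True, setup_logging=False) as tinfoil:
        tinfoil.prepare(quiet=2, config_only=True)
        d = tinfoil.finalizeData()
        value = d.getVar("DISTRO", expand=True)  # DISTRO is only an example
        print(value if value is not None else "")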
@@ -14,10 +14,6 @@ import sys
import threading
import time
import warnings
import netrc
import json
import statistics
import textwrap
warnings.simplefilter("default")

try:
@@ -40,42 +36,18 @@ except ImportError:
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib'))

import hashserv
import bb.asyncrpc

DEFAULT_ADDRESS = 'unix://./hashserve.sock'
METHOD = 'stress.test.method'

def print_user(u):
print(f"Username: {u['username']}")
if "permissions" in u:
print("Permissions: " + " ".join(u["permissions"]))
if "token" in u:
print(f"Token: {u['token']}")


def main():
def handle_get(args, client):
result = client.get_taskhash(args.method, args.taskhash, all_properties=True)
if not result:
return 0

print(json.dumps(result, sort_keys=True, indent=4))
return 0

def handle_get_outhash(args, client):
result = client.get_outhash(args.method, args.outhash, args.taskhash)
if not result:
return 0

print(json.dumps(result, sort_keys=True, indent=4))
return 0

def handle_stats(args, client):
if args.reset:
s = client.reset_stats()
else:
s = client.get_stats()
print(json.dumps(s, sort_keys=True, indent=4))
pprint.pprint(s)
return 0
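Editorial note: the handlers above all receive an already-connected hashserv client. For reference, a minimal sketch of looking up a unihash with the client calls used in this file (create_client, get_unihash); the address, method name and task hash are placeholders taken from the defaults shown above:

    import hashserv

    address = "unix://./hashserve.sock"   # placeholder, matches DEFAULT_ADDRESS
    method = "stress.test.method"         # placeholder method name
    taskhash = "0" * 64                   # placeholder task hash

    # Connect and ask the server for the unihash of a (method, taskhash) pair,
    # as handle_get and the stress test above do.
    with hashserv.create_client(address) as client:
        unihash = client.get_unihash(method, taskhash)
        print(unihash if unihash else "not found")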
def handle_stress(args, client):
@@ -83,59 +55,47 @@ def main():
nonlocal found_hashes
nonlocal missed_hashes
nonlocal max_time
nonlocal times

with hashserv.create_client(args.address) as client:
for i in range(args.requests):
taskhash = hashlib.sha256()
taskhash.update(args.taskhash_seed.encode('utf-8'))
taskhash.update(str(i).encode('utf-8'))
client = hashserv.create_client(args.address)

start_time = time.perf_counter()
l = client.get_unihash(METHOD, taskhash.hexdigest())
elapsed = time.perf_counter() - start_time
for i in range(args.requests):
taskhash = hashlib.sha256()
taskhash.update(args.taskhash_seed.encode('utf-8'))
taskhash.update(str(i).encode('utf-8'))

with lock:
if l:
found_hashes += 1
else:
missed_hashes += 1
start_time = time.perf_counter()
l = client.get_unihash(METHOD, taskhash.hexdigest())
elapsed = time.perf_counter() - start_time

times.append(elapsed)
pbar.update()
with lock:
if l:
found_hashes += 1
else:
missed_hashes += 1

max_time = max(elapsed, max_time)
pbar.update()

max_time = 0
found_hashes = 0
missed_hashes = 0
lock = threading.Lock()
times = []
total_requests = args.clients * args.requests
start_time = time.perf_counter()
with ProgressBar(total=args.clients * args.requests) as pbar:
with ProgressBar(total=total_requests) as pbar:
threads = [threading.Thread(target=thread_main, args=(pbar, lock), daemon=False) for _ in range(args.clients)]
for t in threads:
t.start()

for t in threads:
t.join()
total_elapsed = time.perf_counter() - start_time

elapsed = time.perf_counter() - start_time
with lock:
mean = statistics.mean(times)
median = statistics.median(times)
stddev = statistics.pstdev(times)

print(f"Number of clients: {args.clients}")
print(f"Requests per client: {args.requests}")
print(f"Number of requests: {len(times)}")
print(f"Total elapsed time: {total_elapsed:.3f}s")
print(f"Total request rate: {len(times)/total_elapsed:.3f} req/s")
print(f"Average request time: {mean:.3f}s")
print(f"Median request time: {median:.3f}s")
print(f"Request time std dev: {stddev:.3f}s")
print(f"Maximum request time: {max(times):.3f}s")
print(f"Minimum request time: {min(times):.3f}s")
print(f"Hashes found: {found_hashes}")
print(f"Hashes missed: {missed_hashes}")
print("%d requests in %.1fs. %.1f requests per second" % (total_requests, elapsed, total_requests / elapsed))
print("Average request time %.8fs" % (elapsed / total_requests))
print("Max request time was %.8fs" % max_time)
print("Found %d hashes, missed %d" % (found_hashes, missed_hashes))

if args.report:
with ProgressBar(total=args.requests) as pbar:
@@ -153,152 +113,12 @@ def main():
with lock:
pbar.update()

def handle_remove(args, client):
where = {k: v for k, v in args.where}
if where:
result = client.remove(where)
print("Removed %d row(s)" % (result["count"]))
else:
print("No query specified")

def handle_clean_unused(args, client):
result = client.clean_unused(args.max_age)
print("Removed %d rows" % (result["count"]))
return 0

def handle_refresh_token(args, client):
r = client.refresh_token(args.username)
print_user(r)

def handle_set_user_permissions(args, client):
r = client.set_user_perms(args.username, args.permissions)
print_user(r)

def handle_get_user(args, client):
r = client.get_user(args.username)
print_user(r)

def handle_get_all_users(args, client):
users = client.get_all_users()
print("{username:20}| {permissions}".format(username="Username", permissions="Permissions"))
print(("-" * 20) + "+" + ("-" * 20))
for u in users:
print("{username:20}| {permissions}".format(username=u["username"], permissions=" ".join(u["permissions"])))

def handle_new_user(args, client):
r = client.new_user(args.username, args.permissions)
print_user(r)

def handle_delete_user(args, client):
r = client.delete_user(args.username)
print_user(r)

def handle_get_db_usage(args, client):
usage = client.get_db_usage()
print(usage)
tables = sorted(usage.keys())
print("{name:20}| {rows:20}".format(name="Table name", rows="Rows"))
print(("-" * 20) + "+" + ("-" * 20))
for t in tables:
print("{name:20}| {rows:<20}".format(name=t, rows=usage[t]["rows"]))
print()

total_rows = sum(t["rows"] for t in usage.values())
print(f"Total rows: {total_rows}")

def handle_get_db_query_columns(args, client):
columns = client.get_db_query_columns()
print("\n".join(sorted(columns)))

def handle_gc_status(args, client):
result = client.gc_status()
if not result["mark"]:
print("No Garbage collection in progress")
return 0

print("Current Mark: %s" % result["mark"])
print("Total hashes to keep: %d" % result["keep"])
print("Total hashes to remove: %s" % result["remove"])
return 0

def handle_gc_mark(args, client):
where = {k: v for k, v in args.where}
result = client.gc_mark(args.mark, where)
print("New hashes marked: %d" % result["count"])
return 0

def handle_gc_sweep(args, client):
result = client.gc_sweep(args.mark)
print("Removed %d rows" % result["count"])
return 0

def handle_unihash_exists(args, client):
result = client.unihash_exists(args.unihash)
if args.quiet:
return 0 if result else 1

print("true" if result else "false")
return 0

def handle_ping(args, client):
times = []
for i in range(1, args.count + 1):
if not args.quiet:
print(f"Ping {i} of {args.count}... ", end="")
start_time = time.perf_counter()
client.ping()
elapsed = time.perf_counter() - start_time
times.append(elapsed)
if not args.quiet:
print(f"{elapsed:.3f}s")

mean = statistics.mean(times)
median = statistics.median(times)
std_dev = statistics.pstdev(times)

if not args.quiet:
print("------------------------")
print(f"Number of pings: {len(times)}")
print(f"Average round trip time: {mean:.3f}s")
print(f"Median round trip time: {median:.3f}s")
print(f"Round trip time std dev: {std_dev:.3f}s")
print(f"Min time is: {min(times):.3f}s")
print(f"Max time is: {max(times):.3f}s")
return 0
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Hash Equivalence Client',
epilog=textwrap.dedent(
"""
Possible ADDRESS options are:
unix://PATH Connect to UNIX domain socket at PATH
ws://HOST[:PORT] Connect to websocket at HOST:PORT (default port is 80)
wss://HOST[:PORT] Connect to secure websocket at HOST:PORT (default port is 443)
HOST:PORT Connect to TCP server at HOST:PORT
"""
),
)
parser = argparse.ArgumentParser(description='Hash Equivalence Client')
parser.add_argument('--address', default=DEFAULT_ADDRESS, help='Server address (default "%(default)s")')
parser.add_argument('--log', default='WARNING', help='Set logging level')
parser.add_argument('--login', '-l', metavar="USERNAME", help="Authenticate as USERNAME")
parser.add_argument('--password', '-p', metavar="TOKEN", help="Authenticate using token TOKEN")
parser.add_argument('--become', '-b', metavar="USERNAME", help="Impersonate user USERNAME (if allowed) when performing actions")
parser.add_argument('--no-netrc', '-n', action="store_false", dest="netrc", help="Do not use .netrc")

subparsers = parser.add_subparsers()

get_parser = subparsers.add_parser('get', help="Get the unihash for a taskhash")
get_parser.add_argument("method", help="Method to query")
get_parser.add_argument("taskhash", help="Task hash to query")
get_parser.set_defaults(func=handle_get)

get_outhash_parser = subparsers.add_parser('get-outhash', help="Get output hash information")
get_outhash_parser.add_argument("method", help="Method to query")
get_outhash_parser.add_argument("outhash", help="Output hash to query")
get_outhash_parser.add_argument("taskhash", help="Task hash to query")
get_outhash_parser.set_defaults(func=handle_get_outhash)

stats_parser = subparsers.add_parser('stats', help='Show server stats')
stats_parser.add_argument('--reset', action='store_true',
help='Reset server stats')
@@ -317,69 +137,6 @@ def main():
help='Include string in outhash')
stress_parser.set_defaults(func=handle_stress)

remove_parser = subparsers.add_parser('remove', help="Remove hash entries")
remove_parser.add_argument("--where", "-w", metavar="KEY VALUE", nargs=2, action="append", default=[],
help="Remove entries from table where KEY == VALUE")
remove_parser.set_defaults(func=handle_remove)

clean_unused_parser = subparsers.add_parser('clean-unused', help="Remove unused database entries")
clean_unused_parser.add_argument("max_age", metavar="SECONDS", type=int, help="Remove unused entries older than SECONDS old")
clean_unused_parser.set_defaults(func=handle_clean_unused)

refresh_token_parser = subparsers.add_parser('refresh-token', help="Refresh auth token")
refresh_token_parser.add_argument("--username", "-u", help="Refresh the token for another user (if authorized)")
refresh_token_parser.set_defaults(func=handle_refresh_token)

set_user_perms_parser = subparsers.add_parser('set-user-perms', help="Set new permissions for user")
set_user_perms_parser.add_argument("--username", "-u", help="Username", required=True)
set_user_perms_parser.add_argument("permissions", metavar="PERM", nargs="*", default=[], help="New permissions")
set_user_perms_parser.set_defaults(func=handle_set_user_permissions)

get_user_parser = subparsers.add_parser('get-user', help="Get user")
get_user_parser.add_argument("--username", "-u", help="Username")
get_user_parser.set_defaults(func=handle_get_user)

get_all_users_parser = subparsers.add_parser('get-all-users', help="List all users")
get_all_users_parser.set_defaults(func=handle_get_all_users)

new_user_parser = subparsers.add_parser('new-user', help="Create new user")
new_user_parser.add_argument("--username", "-u", help="Username", required=True)
new_user_parser.add_argument("permissions", metavar="PERM", nargs="*", default=[], help="New permissions")
new_user_parser.set_defaults(func=handle_new_user)

delete_user_parser = subparsers.add_parser('delete-user', help="Delete user")
delete_user_parser.add_argument("--username", "-u", help="Username", required=True)
delete_user_parser.set_defaults(func=handle_delete_user)

db_usage_parser = subparsers.add_parser('get-db-usage', help="Database Usage")
db_usage_parser.set_defaults(func=handle_get_db_usage)

db_query_columns_parser = subparsers.add_parser('get-db-query-columns', help="Show columns that can be used in database queries")
db_query_columns_parser.set_defaults(func=handle_get_db_query_columns)

gc_status_parser = subparsers.add_parser("gc-status", help="Show garbage collection status")
gc_status_parser.set_defaults(func=handle_gc_status)

gc_mark_parser = subparsers.add_parser('gc-mark', help="Mark hashes to be kept for garbage collection")
gc_mark_parser.add_argument("mark", help="Mark for this garbage collection operation")
gc_mark_parser.add_argument("--where", "-w", metavar="KEY VALUE", nargs=2, action="append", default=[],
help="Keep entries in table where KEY == VALUE")
gc_mark_parser.set_defaults(func=handle_gc_mark)

gc_sweep_parser = subparsers.add_parser('gc-sweep', help="Perform garbage collection and delete any entries that are not marked")
gc_sweep_parser.add_argument("mark", help="Mark for this garbage collection operation")
gc_sweep_parser.set_defaults(func=handle_gc_sweep)

unihash_exists_parser = subparsers.add_parser('unihash-exists', help="Check if a unihash is known to the server")
unihash_exists_parser.add_argument("--quiet", action="store_true", help="Don't print status. Instead, exit with 0 if unihash exists and 1 if it does not")
unihash_exists_parser.add_argument("unihash", help="Unihash to check")
unihash_exists_parser.set_defaults(func=handle_unihash_exists)

ping_parser = subparsers.add_parser('ping', help="Ping server")
ping_parser.add_argument("-n", "--count", type=int, help="Number of pings. Default is %(default)s", default=10)
ping_parser.add_argument("-q", "--quiet", action="store_true", help="Don't print each ping; only print results")
ping_parser.set_defaults(func=handle_ping)

args = parser.parse_args()

logger = logging.getLogger('hashserv')
@@ -393,30 +150,11 @@ def main():
console.setLevel(level)
logger.addHandler(console)

login = args.login
password = args.password

if login is None and args.netrc:
try:
n = netrc.netrc()
auth = n.authenticators(args.address)
if auth is not None:
login, _, password = auth
except FileNotFoundError:
pass
except netrc.NetrcParseError as e:
sys.stderr.write(f"Error parsing {e.filename}:{e.lineno}: {e.msg}\n")

func = getattr(args, 'func', None)
if func:
try:
with hashserv.create_client(args.address, login, password) as client:
if args.become:
client.become_user(args.become)
return func(args, client)
except bb.asyncrpc.InvokeError as e:
print(f"ERROR: {e}")
return 1
client = hashserv.create_client(args.address)

return func(args, client)

return 0
@@ -11,169 +11,56 @@ import logging
import argparse
import sqlite3
import warnings

warnings.simplefilter("default")

sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), "lib"))
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib'))

import hashserv
from hashserv.server import DEFAULT_ANON_PERMS

VERSION = "1.0.0"

DEFAULT_BIND = "unix://./hashserve.sock"
DEFAULT_BIND = 'unix://./hashserve.sock'


def main():
parser = argparse.ArgumentParser(
description="Hash Equivalence Reference Server. Version=%s" % VERSION,
formatter_class=argparse.RawTextHelpFormatter,
epilog="""
The bind address may take one of the following formats:
unix://PATH - Bind to unix domain socket at PATH
ws://ADDRESS:PORT - Bind to websocket on ADDRESS:PORT
ADDRESS:PORT - Bind to raw TCP socket on ADDRESS:PORT
parser = argparse.ArgumentParser(description='Hash Equivalence Reference Server. Version=%s' % VERSION,
epilog='''The bind address is the path to a unix domain socket if it is
prefixed with "unix://". Otherwise, it is an IP address
and port in form ADDRESS:PORT. To bind to all addresses, leave
the ADDRESS empty, e.g. "--bind :8686". To bind to a specific
IPv6 address, enclose the address in "[]", e.g.
"--bind [::1]:8686"'''
)

To bind to all addresses, leave the ADDRESS empty, e.g. "--bind :8686" or
"--bind ws://:8686". To bind to a specific IPv6 address, enclose the address in
"[]", e.g. "--bind [::1]:8686" or "--bind ws://[::1]:8686"

Note that the default Anonymous permissions are designed to not break existing
server instances when upgrading, but are not particularly secure defaults. If
you want to use authentication, it is recommended that you use "--anon-perms
@read" to only give anonymous users read access, or "--anon-perms @none" to
give un-authenticated users no access at all.

Setting "--anon-perms @all" or "--anon-perms @user-admin" is not allowed, since
this would allow anonymous users to manage all users accounts, which is a bad
idea.

If you are using user authentication, you should run your server in websockets
mode with an SSL terminating load balancer in front of it (as this server does
not implement SSL). Otherwise all usernames and passwords will be transmitted
in the clear. When configured this way, clients can connect using a secure
websocket, as in "wss://SERVER:PORT"

The following permissions are supported by the server:

@none - No permissions
@read - The ability to read equivalent hashes from the server
@report - The ability to report equivalent hashes to the server
@db-admin - Manage the hash database(s). This includes cleaning the
database, removing hashes, etc.
@user-admin - The ability to manage user accounts. This includes, creating
users, deleting users, resetting login tokens, and assigning
permissions.
@all - All possible permissions, including any that may be added
in the future
""",
)

parser.add_argument(
"-b",
"--bind",
default=os.environ.get("HASHSERVER_BIND", DEFAULT_BIND),
help='Bind address (default $HASHSERVER_BIND, "%(default)s")',
)
parser.add_argument(
"-d",
"--database",
default=os.environ.get("HASHSERVER_DB", "./hashserv.db"),
help='Database file (default $HASHSERVER_DB, "%(default)s")',
)
parser.add_argument(
"-l",
"--log",
default=os.environ.get("HASHSERVER_LOG_LEVEL", "WARNING"),
help='Set logging level (default $HASHSERVER_LOG_LEVEL, "%(default)s")',
)
parser.add_argument(
"-u",
"--upstream",
default=os.environ.get("HASHSERVER_UPSTREAM", None),
help="Upstream hashserv to pull hashes from ($HASHSERVER_UPSTREAM)",
)
parser.add_argument(
"-r",
"--read-only",
action="store_true",
help="Disallow write operations from clients ($HASHSERVER_READ_ONLY)",
)
parser.add_argument(
"--db-username",
default=os.environ.get("HASHSERVER_DB_USERNAME", None),
help="Database username ($HASHSERVER_DB_USERNAME)",
)
parser.add_argument(
"--db-password",
default=os.environ.get("HASHSERVER_DB_PASSWORD", None),
help="Database password ($HASHSERVER_DB_PASSWORD)",
)
parser.add_argument(
"--anon-perms",
metavar="PERM[,PERM[,...]]",
default=os.environ.get("HASHSERVER_ANON_PERMS", ",".join(DEFAULT_ANON_PERMS)),
help='Permissions to give anonymous users (default $HASHSERVER_ANON_PERMS, "%(default)s")',
)
parser.add_argument(
"--admin-user",
default=os.environ.get("HASHSERVER_ADMIN_USER", None),
help="Create default admin user with name ADMIN_USER ($HASHSERVER_ADMIN_USER)",
)
parser.add_argument(
"--admin-password",
default=os.environ.get("HASHSERVER_ADMIN_PASSWORD", None),
help="Create default admin user with password ADMIN_PASSWORD ($HASHSERVER_ADMIN_PASSWORD)",
)
parser.add_argument(
"--reuseport",
action="store_true",
help="Enable SO_REUSEPORT, allowing multiple servers to bind to the same port for load balancing",
)
parser.add_argument('-b', '--bind', default=DEFAULT_BIND, help='Bind address (default "%(default)s")')
parser.add_argument('-d', '--database', default='./hashserv.db', help='Database file (default "%(default)s")')
parser.add_argument('-l', '--log', default='WARNING', help='Set logging level')
parser.add_argument('-u', '--upstream', help='Upstream hashserv to pull hashes from')
parser.add_argument('-r', '--read-only', action='store_true', help='Disallow write operations from clients')

args = parser.parse_args()

logger = logging.getLogger("hashserv")
logger = logging.getLogger('hashserv')

level = getattr(logging, args.log.upper(), None)
if not isinstance(level, int):
raise ValueError(
"Invalid log level: %s (Try ERROR/WARNING/INFO/DEBUG)" % args.log
)
raise ValueError('Invalid log level: %s' % args.log)

logger.setLevel(level)
console = logging.StreamHandler()
console.setLevel(level)
logger.addHandler(console)

read_only = (os.environ.get("HASHSERVER_READ_ONLY", "0") == "1") or args.read_only
if "," in args.anon_perms:
anon_perms = args.anon_perms.split(",")
else:
anon_perms = args.anon_perms.split()

server = hashserv.create_server(
args.bind,
args.database,
upstream=args.upstream,
read_only=read_only,
db_username=args.db_username,
db_password=args.db_password,
anon_perms=anon_perms,
admin_username=args.admin_user,
admin_password=args.admin_password,
reuseport=args.reuseport,
)
server = hashserv.create_server(args.bind, args.database, upstream=args.upstream, read_only=args.read_only)
server.serve_forever()
return 0


if __name__ == "__main__":
if __name__ == '__main__':
try:
ret = main()
except Exception:
ret = 1
import traceback

traceback.print_exc()
sys.exit(ret)
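Editorial note on one detail of the new option handling above: --anon-perms accepts either a comma-separated or a whitespace-separated permission list. A small stand-alone sketch of that parsing rule (the sample values are only illustrative):

    def parse_anon_perms(raw):
        # Split on commas when present, otherwise on whitespace,
        # mirroring the --anon-perms handling shown above.
        if "," in raw:
            return raw.split(",")
        return raw.split()

    print(parse_anon_perms("@read,@report"))   # ['@read', '@report']
    print(parse_anon_perms("@read @report"))   # ['@read', '@report']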
@@ -18,14 +18,13 @@ import warnings
warnings.simplefilter("default")

bindir = os.path.dirname(__file__)
toolname = os.path.basename(__file__).split(".")[0]
topdir = os.path.dirname(bindir)
sys.path[0:0] = [os.path.join(topdir, 'lib')]

import bb.tinfoil
import bb.msg

logger = bb.msg.logger_create(toolname, sys.stdout)
logger = bb.msg.logger_create('bitbake-layers', sys.stdout)

def main():
parser = argparse.ArgumentParser(
@@ -34,7 +33,7 @@ def main():
add_help=False)
parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
parser.add_argument('-F', '--force', help='Forced execution: can be specified multiple times. -F will force add without recipe parse verification and -FF will additionally force the run without layer parsing.', action='count', default=0)
parser.add_argument('-F', '--force', help='Force add without recipe parse verification', action='store_true')
parser.add_argument('--color', choices=['auto', 'always', 'never'], default='auto', help='Colorize output (where %(metavar)s is %(choices)s)', metavar='COLOR')

global_args, unparsed_args = parser.parse_known_args()
@@ -58,27 +57,22 @@ def main():
level=logger.getEffectiveLevel())

plugins = []
with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
tinfoil.logger.setLevel(logger.getEffectiveLevel())

if global_args.force > 1:
bbpaths = []
else:
tinfoil.prepare(True)
bbpaths = tinfoil.config_data.getVar('BBPATH').split(':')

for path in ([topdir] + bbpaths):
pluginbasepath = {"bitbake-layers":'bblayers', 'bitbake-config-build':'bbconfigbuild'}[toolname]
pluginpath = os.path.join(path, 'lib', pluginbasepath)
tinfoil = bb.tinfoil.Tinfoil(tracking=True)
tinfoil.logger.setLevel(logger.getEffectiveLevel())
try:
tinfoil.prepare(True)
for path in ([topdir] +
tinfoil.config_data.getVar('BBPATH').split(':')):
pluginpath = os.path.join(path, 'lib', 'bblayers')
bb.utils.load_plugins(logger, plugins, pluginpath)

registered = False
for plugin in plugins:
if hasattr(plugin, 'tinfoil_init') and global_args.force <= 1:
plugin.tinfoil_init(tinfoil)
if hasattr(plugin, 'register_commands'):
registered = True
plugin.register_commands(subparsers)
if hasattr(plugin, 'tinfoil_init'):
plugin.tinfoil_init(tinfoil)

if not registered:
logger.error("No commands registered - missing plugins?")
@@ -92,6 +86,8 @@ def main():
tinfoil.config_data.enableTracking()

return args.func(args)
finally:
tinfoil.shutdown()


if __name__ == "__main__":
@@ -1,103 +1,53 @@
#!/usr/bin/env python3
#
# Copyright BitBake Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#

import os
import sys,logging
import argparse
import optparse
import warnings
warnings.simplefilter("default")

sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), "lib"))
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),'lib'))

import prserv
import prserv.serv

VERSION = "2.0.0"
__version__="1.0.0"

PRHOST_DEFAULT="0.0.0.0"
PRHOST_DEFAULT='0.0.0.0'
PRPORT_DEFAULT=8585

def init_logger(logfile, loglevel):
numeric_level = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError("Invalid log level: %s" % loglevel)
FORMAT = "%(asctime)-15s %(message)s"
logging.basicConfig(level=numeric_level, filename=logfile, format=FORMAT)

def main():
parser = argparse.ArgumentParser(
description="BitBake PR Server. Version=%s" % VERSION,
formatter_class=argparse.RawTextHelpFormatter)
parser = optparse.OptionParser(
version="Bitbake PR Service Core version %s, %%prog version %s" % (prserv.__version__, __version__),
usage = "%prog < --start | --stop > [options]")

parser.add_argument(
"-f",
"--file",
default="prserv.sqlite3",
help="database filename (default: prserv.sqlite3)",
)
parser.add_argument(
"-l",
"--log",
default="prserv.log",
help="log filename(default: prserv.log)",
)
parser.add_argument(
"--loglevel",
default="INFO",
help="logging level, i.e. CRITICAL, ERROR, WARNING, INFO, DEBUG",
)
parser.add_argument(
"--start",
action="store_true",
help="start daemon",
)
parser.add_argument(
"--stop",
action="store_true",
help="stop daemon",
)
parser.add_argument(
"--host",
help="ip address to bind",
default=PRHOST_DEFAULT,
)
parser.add_argument(
"--port",
type=int,
default=PRPORT_DEFAULT,
help="port number (default: 8585)",
)
parser.add_argument(
"-r",
"--read-only",
action="store_true",
help="open database in read-only mode",
)
parser.add_argument(
"-u",
"--upstream",
default=os.environ.get("PRSERV_UPSTREAM", None),
help="Upstream PR service (host:port)",
)
parser.add_option("-f", "--file", help="database filename(default: prserv.sqlite3)", action="store",
dest="dbfile", type="string", default="prserv.sqlite3")
parser.add_option("-l", "--log", help="log filename(default: prserv.log)", action="store",
dest="logfile", type="string", default="prserv.log")
parser.add_option("--loglevel", help="logging level, i.e. CRITICAL, ERROR, WARNING, INFO, DEBUG",
action = "store", type="string", dest="loglevel", default = "INFO")
parser.add_option("--start", help="start daemon",
action="store_true", dest="start")
parser.add_option("--stop", help="stop daemon",
action="store_true", dest="stop")
parser.add_option("--host", help="ip address to bind", action="store",
dest="host", type="string", default=PRHOST_DEFAULT)
parser.add_option("--port", help="port number(default: 8585)", action="store",
dest="port", type="int", default=PRPORT_DEFAULT)
parser.add_option("-r", "--read-only", help="open database in read-only mode",
action="store_true")

args = parser.parse_args()
init_logger(os.path.abspath(args.log), args.loglevel)
options, args = parser.parse_args(sys.argv)
prserv.init_logger(os.path.abspath(options.logfile),options.loglevel)

if args.start:
ret=prserv.serv.start_daemon(
args.file,
args.host,
args.port,
os.path.abspath(args.log),
args.read_only,
args.upstream
)
elif args.stop:
ret=prserv.serv.stop_daemon(args.host, args.port)
if options.start:
ret=prserv.serv.start_daemon(options.dbfile, options.host, options.port,os.path.abspath(options.logfile), options.read_only)
elif options.stop:
ret=prserv.serv.stop_daemon(options.host, options.port)
else:
ret=parser.print_help()
return ret
@@ -15,7 +15,6 @@ import unittest
try:
import bb
import hashserv
import prserv
import layerindexlib
except RuntimeError as exc:
sys.exit(str(exc))
@@ -28,12 +27,12 @@ tests = ["bb.tests.codeparser",
"bb.tests.event",
"bb.tests.fetch",
"bb.tests.parse",
"bb.tests.persist_data",
"bb.tests.runqueue",
"bb.tests.siggen",
"bb.tests.utils",
"bb.tests.compression",
"hashserv.tests",
"prserv.tests",
"layerindexlib.tests.layerindexobj",
"layerindexlib.tests.restapi",
"layerindexlib.tests.cooker"]
@@ -12,12 +12,11 @@ warnings.simplefilter("default")
import logging
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))

import bb

bb.utils.check_system_locale()
if sys.getfilesystemencoding() != "utf-8":
sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")

# Users shouldn't be running this code directly
if len(sys.argv) != 11 or not sys.argv[1].startswith("decafbad"):
if len(sys.argv) != 10 or not sys.argv[1].startswith("decafbad"):
print("bitbake-server is meant for internal execution by bitbake itself, please don't use it standalone.")
sys.exit(1)

@@ -29,8 +28,7 @@ logfile = sys.argv[4]
lockname = sys.argv[5]
sockname = sys.argv[6]
timeout = float(sys.argv[7])
profile = bool(int(sys.argv[8]))
xmlrpcinterface = (sys.argv[9], int(sys.argv[10]))
xmlrpcinterface = (sys.argv[8], int(sys.argv[9]))
if xmlrpcinterface[0] == "None":
xmlrpcinterface = (None, xmlrpcinterface[1])

@@ -38,9 +36,9 @@ if xmlrpcinterface[0] == "None":
with open('/dev/null', 'r') as si:
os.dup2(si.fileno(), sys.stdin.fileno())

with open(logfile, 'a+') as so:
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(so.fileno(), sys.stderr.fileno())
so = open(logfile, 'a+')
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(so.fileno(), sys.stderr.fileno())

# Have stdout and stderr be the same so log output matches chronologically
# and there aren't two separate buffers
@@ -51,5 +49,5 @@ logger = logging.getLogger("BitBake")
handler = bb.event.LogHandler()
logger.addHandler(handler)

bb.server.process.execServer(lockfd, readypipeinfd, lockname, sockname, timeout, xmlrpcinterface, profile)
bb.server.process.execServer(lockfd, readypipeinfd, lockname, sockname, timeout, xmlrpcinterface)
@@ -1,7 +1,5 @@
#!/usr/bin/env python3
#
# Copyright BitBake Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#

@@ -21,15 +19,11 @@ import traceback
import queue
import shlex
import subprocess
import fcntl
from multiprocessing import Lock
from threading import Thread

# Remove when we have a minimum of python 3.10
if not hasattr(fcntl, 'F_SETPIPE_SZ'):
fcntl.F_SETPIPE_SZ = 1031

bb.utils.check_system_locale()
if sys.getfilesystemencoding() != "utf-8":
sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")

# Users shouldn't be running this code directly
if len(sys.argv) != 2 or not sys.argv[1].startswith("decafbad"):
@@ -49,6 +43,7 @@ if sys.argv[1].startswith("decafbadbad"):
# updates to log files for use with tail
try:
if sys.stdout.name == '<stdout>':
import fcntl
fl = fcntl.fcntl(sys.stdout.fileno(), fcntl.F_GETFL)
fl |= os.O_SYNC
fcntl.fcntl(sys.stdout.fileno(), fcntl.F_SETFL, fl)
@@ -60,12 +55,6 @@ logger = logging.getLogger("BitBake")

worker_pipe = sys.stdout.fileno()
bb.utils.nonblockingfd(worker_pipe)
# Try to make the pipe buffers larger as it is much more efficient. If we can't
# e.g. out of buffer space (/proc/sys/fs/pipe-user-pages-soft) then just pass over.
try:
fcntl.fcntl(worker_pipe, fcntl.F_SETPIPE_SZ, 512 * 1024)
except:
pass
# Need to guard against multiprocessing being used in child processes
# and multiple processes trying to write to the parent at the same time
worker_pipe_lock = None
@@ -101,21 +90,21 @@ def worker_fire_prepickled(event):
|
||||
worker_thread_exit = False
|
||||
|
||||
def worker_flush(worker_queue):
|
||||
worker_queue_int = bytearray()
|
||||
worker_queue_int = b""
|
||||
global worker_pipe, worker_thread_exit
|
||||
|
||||
while True:
|
||||
try:
|
||||
worker_queue_int.extend(worker_queue.get(True, 1))
|
||||
worker_queue_int = worker_queue_int + worker_queue.get(True, 1)
|
||||
except queue.Empty:
|
||||
pass
|
||||
while (worker_queue_int or not worker_queue.empty()):
|
||||
try:
|
||||
(_, ready, _) = select.select([], [worker_pipe], [], 1)
|
||||
if not worker_queue.empty():
|
||||
worker_queue_int.extend(worker_queue.get())
|
||||
worker_queue_int = worker_queue_int + worker_queue.get()
|
||||
written = os.write(worker_pipe, worker_queue_int)
|
||||
del worker_queue_int[0:written]
|
||||
worker_queue_int = worker_queue_int[written:]
|
||||
except (IOError, OSError) as e:
|
||||
if e.errno != errno.EAGAIN and e.errno != errno.EPIPE:
|
||||
raise
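The change above turns the outgoing buffer into a ``bytearray`` so pending event data can be appended with ``extend()`` and consumed in place with ``del``, instead of rebuilding a bytes object after every partial write. A minimal sketch of that pattern (illustrative only, not part of the patch)::

    buf = bytearray()
    buf.extend(b"<event>payload</event>")
    written = 7          # suppose os.write() accepted only the first 7 bytes
    del buf[0:written]   # discard the written prefix in place; the rest stays queued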
|
||||
@@ -131,10 +120,11 @@ def worker_child_fire(event, d):
|
||||
|
||||
data = b"<event>" + pickle.dumps(event) + b"</event>"
|
||||
try:
|
||||
with bb.utils.lock_timeout(worker_pipe_lock):
|
||||
while(len(data)):
|
||||
written = worker_pipe.write(data)
|
||||
data = data[written:]
|
||||
worker_pipe_lock.acquire()
|
||||
while(len(data)):
|
||||
written = worker_pipe.write(data)
|
||||
data = data[written:]
|
||||
worker_pipe_lock.release()
|
||||
except IOError:
|
||||
sigterm_handler(None, None)
|
||||
raise
|
||||
@@ -153,17 +143,7 @@ def sigterm_handler(signum, frame):
|
||||
os.killpg(0, signal.SIGTERM)
|
||||
sys.exit()
|
||||
|
||||
def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
|
||||
|
||||
fn = runtask['fn']
|
||||
task = runtask['task']
|
||||
taskname = runtask['taskname']
|
||||
taskhash = runtask['taskhash']
|
||||
unihash = runtask['unihash']
|
||||
appends = runtask['appends']
|
||||
layername = runtask['layername']
|
||||
taskdepdata = runtask['taskdepdata']
|
||||
quieterrors = runtask['quieterrors']
|
||||
def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskhash, unihash, appends, taskdepdata, extraconfigdata, quieterrors=False, dry_run_exec=False):
|
||||
# We need to setup the environment BEFORE the fork, since
|
||||
# a fork() or exec*() activates PSEUDO...
|
||||
|
||||
@@ -175,7 +155,8 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
|
||||
uid = os.getuid()
|
||||
gid = os.getgid()
|
||||
|
||||
taskdep = runtask['taskdep']
|
||||
|
||||
taskdep = workerdata["taskdeps"][fn]
|
||||
if 'umask' in taskdep and taskname in taskdep['umask']:
|
||||
umask = taskdep['umask'][taskname]
|
||||
elif workerdata["umask"]:
|
||||
@@ -187,25 +168,25 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
|
||||
except TypeError:
|
||||
pass
|
||||
|
||||
dry_run = cfg.dry_run or runtask['dry_run']
|
||||
dry_run = cfg.dry_run or dry_run_exec
|
||||
|
||||
# We can't use the fakeroot environment in a dry run as it possibly hasn't been built
|
||||
if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not dry_run:
|
||||
fakeroot = True
|
||||
envvars = (runtask['fakerootenv'] or "").split()
|
||||
for key, value in (var.split('=',1) for var in envvars):
|
||||
envvars = (workerdata["fakerootenv"][fn] or "").split()
|
||||
for key, value in (var.split('=') for var in envvars):
|
||||
envbackup[key] = os.environ.get(key)
|
||||
os.environ[key] = value
|
||||
fakeenv[key] = value
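The switch to ``split('=', 1)`` above matters because a fakeroot environment value can itself contain an ``=`` sign, and splitting only on the first occurrence keeps the remainder intact. A small illustration with a made-up variable::

    var = "PSEUDO_OPTS=prefix=/usr"     # hypothetical value containing '='
    key, value = var.split('=', 1)      # -> ('PSEUDO_OPTS', 'prefix=/usr')
    # the old split('=') would yield three fields here and fail to unpack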
|
||||
|
||||
fakedirs = (runtask['fakerootdirs'] or "").split()
|
||||
fakedirs = (workerdata["fakerootdirs"][fn] or "").split()
|
||||
for p in fakedirs:
|
||||
bb.utils.mkdirhier(p)
|
||||
logger.debug2('Running %s:%s under fakeroot, fakedirs: %s' %
|
||||
(fn, taskname, ', '.join(fakedirs)))
|
||||
else:
|
||||
envvars = (runtask['fakerootnoenv'] or "").split()
|
||||
for key, value in (var.split('=',1) for var in envvars):
|
||||
envvars = (workerdata["fakerootnoenv"][fn] or "").split()
|
||||
for key, value in (var.split('=') for var in envvars):
|
||||
envbackup[key] = os.environ.get(key)
|
||||
os.environ[key] = value
|
||||
fakeenv[key] = value
|
||||
@@ -247,16 +228,15 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
|
||||
# Let SIGHUP exit as SIGTERM
|
||||
signal.signal(signal.SIGHUP, sigterm_handler)
|
||||
|
||||
# No stdin & stdout
|
||||
# stdout is used as a status report channel and must not be used by child processes.
|
||||
dumbio = os.open(os.devnull, os.O_RDWR)
|
||||
os.dup2(dumbio, sys.stdin.fileno())
|
||||
os.dup2(dumbio, sys.stdout.fileno())
|
||||
# No stdin
|
||||
newsi = os.open(os.devnull, os.O_RDWR)
|
||||
os.dup2(newsi, sys.stdin.fileno())
|
||||
|
||||
if umask is not None:
|
||||
if umask:
|
||||
os.umask(umask)
|
||||
|
||||
try:
|
||||
bb_cache = bb.cache.NoCache(databuilder)
|
||||
(realfn, virtual, mc) = bb.cache.virtualfn2realfn(fn)
|
||||
the_data = databuilder.mcdata[mc]
|
||||
the_data.setVar("BB_WORKERCONTEXT", "1")
|
||||
@@ -275,14 +255,13 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
|
||||
bb.parse.siggen.set_taskhashes(workerdata["newhashes"])
|
||||
ret = 0
|
||||
|
||||
the_data = databuilder.parseRecipe(fn, appends, layername)
|
||||
the_data = bb_cache.loadDataFull(fn, appends)
|
||||
the_data.setVar('BB_TASKHASH', taskhash)
|
||||
the_data.setVar('BB_UNIHASH', unihash)
|
||||
bb.parse.siggen.setup_datacache_from_datastore(fn, the_data)
|
||||
|
||||
bb.utils.set_process_name("%s:%s" % (the_data.getVar("PN"), taskname.replace("do_", "")))
|
||||
|
||||
if not bb.utils.to_boolean(the_data.getVarFlag(taskname, 'network')):
|
||||
if not the_data.getVarFlag(taskname, 'network', False):
|
||||
if bb.utils.is_local_uid(uid):
|
||||
logger.debug("Attempting to disable network for %s" % taskname)
|
||||
bb.utils.disable_network(uid, gid)
|
||||
@@ -317,10 +296,6 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
|
||||
if not quieterrors:
|
||||
logger.critical(traceback.format_exc())
|
||||
os._exit(1)
|
||||
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
|
||||
try:
|
||||
if dry_run:
|
||||
return 0
|
||||
@@ -362,12 +337,12 @@ class runQueueWorkerPipe():
|
||||
if pipeout:
|
||||
pipeout.close()
|
||||
bb.utils.nonblockingfd(self.input)
|
||||
self.queue = bytearray()
|
||||
self.queue = b""
|
||||
|
||||
def read(self):
|
||||
start = len(self.queue)
|
||||
try:
|
||||
self.queue.extend(self.input.read(512*1024) or b"")
|
||||
self.queue = self.queue + (self.input.read(102400) or b"")
|
||||
except (OSError, IOError) as e:
|
||||
if e.errno != errno.EAGAIN:
|
||||
raise
|
||||
@@ -395,7 +370,7 @@ class BitbakeWorker(object):
|
||||
def __init__(self, din):
|
||||
self.input = din
|
||||
bb.utils.nonblockingfd(self.input)
|
||||
self.queue = bytearray()
|
||||
self.queue = b""
|
||||
self.cookercfg = None
|
||||
self.databuilder = None
|
||||
self.data = None
|
||||
@@ -429,7 +404,7 @@ class BitbakeWorker(object):
|
||||
if len(r) == 0:
|
||||
# EOF on pipe, server must have terminated
|
||||
self.sigterm_exception(signal.SIGTERM, None)
|
||||
self.queue.extend(r)
|
||||
self.queue = self.queue + r
|
||||
except (OSError, IOError):
|
||||
pass
|
||||
if len(self.queue):
|
||||
@@ -449,30 +424,18 @@ class BitbakeWorker(object):
|
||||
while self.process_waitpid():
|
||||
continue
|
||||
|
||||
|
||||
def handle_item(self, item, func):
|
||||
opening_tag = b"<" + item + b">"
|
||||
if not self.queue.startswith(opening_tag):
|
||||
return
|
||||
|
||||
tag_len = len(opening_tag)
|
||||
if len(self.queue) < tag_len + 4:
|
||||
# we need to receive more data
|
||||
return
|
||||
header = self.queue[tag_len:tag_len + 4]
|
||||
payload_len = int.from_bytes(header, 'big')
|
||||
# closing tag has length (tag_len + 1)
|
||||
if len(self.queue) < tag_len * 2 + 1 + payload_len:
|
||||
# we need to receive more data
|
||||
return
|
||||
|
||||
index = self.queue.find(b"</" + item + b">")
|
||||
if index != -1:
|
||||
try:
|
||||
func(self.queue[(tag_len + 4):index])
|
||||
except pickle.UnpicklingError:
|
||||
workerlog_write("Unable to unpickle data: %s\n" % ":".join("{:02x}".format(c) for c in self.queue))
|
||||
raise
|
||||
self.queue = self.queue[(index + len(b"</") + len(item) + len(b">")):]
|
||||
if self.queue.startswith(b"<" + item + b">"):
|
||||
index = self.queue.find(b"</" + item + b">")
|
||||
while index != -1:
|
||||
try:
|
||||
func(self.queue[(len(item) + 2):index])
|
||||
except pickle.UnpicklingError:
|
||||
workerlog_write("Unable to unpickle data: %s\n" % ":".join("{:02x}".format(c) for c in self.queue))
|
||||
raise
|
||||
self.queue = self.queue[(index + len(item) + 3):]
|
||||
index = self.queue.find(b"</" + item + b">")
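The rewritten ``handle_item()`` above expects each message as an opening tag, a four-byte big-endian payload length, the pickled payload, and a closing tag, which lets an incomplete message be detected without scanning the whole buffer. A hedged sketch of how a sender could build such a frame (the helper name and payload are illustrative, not taken from the patch)::

    import pickle

    def frame_message(item, payload_obj):
        payload = pickle.dumps(payload_obj)
        header = len(payload).to_bytes(4, "big")    # 4-byte big-endian length
        return b"<" + item + b">" + header + payload + b"</" + item + b">"

    msg = frame_message(b"event", {"task": "do_compile"})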
|
||||
|
||||
def handle_cookercfg(self, data):
|
||||
self.cookercfg = pickle.loads(data)
|
||||
@@ -492,7 +455,6 @@ class BitbakeWorker(object):
|
||||
for mc in self.databuilder.mcdata:
|
||||
self.databuilder.mcdata[mc].setVar("PRSERV_HOST", self.workerdata["prhost"])
|
||||
self.databuilder.mcdata[mc].setVar("BB_HASHSERVE", self.workerdata["hashservaddr"])
|
||||
self.databuilder.mcdata[mc].setVar("__bbclasstype", "recipe")
|
||||
|
||||
def handle_newtaskhashes(self, data):
|
||||
self.workerdata["newhashes"] = pickle.loads(data)
|
||||
@@ -510,15 +472,11 @@ class BitbakeWorker(object):
|
||||
sys.exit(0)
|
||||
|
||||
def handle_runtask(self, data):
|
||||
runtask = pickle.loads(data)
|
||||
|
||||
fn = runtask['fn']
|
||||
task = runtask['task']
|
||||
taskname = runtask['taskname']
|
||||
|
||||
fn, task, taskname, taskhash, unihash, quieterrors, appends, taskdepdata, dry_run_exec = pickle.loads(data)
|
||||
workerlog_write("Handling runtask %s %s %s\n" % (task, fn, taskname))
|
||||
|
||||
pid, pipein, pipeout = fork_off_task(self.cookercfg, self.data, self.databuilder, self.workerdata, self.extraconfigdata, runtask)
|
||||
pid, pipein, pipeout = fork_off_task(self.cookercfg, self.data, self.databuilder, self.workerdata, fn, task, taskname, taskhash, unihash, appends, taskdepdata, self.extraconfigdata, quieterrors, dry_run_exec)
|
||||
|
||||
self.build_pids[pid] = task
|
||||
self.build_pipes[pid] = runQueueWorkerPipe(pipein, pipeout)
|
||||
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -24,17 +22,15 @@ warnings.simplefilter("default")
|
||||
version = 1.0
|
||||
|
||||
|
||||
git_cmd = ['git', '-c', 'safe.bareRepository=all']
|
||||
|
||||
def main():
|
||||
if sys.version_info < (3, 4, 0):
|
||||
sys.exit('Python 3.4 or greater is required')
|
||||
|
||||
git_dir = check_output(git_cmd + ['rev-parse', '--git-dir']).rstrip()
|
||||
git_dir = check_output(['git', 'rev-parse', '--git-dir']).rstrip()
|
||||
shallow_file = os.path.join(git_dir, 'shallow')
|
||||
if os.path.exists(shallow_file):
|
||||
try:
|
||||
check_output(git_cmd + ['fetch', '--unshallow'])
|
||||
check_output(['git', 'fetch', '--unshallow'])
|
||||
except subprocess.CalledProcessError:
|
||||
try:
|
||||
os.unlink(shallow_file)
|
||||
@@ -43,21 +39,21 @@ def main():
|
||||
raise
|
||||
|
||||
args = process_args()
|
||||
revs = check_output(git_cmd + ['rev-list'] + args.revisions).splitlines()
|
||||
revs = check_output(['git', 'rev-list'] + args.revisions).splitlines()
|
||||
|
||||
make_shallow(shallow_file, args.revisions, args.refs)
|
||||
|
||||
ref_revs = check_output(git_cmd + ['rev-list'] + args.refs).splitlines()
|
||||
ref_revs = check_output(['git', 'rev-list'] + args.refs).splitlines()
|
||||
remaining_history = set(revs) & set(ref_revs)
|
||||
for rev in remaining_history:
|
||||
if check_output(git_cmd + ['rev-parse', '{}^@'.format(rev)]):
|
||||
if check_output(['git', 'rev-parse', '{}^@'.format(rev)]):
|
||||
sys.exit('Error: %s was not made shallow' % rev)
|
||||
|
||||
filter_refs(args.refs)
|
||||
|
||||
if args.shrink:
|
||||
shrink_repo(git_dir)
|
||||
subprocess.check_call(git_cmd + ['fsck', '--unreachable'])
|
||||
subprocess.check_call(['git', 'fsck', '--unreachable'])
|
||||
|
||||
|
||||
def process_args():
|
||||
@@ -74,12 +70,12 @@ def process_args():
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.refs:
|
||||
args.refs = check_output(git_cmd + ['rev-parse', '--symbolic-full-name'] + args.refs).splitlines()
|
||||
args.refs = check_output(['git', 'rev-parse', '--symbolic-full-name'] + args.refs).splitlines()
|
||||
else:
|
||||
args.refs = get_all_refs(lambda r, t, tt: t == 'commit' or tt == 'commit')
|
||||
|
||||
args.refs = list(filter(lambda r: not r.endswith('/HEAD'), args.refs))
|
||||
args.revisions = check_output(git_cmd + ['rev-parse'] + ['%s^{}' % i for i in args.revisions]).splitlines()
|
||||
args.revisions = check_output(['git', 'rev-parse'] + ['%s^{}' % i for i in args.revisions]).splitlines()
|
||||
return args
|
||||
|
||||
|
||||
@@ -97,7 +93,7 @@ def make_shallow(shallow_file, revisions, refs):
|
||||
|
||||
def get_all_refs(ref_filter=None):
|
||||
"""Return all the existing refs in this repository, optionally filtering the refs."""
|
||||
ref_output = check_output(git_cmd + ['for-each-ref', '--format=%(refname)\t%(objecttype)\t%(*objecttype)'])
|
||||
ref_output = check_output(['git', 'for-each-ref', '--format=%(refname)\t%(objecttype)\t%(*objecttype)'])
|
||||
ref_split = [tuple(iter_extend(l.rsplit('\t'), 3)) for l in ref_output.splitlines()]
|
||||
if ref_filter:
|
||||
ref_split = (e for e in ref_split if ref_filter(*e))
|
||||
@@ -115,8 +111,8 @@ def filter_refs(refs):
|
||||
all_refs = get_all_refs()
|
||||
to_remove = set(all_refs) - set(refs)
|
||||
if to_remove:
|
||||
check_output(git_cmd + ['update-ref', '--no-deref', '--stdin', '-z'],
|
||||
input=''.join('delete ' + l + '\0\0' for l in to_remove))
|
||||
check_output(['xargs', '-0', '-n', '1', 'git', 'update-ref', '-d', '--no-deref'],
|
||||
input=''.join(l + '\0' for l in to_remove))
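The new call above feeds all deletions to a single ``git update-ref --no-deref --stdin -z`` invocation rather than spawning one ``git`` process per ref through ``xargs``. With ``-z`` each field is NUL-terminated, and leaving the old-value field empty (hence the doubled NUL) asks git not to verify the ref's current value; a small sketch of the input being assembled, using placeholder ref names::

    refs_to_drop = ["refs/heads/old-branch", "refs/tags/v0.1"]   # placeholders
    stdin_data = ''.join('delete ' + ref + '\0\0' for ref in refs_to_drop)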
|
||||
|
||||
|
||||
def follow_history_intersections(revisions, refs):
|
||||
@@ -128,7 +124,7 @@ def follow_history_intersections(revisions, refs):
|
||||
if rev in seen:
|
||||
continue
|
||||
|
||||
parents = check_output(git_cmd + ['rev-parse', '%s^@' % rev]).splitlines()
|
||||
parents = check_output(['git', 'rev-parse', '%s^@' % rev]).splitlines()
|
||||
|
||||
yield rev
|
||||
seen.add(rev)
|
||||
@@ -136,12 +132,12 @@ def follow_history_intersections(revisions, refs):
|
||||
if not parents:
|
||||
continue
|
||||
|
||||
check_refs = check_output(git_cmd + ['merge-base', '--independent'] + sorted(refs)).splitlines()
|
||||
check_refs = check_output(['git', 'merge-base', '--independent'] + sorted(refs)).splitlines()
|
||||
for parent in parents:
|
||||
for ref in check_refs:
|
||||
print("Checking %s vs %s" % (parent, ref))
|
||||
try:
|
||||
merge_base = check_output(git_cmd + ['merge-base', parent, ref]).rstrip()
|
||||
merge_base = check_output(['git', 'merge-base', parent, ref]).rstrip()
|
||||
except subprocess.CalledProcessError:
|
||||
continue
|
||||
else:
|
||||
@@ -161,14 +157,14 @@ def iter_except(func, exception, start=None):
|
||||
|
||||
def shrink_repo(git_dir):
|
||||
"""Shrink the newly shallow repository, removing the unreachable objects."""
|
||||
subprocess.check_call(git_cmd + ['reflog', 'expire', '--expire-unreachable=now', '--all'])
|
||||
subprocess.check_call(git_cmd + ['repack', '-ad'])
|
||||
subprocess.check_call(['git', 'reflog', 'expire', '--expire-unreachable=now', '--all'])
|
||||
subprocess.check_call(['git', 'repack', '-ad'])
|
||||
try:
|
||||
os.unlink(os.path.join(git_dir, 'objects', 'info', 'alternates'))
|
||||
except OSError as exc:
|
||||
if exc.errno != errno.ENOENT:
|
||||
raise
|
||||
subprocess.check_call(git_cmd + ['prune', '--expire', 'now'])
|
||||
subprocess.check_call(['git', 'prune', '--expire', 'now'])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -84,7 +84,7 @@ webserverStartAll()
|
||||
echo "Starting webserver..."
|
||||
|
||||
$MANAGE runserver --noreload "$ADDR_PORT" \
|
||||
</dev/null >>${TOASTER_LOGS_DIR}/web.log 2>&1 \
|
||||
</dev/null >>${BUILDDIR}/toaster_web.log 2>&1 \
|
||||
& echo $! >${BUILDDIR}/.toastermain.pid
|
||||
|
||||
sleep 1
|
||||
@@ -181,14 +181,6 @@ WEBSERVER=1
|
||||
export TOASTER_BUILDSERVER=1
|
||||
ADDR_PORT="localhost:8000"
|
||||
TOASTERDIR=`dirname $BUILDDIR`
|
||||
# ${BUILDDIR}/toaster_logs/ became the default location for toaster logs
|
||||
# This is needed for implemented django-log-viewer: https://pypi.org/project/django-log-viewer/
|
||||
# If the directory does not exist, create it.
|
||||
TOASTER_LOGS_DIR="${BUILDDIR}/toaster_logs/"
|
||||
if [ ! -d $TOASTER_LOGS_DIR ]
|
||||
then
|
||||
mkdir $TOASTER_LOGS_DIR
|
||||
fi
|
||||
unset CMD
|
||||
for param in $*; do
|
||||
case $param in
|
||||
@@ -307,7 +299,7 @@ case $CMD in
|
||||
export BITBAKE_UI='toasterui'
|
||||
if [ $TOASTER_BUILDSERVER -eq 1 ] ; then
|
||||
$MANAGE runbuilds \
|
||||
</dev/null >>${TOASTER_LOGS_DIR}/toaster_runbuilds.log 2>&1 \
|
||||
</dev/null >>${BUILDDIR}/toaster_runbuilds.log 2>&1 \
|
||||
& echo $! >${BUILDDIR}/.runbuilds.pid
|
||||
else
|
||||
echo "Toaster build server not started."
|
||||
|
||||
@@ -30,23 +30,79 @@ sys.path.insert(0, join(dirname(dirname(abspath(__file__))), 'lib'))
|
||||
|
||||
import bb.cooker
|
||||
from bb.ui import toasterui
|
||||
from bb.ui import eventreplay
|
||||
|
||||
class EventPlayer:
|
||||
"""Emulate a connection to a bitbake server."""
|
||||
|
||||
def __init__(self, eventfile, variables):
|
||||
self.eventfile = eventfile
|
||||
self.variables = variables
|
||||
self.eventmask = []
|
||||
|
||||
def waitEvent(self, _timeout):
|
||||
"""Read event from the file."""
|
||||
line = self.eventfile.readline().strip()
|
||||
if not line:
|
||||
return
|
||||
try:
|
||||
event_str = json.loads(line)['vars'].encode('utf-8')
|
||||
event = pickle.loads(codecs.decode(event_str, 'base64'))
|
||||
event_name = "%s.%s" % (event.__module__, event.__class__.__name__)
|
||||
if event_name not in self.eventmask:
|
||||
return
|
||||
return event
|
||||
except ValueError as err:
|
||||
print("Failed loading ", line)
|
||||
raise err
|
||||
|
||||
def runCommand(self, command_line):
|
||||
"""Emulate running a command on the server."""
|
||||
name = command_line[0]
|
||||
|
||||
if name == "getVariable":
|
||||
var_name = command_line[1]
|
||||
variable = self.variables.get(var_name)
|
||||
if variable:
|
||||
return variable['v'], None
|
||||
return None, "Missing variable %s" % var_name
|
||||
|
||||
elif name == "getAllKeysWithFlags":
|
||||
dump = {}
|
||||
flaglist = command_line[1]
|
||||
for key, val in self.variables.items():
|
||||
try:
|
||||
if not key.startswith("__"):
|
||||
dump[key] = {
|
||||
'v': val['v'],
|
||||
'history' : val['history'],
|
||||
}
|
||||
for flag in flaglist:
|
||||
dump[key][flag] = val[flag]
|
||||
except Exception as err:
|
||||
print(err)
|
||||
return (dump, None)
|
||||
|
||||
elif name == 'setEventMask':
|
||||
self.eventmask = command_line[-1]
|
||||
return True, None
|
||||
|
||||
else:
|
||||
raise Exception("Command %s not implemented" % command_line[0])
|
||||
|
||||
def getEventHandle(self):
|
||||
"""
|
||||
This method is called by toasterui.
|
||||
The return value is passed to self.runCommand but not used there.
|
||||
"""
|
||||
pass
|
||||
|
||||
def main(argv):
|
||||
with open(argv[-1]) as eventfile:
|
||||
# load variables from the first line
|
||||
variables = None
|
||||
while line := eventfile.readline().strip():
|
||||
try:
|
||||
variables = json.loads(line)['allvariables']
|
||||
break
|
||||
except (KeyError, json.JSONDecodeError):
|
||||
continue
|
||||
if not variables:
|
||||
sys.exit("Cannot find allvariables entry in event log file %s" % argv[-1])
|
||||
eventfile.seek(0)
|
||||
variables = json.loads(eventfile.readline().strip())['allvariables']
|
||||
|
||||
params = namedtuple('ConfigParams', ['observe_only'])(True)
|
||||
player = eventreplay.EventPlayer(eventfile, variables)
|
||||
player = EventPlayer(eventfile, variables)
|
||||
|
||||
return toasterui.main(player, player, params)
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
|
||||
FROM alpine:3.13.1
|
||||
|
||||
RUN apk add --no-cache python3 libgcc
|
||||
RUN apk add --no-cache python3
|
||||
|
||||
COPY bin/bitbake-hashserv /opt/bbhashserv/bin/
|
||||
COPY lib/hashserv /opt/bbhashserv/lib/hashserv/
|
||||
|
||||
@@ -40,7 +40,7 @@ set cpo&vim
|
||||
|
||||
let s:maxoff = 50 " maximum number of lines to look backwards for ()
|
||||
|
||||
function! GetBBPythonIndent(lnum)
|
||||
function GetPythonIndent(lnum)
|
||||
|
||||
" If this line is explicitly joined: If the previous line was also joined,
|
||||
" line it up with that one, otherwise add two 'shiftwidth'
|
||||
@@ -257,7 +257,7 @@ let b:did_indent = 1
|
||||
setlocal indentkeys+=0\"
|
||||
|
||||
|
||||
function! BitbakeIndent(lnum)
|
||||
function BitbakeIndent(lnum)
|
||||
if !has('syntax_items')
|
||||
return -1
|
||||
endif
|
||||
@@ -315,7 +315,7 @@ function! BitbakeIndent(lnum)
|
||||
endif
|
||||
|
||||
if index(["bbPyDefRegion", "bbPyFuncRegion"], name) != -1
|
||||
let ret = GetBBPythonIndent(a:lnum)
|
||||
let ret = GetPythonIndent(a:lnum)
|
||||
" Should normally always be indented by at least one shiftwidth; but allow
|
||||
" return of -1 (defer to autoindent) or -2 (force indent to 0)
|
||||
if ret == 0
|
||||
|
||||
@@ -63,14 +63,13 @@ syn region bbVarFlagFlag matchgroup=bbArrayBrackets start="\[" end="\]\s*
|
||||
|
||||
" Includes and requires
|
||||
syn keyword bbInclude inherit include require contained
|
||||
syn match bbIncludeRest ".*$" contained contains=bbString,bbVarDeref,bbVarPyValue
|
||||
syn match bbIncludeRest ".*$" contained contains=bbString,bbVarDeref
|
||||
syn match bbIncludeLine "^\(inherit\|include\|require\)\s\+" contains=bbInclude nextgroup=bbIncludeRest
|
||||
|
||||
" Add taks and similar
|
||||
syn keyword bbStatement addtask deltask addhandler after before EXPORT_FUNCTIONS contained
|
||||
syn match bbStatementRest /[^\\]*$/ skipwhite contained contains=bbStatement,bbVarDeref,bbVarPyValue
|
||||
syn region bbStatementRestCont start=/.*\\$/ end=/^[^\\]*$/ contained contains=bbStatement,bbVarDeref,bbVarPyValue,bbContinue keepend
|
||||
syn match bbStatementLine "^\(addtask\|deltask\|addhandler\|after\|before\|EXPORT_FUNCTIONS\)\s\+" contains=bbStatement nextgroup=bbStatementRest,bbStatementRestCont
|
||||
syn match bbStatementRest ".*$" skipwhite contained contains=bbStatement
|
||||
syn match bbStatementLine "^\(addtask\|deltask\|addhandler\|after\|before\|EXPORT_FUNCTIONS\)\s\+" contains=bbStatement nextgroup=bbStatementRest
|
||||
|
||||
" OE Important Functions
|
||||
syn keyword bbOEFunctions do_fetch do_unpack do_patch do_configure do_compile do_stage do_install do_package contained
|
||||
@@ -123,7 +122,6 @@ hi def link bbPyFlag Type
|
||||
hi def link bbPyDef Statement
|
||||
hi def link bbStatement Statement
|
||||
hi def link bbStatementRest Identifier
|
||||
hi def link bbStatementRestCont Identifier
|
||||
hi def link bbOEFunctions Special
|
||||
hi def link bbVarPyValue PreProc
|
||||
hi def link bbOverrideOperator Operator
|
||||
|
||||
@@ -47,8 +47,8 @@ To install all required packages run:
|
||||
|
||||
To build the documentation locally, run:
|
||||
|
||||
$ cd doc
|
||||
$ make html
|
||||
$ cd documentation
|
||||
$ make -f Makefile.sphinx html
|
||||
|
||||
The resulting HTML index page will be _build/html/index.html, and you
|
||||
can browse your own copy of the locally generated documentation with
|
||||
|
||||
9
bitbake/doc/_templates/footer.html
vendored
@@ -1,9 +0,0 @@
|
||||
<footer>
|
||||
<hr/>
|
||||
<div role="contentinfo">
|
||||
<p>© Copyright {{ copyright }}
|
||||
<br>Last updated on {{ last_updated }} from the <a href="https://git.openembedded.org/bitbake/">bitbake</a> git repository.
|
||||
</p>
|
||||
</div>
|
||||
</footer>
|
||||
|
||||
@@ -552,8 +552,8 @@ through dependency chains are more complex and are generally
|
||||
accomplished with a Python function. The code in
|
||||
``meta/lib/oe/sstatesig.py`` shows two examples of this and also
|
||||
illustrates how you can insert your own policy into the system if so
|
||||
desired. This file defines the basic signature generator
|
||||
OpenEmbedded-Core uses: "OEBasicHash". By default, there
|
||||
desired. This file defines the two basic signature generators
|
||||
OpenEmbedded-Core uses: "OEBasic" and "OEBasicHash". By default, there
|
||||
is a dummy "noop" signature handler enabled in BitBake. This means that
|
||||
behavior is unchanged from previous versions. ``OE-Core`` uses the
|
||||
"OEBasicHash" signature handler by default through this setting in the
|
||||
@@ -561,13 +561,14 @@ behavior is unchanged from previous versions. ``OE-Core`` uses the

BB_SIGNATURE_HANDLER ?= "OEBasicHash"

The main feature of the "OEBasicHash" :term:`BB_SIGNATURE_HANDLER` is that
it adds the task hash to the stamp files. Thanks to this, any metadata
change will change the task hash, automatically causing the task to be run
again. This removes the need to bump :term:`PR` values, and changes to
metadata automatically ripple across the build.
The "OEBasicHash" :term:`BB_SIGNATURE_HANDLER` is the same as the "OEBasic"
version but adds the task hash to the stamp files. This results in any
metadata change that changes the task hash, automatically causing the
task to be run again. This removes the need to bump
:term:`PR` values, and changes to metadata automatically
ripple across the build.
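As a rough illustration of why hashing the metadata makes re-execution automatic (a deliberately simplified sketch, not BitBake's actual "OEBasicHash" code), consider::

    import hashlib

    def task_signature(task_metadata, dep_hashes):
        # Mix the task's own metadata with its dependencies' hashes, so a
        # change to either produces a different signature and a stale stamp.
        h = hashlib.sha256()
        h.update(task_metadata.encode("utf-8"))
        for dep in sorted(dep_hashes):
            h.update(dep.encode("utf-8"))
        return h.hexdigest()

    old_sig = task_signature("do_compile() { oe_runmake; }", ["deadbeef"])
    new_sig = task_signature("do_compile() { oe_runmake -j4; }", ["deadbeef"])
    assert old_sig != new_sig   # the edited task no longer matches its stamp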

It is also worth noting that the end result of signature
It is also worth noting that the end result of these signature
generators is to make some dependency and hash information available to
the build. This information includes:

@@ -586,11 +587,10 @@ or possibly those defined in the metadata/signature handler itself. The
simplest parameter to pass is "none", which causes a set of signature
information to be written out into ``STAMPS_DIR`` corresponding to the
targets specified. The other currently available parameter is
"printdiff", which causes BitBake to try to establish the most recent
"printdiff", which causes BitBake to try to establish the closest
signature match it can (e.g. in the sstate cache) and then run
compare the matched signatures to determine the stamps and delta
where these two stamp trees diverge. This can be used to determine why
tasks need to be re-run in situations where that is not expected.
``bitbake-diffsigs`` over the matches to determine the stamps and delta
where these two stamp trees diverge.

.. note::
|
||||
|
||||
@@ -657,7 +657,7 @@ builds are when execute, bitbake also supports user defined
|
||||
configuration of the `Python
|
||||
logging <https://docs.python.org/3/library/logging.html>`__ facilities
|
||||
through the :term:`BB_LOGCONFIG` variable. This
|
||||
variable defines a JSON or YAML `logging
|
||||
variable defines a json or yaml `logging
|
||||
configuration <https://docs.python.org/3/library/logging.config.html>`__
|
||||
that will be intelligently merged into the default configuration. The
|
||||
logging configuration is merged using the following rules:
|
||||
@@ -691,9 +691,9 @@ logging configuration is merged using the following rules:
|
||||
adds a filter called ``BitBake.defaultFilter``, both filters will be
|
||||
applied to the logger
|
||||
|
||||
As a first example, you can create a ``hashequiv.json`` user logging
|
||||
configuration file to log all Hash Equivalence related messages of ``VERBOSE``
|
||||
or higher priority to a file called ``hashequiv.log``::
|
||||
As an example, consider the following user logging configuration file
|
||||
which logs all Hash Equivalence related messages of VERBOSE or higher to
|
||||
a file called ``hashequiv.log`` ::
|
||||
|
||||
{
|
||||
"version": 1,
|
||||
@@ -722,40 +722,3 @@ or higher priority to a file called ``hashequiv.log``::
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Then set the :term:`BB_LOGCONFIG` variable in ``conf/local.conf``::
|
||||
|
||||
BB_LOGCONFIG = "hashequiv.json"
|
||||
|
||||
Another example is this ``warn.json`` file to log all ``WARNING`` and
|
||||
higher priority messages to a ``warn.log`` file::
|
||||
|
||||
{
|
||||
"version": 1,
|
||||
"formatters": {
|
||||
"warnlogFormatter": {
|
||||
"()": "bb.msg.BBLogFormatter",
|
||||
"format": "%(levelname)s: %(message)s"
|
||||
}
|
||||
},
|
||||
|
||||
"handlers": {
|
||||
"warnlog": {
|
||||
"class": "logging.FileHandler",
|
||||
"formatter": "warnlogFormatter",
|
||||
"level": "WARNING",
|
||||
"filename": "warn.log"
|
||||
}
|
||||
},
|
||||
|
||||
"loggers": {
|
||||
"BitBake": {
|
||||
"handlers": ["warnlog"]
|
||||
}
|
||||
},
|
||||
|
||||
"@disable_existing_loggers": false
|
||||
}
|
||||
|
||||
Note that BitBake's helper classes for structured logging are implemented in
|
||||
``lib/bb/msg.py``.
|
||||
|
||||
@@ -424,8 +424,8 @@ This fetcher supports the following parameters:
|
||||
|
||||
- *"nobranch":* Tells the fetcher to not check the SHA validation for
|
||||
the branch when set to "1". The default is "0". Set this option for
|
||||
the recipe that refers to the commit that is valid for any namespace
|
||||
(branch, tag, ...) instead of the branch.
|
||||
the recipe that refers to the commit that is valid for a tag instead
|
||||
of the branch.
|
||||
|
||||
- *"bareclone":* Tells the fetcher to clone a bare clone into the
|
||||
destination directory without checking out a working tree. Only the
|
||||
@@ -476,14 +476,6 @@ Here are some example URLs::
|
||||
easy to share metadata without removing passwords. SSH keys, ``~/.netrc``
|
||||
and ``~/.ssh/config`` files can be used as alternatives.
|
||||
|
||||
Using tags with the git fetcher may cause surprising behaviour. Bitbake needs to
|
||||
resolve the tag to a specific revision and to do that, it has to connect to and use
|
||||
the upstream repository. This is because the revision the tags point at can change and
|
||||
we've seen cases of this happening in well known public repositories. This can mean
|
||||
many more network connections than expected and recipes may be reparsed at every build.
|
||||
Source mirrors will also be bypassed as the upstream repository is the only source
|
||||
of truth to resolve the revision accurately. For these reasons, whilst the fetcher
|
||||
can support tags, we recommend being specific about revisions in recipes.
|
||||
|
||||
.. _gitsm-fetcher:
|
||||
|
||||
@@ -696,43 +688,6 @@ Here is an example URL::
|
||||
|
||||
It can also be used when setting mirrors definitions using the :term:`PREMIRRORS` variable.
|
||||
|
||||
.. _gcp-fetcher:

GCP Fetcher (``gs://``)
--------------------------

This submodule fetches data from a
`Google Cloud Storage Bucket <https://cloud.google.com/storage/docs/buckets>`__.
It uses the `Google Cloud Storage Python Client <https://cloud.google.com/python/docs/reference/storage/latest>`__
to check the status of objects in the bucket and download them.
The use of the Python client makes it substantially faster than using command
line tools such as gsutil.

The fetcher requires the Google Cloud Storage Python Client to be installed, along
with the gsutil tool.

The fetcher requires that the machine has valid credentials for accessing the
chosen bucket. Instructions for authentication can be found in the
`Google Cloud documentation <https://cloud.google.com/docs/authentication/provide-credentials-adc#local-dev>`__.

If it is used from the OpenEmbedded build system, the fetcher can be used for
fetching sstate artifacts from a GCS bucket by specifying the
``SSTATE_MIRRORS`` variable as shown below::

SSTATE_MIRRORS ?= "\
file://.* gs://<bucket name>/PATH \
"

The fetcher can also be used in recipes::

SRC_URI = "gs://<bucket name>/<foo_container>/<bar_file>"

However, the checksum of the file should also be provided::

SRC_URI[sha256sum] = "<sha256 string>"
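For orientation, a minimal sketch of how the underlying client library is typically driven (illustrative only, not the fetcher's own code; the bucket and object names are placeholders and the ``google-cloud-storage`` package is assumed to be installed)::

    from google.cloud import storage

    def fetch_from_gcs(bucket_name, object_name, destination):
        client = storage.Client()   # uses Application Default Credentials
        blob = client.bucket(bucket_name).blob(object_name)
        if not blob.exists():       # cheap existence/status check
            raise FileNotFoundError("gs://%s/%s" % (bucket_name, object_name))
        blob.download_to_filename(destination)

    fetch_from_gcs("example-mirror", "sstate-cache/universal/example.tar.zst",
                   "/tmp/example.tar.zst")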
|
||||
|
||||
.. _crate-fetcher:
|
||||
|
||||
Crate Fetcher (``crate://``)
|
||||
----------------------------
|
||||
|
||||
@@ -749,80 +704,6 @@ Here is an example URL::
|
||||
|
||||
SRC_URI = "crate://crates.io/glob/0.2.11"
|
||||
|
||||
.. _npm-fetcher:
|
||||
|
||||
NPM Fetcher (``npm://``)
|
||||
------------------------
|
||||
|
||||
This submodule fetches source code from an
|
||||
`NPM <https://en.wikipedia.org/wiki/Npm_(software)>`__
|
||||
Javascript package registry.
|
||||
|
||||
The format for the :term:`SRC_URI` setting must be::
|
||||
|
||||
SRC_URI = "npm://some.registry.url;ParameterA=xxx;ParameterB=xxx;..."
|
||||
|
||||
This fetcher supports the following parameters:
|
||||
|
||||
- *"package":* The NPM package name. This is a mandatory parameter.
|
||||
|
||||
- *"version":* The NPM package version. This is a mandatory parameter.
|
||||
|
||||
- *"downloadfilename":* Specifies the filename used when storing the downloaded file.
|
||||
|
||||
- *"destsuffix":* Specifies the directory to use to unpack the package (default: ``npm``).
|
||||
|
||||
Note that NPM fetcher only fetches the package source itself. The dependencies
|
||||
can be fetched through the `npmsw-fetcher`_.
|
||||
|
||||
Here is an example URL with both fetchers::
|
||||
|
||||
SRC_URI = " \
|
||||
npm://registry.npmjs.org/;package=cute-files;version=${PV} \
|
||||
npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \
|
||||
"
|
||||
|
||||
See :yocto_docs:`Creating Node Package Manager (NPM) Packages
|
||||
</dev-manual/packages.html#creating-node-package-manager-npm-packages>`
|
||||
in the Yocto Project manual for details about using
|
||||
:yocto_docs:`devtool <https://docs.yoctoproject.org/ref-manual/devtool-reference.html>`
|
||||
to automatically create a recipe from an NPM URL.
|
||||
|
||||
.. _npmsw-fetcher:
|
||||
|
||||
NPM shrinkwrap Fetcher (``npmsw://``)
|
||||
-------------------------------------
|
||||
|
||||
This submodule fetches source code from an
|
||||
`NPM shrinkwrap <https://docs.npmjs.com/cli/v8/commands/npm-shrinkwrap>`__
|
||||
description file, which lists the dependencies
|
||||
of an NPM package while locking their versions.
|
||||
|
||||
The format for the :term:`SRC_URI` setting must be::
|
||||
|
||||
SRC_URI = "npmsw://some.registry.url;ParameterA=xxx;ParameterB=xxx;..."
|
||||
|
||||
This fetcher supports the following parameters:
|
||||
|
||||
- *"dev":* Set this parameter to ``1`` to install "devDependencies".
|
||||
|
||||
- *"destsuffix":* Specifies the directory to use to unpack the dependencies
|
||||
(``${S}`` by default).
|
||||
|
||||
Note that the shrinkwrap file can also be provided by the recipe for
|
||||
the package which has such dependencies, for example::
|
||||
|
||||
SRC_URI = " \
|
||||
npm://registry.npmjs.org/;package=cute-files;version=${PV} \
|
||||
npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \
|
||||
"
|
||||
|
||||
Such a file can automatically be generated using
|
||||
:yocto_docs:`devtool <https://docs.yoctoproject.org/ref-manual/devtool-reference.html>`
|
||||
as described in the :yocto_docs:`Creating Node Package Manager (NPM) Packages
|
||||
</dev-manual/packages.html#creating-node-package-manager-npm-packages>`
|
||||
section of the Yocto Project.
|
||||
|
||||
Other Fetchers
|
||||
--------------
|
||||
|
||||
@@ -832,9 +713,9 @@ Fetch submodules also exist for the following:
|
||||
|
||||
- Mercurial (``hg://``)
|
||||
|
||||
- OSC (``osc://``)
|
||||
- npm (``npm://``)
|
||||
|
||||
- S3 (``s3://``)
|
||||
- OSC (``osc://``)
|
||||
|
||||
- Secure FTP (``sftp://``)
|
||||
|
||||
|
||||
@@ -18,32 +18,28 @@ it.
|
||||
Obtaining BitBake
|
||||
=================
|
||||
|
||||
See the :ref:`bitbake-user-manual/bitbake-user-manual-intro:obtaining bitbake` section for
|
||||
See the :ref:`bitbake-user-manual/bitbake-user-manual-hello:obtaining bitbake` section for
|
||||
information on how to obtain BitBake. Once you have the source code on
|
||||
your machine, the BitBake directory appears as follows::
|
||||
|
||||
$ ls -al
|
||||
total 108
|
||||
drwxr-xr-x 9 fawkh 10000 4096 feb 24 12:10 .
|
||||
drwx------ 36 fawkh 10000 4096 mar 2 17:00 ..
|
||||
-rw-r--r-- 1 fawkh 10000 365 feb 24 12:10 AUTHORS
|
||||
drwxr-xr-x 2 fawkh 10000 4096 feb 24 12:10 bin
|
||||
-rw-r--r-- 1 fawkh 10000 16501 feb 24 12:10 ChangeLog
|
||||
drwxr-xr-x 2 fawkh 10000 4096 feb 24 12:10 classes
|
||||
drwxr-xr-x 2 fawkh 10000 4096 feb 24 12:10 conf
|
||||
drwxr-xr-x 5 fawkh 10000 4096 feb 24 12:10 contrib
|
||||
drwxr-xr-x 6 fawkh 10000 4096 feb 24 12:10 doc
|
||||
drwxr-xr-x 8 fawkh 10000 4096 mar 2 16:26 .git
|
||||
-rw-r--r-- 1 fawkh 10000 31 feb 24 12:10 .gitattributes
|
||||
-rw-r--r-- 1 fawkh 10000 392 feb 24 12:10 .gitignore
|
||||
drwxr-xr-x 13 fawkh 10000 4096 feb 24 12:11 lib
|
||||
-rw-r--r-- 1 fawkh 10000 1224 feb 24 12:10 LICENSE
|
||||
-rw-r--r-- 1 fawkh 10000 15394 feb 24 12:10 LICENSE.GPL-2.0-only
|
||||
-rw-r--r-- 1 fawkh 10000 1286 feb 24 12:10 LICENSE.MIT
|
||||
-rw-r--r-- 1 fawkh 10000 229 feb 24 12:10 MANIFEST.in
|
||||
-rw-r--r-- 1 fawkh 10000 2413 feb 24 12:10 README
|
||||
-rw-r--r-- 1 fawkh 10000 43 feb 24 12:10 toaster-requirements.txt
|
||||
-rw-r--r-- 1 fawkh 10000 2887 feb 24 12:10 TODO
|
||||
total 100
|
||||
drwxrwxr-x. 9 wmat wmat 4096 Jan 31 13:44 .
|
||||
drwxrwxr-x. 3 wmat wmat 4096 Feb 4 10:45 ..
|
||||
-rw-rw-r--. 1 wmat wmat 365 Nov 26 04:55 AUTHORS
|
||||
drwxrwxr-x. 2 wmat wmat 4096 Nov 26 04:55 bin
|
||||
drwxrwxr-x. 4 wmat wmat 4096 Jan 31 13:44 build
|
||||
-rw-rw-r--. 1 wmat wmat 16501 Nov 26 04:55 ChangeLog
|
||||
drwxrwxr-x. 2 wmat wmat 4096 Nov 26 04:55 classes
|
||||
drwxrwxr-x. 2 wmat wmat 4096 Nov 26 04:55 conf
|
||||
drwxrwxr-x. 3 wmat wmat 4096 Nov 26 04:55 contrib
|
||||
-rw-rw-r--. 1 wmat wmat 17987 Nov 26 04:55 COPYING
|
||||
drwxrwxr-x. 3 wmat wmat 4096 Nov 26 04:55 doc
|
||||
-rw-rw-r--. 1 wmat wmat 69 Nov 26 04:55 .gitignore
|
||||
-rw-rw-r--. 1 wmat wmat 849 Nov 26 04:55 HEADER
|
||||
drwxrwxr-x. 5 wmat wmat 4096 Jan 31 13:44 lib
|
||||
-rw-rw-r--. 1 wmat wmat 195 Nov 26 04:55 MANIFEST.in
|
||||
-rw-rw-r--. 1 wmat wmat 2887 Nov 26 04:55 TODO
|
||||
|
||||
At this point, you should have BitBake cloned to a directory that
|
||||
matches the previous listing except for dates and user names.
|
||||
@@ -56,7 +52,7 @@ directory to where your local BitBake files are and run the following
|
||||
command::
|
||||
|
||||
$ ./bin/bitbake --version
|
||||
BitBake Build Tool Core version 2.3.1
|
||||
BitBake Build Tool Core version 1.23.0, bitbake version 1.23.0
|
||||
|
||||
The console output tells you what version
|
||||
you are running.
|
||||
@@ -134,8 +130,23 @@ Following is the complete "Hello World" example.
|
||||
directory. Run the ``bitbake`` command and see what it does::
|
||||
|
||||
$ bitbake
|
||||
ERROR: The BBPATH variable is not set and bitbake did not find a conf/bblayers.conf file in the expected location.
|
||||
The BBPATH variable is not set and bitbake did not
|
||||
find a conf/bblayers.conf file in the expected location.
|
||||
Maybe you accidentally invoked bitbake from the wrong directory?
|
||||
DEBUG: Removed the following variables from the environment:
|
||||
GNOME_DESKTOP_SESSION_ID, XDG_CURRENT_DESKTOP,
|
||||
GNOME_KEYRING_CONTROL, DISPLAY, SSH_AGENT_PID, LANG, no_proxy,
|
||||
XDG_SESSION_PATH, XAUTHORITY, SESSION_MANAGER, SHLVL,
|
||||
MANDATORY_PATH, COMPIZ_CONFIG_PROFILE, WINDOWID, EDITOR,
|
||||
GPG_AGENT_INFO, SSH_AUTH_SOCK, GDMSESSION, GNOME_KEYRING_PID,
|
||||
XDG_SEAT_PATH, XDG_CONFIG_DIRS, LESSOPEN, DBUS_SESSION_BUS_ADDRESS,
|
||||
_, XDG_SESSION_COOKIE, DESKTOP_SESSION, LESSCLOSE, DEFAULTS_PATH,
|
||||
UBUNTU_MENUPROXY, OLDPWD, XDG_DATA_DIRS, COLORTERM, LS_COLORS
|
||||
|
||||
The majority of this output is specific to environment variables that
|
||||
are not directly relevant to BitBake. However, the very first
|
||||
message regarding the :term:`BBPATH` variable and the
|
||||
``conf/bblayers.conf`` file is relevant.
|
||||
|
||||
When you run BitBake, it begins looking for metadata files. The
|
||||
:term:`BBPATH` variable is what tells BitBake where
|
||||
@@ -168,14 +179,20 @@ Following is the complete "Hello World" example.
|
||||
``bitbake`` command again::
|
||||
|
||||
$ bitbake
|
||||
ERROR: Unable to parse /home/scott-lenovo/bitbake/lib/bb/parse/__init__.py
|
||||
Traceback (most recent call last):
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/__init__.py", line 127, in resolve_file(fn='conf/bitbake.conf', d=<bb.data_smart.DataSmart object at 0x7f22919a3df0>):
|
||||
if not newfn:
|
||||
> raise IOError(errno.ENOENT, "file %s not found in %s" % (fn, bbpath))
|
||||
fn = newfn
|
||||
FileNotFoundError: [Errno 2] file conf/bitbake.conf not found in <projectdirectory>
|
||||
ERROR: Traceback (most recent call last):
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 163, in wrapped
|
||||
return func(fn, *args)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 173, in parse_config_file
|
||||
return bb.parse.handle(fn, data, include)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/__init__.py", line 99, in handle
|
||||
return h['handle'](fn, data, include)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/ConfHandler.py", line 120, in handle
|
||||
abs_fn = resolve_file(fn, data)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/__init__.py", line 117, in resolve_file
|
||||
raise IOError("file %s not found in %s" % (fn, bbpath))
|
||||
IOError: file conf/bitbake.conf not found in /home/scott-lenovo/hello
|
||||
|
||||
ERROR: Unable to parse conf/bitbake.conf: file conf/bitbake.conf not found in /home/scott-lenovo/hello
|
||||
|
||||
This sample output shows that BitBake could not find the
|
||||
``conf/bitbake.conf`` file in the project directory. This file is
|
||||
@@ -209,12 +226,12 @@ Following is the complete "Hello World" example.
|
||||
|
||||
.. note::
|
||||
|
||||
Without a value for :term:`PN`, the variables :term:`STAMP`, :term:`T`, and :term:`B`, prevent more
|
||||
than one recipe from working. You can fix this by either setting :term:`PN` to
|
||||
Without a value for PN , the variables STAMP , T , and B , prevent more
|
||||
than one recipe from working. You can fix this by either setting PN to
|
||||
have a value similar to what OpenEmbedded and BitBake use in the default
|
||||
``bitbake.conf`` file (see previous example). Or, by manually updating each
|
||||
recipe to set :term:`PN`. You will also need to include :term:`PN` as part of the :term:`STAMP`,
|
||||
:term:`T`, and :term:`B` variable definitions in the ``local.conf`` file.
|
||||
bitbake.conf file (see previous example). Or, by manually updating each
|
||||
recipe to set PN . You will also need to include PN as part of the STAMP
|
||||
, T , and B variable definitions in the local.conf file.
|
||||
|
||||
The ``TMPDIR`` variable establishes a directory that BitBake uses
|
||||
for build output and intermediate files other than the cached
|
||||
@@ -237,14 +254,18 @@ Following is the complete "Hello World" example.
|
||||
exists, you can run the ``bitbake`` command again::
|
||||
|
||||
$ bitbake
|
||||
ERROR: Unable to parse /home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py
|
||||
Traceback (most recent call last):
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py", line 67, in inherit(files=['base'], fn='configuration INHERITs', lineno=0, d=<bb.data_smart.DataSmart object at 0x7fab6815edf0>):
|
||||
if not os.path.exists(file):
|
||||
> raise ParseError("Could not inherit file %s" % (file), fn, lineno)
|
||||
|
||||
bb.parse.ParseError: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
|
||||
ERROR: Traceback (most recent call last):
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 163, in wrapped
|
||||
return func(fn, *args)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 177, in _inherit
|
||||
bb.parse.BBHandler.inherit(bbclass, "configuration INHERITs", 0, data)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py", line 92, in inherit
|
||||
include(fn, file, lineno, d, "inherit")
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/ConfHandler.py", line 100, in include
|
||||
raise ParseError("Could not %(error_out)s file %(fn)s" % vars(), oldfn, lineno)
|
||||
ParseError: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
|
||||
|
||||
ERROR: Unable to parse base: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
|
||||
|
||||
In the sample output,
|
||||
BitBake could not find the ``classes/base.bbclass`` file. You need
|
||||
@@ -263,10 +284,7 @@ Following is the complete "Hello World" example.
|
||||
$ mkdir classes
|
||||
|
||||
Move to the ``classes`` directory and then create the
|
||||
``base.bbclass`` file by inserting this single line::
|
||||
|
||||
addtask build
|
||||
|
||||
``base.bbclass`` file by inserting this single line: addtask build
|
||||
The minimal task that BitBake runs is the ``do_build`` task. This is
|
||||
all the example needs in order to build the project. Of course, the
|
||||
``base.bbclass`` can have much more depending on which build
|
||||
@@ -310,19 +328,10 @@ Following is the complete "Hello World" example.
|
||||
BBFILES += "${LAYERDIR}/*.bb"
|
||||
BBFILE_COLLECTIONS += "mylayer"
|
||||
BBFILE_PATTERN_mylayer := "^${LAYERDIR_RE}/"
|
||||
LAYERSERIES_CORENAMES = "hello_world_example"
|
||||
LAYERSERIES_COMPAT_mylayer = "hello_world_example"
|
||||
|
||||
For information on these variables, click on :term:`BBFILES`,
|
||||
:term:`LAYERDIR`, :term:`BBFILE_COLLECTIONS`, :term:`BBFILE_PATTERN_mylayer <BBFILE_PATTERN>`
|
||||
or :term:`LAYERSERIES_COMPAT` to go to the definitions in the glossary.
|
||||
|
||||
.. note::
|
||||
|
||||
We are setting both ``LAYERSERIES_CORENAMES`` and :term:`LAYERSERIES_COMPAT` in this particular case, because we
|
||||
are using bitbake without OpenEmbedded.
|
||||
You should usually just use :term:`LAYERSERIES_COMPAT` to specify the OE-Core versions for which your layer
|
||||
is compatible, and add the meta-openembedded layer to your project.
|
||||
:term:`LAYERDIR`, :term:`BBFILE_COLLECTIONS` or :term:`BBFILE_PATTERN_mylayer <BBFILE_PATTERN>`
|
||||
to go to the definitions in the glossary.
|
||||
|
||||
You need to create the recipe file next. Inside your layer at the
|
||||
top-level, use an editor and create a recipe file named
|
||||
@@ -380,14 +389,12 @@ Following is the complete "Hello World" example.
|
||||
target::
|
||||
|
||||
$ bitbake printhello
|
||||
Loading cache: 100% |
|
||||
Loaded 0 entries from dependency cache.
|
||||
Parsing recipes: 100% |##################################################################################|
|
||||
Time: 00:00:00
|
||||
Parsing of 1 .bb files complete (0 cached, 1 parsed). 1 targets, 0 skipped, 0 masked, 0 errors.
|
||||
NOTE: Resolving any missing task queue dependencies
|
||||
Initialising tasks: 100% |###############################################################################|
|
||||
NOTE: No setscene tasks
|
||||
NOTE: Executing Tasks
|
||||
NOTE: Preparing RunQueue
|
||||
NOTE: Executing RunQueue Tasks
|
||||
********************
|
||||
* *
|
||||
* Hello, World! *
|
||||
|
||||
@@ -349,84 +349,40 @@ Usage and syntax
|
||||
Following is the usage and syntax for BitBake::
|
||||
|
||||
$ bitbake -h
|
||||
usage: bitbake [-s] [-e] [-g] [-u UI] [--version] [-h] [-f] [-c CMD]
|
||||
[-C INVALIDATE_STAMP] [--runall RUNALL] [--runonly RUNONLY]
|
||||
[--no-setscene] [--skip-setscene] [--setscene-only] [-n] [-p]
|
||||
[-k] [-P] [-S SIGNATURE_HANDLER] [--revisions-changed]
|
||||
[-b BUILDFILE] [-D] [-l DEBUG_DOMAINS] [-v] [-q]
|
||||
[-w WRITEEVENTLOG] [-B BIND] [-T SERVER_TIMEOUT]
|
||||
[--remote-server REMOTE_SERVER] [-m] [--token XMLRPCTOKEN]
|
||||
[--observe-only] [--status-only] [--server-only] [-r PREFILE]
|
||||
[-R POSTFILE] [-I EXTRA_ASSUME_PROVIDED]
|
||||
[recipename/target ...]
|
||||
Usage: bitbake [options] [recipename/target recipe:do_task ...]
|
||||
|
||||
It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH
|
||||
which will provide the layer, BBFILES and other configuration information.
|
||||
Executes the specified task (default is 'build') for a given set of target recipes (.bb files).
|
||||
It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH which
|
||||
will provide the layer, BBFILES and other configuration information.
|
||||
|
||||
General options:
|
||||
recipename/target Execute the specified task (default is 'build') for
|
||||
these target recipes (.bb files).
|
||||
-s, --show-versions Show current and preferred versions of all recipes.
|
||||
-e, --environment Show the global or per-recipe environment complete
|
||||
with information about where variables were
|
||||
set/changed.
|
||||
-g, --graphviz Save dependency tree information for the specified
|
||||
targets in the dot syntax.
|
||||
-u UI, --ui UI The user interface to use (knotty, ncurses, taskexp,
|
||||
taskexp_ncurses or teamcity - default knotty).
|
||||
--version Show programs version and exit.
|
||||
-h, --help Show this help message and exit.
|
||||
|
||||
Task control options:
|
||||
-f, --force Force the specified targets/task to run (invalidating
|
||||
any existing stamp file).
|
||||
-c CMD, --cmd CMD Specify the task to execute. The exact options
|
||||
available depend on the metadata. Some examples might
|
||||
be 'compile' or 'populate_sysroot' or 'listtasks' may
|
||||
give a list of the tasks available.
|
||||
-C INVALIDATE_STAMP, --clear-stamp INVALIDATE_STAMP
|
||||
Invalidate the stamp for the specified task such as
|
||||
'compile' and then run the default task for the
|
||||
specified target(s).
|
||||
--runall RUNALL Run the specified task for any recipe in the taskgraph
|
||||
of the specified target (even if it wouldn't otherwise
|
||||
have run).
|
||||
--runonly RUNONLY Run only the specified task within the taskgraph of
|
||||
the specified targets (and any task dependencies those
|
||||
tasks may have).
|
||||
--no-setscene Do not run any setscene tasks. sstate will be ignored
|
||||
and everything needed, built.
|
||||
--skip-setscene Skip setscene tasks if they would be executed. Tasks
|
||||
previously restored from sstate will be kept, unlike
|
||||
--no-setscene.
|
||||
--setscene-only Only run setscene tasks, don't run any real tasks.
|
||||
|
||||
Execution control options:
|
||||
-n, --dry-run Don't execute, just go through the motions.
|
||||
-p, --parse-only Quit after parsing the BB recipes.
|
||||
Options:
|
||||
--version show program's version number and exit
|
||||
-h, --help show this help message and exit
|
||||
-b BUILDFILE, --buildfile=BUILDFILE
|
||||
Execute tasks from a specific .bb recipe directly.
|
||||
WARNING: Does not handle any dependencies from other
|
||||
recipes.
|
||||
-k, --continue Continue as much as possible after an error. While the
|
||||
target that failed and anything depending on it cannot
|
||||
be built, as much as possible will be built before
|
||||
stopping.
|
||||
-P, --profile Profile the command and save reports.
|
||||
-S SIGNATURE_HANDLER, --dump-signatures SIGNATURE_HANDLER
|
||||
Dump out the signature construction information, with
|
||||
no task execution. The SIGNATURE_HANDLER parameter is
|
||||
passed to the handler. Two common values are none and
|
||||
printdiff but the handler may define more/less. none
|
||||
means only dump the signature, printdiff means
|
||||
recursively compare the dumped signature with the most
|
||||
recent one in a local build or sstate cache (can be
|
||||
used to find out why tasks re-run when that is not
|
||||
expected)
|
||||
--revisions-changed Set the exit code depending on whether upstream
|
||||
floating revisions have changed or not.
|
||||
-b BUILDFILE, --buildfile BUILDFILE
|
||||
Execute tasks from a specific .bb recipe directly.
|
||||
WARNING: Does not handle any dependencies from other
|
||||
recipes.
|
||||
|
||||
Logging/output control options:
|
||||
-f, --force Force the specified targets/task to run (invalidating
|
||||
any existing stamp file).
|
||||
-c CMD, --cmd=CMD Specify the task to execute. The exact options
|
||||
available depend on the metadata. Some examples might
|
||||
be 'compile' or 'populate_sysroot' or 'listtasks' may
|
||||
give a list of the tasks available.
|
||||
-C INVALIDATE_STAMP, --clear-stamp=INVALIDATE_STAMP
|
||||
Invalidate the stamp for the specified task such as
|
||||
'compile' and then run the default task for the
|
||||
specified target(s).
|
||||
-r PREFILE, --read=PREFILE
|
||||
Read the specified file before bitbake.conf.
|
||||
-R POSTFILE, --postread=POSTFILE
|
||||
Read the specified file after bitbake.conf.
|
||||
-v, --verbose Enable tracing of shell tasks (with 'set -x'). Also
|
||||
print bb.note(...) messages to stdout (in addition to
|
||||
writing them to ${T}/log.do_<task>).
|
||||
-D, --debug Increase the debug level. You can specify this more
|
||||
than once. -D sets the debug level to 1, where only
|
||||
bb.debug(1, ...) messages are printed to stdout; -DD
|
||||
@@ -436,47 +392,65 @@ Following is the usage and syntax for BitBake::
|
||||
-D only affects output to stdout. All debug messages
|
||||
are written to ${T}/log.do_taskname, regardless of the
|
||||
debug level.
|
||||
-l DEBUG_DOMAINS, --log-domains DEBUG_DOMAINS
|
||||
Show debug logging for the specified logging domains.
|
||||
-v, --verbose Enable tracing of shell tasks (with 'set -x'). Also
print bb.note(...) messages to stdout (in addition to
writing them to ${T}/log.do_<task>).
-q, --quiet Output less log message data to the terminal. You can
specify this more than once.
-w WRITEEVENTLOG, --write-log WRITEEVENTLOG
Writes the event log of the build to a bitbake event
json file. Use '' (empty string) to assign the name
automatically.

Server options:
-B BIND, --bind BIND The name/address for the bitbake xmlrpc server to bind
-n, --dry-run Don't execute, just go through the motions.
-S SIGNATURE_HANDLER, --dump-signatures=SIGNATURE_HANDLER
Dump out the signature construction information, with
no task execution. The SIGNATURE_HANDLER parameter is
passed to the handler. Two common values are none and
printdiff but the handler may define more/less. none
means only dump the signature, printdiff means compare
the dumped signature with the cached one.
-p, --parse-only Quit after parsing the BB recipes.
-s, --show-versions Show current and preferred versions of all recipes.
-e, --environment Show the global or per-recipe environment complete
with information about where variables were
set/changed.
-g, --graphviz Save dependency tree information for the specified
targets in the dot syntax.
-I EXTRA_ASSUME_PROVIDED, --ignore-deps=EXTRA_ASSUME_PROVIDED
Assume these dependencies don't exist and are already
provided (equivalent to ASSUME_PROVIDED). Useful to
make dependency graphs more appealing
-l DEBUG_DOMAINS, --log-domains=DEBUG_DOMAINS
Show debug logging for the specified logging domains
-P, --profile Profile the command and save reports.
-u UI, --ui=UI The user interface to use (knotty, ncurses, taskexp or
teamcity - default knotty).
--token=XMLRPCTOKEN Specify the connection token to be used when
connecting to a remote server.
--revisions-changed Set the exit code depending on whether upstream
floating revisions have changed or not.
--server-only Run bitbake without a UI, only starting a server
(cooker) process.
-B BIND, --bind=BIND The name/address for the bitbake xmlrpc server to bind
to.
-T SERVER_TIMEOUT, --idle-timeout SERVER_TIMEOUT
-T SERVER_TIMEOUT, --idle-timeout=SERVER_TIMEOUT
Set timeout to unload bitbake server due to
inactivity, set to -1 means no unload, default:
Environment variable BB_SERVER_TIMEOUT.
--remote-server REMOTE_SERVER
--no-setscene Do not run any setscene tasks. sstate will be ignored
and everything needed, built.
--skip-setscene Skip setscene tasks if they would be executed. Tasks
previously restored from sstate will be kept, unlike
--no-setscene
--setscene-only Only run setscene tasks, don't run any real tasks.
--remote-server=REMOTE_SERVER
Connect to the specified server.
-m, --kill-server Terminate any running bitbake server.
--token XMLRPCTOKEN Specify the connection token to be used when
connecting to a remote server.
--observe-only Connect to a server as an observing-only client.
--status-only Check the status of the remote bitbake server.
--server-only Run bitbake without a UI, only starting a server
(cooker) process.

Configuration options:
-r PREFILE, --read PREFILE
Read the specified file before bitbake.conf.
-R POSTFILE, --postread POSTFILE
Read the specified file after bitbake.conf.
-I EXTRA_ASSUME_PROVIDED, --ignore-deps EXTRA_ASSUME_PROVIDED
Assume these dependencies don't exist and are already
provided (equivalent to ASSUME_PROVIDED). Useful to
make dependency graphs more appealing.

..
   Bitbake help output generated with "stty columns 80; bin/bitbake -h"
-w WRITEEVENTLOG, --write-log=WRITEEVENTLOG
Writes the event log of the build to a bitbake event
json file. Use '' (empty string) to assign the name
automatically.
--runall=RUNALL Run the specified task for any recipe in the taskgraph
of the specified target (even if it wouldn't otherwise
have run).
--runonly=RUNONLY Run only the specified task within the taskgraph of
the specified targets (and any task dependencies those
tasks may have).

.. _bitbake-examples:

@@ -195,45 +195,22 @@ value. However, if ``A`` is not set, the variable is set to "aval".

Setting a weak default value (??=)
----------------------------------

The weak default value of a variable is the value which that variable
will expand to if no value has been assigned to it via any of the other
assignment operators. The "??=" operator takes effect immediately, replacing
any previously defined weak default value. Here is an example::
It is possible to use a "weaker" assignment than in the previous section
by using the "??=" operator. This assignment behaves identical to "?="
except that the assignment is made at the end of the parsing process
rather than immediately. Consequently, when multiple "??=" assignments
exist, the last one is used. Also, any "=" or "?=" assignment will
override the value set with "??=". Here is an example::

    W ??= "x"
    A := "${W}" # Immediate variable expansion
    W ??= "y"
    B := "${W}" # Immediate variable expansion
    W ??= "z"
    C = "${W}"
    W ?= "i"
    A ??= "somevalue"
    A ??= "someothervalue"

After parsing we will have::
If ``A`` is set before the above statements are
parsed, the variable retains its value. If ``A`` is not set, the
variable is set to "someothervalue".

    A = "x"
    B = "y"
    C = "i"
    W = "i"

Appending and prepending non-override style will not substitute the weak
default value, which means that after parsing::

    W ??= "x"
    W += "y"

we will have::

    W = " y"

On the other hand, override-style appends/prepends/removes are applied after
any active weak default value has been substituted::

    W ??= "x"
    W:append = "y"

After parsing we will have::

    W = "xy"
Again, this assignment is a "lazy" or "weak" assignment because it does
not occur until the end of the parsing process.

Immediate variable expansion (:=)
---------------------------------
@@ -319,10 +296,6 @@ The variable ``D`` becomes "dvaladditional data".

You must control all spacing when you use the override syntax.

.. note::

   The overrides are applied in this order, ":append", ":prepend", ":remove".

It is also possible to append and prepend to shell functions and
BitBake-style Python functions. See the ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:shell functions`" and ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:bitbake-style python functions`"
sections for examples.
@@ -334,8 +307,7 @@ Removal (Override Style Syntax)

You can remove values from lists using the removal override style
syntax. Specifying a value for removal causes all occurrences of that
value to be removed from the variable. Unlike ":append" and ":prepend",
there is no need to add a leading or trailing space to the value.
value to be removed from the variable.

When you use this syntax, BitBake expects one or more strings.
Surrounding spaces and spacing are preserved. Here is an example::
@@ -356,28 +328,6 @@ The variable ``FOO`` becomes
Like ":append" and ":prepend", ":remove" is applied at variable
expansion time.

.. note::

   The overrides are applied in this order, ":append", ":prepend", ":remove".
   This implies it is not possible to re-append previously removed strings.
   However, one can undo a ":remove" by using an intermediate variable whose
   content is passed to the ":remove" so that modifying the intermediate
   variable equals to keeping the string in::

      FOOREMOVE = "123 456 789"
      FOO:remove = "${FOOREMOVE}"
      ...
      FOOREMOVE = "123 789"

   This expands to ``FOO:remove = "123 789"``.

.. note::

   Override application order may not match variable parse history, i.e.
   the output of ``bitbake -e`` may contain ":remove" before ":append",
   but the result will be removed string, because ":remove" is handled
   last.

Override Style Operation Advantages
-----------------------------------
@@ -448,12 +398,6 @@ documentation to a BitBake variable as follows::

    CACHE[doc] = "The directory holding the cache of the metadata."

.. note::

   Variable flag names starting with an underscore (``_``) character
   are allowed but are ignored by ``d.getVarFlags("VAR")``
   in Python code. Such flag names are used internally by BitBake.

Inline Python Variable Expansion
--------------------------------
@@ -754,9 +698,7 @@ share the task.
This section presents the mechanisms BitBake provides to allow you to
share functionality between recipes. Specifically, the mechanisms
include ``include``, ``inherit``, :term:`INHERIT`, and ``require``
directives. There is also a higher-level abstraction called
``configuration fragments`` that is enabled with ``addfragments``
directive.
directives.

Locating Include and Class Files
--------------------------------

@@ -773,8 +715,6 @@ In order for include and class files to be found by BitBake, they need
to be located in a "classes" subdirectory that can be found in
:term:`BBPATH`.

.. _ref-bitbake-user-manual-metadata-inherit:

``inherit`` Directive
---------------------
@@ -813,43 +753,19 @@ An advantage with the inherit directive as compared to both the
:ref:`include <bitbake-user-manual/bitbake-user-manual-metadata:\`\`include\`\` directive>` and :ref:`require <bitbake-user-manual/bitbake-user-manual-metadata:\`\`require\`\` directive>`
directives is that you can inherit class files conditionally. You can
accomplish this by using a variable expression after the ``inherit``
statement.
statement. Here is an example::

For inheriting classes conditionally, using the :ref:`inherit_defer
<ref-bitbake-user-manual-metadata-inherit-defer>` directive is advised as
:ref:`inherit_defer <ref-bitbake-user-manual-metadata-inherit-defer>` is
evaluated at the end of parsing.

.. _ref-bitbake-user-manual-metadata-inherit-defer:

``inherit_defer`` Directive
~~~~~~~~~~~~~~~~~~~~~~~~~~~

The :ref:`inherit_defer <ref-bitbake-user-manual-metadata-inherit-defer>`
directive works like the :ref:`inherit
<ref-bitbake-user-manual-metadata-inherit>` directive, except that it is only
evaluated at the end of parsing. Its usage is recommended when a conditional
expression is used.

This allows conditional expressions to be evaluated "late", meaning changes to
the variable after the line is parsed will take effect. With the :ref:`inherit
<ref-bitbake-user-manual-metadata-inherit>` directive this is not the case.

Here is an example::

    inherit_defer ${VARNAME}
    inherit ${VARNAME}

If ``VARNAME`` is
going to be set, it needs to be set before the ``inherit_defer`` statement is
going to be set, it needs to be set before the ``inherit`` statement is
parsed. One way to achieve a conditional inherit in this case is to use
overrides::

    VARIABLE = ""
    VARIABLE:someoverride = "myclass"

Another method is by using :ref:`anonymous Python
<bitbake-user-manual/bitbake-user-manual-metadata:Anonymous Python Functions>`.
Here is an example::
Another method is by using anonymous Python. Here is an example::

    python () {
        if condition == value:
@@ -858,14 +774,11 @@ Here is an example::
            d.setVar('VARIABLE', '')
    }

Alternatively, you could use an inline Python expression in the
Alternatively, you could use an in-line Python expression in the
following form::

    inherit_defer ${@'classname' if condition else ''}

Or::

    inherit_defer ${@bb.utils.contains('VARIABLE', 'something', 'classname', '', d)}
    inherit ${@'classname' if condition else ''}
    inherit ${@functionname(params)}

In all cases, if the expression evaluates to an
empty string, the statement does not trigger a syntax error because it
@@ -964,50 +877,6 @@ the ``autotools`` and ``pkgconfig`` classes::

    INHERIT += "autotools pkgconfig"

``addfragments`` Directive
--------------------------

This directive allows fine-tuning local configurations with configuration
snippets contained in layers in a structured, controlled way. Typically it would
go into ``bitbake.conf``, for example::

    addfragments conf/fragments OE_FRAGMENTS OE_FRAGMENTS_METADATA_VARS

``addfragments`` takes three parameters:

- path prefix for fragment files inside the layer file tree that bitbake
  uses to construct full paths to the fragment files

- name of variable that holds the list of enabled fragments in an
  active build

- name of variable that contains a list of variable names containing
  fragment-specific metadata (such as descriptions)

This allows listing enabled configuration fragments in ``OE_FRAGMENTS``
variable like this::

    OE_FRAGMENTS = "core/domain/somefragment core/someotherfragment anotherlayer/anotherdomain/anotherfragment"

Fragment names listed in this variable must be prefixed by the layer name
where a fragment file is located, defined by :term:`BBFILE_COLLECTIONS` in ``layer.conf``.

The implementation then expands this list into
:ref:`require <bitbake-user-manual/bitbake-user-manual-metadata:\`\`require\`\` directive>`
directives with full paths to respective layers::

    require /path/to/core-layer/conf/fragments/domain/somefragment.conf
    require /path/to/core-layer/conf/fragments/someotherfragment.conf
    require /path/to/another-layer/conf/fragments/anotherdomain/anotherfragment.conf

The variable containing a list of fragment metadata variables could look like this::

    OE_FRAGMENTS_METADATA_VARS = "BB_CONF_FRAGMENT_SUMMARY BB_CONF_FRAGMENT_DESCRIPTION"

The implementation will add a flag containing the fragment name to each of those variables
when parsing fragments, so that the variables are namespaced by fragment name, and do not override
each other when several fragments are enabled.

Functions
=========
@@ -1571,35 +1440,12 @@ functionality of the task:
  directory listed is used as the current working directory for the
  task.

- ``[file-checksums]``: Controls the file dependencies for a task. The
  baseline file list is the set of files associated with
  :term:`SRC_URI`. May be used to set additional dependencies on
  files not associated with :term:`SRC_URI`.

  The value set to the list is a file-boolean pair where the first
  value is the file name and the second is whether or not it
  physically exists on the filesystem. ::

      do_configure[file-checksums] += "${MY_DIRPATH}/my-file.txt:True"

  It is important to record any paths which the task looked at and
  which didn't exist. This means that if these do exist at a later
  time, the task can be rerun with the new additional files. The
  "exists" True or False value after the path allows this to be
  handled.

- ``[lockfiles]``: Specifies one or more lockfiles to lock while the
  task executes. Only one task may hold a lockfile, and any task that
  attempts to lock an already locked file will block until the lock is
  released. You can use this variable flag to accomplish mutual
  exclusion.

- ``[network]``: When set to "1", allows a task to access the network. By
  default, only the ``do_fetch`` task is granted network access. Recipes
  shouldn't access the network outside of ``do_fetch`` as it usually
  undermines fetcher source mirroring, image and licence manifests, software
  auditing and supply chain security.

- ``[noexec]``: When set to "1", marks the task as being empty, with
  no execution required. You can use the ``[noexec]`` flag to set up
  tasks as dependency placeholders, or to disable tasks defined
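For illustration only, a hypothetical recipe fragment combining the ``[lockfiles]``,
``[network]`` and ``[noexec]`` flags described above could look like this (the task
names and the lockfile path are placeholders, not taken from the hunk above)::

    # Serialize access to a shared directory between tasks of different recipes.
    do_deploy_shared[lockfiles] = "${TMPDIR}/shared-deploy.lock"

    # Grant one extra task network access (normally only do_fetch has it).
    do_run_selftest[network] = "1"

    # Turn a task into an empty dependency placeholder that is never executed.
    do_optional_step[noexec] = "1"

The flag syntax follows the descriptions above; which tasks such flags are applied
to is entirely up to the recipe or class that defines them.
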
@@ -2053,33 +1899,6 @@ looking at the source code of the ``bb`` module, which is in
the commonly used functions ``bb.utils.contains()`` and
``bb.utils.mkdirhier()``, which come with docstrings.

Extending Python Library Code
-----------------------------

If you wish to add your own Python library code (e.g. to provide
functions/classes you can use from Python functions in the metadata)
you can do so from any layer using the ``addpylib`` directive.
This directive is typically added to your layer configuration (
``conf/layer.conf``) although it will be handled in any ``.conf`` file.

Usage is of the form::

    addpylib <directory> <namespace>

Where <directory> specifies the directory to add to the library path.
The specified <namespace> is imported automatically, and if the imported
module specifies an attribute named ``BBIMPORTS``, that list of
sub-modules is iterated and imported too.

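As a minimal sketch (the directory name ``lib`` and the namespace ``mylayerlib``
are hypothetical choices, not mandated by BitBake), a layer's ``conf/layer.conf``
could contain::

    # Make ${LAYERDIR}/lib importable and import the "mylayerlib" namespace
    # automatically when BitBake parses this layer.
    addpylib ${LAYERDIR}/lib mylayerlib

With this in place, ``lib/mylayerlib/__init__.py`` could set
``BBIMPORTS = ["utils"]`` so that ``mylayerlib.utils`` is imported as well and its
functions become callable from Python functions in the metadata.
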
Testing and Debugging BitBake Python code
-----------------------------------------

The OpenEmbedded build system implements a convenient ``pydevshell`` target which
you can use to access the BitBake datastore and experiment with your own Python
code. See :yocto_docs:`Using a Python Development Shell
</dev-manual/python-development-shell.html#using-a-python-development-shell>` in the Yocto
Project manual for details.

Task Checksums and Setscene
===========================

@@ -1,91 +0,0 @@
.. SPDX-License-Identifier: CC-BY-2.5

================
Variable Context
================

Variables might only have an impact or can be used in certain contexts. Some
should only be used in global files like ``.conf``, while others are intended only
for local files like ``.bb``. This chapter aims to describe some important variable
contexts.

.. _ref-varcontext-configuration:

BitBake's own configuration
===========================

Variables starting with ``BB_`` usually configure the behaviour of BitBake itself.
For example, one could configure:

- System resources, like disk space to be used (:term:`BB_DISKMON_DIRS`),
  or the number of tasks to be run in parallel by BitBake (:term:`BB_NUMBER_THREADS`).

- How the fetchers shall behave, e.g., :term:`BB_FETCH_PREMIRRORONLY` is used
  by BitBake to determine if BitBake's fetcher shall search only
  :term:`PREMIRRORS` for files.

Those variables are usually configured globally.

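For illustration, such global settings typically live in a configuration file like
``conf/local.conf``; the values below are only example figures, not recommendations::

    # Run up to 8 tasks in parallel.
    BB_NUMBER_THREADS = "8"

    # Stop scheduling new tasks when ${TMPDIR} has less than 1G of free space
    # or fewer than 100K free inodes.
    BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},1G,100K"
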
BitBake configuration
=====================

There are variables:

- Like :term:`B` or :term:`T`, that are used to specify directories used by
  BitBake during the build of a particular recipe. Those variables are
  specified in ``bitbake.conf``. Some, like :term:`B`, are quite often
  overwritten in recipes.

- Starting with ``FAKEROOT``, to configure how the ``fakeroot`` command is
  handled. Those are usually set by ``bitbake.conf`` and might get adapted in a
  ``bbclass``.

- Detailing where BitBake will store and fetch information from, for
  data reuse between build runs like :term:`CACHE`, :term:`DL_DIR` or
  :term:`PERSISTENT_DIR`. Those are usually global.

Layers and files
================

Variables starting with ``LAYER`` configure how BitBake handles layers.
Additionally, variables starting with ``BB`` configure how layers and files are
handled. For example:

- :term:`LAYERDEPENDS` is used to configure on which layers a given layer
  depends.

- The configured layers are contained in :term:`BBLAYERS` and files in
  :term:`BBFILES`.

Those variables are often used in the files ``layer.conf`` and ``bblayers.conf``.

Recipes and packages
====================

Variables handling recipes and packages can be split into:

- :term:`PN`, :term:`PV` or :term:`PF` for example, contain information about
  the name or revision of a recipe or package. Usually, the default set in
  ``bitbake.conf`` is used, but those are from time to time overwritten in
  recipes.

- :term:`SUMMARY`, :term:`DESCRIPTION`, :term:`LICENSE` or :term:`HOMEPAGE`
  contain the expected information and should be set specifically for every
  recipe.

- In recipes, variables are also used to control build and runtime
  dependencies between recipes/packages with other recipes/packages. The
  most common should be: :term:`PROVIDES`, :term:`RPROVIDES`, :term:`DEPENDS`,
  and :term:`RDEPENDS`.

- There are further variables starting with ``SRC`` that specify the sources in
  a recipe like :term:`SRC_URI` or :term:`SRCDATE`. Those are also usually set
  in recipes.

- Which version or provider of a recipe should be given preference when
  multiple recipes would provide the same item, is controlled by variables
  starting with ``PREFERRED_``. Those are normally set in the configuration
  files of a ``MACHINE`` or ``DISTRO``.
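For illustration, a skeleton recipe touching the categories listed above might set
(all names, URLs and values here are hypothetical)::

    SUMMARY = "Example utility"
    DESCRIPTION = "A hypothetical recipe used only to illustrate variable context."
    HOMEPAGE = "https://example.com/exampleutil"
    LICENSE = "MIT"

    SRC_URI = "https://example.com/exampleutil-${PV}.tar.gz"

    DEPENDS = "zlib"
    RDEPENDS:${PN} = "bash"

Here :term:`PN` and :term:`PV` take their defaults from the recipe file name as set
up in ``bitbake.conf``, while the remaining variables are set explicitly in the
recipe, as described above.
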
@@ -40,7 +40,8 @@ overview of their function and contents.
|
||||
Azure Storage Shared Access Signature, when using the
|
||||
:ref:`Azure Storage fetcher <bitbake-user-manual/bitbake-user-manual-fetching:fetchers>`
|
||||
This variable can be defined to be used by the fetcher to authenticate
|
||||
and gain access to non-public artifacts::
|
||||
and gain access to non-public artifacts.
|
||||
::
|
||||
|
||||
AZ_SAS = ""se=2021-01-01&sp=r&sv=2018-11-09&sr=c&skoid=<skoid>&sig=<signature>""
|
||||
|
||||
@@ -99,26 +100,10 @@ overview of their function and contents.
|
||||
the path of the build. BitBake's output should not (and usually does
|
||||
not) depend on the directory in which it was built.
|
||||
|
||||
:term:`BB_CACHEDIR`
|
||||
Specifies the code parser cache directory (distinct from :term:`CACHE`
|
||||
and :term:`PERSISTENT_DIR` although they can be set to the same value
|
||||
if desired). The default value is "${TOPDIR}/cache".
|
||||
|
||||
:term:`BB_CHECK_SSL_CERTS`
|
||||
Specifies if SSL certificates should be checked when fetching. The default
|
||||
value is ``1`` and certificates are not checked if the value is set to ``0``.
|
||||
|
||||
:term:`BB_HASH_CODEPARSER_VALS`
|
||||
Specifies values for variables to use when populating the codeparser cache.
|
||||
This can be used selectively to set dummy values for variables to avoid
|
||||
the codeparser cache growing on every parse. Variables that would typically
|
||||
be included are those where the value is not significant for where the
|
||||
codeparser cache is used (i.e. when calculating variable dependencies for
|
||||
code fragments.) The value is space-separated without quoting values, for
|
||||
example::
|
||||
|
||||
BB_HASH_CODEPARSER_VALS = "T=/ WORKDIR=/ DATE=1234 TIME=1234"
|
||||
|
||||
:term:`BB_CONSOLELOG`
|
||||
Specifies the path to a log file into which BitBake's user interface
|
||||
writes output during the build.
|
||||
@@ -127,6 +112,18 @@ overview of their function and contents.
|
||||
Contains the name of the currently running task. The name does not
|
||||
include the ``do_`` prefix.
|
||||
|
||||
:term:`BB_DANGLINGAPPENDS_WARNONLY`
|
||||
Defines how BitBake handles situations where an append file
|
||||
(``.bbappend``) has no corresponding recipe file (``.bb``). This
|
||||
condition often occurs when layers get out of sync (e.g. ``oe-core``
|
||||
bumps a recipe version and the old recipe no longer exists and the
|
||||
other layer has not been updated to the new version of the recipe
|
||||
yet).
|
||||
|
||||
The default fatal behavior is safest because it is the sane reaction
|
||||
given something is out of sync. It is important to realize when your
|
||||
changes are no longer being applied.
|
||||
|
||||
:term:`BB_DEFAULT_TASK`
|
||||
The default task to use when none is specified (e.g. with the ``-c``
|
||||
command line option). The task name specified should not include the
|
||||
@@ -347,14 +344,6 @@ overview of their function and contents.
|
||||
|
||||
For example usage, see :term:`BB_GIT_SHALLOW`.
|
||||
|
||||
:term:`BB_GLOBAL_PYMODULES`
|
||||
Specifies the list of Python modules to place in the global namespace.
|
||||
It is intended that only the core layer should set this and it is meant
|
||||
to be a very small list, typically just ``os`` and ``sys``.
|
||||
:term:`BB_GLOBAL_PYMODULES` is expected to be set before the first
|
||||
``addpylib`` directive.
|
||||
See also ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:extending python library code`".
|
||||
|
||||
:term:`BB_HASHCHECK_FUNCTION`
|
||||
Specifies the name of the function to call during the "setscene" part
|
||||
of the task's execution in order to validate the list of task hashes.
|
||||
@@ -412,7 +401,7 @@ overview of their function and contents.
|
||||
|
||||
Example usage::
|
||||
|
||||
BB_HASHSERVE_UPSTREAM = "hashserv.yoctoproject.org:8686"
|
||||
BB_HASHSERVE_UPSTREAM = "typhoon.yocto.io:8687"
|
||||
|
||||
:term:`BB_INVALIDCONF`
|
||||
Used in combination with the ``ConfigParsed`` event to trigger
|
||||
@@ -420,15 +409,6 @@ overview of their function and contents.
|
||||
``ConfigParsed`` event can set the variable to trigger the re-parse.
|
||||
You must be careful to avoid recursive loops with this functionality.
|
||||
|
||||
:term:`BB_LOADFACTOR_MAX`
|
||||
Setting this to a value will cause BitBake to check the system load
|
||||
average before executing new tasks. If the load average is above the
|
||||
the number of CPUs multipled by this factor, no new task will be started
|
||||
unless there is no task executing. A value of "1.5" has been found to
|
||||
work reasonably. This is helpful for systems which don't have pressure
|
||||
regulation enabled, which is more granular. Pressure values take
|
||||
precedence over loadfactor.
|
||||
|
||||
:term:`BB_LOGCONFIG`
|
||||
Specifies the name of a config file that contains the user logging
|
||||
configuration. See
|
||||
@@ -503,64 +483,13 @@ overview of their function and contents.
|
||||
You must set this variable in the external environment in order
|
||||
for it to work.
|
||||
|
||||
:term:`BB_PRESSURE_MAX_CPU`
|
||||
Specifies a maximum CPU pressure threshold, above which BitBake's
|
||||
scheduler will not start new tasks (providing there is at least
|
||||
one active task). If no value is set, CPU pressure is not
|
||||
monitored when starting tasks.
|
||||
|
||||
The pressure data is calculated based upon what Linux kernels since
|
||||
version 4.20 expose under ``/proc/pressure``. The threshold represents
|
||||
the difference in "total" pressure from the previous second. The
|
||||
minimum value is 1.0 (extremely slow builds) and the maximum is
|
||||
1000000 (a pressure value unlikely to ever be reached).
|
||||
|
||||
This threshold can be set in ``conf/local.conf`` as::
|
||||
|
||||
BB_PRESSURE_MAX_CPU = "500"
|
||||
|
||||
:term:`BB_PRESSURE_MAX_IO`
|
||||
Specifies a maximum I/O pressure threshold, above which BitBake's
|
||||
scheduler will not start new tasks (providing there is at least
|
||||
one active task). If no value is set, I/O pressure is not
|
||||
monitored when starting tasks.
|
||||
|
||||
The pressure data is calculated based upon what Linux kernels since
|
||||
version 4.20 expose under ``/proc/pressure``. The threshold represents
|
||||
the difference in "total" pressure from the previous second. The
|
||||
minimum value is 1.0 (extremely slow builds) and the maximum is
|
||||
1000000 (a pressure value unlikely to ever be reached).
|
||||
|
||||
At this point in time, experiments show that IO pressure tends to
|
||||
be short-lived and regulating just the CPU with
|
||||
:term:`BB_PRESSURE_MAX_CPU` can help to reduce it.
|
||||
|
||||
:term:`BB_PRESSURE_MAX_MEMORY`
|
||||
|
||||
Specifies a maximum memory pressure threshold, above which BitBake's
|
||||
scheduler will not start new tasks (providing there is at least
|
||||
one active task). If no value is set, memory pressure is not
|
||||
monitored when starting tasks.
|
||||
|
||||
The pressure data is calculated based upon what Linux kernels since
|
||||
version 4.20 expose under ``/proc/pressure``. The threshold represents
|
||||
the difference in "total" pressure from the previous second. The
|
||||
minimum value is 1.0 (extremely slow builds) and the maximum is
|
||||
1000000 (a pressure value unlikely to ever be reached).
|
||||
|
||||
Memory pressure is experienced when time is spent swapping,
|
||||
refaulting pages from the page cache or performing direct reclaim.
|
||||
This is why memory pressure is rarely seen, but setting this variable
|
||||
might be useful as a last resort to prevent OOM errors if they are
|
||||
occurring during builds.
|
||||
|
||||
:term:`BB_RUNFMT`
|
||||
Specifies the name of the executable script files (i.e. run files)
|
||||
saved into ``${``\ :term:`T`\ ``}``. By default, the
|
||||
:term:`BB_RUNFMT` variable is undefined and the run filenames get
|
||||
created using the following form::
|
||||
|
||||
run.{func}.{pid}
|
||||
run.{task}.{pid}
|
||||
|
||||
If you want to force run files to take a specific name, you can set this
|
||||
variable in a configuration file.
|
||||
@@ -754,10 +683,6 @@ overview of their function and contents.
|
||||
:term:`BBFILE_PRIORITY`
|
||||
Assigns the priority for recipe files in each layer.
|
||||
|
||||
This variable is used in the ``conf/layer.conf`` file and must be
|
||||
suffixed with a `_` followed by the name of the specific layer (e.g.
|
||||
``BBFILE_PRIORITY_emenlow``). Colon as separator is not supported.
|
||||
|
||||
This variable is useful in situations where the same recipe appears
|
||||
in more than one layer. Setting this variable allows you to
|
||||
prioritize a layer against other layers that contain the same recipe
|
||||
@@ -772,7 +697,7 @@ overview of their function and contents.
|
||||
higher precedence. For example, the value 6 has a higher precedence
|
||||
than the value 5. If not specified, the :term:`BBFILE_PRIORITY` variable
|
||||
is set based on layer dependencies (see the :term:`LAYERDEPENDS` variable
|
||||
for more information). The default priority, if unspecified for a
|
||||
for more information. The default priority, if unspecified for a
|
||||
layer with no dependencies, is the lowest defined priority + 1 (or 1
|
||||
if no priorities are defined).
|
||||
|
||||
@@ -921,9 +846,9 @@ overview of their function and contents.
|
||||
section.
|
||||
|
||||
:term:`BBPATH`
|
||||
A colon-separated list used by BitBake to locate class (``.bbclass``)
|
||||
and configuration (``.conf``) files. This variable is analogous to the
|
||||
``PATH`` variable.
|
||||
Used by BitBake to locate class (``.bbclass``) and configuration
|
||||
(``.conf``) files. This variable is analogous to the ``PATH``
|
||||
variable.
|
||||
|
||||
If you run BitBake from a directory outside of the build directory,
|
||||
you must be sure to set :term:`BBPATH` to point to the build directory.
|
||||
@@ -1015,7 +940,7 @@ overview of their function and contents.
|
||||
``bblayers.conf`` configuration file.
|
||||
|
||||
To exclude a recipe from a world build using this variable, set the
|
||||
variable to "1" in the recipe. Set it to "0" to add it back to world build.
|
||||
variable to "1" in the recipe.
|
||||
|
||||
.. note::
|
||||
|
||||
@@ -1073,11 +998,6 @@ overview of their function and contents.
|
||||
environment variable. The value is a colon-separated list of
|
||||
directories that are searched left-to-right in order.
|
||||
|
||||
:term:`FILE_LAYERNAME`
|
||||
During parsing and task execution, this is set to the name of the
|
||||
layer containing the recipe file. Code can use this to identify which
|
||||
layer a recipe is from.
|
||||
|
||||
:term:`GITDIR`
|
||||
The directory in which a local copy of a Git repository is stored
|
||||
when it is cloned.
|
||||
@@ -1126,29 +1046,6 @@ overview of their function and contents.
|
||||
variable is not available outside of ``layer.conf`` and references
|
||||
are expanded immediately when parsing of the file completes.
|
||||
|
||||
:term:`LAYERSERIES_COMPAT`
|
||||
Lists the versions of the OpenEmbedded-Core (OE-Core) for which
|
||||
a layer is compatible. Using the :term:`LAYERSERIES_COMPAT` variable
|
||||
allows the layer maintainer to indicate which combinations of the
|
||||
layer and OE-Core can be expected to work. The variable gives the
|
||||
system a way to detect when a layer has not been tested with new
|
||||
releases of OE-Core (e.g. the layer is not maintained).
|
||||
|
||||
To specify the OE-Core versions for which a layer is compatible, use
|
||||
this variable in your layer's ``conf/layer.conf`` configuration file.
|
||||
For the list, use the Yocto Project release name (e.g. "kirkstone",
|
||||
"mickledore"). To specify multiple OE-Core versions for the layer, use
|
||||
a space-separated list::
|
||||
|
||||
LAYERSERIES_COMPAT_layer_root_name = "kirkstone mickledore"
|
||||
|
||||
.. note::
|
||||
|
||||
Setting :term:`LAYERSERIES_COMPAT` is required by the Yocto Project
|
||||
Compatible version 2 standard.
|
||||
The OpenEmbedded build system produces a warning if the variable
|
||||
is not set for any given layer.
|
||||
|
||||
:term:`LAYERVERSION`
|
||||
Optionally specifies the version of a layer as a single number. You
|
||||
can use this variable within
|
||||
@@ -1171,8 +1068,8 @@ overview of their function and contents.
|
||||
order.
|
||||
|
||||
:term:`OVERRIDES`
|
||||
A colon-separated list that BitBake uses to control what variables are
|
||||
overridden after BitBake parses recipes and configuration files.
|
||||
BitBake uses :term:`OVERRIDES` to control what variables are overridden
|
||||
after BitBake parses recipes and configuration files.
|
||||
|
||||
Following is a simple example that uses an overrides list based on
|
||||
machine architectures: OVERRIDES = "arm:x86:mips:powerpc" You can
|
||||
|
||||
@@ -13,7 +13,6 @@ BitBake User Manual
|
||||
bitbake-user-manual/bitbake-user-manual-intro
|
||||
bitbake-user-manual/bitbake-user-manual-execution
|
||||
bitbake-user-manual/bitbake-user-manual-metadata
|
||||
bitbake-user-manual/bitbake-user-manual-ref-variables-context
|
||||
bitbake-user-manual/bitbake-user-manual-fetching
|
||||
bitbake-user-manual/bitbake-user-manual-ref-variables
|
||||
bitbake-user-manual/bitbake-user-manual-hello
|
||||
|
||||
@@ -1,76 +1,61 @@
|
||||
.. SPDX-License-Identifier: CC-BY-2.5
|
||||
|
||||
=================================
|
||||
BitBake Supported Release Manuals
|
||||
=================================
|
||||
|
||||
****************************
|
||||
Release Series 5.1 (styhead)
|
||||
****************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.10 User Manual </bitbake/2.10/>`
|
||||
|
||||
*******************************
|
||||
Release Series 5.0 (scarthgap)
|
||||
*******************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.8 User Manual </bitbake/2.8/>`
|
||||
|
||||
******************************
|
||||
Release Series 4.0 (kirkstone)
|
||||
******************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.0 User Manual </bitbake/2.0/>`
|
||||
|
||||
================================
|
||||
BitBake Outdated Release Manuals
|
||||
================================
|
||||
|
||||
*******************************
|
||||
Release Series 4.3 (nanbield)
|
||||
*******************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.6 User Manual </bitbake/2.6/>`
|
||||
|
||||
*******************************
|
||||
Release Series 4.2 (mickledore)
|
||||
*******************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.4 User Manual </bitbake/2.4/>`
|
||||
|
||||
*****************************
|
||||
Release Series 4.1 (langdale)
|
||||
*****************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.2 User Manual </bitbake/2.2/>`
|
||||
===========================
|
||||
Supported Release Manuals
|
||||
===========================
|
||||
|
||||
******************************
|
||||
Release Series 3.4 (honister)
|
||||
******************************
|
||||
|
||||
- :yocto_docs:`BitBake 1.52 User Manual </bitbake/1.52/>`
|
||||
- :yocto_docs:`3.4 BitBake User Manual </3.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.4.1 BitBake User Manual </3.4.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.4.2 BitBake User Manual </3.4.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
******************************
|
||||
Release Series 3.3 (hardknott)
|
||||
******************************
|
||||
|
||||
- :yocto_docs:`BitBake 1.50 User Manual </bitbake/1.50/>`
|
||||
|
||||
*******************************
|
||||
Release Series 3.2 (gatesgarth)
|
||||
*******************************
|
||||
|
||||
- :yocto_docs:`BitBake 1.48 User Manual </bitbake/1.48/>`
|
||||
- :yocto_docs:`3.3 BitBake User Manual </3.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.3.1 BitBake User Manual </3.3.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.3.2 BitBake User Manual </3.3.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.3.3 BitBake User Manual </3.3.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.3.4 BitBake User Manual </3.3.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.3.5 BitBake User Manual </3.3.5/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
****************************
|
||||
Release Series 3.1 (dunfell)
|
||||
****************************
|
||||
|
||||
- :yocto_docs:`BitBake 1.46 User Manual </bitbake/1.46/>`
|
||||
- :yocto_docs:`3.1 BitBake User Manual </3.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.1 BitBake User Manual </3.1.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.2 BitBake User Manual </3.1.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.3 BitBake User Manual </3.1.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.4 BitBake User Manual </3.1.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.5 BitBake User Manual </3.1.5/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.6 BitBake User Manual </3.1.6/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.7 BitBake User Manual </3.1.7/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.8 BitBake User Manual </3.1.8/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.9 BitBake User Manual </3.1.9/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.10 BitBake User Manual </3.1.10/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.11 BitBake User Manual </3.1.11/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.12 BitBake User Manual </3.1.12/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.13 BitBake User Manual </3.1.13/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.14 BitBake User Manual </3.1.14/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
==========================
|
||||
Outdated Release Manuals
|
||||
==========================
|
||||
|
||||
*******************************
|
||||
Release Series 3.2 (gatesgarth)
|
||||
*******************************
|
||||
|
||||
- :yocto_docs:`3.2 BitBake User Manual </3.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.2.1 BitBake User Manual </3.2.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.2.2 BitBake User Manual </3.2.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.2.3 BitBake User Manual </3.2.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.2.4 BitBake User Manual </3.2.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
*************************
|
||||
Release Series 3.0 (zeus)
|
||||
|
||||
@@ -3,8 +3,6 @@
|
||||
#
|
||||
# Copyright (C) 2006 Tim Ansell
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
# Please Note:
|
||||
# Be careful when using mutable types (ie Dict and Lists) - operations involving these are SLOW.
|
||||
# Assign a file to __warn__ to get warnings about slow operations.
|
||||
@@ -36,9 +34,8 @@ class COWDictMeta(COWMeta):
|
||||
__marker__ = tuple()
|
||||
|
||||
def __str__(cls):
|
||||
ignored_keys = set(["__count__", "__doc__", "__module__", "__firstlineno__", "__static_attributes__"])
|
||||
keys = set(cls.__dict__.keys()) - ignored_keys
|
||||
return "<COWDict Level: %i Current Keys: %i>" % (cls.__count__, len(keys))
|
||||
# FIXME: I have magic numbers!
|
||||
return "<COWDict Level: %i Current Keys: %i>" % (cls.__count__, len(cls.__dict__) - 3)
|
||||
|
||||
__repr__ = __str__
|
||||
|
||||
@@ -162,9 +159,8 @@ class COWDictMeta(COWMeta):
|
||||
|
||||
class COWSetMeta(COWDictMeta):
|
||||
def __str__(cls):
|
||||
ignored_keys = set(["__count__", "__doc__", "__module__", "__firstlineno__", "__static_attributes__"])
|
||||
keys = set(cls.__dict__.keys()) - ignored_keys
|
||||
return "<COWSet Level: %i Current Keys: %i>" % (cls.__count__, len(keys))
|
||||
# FIXME: I have magic numbers!
|
||||
return "<COWSet Level: %i Current Keys: %i>" % (cls.__count__, len(cls.__dict__) - 3)
|
||||
|
||||
__repr__ = __str__
|
||||
|
||||
|
||||
@@ -9,19 +9,12 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
__version__ = "2.9.1"
|
||||
__version__ = "2.0.0"
|
||||
|
||||
import sys
|
||||
if sys.version_info < (3, 8, 0):
|
||||
raise RuntimeError("Sorry, python 3.8.0 or later is required for this version of bitbake")
|
||||
if sys.version_info < (3, 6, 0):
|
||||
raise RuntimeError("Sorry, python 3.6.0 or later is required for this version of bitbake")
|
||||
|
||||
if sys.version_info < (3, 10, 0):
|
||||
# With python 3.8 and 3.9, we see errors of "libgcc_s.so.1 must be installed for pthread_cancel to work"
|
||||
# https://stackoverflow.com/questions/64797838/libgcc-s-so-1-must-be-installed-for-pthread-cancel-to-work
|
||||
# https://bugs.ams1.psf.io/issue42888
|
||||
# so ensure libgcc_s is loaded early on
|
||||
import ctypes
|
||||
libgcc_s = ctypes.CDLL('libgcc_s.so.1')
|
||||
|
||||
class BBHandledException(Exception):
|
||||
"""
|
||||
@@ -36,7 +29,6 @@ class BBHandledException(Exception):
|
||||
|
||||
import os
|
||||
import logging
|
||||
from collections import namedtuple
|
||||
|
||||
|
||||
class NullHandler(logging.Handler):
|
||||
@@ -68,10 +60,6 @@ class BBLoggerMixin(object):
|
||||
return
|
||||
if loglevel < bb.msg.loggerDefaultLogLevel:
|
||||
return
|
||||
|
||||
if not isinstance(level, int) or not isinstance(msg, str):
|
||||
mainlogger.warning("Invalid arguments in bbdebug: %s" % repr((level, msg,) + args))
|
||||
|
||||
return self.log(loglevel, msg, *args, **kwargs)
|
||||
|
||||
def plain(self, msg, *args, **kwargs):
|
||||
@@ -104,6 +92,26 @@ class BBLoggerAdapter(logging.LoggerAdapter, BBLoggerMixin):
|
||||
self.setup_bblogger(logger.name)
|
||||
super().__init__(logger, *args, **kwargs)
|
||||
|
||||
if sys.version_info < (3, 6):
|
||||
# These properties were added in Python 3.6. Add them in older versions
|
||||
# for compatibility
|
||||
@property
|
||||
def manager(self):
|
||||
return self.logger.manager
|
||||
|
||||
@manager.setter
|
||||
def manager(self, value):
|
||||
self.logger.manager = value
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self.logger.name
|
||||
|
||||
def __repr__(self):
|
||||
logger = self.logger
|
||||
level = logger.getLevelName(logger.getEffectiveLevel())
|
||||
return '<%s %s (%s)>' % (self.__class__.__name__, logger.name, level)
|
||||
|
||||
logging.LoggerAdapter = BBLoggerAdapter
|
||||
|
||||
logger = logging.getLogger("BitBake")
|
||||
@@ -208,14 +216,3 @@ def deprecate_import(current, modulename, fromlist, renames = None):
|
||||
|
||||
setattr(sys.modules[current], newname, newobj)
|
||||
|
||||
TaskData = namedtuple("TaskData", [
|
||||
"pn",
|
||||
"taskname",
|
||||
"fn",
|
||||
"deps",
|
||||
"provides",
|
||||
"taskhash",
|
||||
"unihash",
|
||||
"hashfn",
|
||||
"taskhash_deps",
|
||||
])
|
||||
|
||||
@@ -1,215 +0,0 @@
|
||||
#! /usr/bin/env python3
|
||||
#
|
||||
# Copyright 2023 by Garmin Ltd. or its subsidiaries
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
|
||||
import sys
|
||||
import ctypes
|
||||
import os
|
||||
import errno
|
||||
import pwd
|
||||
import grp
|
||||
|
||||
libacl = ctypes.CDLL("libacl.so.1", use_errno=True)
|
||||
|
||||
|
||||
ACL_TYPE_ACCESS = 0x8000
|
||||
ACL_TYPE_DEFAULT = 0x4000
|
||||
|
||||
ACL_FIRST_ENTRY = 0
|
||||
ACL_NEXT_ENTRY = 1
|
||||
|
||||
ACL_UNDEFINED_TAG = 0x00
|
||||
ACL_USER_OBJ = 0x01
|
||||
ACL_USER = 0x02
|
||||
ACL_GROUP_OBJ = 0x04
|
||||
ACL_GROUP = 0x08
|
||||
ACL_MASK = 0x10
|
||||
ACL_OTHER = 0x20
|
||||
|
||||
ACL_READ = 0x04
|
||||
ACL_WRITE = 0x02
|
||||
ACL_EXECUTE = 0x01
|
||||
|
||||
acl_t = ctypes.c_void_p
|
||||
acl_entry_t = ctypes.c_void_p
|
||||
acl_permset_t = ctypes.c_void_p
|
||||
acl_perm_t = ctypes.c_uint
|
||||
|
||||
acl_tag_t = ctypes.c_int
|
||||
|
||||
libacl.acl_free.argtypes = [acl_t]
|
||||
|
||||
|
||||
def acl_free(acl):
|
||||
libacl.acl_free(acl)
|
||||
|
||||
|
||||
libacl.acl_get_file.restype = acl_t
|
||||
libacl.acl_get_file.argtypes = [ctypes.c_char_p, ctypes.c_uint]
|
||||
|
||||
|
||||
def acl_get_file(path, typ):
|
||||
acl = libacl.acl_get_file(os.fsencode(path), typ)
|
||||
if acl is None:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err), str(path))
|
||||
|
||||
return acl
|
||||
|
||||
|
||||
libacl.acl_get_entry.argtypes = [acl_t, ctypes.c_int, ctypes.c_void_p]
|
||||
|
||||
|
||||
def acl_get_entry(acl, entry_id):
|
||||
entry = acl_entry_t()
|
||||
ret = libacl.acl_get_entry(acl, entry_id, ctypes.byref(entry))
|
||||
if ret < 0:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err))
|
||||
|
||||
if ret == 0:
|
||||
return None
|
||||
|
||||
return entry
|
||||
|
||||
|
||||
libacl.acl_get_tag_type.argtypes = [acl_entry_t, ctypes.c_void_p]
|
||||
|
||||
|
||||
def acl_get_tag_type(entry_d):
|
||||
tag = acl_tag_t()
|
||||
ret = libacl.acl_get_tag_type(entry_d, ctypes.byref(tag))
|
||||
if ret < 0:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err))
|
||||
return tag.value
|
||||
|
||||
|
||||
libacl.acl_get_qualifier.restype = ctypes.c_void_p
|
||||
libacl.acl_get_qualifier.argtypes = [acl_entry_t]
|
||||
|
||||
|
||||
def acl_get_qualifier(entry_d):
|
||||
ret = libacl.acl_get_qualifier(entry_d)
|
||||
if ret is None:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err))
|
||||
return ctypes.c_void_p(ret)
|
||||
|
||||
|
||||
libacl.acl_get_permset.argtypes = [acl_entry_t, ctypes.c_void_p]
|
||||
|
||||
|
||||
def acl_get_permset(entry_d):
|
||||
permset = acl_permset_t()
|
||||
ret = libacl.acl_get_permset(entry_d, ctypes.byref(permset))
|
||||
if ret < 0:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err))
|
||||
|
||||
return permset
|
||||
|
||||
|
||||
libacl.acl_get_perm.argtypes = [acl_permset_t, acl_perm_t]
|
||||
|
||||
|
||||
def acl_get_perm(permset_d, perm):
|
||||
ret = libacl.acl_get_perm(permset_d, perm)
|
||||
if ret < 0:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err))
|
||||
return bool(ret)
|
||||
|
||||
|
||||
class Entry(object):
|
||||
def __init__(self, tag, qualifier, mode):
|
||||
self.tag = tag
|
||||
self.qualifier = qualifier
|
||||
self.mode = mode
|
||||
|
||||
def __str__(self):
|
||||
typ = ""
|
||||
qual = ""
|
||||
if self.tag == ACL_USER:
|
||||
typ = "user"
|
||||
qual = pwd.getpwuid(self.qualifier).pw_name
|
||||
elif self.tag == ACL_GROUP:
|
||||
typ = "group"
|
||||
qual = grp.getgrgid(self.qualifier).gr_name
|
||||
elif self.tag == ACL_USER_OBJ:
|
||||
typ = "user"
|
||||
elif self.tag == ACL_GROUP_OBJ:
|
||||
typ = "group"
|
||||
elif self.tag == ACL_MASK:
|
||||
typ = "mask"
|
||||
elif self.tag == ACL_OTHER:
|
||||
typ = "other"
|
||||
|
||||
r = "r" if self.mode & ACL_READ else "-"
|
||||
w = "w" if self.mode & ACL_WRITE else "-"
|
||||
x = "x" if self.mode & ACL_EXECUTE else "-"
|
||||
|
||||
return f"{typ}:{qual}:{r}{w}{x}"
|
||||
|
||||
|
||||
class ACL(object):
|
||||
def __init__(self, acl):
|
||||
self.acl = acl
|
||||
|
||||
def __del__(self):
|
||||
acl_free(self.acl)
|
||||
|
||||
def entries(self):
|
||||
entry_id = ACL_FIRST_ENTRY
|
||||
while True:
|
||||
entry = acl_get_entry(self.acl, entry_id)
|
||||
if entry is None:
|
||||
break
|
||||
|
||||
permset = acl_get_permset(entry)
|
||||
|
||||
mode = 0
|
||||
for m in (ACL_READ, ACL_WRITE, ACL_EXECUTE):
|
||||
if acl_get_perm(permset, m):
|
||||
mode |= m
|
||||
|
||||
qualifier = None
|
||||
tag = acl_get_tag_type(entry)
|
||||
|
||||
if tag == ACL_USER or tag == ACL_GROUP:
|
||||
qual = acl_get_qualifier(entry)
|
||||
qualifier = ctypes.cast(qual, ctypes.POINTER(ctypes.c_int))[0]
|
||||
|
||||
yield Entry(tag, qualifier, mode)
|
||||
|
||||
entry_id = ACL_NEXT_ENTRY
|
||||
|
||||
@classmethod
|
||||
def from_path(cls, path, typ):
|
||||
acl = acl_get_file(path, typ)
|
||||
return cls(acl)
|
||||
|
||||
|
||||
def main():
|
||||
import argparse
|
||||
import pwd
|
||||
import grp
|
||||
from pathlib import Path
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("path", help="File Path", type=Path)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
acl = ACL.from_path(args.path, ACL_TYPE_ACCESS)
|
||||
for entry in acl.entries():
|
||||
print(str(entry))
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
@@ -1,16 +1,31 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import itertools
|
||||
import json
|
||||
|
||||
# The Python async server defaults to a 64K receive buffer, so we hardcode our
|
||||
# maximum chunk size. It would be better if the client and server reported to
|
||||
# each other what the maximum chunk sizes were, but that will slow down the
|
||||
# connection setup with a round trip delay so I'd rather not do that unless it
|
||||
# is necessary
|
||||
DEFAULT_MAX_CHUNK = 32 * 1024
|
||||
|
||||
|
||||
def chunkify(msg, max_chunk):
|
||||
if len(msg) < max_chunk - 1:
|
||||
yield ''.join((msg, "\n"))
|
||||
else:
|
||||
yield ''.join((json.dumps({
|
||||
'chunk-stream': None
|
||||
}), "\n"))
|
||||
|
||||
args = [iter(msg)] * (max_chunk - 1)
|
||||
for m in map(''.join, itertools.zip_longest(*args, fillvalue='')):
|
||||
yield ''.join(itertools.chain(m, "\n"))
|
||||
yield "\n"
|
||||
|
||||
|
||||
from .client import AsyncClient, Client
|
||||
from .serv import AsyncServer, AsyncServerConnection
|
||||
from .connection import DEFAULT_MAX_CHUNK
|
||||
from .exceptions import (
|
||||
ClientError,
|
||||
ServerError,
|
||||
ConnectionClosedError,
|
||||
InvokeError,
|
||||
)
|
||||
from .serv import AsyncServer, AsyncServerConnection, ClientError, ServerError
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -10,160 +8,47 @@ import json
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
import re
|
||||
import contextlib
|
||||
from threading import Thread
|
||||
from .connection import StreamConnection, WebsocketConnection, DEFAULT_MAX_CHUNK
|
||||
from .exceptions import ConnectionClosedError, InvokeError
|
||||
|
||||
UNIX_PREFIX = "unix://"
|
||||
WS_PREFIX = "ws://"
|
||||
WSS_PREFIX = "wss://"
|
||||
|
||||
ADDR_TYPE_UNIX = 0
|
||||
ADDR_TYPE_TCP = 1
|
||||
ADDR_TYPE_WS = 2
|
||||
|
||||
WEBSOCKETS_MIN_VERSION = (9, 1)
|
||||
# Need websockets 10 with python 3.10+
|
||||
if sys.version_info >= (3, 10, 0):
|
||||
WEBSOCKETS_MIN_VERSION = (10, 0)
|
||||
|
||||
|
||||
def parse_address(addr):
|
||||
if addr.startswith(UNIX_PREFIX):
|
||||
return (ADDR_TYPE_UNIX, (addr[len(UNIX_PREFIX) :],))
|
||||
elif addr.startswith(WS_PREFIX) or addr.startswith(WSS_PREFIX):
|
||||
return (ADDR_TYPE_WS, (addr,))
|
||||
else:
|
||||
m = re.match(r"\[(?P<host>[^\]]*)\]:(?P<port>\d+)$", addr)
|
||||
if m is not None:
|
||||
host = m.group("host")
|
||||
port = m.group("port")
|
||||
else:
|
||||
host, port = addr.split(":")
|
||||
|
||||
return (ADDR_TYPE_TCP, (host, int(port)))
|
||||
from . import chunkify, DEFAULT_MAX_CHUNK
|
||||
|
||||
|
||||
class AsyncClient(object):
|
||||
def __init__(
|
||||
self,
|
||||
proto_name,
|
||||
proto_version,
|
||||
logger,
|
||||
timeout=30,
|
||||
server_headers=False,
|
||||
headers={},
|
||||
):
|
||||
self.socket = None
|
||||
def __init__(self, proto_name, proto_version, logger, timeout=30):
|
||||
self.reader = None
|
||||
self.writer = None
|
||||
self.max_chunk = DEFAULT_MAX_CHUNK
|
||||
self.proto_name = proto_name
|
||||
self.proto_version = proto_version
|
||||
self.logger = logger
|
||||
self.timeout = timeout
|
||||
self.needs_server_headers = server_headers
|
||||
self.server_headers = {}
|
||||
self.headers = headers
|
||||
|
||||
async def connect_tcp(self, address, port):
|
||||
async def connect_sock():
|
||||
reader, writer = await asyncio.open_connection(address, port)
|
||||
return StreamConnection(reader, writer, self.timeout, self.max_chunk)
|
||||
return await asyncio.open_connection(address, port)
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
async def connect_unix(self, path):
|
||||
async def connect_sock():
|
||||
# AF_UNIX has path length issues so chdir here to workaround
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
os.chdir(os.path.dirname(path))
|
||||
# The socket must be opened synchronously so that CWD doesn't get
|
||||
# changed out from underneath us so we pass as a sock into asyncio
|
||||
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
|
||||
sock.connect(os.path.basename(path))
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
reader, writer = await asyncio.open_unix_connection(sock=sock)
|
||||
return StreamConnection(reader, writer, self.timeout, self.max_chunk)
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
async def connect_websocket(self, uri):
|
||||
import websockets
|
||||
|
||||
try:
|
||||
version = tuple(
|
||||
int(v)
|
||||
for v in websockets.__version__.split(".")[
|
||||
0 : len(WEBSOCKETS_MIN_VERSION)
|
||||
]
|
||||
)
|
||||
except ValueError:
|
||||
raise ImportError(
|
||||
f"Unable to parse websockets version '{websockets.__version__}'"
|
||||
)
|
||||
|
||||
if version < WEBSOCKETS_MIN_VERSION:
|
||||
min_ver_str = ".".join(str(v) for v in WEBSOCKETS_MIN_VERSION)
|
||||
raise ImportError(
|
||||
f"Websockets version {websockets.__version__} is less than minimum required version {min_ver_str}"
|
||||
)
|
||||
|
||||
async def connect_sock():
|
||||
try:
|
||||
websocket = await websockets.connect(
|
||||
uri,
|
||||
ping_interval=None,
|
||||
open_timeout=self.timeout,
|
||||
)
|
||||
except asyncio.exceptions.TimeoutError:
|
||||
raise ConnectionError("Timeout while connecting to websocket")
|
||||
except (OSError, websockets.InvalidHandshake, websockets.InvalidURI) as exc:
|
||||
raise ConnectionError(f"Could not connect to websocket: {exc}") from exc
|
||||
return WebsocketConnection(websocket, self.timeout)
|
||||
return await asyncio.open_unix_connection(path)
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
async def setup_connection(self):
|
||||
# Send headers
|
||||
await self.socket.send("%s %s" % (self.proto_name, self.proto_version))
|
||||
await self.socket.send(
|
||||
"needs-headers: %s" % ("true" if self.needs_server_headers else "false")
|
||||
)
|
||||
for k, v in self.headers.items():
|
||||
await self.socket.send("%s: %s" % (k, v))
|
||||
|
||||
# End of headers
|
||||
await self.socket.send("")
|
||||
|
||||
self.server_headers = {}
|
||||
if self.needs_server_headers:
|
||||
while True:
|
||||
line = await self.socket.recv()
|
||||
if not line:
|
||||
# End headers
|
||||
break
|
||||
tag, value = line.split(":", 1)
|
||||
self.server_headers[tag.lower()] = value.strip()
|
||||
|
||||
async def get_header(self, tag, default):
|
||||
await self.connect()
|
||||
return self.server_headers.get(tag, default)
|
||||
s = '%s %s\n\n' % (self.proto_name, self.proto_version)
|
||||
self.writer.write(s.encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
|
||||
async def connect(self):
|
||||
if self.socket is None:
|
||||
self.socket = await self._connect_sock()
|
||||
if self.reader is None or self.writer is None:
|
||||
(self.reader, self.writer) = await self._connect_sock()
|
||||
await self.setup_connection()
|
||||
|
||||
async def disconnect(self):
|
||||
if self.socket is not None:
|
||||
await self.socket.close()
|
||||
self.socket = None
|
||||
|
||||
async def close(self):
|
||||
await self.disconnect()
|
||||
self.reader = None
|
||||
|
||||
if self.writer is not None:
|
||||
self.writer.close()
|
||||
self.writer = None
|
||||
|
||||
async def _send_wrapper(self, proc):
|
||||
count = 0
|
||||
@@ -174,7 +59,6 @@ class AsyncClient(object):
|
||||
except (
|
||||
OSError,
|
||||
ConnectionError,
|
||||
ConnectionClosedError,
|
||||
json.JSONDecodeError,
|
||||
UnicodeDecodeError,
|
||||
) as e:
|
||||
@@ -186,27 +70,49 @@ class AsyncClient(object):
|
||||
await self.close()
|
||||
count += 1
|
||||
|
||||
def check_invoke_error(self, msg):
|
||||
if isinstance(msg, dict) and "invoke-error" in msg:
|
||||
raise InvokeError(msg["invoke-error"]["message"])
|
||||
async def send_message(self, msg):
|
||||
async def get_line():
|
||||
try:
|
||||
line = await asyncio.wait_for(self.reader.readline(), self.timeout)
|
||||
except asyncio.TimeoutError:
|
||||
raise ConnectionError("Timed out waiting for server")
|
||||
|
||||
if not line:
|
||||
raise ConnectionError("Connection closed")
|
||||
|
||||
line = line.decode("utf-8")
|
||||
|
||||
if not line.endswith("\n"):
|
||||
raise ConnectionError("Bad message %r" % (line))
|
||||
|
||||
return line
|
||||
|
||||
async def invoke(self, msg):
|
||||
async def proc():
|
||||
await self.socket.send_message(msg)
|
||||
return await self.socket.recv_message()
|
||||
for c in chunkify(json.dumps(msg), self.max_chunk):
|
||||
self.writer.write(c.encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
|
||||
result = await self._send_wrapper(proc)
|
||||
self.check_invoke_error(result)
|
||||
return result
|
||||
l = await get_line()
|
||||
|
||||
m = json.loads(l)
|
||||
if m and "chunk-stream" in m:
|
||||
lines = []
|
||||
while True:
|
||||
l = (await get_line()).rstrip("\n")
|
||||
if not l:
|
||||
break
|
||||
lines.append(l)
|
||||
|
||||
m = json.loads("".join(lines))
|
||||
|
||||
return m
|
||||
|
||||
return await self._send_wrapper(proc)
|
||||
|
||||
async def ping(self):
|
||||
return await self.invoke({"ping": {}})
|
||||
|
||||
async def __aenter__(self):
|
||||
return self
|
||||
|
||||
async def __aexit__(self, exc_type, exc_value, traceback):
|
||||
await self.close()
|
||||
return await self.send_message(
|
||||
{'ping': {}}
|
||||
)
|
||||
|
||||
|
||||
class Client(object):
|
||||
@@ -224,7 +130,7 @@ class Client(object):
|
||||
# required (but harmless) with it.
|
||||
asyncio.set_event_loop(self.loop)
|
||||
|
||||
self._add_methods("connect_tcp", "ping")
|
||||
self._add_methods('connect_tcp', 'ping')
|
||||
|
||||
@abc.abstractmethod
|
||||
def _get_async_client(self):
|
||||
@@ -242,8 +148,14 @@ class Client(object):
|
||||
setattr(self, m, self._get_downcall_wrapper(downcall))
|
||||
|
||||
def connect_unix(self, path):
|
||||
self.loop.run_until_complete(self.client.connect_unix(path))
|
||||
self.loop.run_until_complete(self.client.connect())
|
||||
# AF_UNIX has path length issues so chdir here to workaround
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
os.chdir(os.path.dirname(path))
|
||||
self.loop.run_until_complete(self.client.connect_unix(os.path.basename(path)))
|
||||
self.loop.run_until_complete(self.client.connect())
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
|
||||
@property
|
||||
def max_chunk(self):
|
||||
@@ -253,19 +165,8 @@ class Client(object):
|
||||
def max_chunk(self, value):
|
||||
self.client.max_chunk = value
|
||||
|
||||
def disconnect(self):
|
||||
self.loop.run_until_complete(self.client.close())
|
||||
|
||||
def close(self):
|
||||
if self.loop:
|
||||
self.loop.run_until_complete(self.client.close())
|
||||
self.loop.run_until_complete(self.client.close())
|
||||
if sys.version_info >= (3, 6):
|
||||
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
|
||||
self.loop.close()
|
||||
self.loop = None
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
self.close()
|
||||
return False
|
||||
self.loop.close()
|
||||
|
||||
@@ -1,146 +0,0 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import asyncio
|
||||
import itertools
|
||||
import json
|
||||
from datetime import datetime
|
||||
from .exceptions import ClientError, ConnectionClosedError
|
||||
|
||||
|
||||
# The Python async server defaults to a 64K receive buffer, so we hardcode our
|
||||
# maximum chunk size. It would be better if the client and server reported to
|
||||
# each other what the maximum chunk sizes were, but that will slow down the
|
||||
# connection setup with a round trip delay so I'd rather not do that unless it
|
||||
# is necessary
|
||||
DEFAULT_MAX_CHUNK = 32 * 1024
|
||||
|
||||
|
||||
def chunkify(msg, max_chunk):
|
||||
if len(msg) < max_chunk - 1:
|
||||
yield "".join((msg, "\n"))
|
||||
else:
|
||||
yield "".join((json.dumps({"chunk-stream": None}), "\n"))
|
||||
|
||||
args = [iter(msg)] * (max_chunk - 1)
|
||||
for m in map("".join, itertools.zip_longest(*args, fillvalue="")):
|
||||
yield "".join(itertools.chain(m, "\n"))
|
||||
yield "\n"
|
||||
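# Illustrative sketch (not part of this change): how a receiver reassembles a
# message framed by chunkify() above. "read_line" stands in for any callable
# that returns one newline-terminated line at a time.
def dechunkify(read_line):
    msg = json.loads(read_line())
    if isinstance(msg, dict) and "chunk-stream" in msg:
        # Chunked form: collect chunk lines until the terminating empty line,
        # then decode the concatenated payload.
        chunks = []
        while True:
            line = read_line().rstrip("\n")
            if not line:
                break
            chunks.append(line)
        msg = json.loads("".join(chunks))
    return msg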
|
||||
|
||||
def json_serialize(obj):
|
||||
if isinstance(obj, datetime):
|
||||
return obj.isoformat()
|
||||
raise TypeError("Type %s not serializeable" % type(obj))
|
||||
|
||||
|
||||
class StreamConnection(object):
|
||||
def __init__(self, reader, writer, timeout, max_chunk=DEFAULT_MAX_CHUNK):
|
||||
self.reader = reader
|
||||
self.writer = writer
|
||||
self.timeout = timeout
|
||||
self.max_chunk = max_chunk
|
||||
|
||||
@property
|
||||
def address(self):
|
||||
return self.writer.get_extra_info("peername")
|
||||
|
||||
async def send_message(self, msg):
|
||||
for c in chunkify(json.dumps(msg, default=json_serialize), self.max_chunk):
|
||||
self.writer.write(c.encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
|
||||
async def recv_message(self):
|
||||
l = await self.recv()
|
||||
|
||||
m = json.loads(l)
|
||||
if not m:
|
||||
return m
|
||||
|
||||
if "chunk-stream" in m:
|
||||
lines = []
|
||||
while True:
|
||||
l = await self.recv()
|
||||
if not l:
|
||||
break
|
||||
lines.append(l)
|
||||
|
||||
m = json.loads("".join(lines))
|
||||
|
||||
return m
|
||||
|
||||
async def send(self, msg):
|
||||
self.writer.write(("%s\n" % msg).encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
|
||||
async def recv(self):
|
||||
if self.timeout < 0:
|
||||
line = await self.reader.readline()
|
||||
else:
|
||||
try:
|
||||
line = await asyncio.wait_for(self.reader.readline(), self.timeout)
|
||||
except asyncio.TimeoutError:
|
||||
raise ConnectionError("Timed out waiting for data")
|
||||
|
||||
if not line:
|
||||
raise ConnectionClosedError("Connection closed")
|
||||
|
||||
line = line.decode("utf-8")
|
||||
|
||||
if not line.endswith("\n"):
|
||||
raise ConnectionError("Bad message %r" % (line))
|
||||
|
||||
return line.rstrip()
|
||||
|
||||
async def close(self):
|
||||
self.reader = None
|
||||
if self.writer is not None:
|
||||
self.writer.close()
|
||||
self.writer = None
|
||||
|
||||
|
||||
class WebsocketConnection(object):
|
||||
def __init__(self, socket, timeout):
|
||||
self.socket = socket
|
||||
self.timeout = timeout
|
||||
|
||||
@property
|
||||
def address(self):
|
||||
return ":".join(str(s) for s in self.socket.remote_address)
|
||||
|
||||
async def send_message(self, msg):
|
||||
await self.send(json.dumps(msg, default=json_serialize))
|
||||
|
||||
async def recv_message(self):
|
||||
m = await self.recv()
|
||||
return json.loads(m)
|
||||
|
||||
async def send(self, msg):
|
||||
import websockets.exceptions
|
||||
|
||||
try:
|
||||
await self.socket.send(msg)
|
||||
except websockets.exceptions.ConnectionClosed:
|
||||
raise ConnectionClosedError("Connection closed")
|
||||
|
||||
async def recv(self):
|
||||
import websockets.exceptions
|
||||
|
||||
try:
|
||||
if self.timeout < 0:
|
||||
return await self.socket.recv()
|
||||
|
||||
try:
|
||||
return await asyncio.wait_for(self.socket.recv(), self.timeout)
|
||||
except asyncio.TimeoutError:
|
||||
raise ConnectionError("Timed out waiting for data")
|
||||
except websockets.exceptions.ConnectionClosed:
|
||||
raise ConnectionClosedError("Connection closed")
|
||||
|
||||
async def close(self):
|
||||
if self.socket is not None:
|
||||
await self.socket.close()
|
||||
self.socket = None
|
||||
@@ -1,21 +0,0 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
class ClientError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class InvokeError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ServerError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ConnectionClosedError(Exception):
|
||||
pass
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -12,353 +10,234 @@ import signal
|
||||
import socket
|
||||
import sys
|
||||
import multiprocessing
|
||||
import logging
|
||||
from .connection import StreamConnection, WebsocketConnection
|
||||
from .exceptions import ClientError, ServerError, ConnectionClosedError, InvokeError
|
||||
from . import chunkify, DEFAULT_MAX_CHUNK
|
||||
|
||||
|
||||
class ClientLoggerAdapter(logging.LoggerAdapter):
|
||||
def process(self, msg, kwargs):
|
||||
return f"[Client {self.extra['address']}] {msg}", kwargs
|
||||
class ClientError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ServerError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class AsyncServerConnection(object):
|
||||
# If a handler returns this object (e.g. `return self.NO_RESPONSE`), no
|
||||
# return message will automatically be sent back to the client
|
||||
NO_RESPONSE = object()
|
||||
|
||||
def __init__(self, socket, proto_name, logger):
|
||||
self.socket = socket
|
||||
def __init__(self, reader, writer, proto_name, logger):
|
||||
self.reader = reader
|
||||
self.writer = writer
|
||||
self.proto_name = proto_name
|
||||
self.max_chunk = DEFAULT_MAX_CHUNK
|
||||
self.handlers = {
|
||||
"ping": self.handle_ping,
|
||||
'chunk-stream': self.handle_chunk,
|
||||
'ping': self.handle_ping,
|
||||
}
|
||||
self.logger = ClientLoggerAdapter(
|
||||
logger,
|
||||
{
|
||||
"address": socket.address,
|
||||
},
|
||||
)
|
||||
self.client_headers = {}
|
||||
|
||||
async def close(self):
|
||||
await self.socket.close()
|
||||
|
||||
async def handle_headers(self, headers):
|
||||
return {}
|
||||
self.logger = logger
|
||||
|
||||
async def process_requests(self):
|
||||
try:
|
||||
self.logger.info("Client %r connected" % (self.socket.address,))
|
||||
self.addr = self.writer.get_extra_info('peername')
|
||||
self.logger.debug('Client %r connected' % (self.addr,))
|
||||
|
||||
# Read protocol and version
|
||||
client_protocol = await self.socket.recv()
|
||||
if not client_protocol:
|
||||
client_protocol = await self.reader.readline()
|
||||
if client_protocol is None:
|
||||
return
|
||||
|
||||
(client_proto_name, client_proto_version) = client_protocol.split()
|
||||
(client_proto_name, client_proto_version) = client_protocol.decode('utf-8').rstrip().split()
|
||||
if client_proto_name != self.proto_name:
|
||||
self.logger.debug("Rejecting invalid protocol %s" % (self.proto_name))
|
||||
self.logger.debug('Rejecting invalid protocol %s' % (self.proto_name))
|
||||
return
|
||||
|
||||
self.proto_version = tuple(int(v) for v in client_proto_version.split("."))
|
||||
self.proto_version = tuple(int(v) for v in client_proto_version.split('.'))
|
||||
if not self.validate_proto_version():
|
||||
self.logger.debug(
|
||||
"Rejecting invalid protocol version %s" % (client_proto_version)
|
||||
)
|
||||
self.logger.debug('Rejecting invalid protocol version %s' % (client_proto_version))
|
||||
return
|
||||
|
||||
# Read headers
|
||||
self.client_headers = {}
|
||||
# Read headers. Currently, no headers are implemented, so look for
|
||||
# an empty line to signal the end of the headers
|
||||
while True:
|
||||
header = await self.socket.recv()
|
||||
if not header:
|
||||
# Empty line. End of headers
|
||||
break
|
||||
tag, value = header.split(":", 1)
|
||||
self.client_headers[tag.lower()] = value.strip()
|
||||
line = await self.reader.readline()
|
||||
if line is None:
|
||||
return
|
||||
|
||||
if self.client_headers.get("needs-headers", "false") == "true":
|
||||
for k, v in (await self.handle_headers(self.client_headers)).items():
|
||||
await self.socket.send("%s: %s" % (k, v))
|
||||
await self.socket.send("")
|
||||
line = line.decode('utf-8').rstrip()
|
||||
if not line:
|
||||
break
|
||||
|
||||
# Handle messages
|
||||
while True:
|
||||
d = await self.socket.recv_message()
|
||||
d = await self.read_message()
|
||||
if d is None:
|
||||
break
|
||||
try:
|
||||
response = await self.dispatch_message(d)
|
||||
except InvokeError as e:
|
||||
await self.socket.send_message(
|
||||
{"invoke-error": {"message": str(e)}}
|
||||
)
|
||||
break
|
||||
|
||||
if response is not self.NO_RESPONSE:
|
||||
await self.socket.send_message(response)
|
||||
|
||||
except ConnectionClosedError as e:
|
||||
self.logger.info(str(e))
|
||||
except (ClientError, ConnectionError) as e:
|
||||
await self.dispatch_message(d)
|
||||
await self.writer.drain()
|
||||
except ClientError as e:
|
||||
self.logger.error(str(e))
|
||||
finally:
|
||||
await self.close()
|
||||
self.writer.close()
|
||||
|
||||
async def dispatch_message(self, msg):
|
||||
for k in self.handlers.keys():
|
||||
if k in msg:
|
||||
self.logger.debug("Handling %s" % k)
|
||||
return await self.handlers[k](msg[k])
|
||||
self.logger.debug('Handling %s' % k)
|
||||
await self.handlers[k](msg[k])
|
||||
return
|
||||
|
||||
raise ClientError("Unrecognized command %r" % msg)
|
||||
|
||||
async def handle_ping(self, request):
|
||||
return {"alive": True}
|
||||
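# Illustrative only (not part of this change): the round trip this handler
# serves, seen from the async client side (connection details are example
# values, not part of the patch):
#
#   async with self._get_async_client() as client:
#       await client.connect_tcp("localhost", 8686)
#       assert await client.ping() == {"alive": True}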
def write_message(self, msg):
|
||||
for c in chunkify(json.dumps(msg), self.max_chunk):
|
||||
self.writer.write(c.encode('utf-8'))
|
||||
|
||||
async def read_message(self):
|
||||
l = await self.reader.readline()
|
||||
if not l:
|
||||
return None
|
||||
|
||||
class StreamServer(object):
|
||||
def __init__(self, handler, logger):
|
||||
self.handler = handler
|
||||
self.logger = logger
|
||||
self.closed = False
|
||||
|
||||
async def handle_stream_client(self, reader, writer):
|
||||
# writer.transport.set_write_buffer_limits(0)
|
||||
socket = StreamConnection(reader, writer, -1)
|
||||
if self.closed:
|
||||
await socket.close()
|
||||
return
|
||||
|
||||
await self.handler(socket)
|
||||
|
||||
async def stop(self):
|
||||
self.closed = True
|
||||
|
||||
|
||||
class TCPStreamServer(StreamServer):
|
||||
def __init__(self, host, port, handler, logger, *, reuseport=False):
|
||||
super().__init__(handler, logger)
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.reuseport = reuseport
|
||||
|
||||
def start(self, loop):
|
||||
self.server = loop.run_until_complete(
|
||||
asyncio.start_server(
|
||||
self.handle_stream_client,
|
||||
self.host,
|
||||
self.port,
|
||||
reuse_port=self.reuseport,
|
||||
)
|
||||
)
|
||||
|
||||
for s in self.server.sockets:
|
||||
self.logger.debug("Listening on %r" % (s.getsockname(),))
|
||||
# Newer python does this automatically. Do it manually here for
|
||||
# maximum compatibility
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_QUICKACK, 1)
|
||||
|
||||
# Enable keep alives. This prevents broken client connections
|
||||
# from persisting on the server for long periods of time.
|
||||
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 30)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 4)
|
||||
|
||||
name = self.server.sockets[0].getsockname()
|
||||
if self.server.sockets[0].family == socket.AF_INET6:
|
||||
self.address = "[%s]:%d" % (name[0], name[1])
|
||||
else:
|
||||
self.address = "%s:%d" % (name[0], name[1])
|
||||
|
||||
return [self.server.wait_closed()]
|
||||
|
||||
async def stop(self):
|
||||
await super().stop()
|
||||
self.server.close()
|
||||
|
||||
def cleanup(self):
|
||||
pass
|
||||
|
||||
|
||||
class UnixStreamServer(StreamServer):
|
||||
def __init__(self, path, handler, logger):
|
||||
super().__init__(handler, logger)
|
||||
self.path = path
|
||||
|
||||
def start(self, loop):
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
# Work around path length limits in AF_UNIX
|
||||
os.chdir(os.path.dirname(self.path))
|
||||
self.server = loop.run_until_complete(
|
||||
asyncio.start_unix_server(
|
||||
self.handle_stream_client, os.path.basename(self.path)
|
||||
)
|
||||
)
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
message = l.decode('utf-8')
|
||||
|
||||
self.logger.debug("Listening on %r" % self.path)
|
||||
self.address = "unix://%s" % os.path.abspath(self.path)
|
||||
return [self.server.wait_closed()]
|
||||
if not message.endswith('\n'):
|
||||
return None
|
||||
|
||||
async def stop(self):
|
||||
await super().stop()
|
||||
self.server.close()
|
||||
return json.loads(message)
|
||||
except (json.JSONDecodeError, UnicodeDecodeError) as e:
|
||||
self.logger.error('Bad message from client: %r' % message)
|
||||
raise e
|
||||
|
||||
def cleanup(self):
|
||||
os.unlink(self.path)
|
||||
async def handle_chunk(self, request):
|
||||
lines = []
|
||||
try:
|
||||
while True:
|
||||
l = await self.reader.readline()
|
||||
l = l.rstrip(b"\n").decode("utf-8")
|
||||
if not l:
|
||||
break
|
||||
lines.append(l)
|
||||
|
||||
msg = json.loads(''.join(lines))
|
||||
except (json.JSONDecodeError, UnicodeDecodeError) as e:
|
||||
self.logger.error('Bad message from client: %r' % lines)
|
||||
raise e
|
||||
|
||||
class WebsocketsServer(object):
|
||||
def __init__(self, host, port, handler, logger, *, reuseport=False):
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.handler = handler
|
||||
self.logger = logger
|
||||
self.reuseport = reuseport
|
||||
if 'chunk-stream' in msg:
|
||||
raise ClientError("Nested chunks are not allowed")
|
||||
|
||||
def start(self, loop):
|
||||
import websockets.server
|
||||
await self.dispatch_message(msg)
|
||||
|
||||
self.server = loop.run_until_complete(
|
||||
websockets.server.serve(
|
||||
self.client_handler,
|
||||
self.host,
|
||||
self.port,
|
||||
ping_interval=None,
|
||||
reuse_port=self.reuseport,
|
||||
)
|
||||
)
|
||||
|
||||
for s in self.server.sockets:
|
||||
self.logger.debug("Listening on %r" % (s.getsockname(),))
|
||||
|
||||
# Enable keep alives. This prevents broken client connections
|
||||
# from persisting on the server for long periods of time.
|
||||
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 30)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 4)
|
||||
|
||||
name = self.server.sockets[0].getsockname()
|
||||
if self.server.sockets[0].family == socket.AF_INET6:
|
||||
self.address = "ws://[%s]:%d" % (name[0], name[1])
|
||||
else:
|
||||
self.address = "ws://%s:%d" % (name[0], name[1])
|
||||
|
||||
return [self.server.wait_closed()]
|
||||
|
||||
async def stop(self):
|
||||
self.server.close()
|
||||
|
||||
def cleanup(self):
|
||||
pass
|
||||
|
||||
async def client_handler(self, websocket):
|
||||
socket = WebsocketConnection(websocket, -1)
|
||||
await self.handler(socket)
|
||||
async def handle_ping(self, request):
|
||||
response = {'alive': True}
|
||||
self.write_message(response)
|
||||
|
||||
|
||||
class AsyncServer(object):
|
||||
def __init__(self, logger):
|
||||
self._cleanup_socket = None
|
||||
self.logger = logger
|
||||
self.start = None
|
||||
self.address = None
|
||||
self.loop = None
|
||||
self.run_tasks = []
|
||||
|
||||
def start_tcp_server(self, host, port, *, reuseport=False):
|
||||
self.server = TCPStreamServer(
|
||||
host,
|
||||
port,
|
||||
self._client_handler,
|
||||
self.logger,
|
||||
reuseport=reuseport,
|
||||
)
|
||||
def start_tcp_server(self, host, port):
|
||||
def start_tcp():
|
||||
self.server = self.loop.run_until_complete(
|
||||
asyncio.start_server(self.handle_client, host, port)
|
||||
)
|
||||
|
||||
for s in self.server.sockets:
|
||||
self.logger.debug('Listening on %r' % (s.getsockname(),))
|
||||
# Newer python does this automatically. Do it manually here for
|
||||
# maximum compatibility
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_QUICKACK, 1)
|
||||
|
||||
name = self.server.sockets[0].getsockname()
|
||||
if self.server.sockets[0].family == socket.AF_INET6:
|
||||
self.address = "[%s]:%d" % (name[0], name[1])
|
||||
else:
|
||||
self.address = "%s:%d" % (name[0], name[1])
|
||||
|
||||
self.start = start_tcp
|
||||
|
||||
def start_unix_server(self, path):
|
||||
self.server = UnixStreamServer(path, self._client_handler, self.logger)
|
||||
def cleanup():
|
||||
os.unlink(path)
|
||||
|
||||
def start_websocket_server(self, host, port, reuseport=False):
|
||||
self.server = WebsocketsServer(
|
||||
host,
|
||||
port,
|
||||
self._client_handler,
|
||||
self.logger,
|
||||
reuseport=reuseport,
|
||||
)
|
||||
def start_unix():
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
# Work around path length limits in AF_UNIX
|
||||
os.chdir(os.path.dirname(path))
|
||||
self.server = self.loop.run_until_complete(
|
||||
asyncio.start_unix_server(self.handle_client, os.path.basename(path))
|
||||
)
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
|
||||
async def _client_handler(self, socket):
|
||||
address = socket.address
|
||||
self.logger.debug('Listening on %r' % path)
|
||||
|
||||
self._cleanup_socket = cleanup
|
||||
self.address = "unix://%s" % os.path.abspath(path)
|
||||
|
||||
self.start = start_unix
|
||||
|
||||
@abc.abstractmethod
|
||||
def accept_client(self, reader, writer):
|
||||
pass
|
||||
|
||||
async def handle_client(self, reader, writer):
|
||||
# writer.transport.set_write_buffer_limits(0)
|
||||
try:
|
||||
client = self.accept_client(socket)
|
||||
client = self.accept_client(reader, writer)
|
||||
await client.process_requests()
|
||||
except Exception as e:
|
||||
import traceback
|
||||
|
||||
self.logger.error(
|
||||
"Error from client %s: %s" % (address, str(e)), exc_info=True
|
||||
)
|
||||
self.logger.error('Error from client: %s' % str(e), exc_info=True)
|
||||
traceback.print_exc()
|
||||
finally:
|
||||
self.logger.debug("Client %s disconnected", address)
|
||||
await socket.close()
|
||||
writer.close()
|
||||
self.logger.debug('Client disconnected')
|
||||
|
||||
@abc.abstractmethod
|
||||
def accept_client(self, socket):
|
||||
pass
|
||||
|
||||
async def stop(self):
|
||||
self.logger.debug("Stopping server")
|
||||
await self.server.stop()
|
||||
|
||||
def start(self):
|
||||
tasks = self.server.start(self.loop)
|
||||
self.address = self.server.address
|
||||
return tasks
|
||||
def run_loop_forever(self):
|
||||
try:
|
||||
self.loop.run_forever()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
def signal_handler(self):
|
||||
self.logger.debug("Got exit signal")
|
||||
self.loop.create_task(self.stop())
|
||||
self.loop.stop()
|
||||
|
||||
def _serve_forever(self, tasks):
|
||||
def _serve_forever(self):
|
||||
try:
|
||||
self.loop.add_signal_handler(signal.SIGTERM, self.signal_handler)
|
||||
self.loop.add_signal_handler(signal.SIGINT, self.signal_handler)
|
||||
self.loop.add_signal_handler(signal.SIGQUIT, self.signal_handler)
|
||||
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGTERM])
|
||||
|
||||
self.loop.run_until_complete(asyncio.gather(*tasks))
|
||||
self.run_loop_forever()
|
||||
self.server.close()
|
||||
|
||||
self.logger.debug("Server shutting down")
|
||||
self.loop.run_until_complete(self.server.wait_closed())
|
||||
self.logger.debug('Server shutting down')
|
||||
finally:
|
||||
self.server.cleanup()
|
||||
if self._cleanup_socket is not None:
|
||||
self._cleanup_socket()
|
||||
|
||||
def serve_forever(self):
|
||||
"""
|
||||
Serve requests in the current process
|
||||
"""
|
||||
self._create_loop()
|
||||
tasks = self.start()
|
||||
self._serve_forever(tasks)
|
||||
self.loop.close()
|
||||
|
||||
def _create_loop(self):
|
||||
# Create loop and override any loop that may have existed in
|
||||
# a parent process. It is possible that the use cases of
|
||||
# serve_forever might be constrained enough to allow using
|
||||
# get_event_loop here, but better safe than sorry for now.
|
||||
self.loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(self.loop)
|
||||
self.start()
|
||||
self._serve_forever()
|
||||
|
||||
def serve_as_process(self, *, prefunc=None, args=(), log_level=None):
|
||||
def serve_as_process(self, *, prefunc=None, args=()):
|
||||
"""
|
||||
Serve requests in a child process
|
||||
"""
|
||||
|
||||
def run(queue):
|
||||
# Create loop and override any loop that may have existed
|
||||
# in a parent process. Without doing this and instead
|
||||
@@ -371,24 +250,21 @@ class AsyncServer(object):
|
||||
# more general, though, as any potential use of asyncio in
|
||||
# Cooker could create a loop that needs to be replaced in this
|
||||
# new process.
|
||||
self._create_loop()
|
||||
self.loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(self.loop)
|
||||
try:
|
||||
self.address = None
|
||||
tasks = self.start()
|
||||
self.start()
|
||||
finally:
|
||||
# Always put the server address to wake up the parent task
|
||||
queue.put(self.address)
|
||||
queue.close()
|
||||
|
||||
if prefunc is not None:
|
||||
prefunc(self, *args)
|
||||
|
||||
if log_level is not None:
|
||||
self.logger.setLevel(log_level)
|
||||
self._serve_forever()
|
||||
|
||||
self._serve_forever(tasks)
|
||||
|
||||
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
|
||||
if sys.version_info >= (3, 6):
|
||||
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
|
||||
self.loop.close()
|
||||
|
||||
queue = multiprocessing.Queue()
|
||||
|
||||
@@ -20,12 +20,10 @@ import itertools
|
||||
import time
|
||||
import re
|
||||
import stat
|
||||
import datetime
|
||||
import bb
|
||||
import bb.msg
|
||||
import bb.process
|
||||
import bb.progress
|
||||
from io import StringIO
|
||||
from bb import data, event, utils
|
||||
|
||||
bblogger = logging.getLogger('BitBake')
|
||||
@@ -178,9 +176,7 @@ class StdoutNoopContextManager:
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
if "name" in dir(sys.stdout):
|
||||
return sys.stdout.name
|
||||
return "<mem>"
|
||||
return sys.stdout.name
|
||||
|
||||
|
||||
def exec_func(func, d, dirs = None):
|
||||
@@ -197,8 +193,6 @@ def exec_func(func, d, dirs = None):
|
||||
for cdir in d.expand(cleandirs).split():
|
||||
bb.utils.remove(cdir, True)
|
||||
bb.utils.mkdirhier(cdir)
|
||||
if cdir == oldcwd:
|
||||
os.chdir(cdir)
|
||||
|
||||
if flags and dirs is None:
|
||||
dirs = flags.get('dirs')
|
||||
@@ -301,21 +295,9 @@ def exec_func_python(func, d, runfile, cwd=None):
|
||||
lineno = int(d.getVarFlag(func, "lineno", False))
|
||||
bb.methodpool.insert_method(func, text, fn, lineno - 1)
|
||||
|
||||
if verboseStdoutLogging:
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
currout = sys.stdout
|
||||
currerr = sys.stderr
|
||||
sys.stderr = sys.stdout = execio = StringIO()
|
||||
comp = utils.better_compile(code, func, "exec_func_python() autogenerated")
|
||||
utils.better_exec(comp, {"d": d}, code, "exec_func_python() autogenerated")
|
||||
finally:
|
||||
if verboseStdoutLogging:
|
||||
execio.flush()
|
||||
logger.plain("%s" % execio.getvalue())
|
||||
sys.stdout = currout
|
||||
sys.stderr = currerr
|
||||
execio.close()
|
||||
# We want any stdout/stderr to be printed before any other log messages to make debugging
|
||||
# more accurate. In some cases we seem to lose stdout/stderr entirely in logging tests without this.
|
||||
sys.stdout.flush()
|
||||
@@ -458,11 +440,7 @@ exit $ret
|
||||
if fakerootcmd:
|
||||
cmd = [fakerootcmd, runfile]
|
||||
|
||||
# We only want to output to logger via LogTee if stdout is sys.__stdout__ (which will either
|
||||
# be real stdout or subprocess PIPE or similar). In other cases we are being run "recursively",
|
||||
# ie. inside another function, in which case stdout is already being captured so we don't
|
||||
# want to Tee here as output would be printed twice, and out of order.
|
||||
if verboseStdoutLogging and sys.stdout == sys.__stdout__:
|
||||
if verboseStdoutLogging:
|
||||
logfile = LogTee(logger, StdoutNoopContextManager())
|
||||
else:
|
||||
logfile = StdoutNoopContextManager()
|
||||
@@ -593,6 +571,7 @@ def _task_data(fn, task, d):
|
||||
localdata.setVar('BB_FILENAME', fn)
|
||||
localdata.setVar('OVERRIDES', 'task-%s:%s' %
|
||||
(task[3:].replace('_', '-'), d.getVar('OVERRIDES', False)))
|
||||
localdata.finalize()
|
||||
bb.data.expandKeys(localdata)
|
||||
return localdata
|
||||
|
||||
@@ -639,8 +618,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
logorder = os.path.join(tempdir, 'log.task_order')
|
||||
try:
|
||||
with open(logorder, 'a') as logorderfile:
|
||||
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S.%f")
|
||||
logorderfile.write('{0} {1} ({2}): {3}\n'.format(timestamp, task, os.getpid(), logbase))
|
||||
logorderfile.write('{0} ({1}): {2}\n'.format(task, os.getpid(), logbase))
|
||||
except OSError:
|
||||
logger.exception("Opening log file '%s'", logorder)
|
||||
pass
|
||||
@@ -743,7 +721,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
|
||||
if quieterr:
|
||||
if not handled:
|
||||
logger.warning(str(exc))
|
||||
logger.warning(repr(exc))
|
||||
event.fire(TaskFailedSilent(task, fn, logfn, localdata), localdata)
|
||||
else:
|
||||
errprinted = errchk.triggered
|
||||
@@ -752,7 +730,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
if verboseStdoutLogging or handled:
|
||||
errprinted = True
|
||||
if not handled:
|
||||
logger.error(str(exc))
|
||||
logger.error(repr(exc))
|
||||
event.fire(TaskFailed(task, fn, logfn, localdata, errprinted), localdata)
|
||||
return 1
|
||||
|
||||
@@ -793,7 +771,44 @@ def exec_task(fn, task, d, profile = False):
|
||||
event.fire(failedevent, d)
|
||||
return 1
|
||||
|
||||
def _get_cleanmask(taskname, mcfn):
|
||||
def stamp_internal(taskname, d, file_name, baseonly=False, noextra=False):
|
||||
"""
|
||||
Internal stamp helper function
|
||||
Makes sure the stamp directory exists
|
||||
Returns the stamp path+filename
|
||||
|
||||
In the bitbake core, d can be a CacheData and file_name will be set.
|
||||
When called in task context, d will be a data store, file_name will not be set
|
||||
"""
|
||||
taskflagname = taskname
|
||||
if taskname.endswith("_setscene") and taskname != "do_setscene":
|
||||
taskflagname = taskname.replace("_setscene", "")
|
||||
|
||||
if file_name:
|
||||
stamp = d.stamp[file_name]
|
||||
extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
|
||||
else:
|
||||
stamp = d.getVar('STAMP')
|
||||
file_name = d.getVar('BB_FILENAME')
|
||||
extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info') or ""
|
||||
|
||||
if baseonly:
|
||||
return stamp
|
||||
if noextra:
|
||||
extrainfo = ""
|
||||
|
||||
if not stamp:
|
||||
return
|
||||
|
||||
stamp = bb.parse.siggen.stampfile(stamp, file_name, taskname, extrainfo)
|
||||
|
||||
stampdir = os.path.dirname(stamp)
|
||||
if cached_mtime_noerror(stampdir) == 0:
|
||||
bb.utils.mkdirhier(stampdir)
|
||||
|
||||
return stamp
|
||||
|
||||
def stamp_cleanmask_internal(taskname, d, file_name):
|
||||
"""
|
||||
Internal stamp helper function to generate stamp cleaning mask
|
||||
Returns the stamp path+filename
|
||||
@@ -801,14 +816,31 @@ def _get_cleanmask(taskname, mcfn):
|
||||
In the bitbake core, d can be a CacheData and file_name will be set.
|
||||
When called in task context, d will be a data store, file_name will not be set
|
||||
"""
|
||||
cleanmask = bb.parse.siggen.stampcleanmask_mcfn(taskname, mcfn)
|
||||
taskflagname = taskname.replace("_setscene", "")
|
||||
if cleanmask:
|
||||
return [cleanmask, cleanmask.replace(taskflagname, taskflagname + "_setscene")]
|
||||
return []
|
||||
taskflagname = taskname
|
||||
if taskname.endswith("_setscene") and taskname != "do_setscene":
|
||||
taskflagname = taskname.replace("_setscene", "")
|
||||
|
||||
def clean_stamp_mcfn(task, mcfn):
|
||||
cleanmask = _get_cleanmask(task, mcfn)
|
||||
if file_name:
|
||||
stamp = d.stampclean[file_name]
|
||||
extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
|
||||
else:
|
||||
stamp = d.getVar('STAMPCLEAN')
|
||||
file_name = d.getVar('BB_FILENAME')
|
||||
extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info') or ""
|
||||
|
||||
if not stamp:
|
||||
return []
|
||||
|
||||
cleanmask = bb.parse.siggen.stampcleanmask(stamp, file_name, taskname, extrainfo)
|
||||
|
||||
return [cleanmask, cleanmask.replace(taskflagname, taskflagname + "_setscene")]
|
||||
|
||||
def make_stamp(task, d, file_name = None):
|
||||
"""
|
||||
Creates/updates a stamp for a given task
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
cleanmask = stamp_cleanmask_internal(task, d, file_name)
|
||||
for mask in cleanmask:
|
||||
for name in glob.glob(mask):
|
||||
# Preserve sigdata files in the stamps directory
|
||||
@@ -819,45 +851,24 @@ def clean_stamp_mcfn(task, mcfn):
|
||||
continue
|
||||
os.unlink(name)
|
||||
|
||||
def clean_stamp(task, d):
|
||||
mcfn = d.getVar('BB_FILENAME')
|
||||
clean_stamp_mcfn(task, mcfn)
|
||||
|
||||
def make_stamp_mcfn(task, mcfn):
|
||||
|
||||
basestamp = bb.parse.siggen.stampfile_mcfn(task, mcfn)
|
||||
|
||||
stampdir = os.path.dirname(basestamp)
|
||||
if cached_mtime_noerror(stampdir) == 0:
|
||||
bb.utils.mkdirhier(stampdir)
|
||||
|
||||
clean_stamp_mcfn(task, mcfn)
|
||||
|
||||
stamp = stamp_internal(task, d, file_name)
|
||||
# Remove the file and recreate to force timestamp
|
||||
# change on broken NFS filesystems
|
||||
if basestamp:
|
||||
bb.utils.remove(basestamp)
|
||||
open(basestamp, "w").close()
|
||||
|
||||
def make_stamp(task, d):
|
||||
"""
|
||||
Creates/updates a stamp for a given task
|
||||
"""
|
||||
mcfn = d.getVar('BB_FILENAME')
|
||||
|
||||
make_stamp_mcfn(task, mcfn)
|
||||
if stamp:
|
||||
bb.utils.remove(stamp)
|
||||
open(stamp, "w").close()
|
||||
|
||||
# If we're in task context, write out a signature file for each task
|
||||
# as it completes
|
||||
if not task.endswith("_setscene"):
|
||||
stampbase = bb.parse.siggen.stampfile_base(mcfn)
|
||||
bb.parse.siggen.dump_sigtask(mcfn, task, stampbase, True)
|
||||
if not task.endswith("_setscene") and task != "do_setscene" and not file_name:
|
||||
stampbase = stamp_internal(task, d, None, True)
|
||||
file_name = d.getVar('BB_FILENAME')
|
||||
bb.parse.siggen.dump_sigtask(file_name, task, stampbase, True)
|
||||
|
||||
|
||||
def find_stale_stamps(task, mcfn):
|
||||
current = bb.parse.siggen.stampfile_mcfn(task, mcfn)
|
||||
current2 = bb.parse.siggen.stampfile_mcfn(task + "_setscene", mcfn)
|
||||
cleanmask = _get_cleanmask(task, mcfn)
|
||||
def find_stale_stamps(task, d, file_name=None):
|
||||
current = stamp_internal(task, d, file_name)
|
||||
current2 = stamp_internal(task + "_setscene", d, file_name)
|
||||
cleanmask = stamp_cleanmask_internal(task, d, file_name)
|
||||
found = []
|
||||
for mask in cleanmask:
|
||||
for name in glob.glob(mask):
|
||||
@@ -871,14 +882,38 @@ def find_stale_stamps(task, mcfn):
|
||||
found.append(name)
|
||||
return found
|
||||
|
||||
def write_taint(task, d):
|
||||
def del_stamp(task, d, file_name = None):
|
||||
"""
|
||||
Removes a stamp for a given task
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
stamp = stamp_internal(task, d, file_name)
|
||||
bb.utils.remove(stamp)
|
||||
|
||||
def write_taint(task, d, file_name = None):
|
||||
"""
|
||||
Creates a "taint" file which will force the specified task and its
|
||||
dependents to be re-run the next time by influencing the value of its
|
||||
taskhash.
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
mcfn = d.getVar('BB_FILENAME')
|
||||
bb.parse.siggen.invalidate_task(task, mcfn)
|
||||
import uuid
|
||||
if file_name:
|
||||
taintfn = d.stamp[file_name] + '.' + task + '.taint'
|
||||
else:
|
||||
taintfn = d.getVar('STAMP') + '.' + task + '.taint'
|
||||
bb.utils.mkdirhier(os.path.dirname(taintfn))
|
||||
# The specific content of the taint file is not really important,
|
||||
# we just need it to be random, so a random UUID is used
|
||||
with open(taintfn, 'w') as taintf:
|
||||
taintf.write(str(uuid.uuid4()))
|
||||
|
||||
def stampfile(taskname, d, file_name = None, noextra=False):
|
||||
"""
|
||||
Return the stamp for a given task
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
return stamp_internal(taskname, d, file_name, noextra=noextra)
|
||||
|
||||
def add_tasks(tasklist, d):
|
||||
task_deps = d.getVar('_task_deps', False)
|
||||
@@ -932,13 +967,9 @@ def add_tasks(tasklist, d):
|
||||
# don't assume holding a reference
|
||||
d.setVar('_task_deps', task_deps)
|
||||
|
||||
def ensure_task_prefix(name):
|
||||
if name[:3] != "do_":
|
||||
name = "do_" + name
|
||||
return name
|
||||
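# Illustrative only (not part of this change): both spellings normalise to the
# canonical task name, e.g. ensure_task_prefix("compile") == "do_compile" and
# ensure_task_prefix("do_compile") == "do_compile".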
|
||||
def addtask(task, before, after, d):
|
||||
task = ensure_task_prefix(task)
|
||||
if task[:3] != "do_":
|
||||
task = "do_" + task
|
||||
|
||||
d.setVarFlag(task, "task", 1)
|
||||
bbtasks = d.getVar('__BBTASKS', False) or []
|
||||
@@ -950,20 +981,19 @@ def addtask(task, before, after, d):
|
||||
if after is not None:
|
||||
# set up deps for function
|
||||
for entry in after.split():
|
||||
entry = ensure_task_prefix(entry)
|
||||
if entry not in existing:
|
||||
existing.append(entry)
|
||||
d.setVarFlag(task, "deps", existing)
|
||||
if before is not None:
|
||||
# set up things that depend on this func
|
||||
for entry in before.split():
|
||||
entry = ensure_task_prefix(entry)
|
||||
existing = d.getVarFlag(entry, "deps", False) or []
|
||||
if task not in existing:
|
||||
d.setVarFlag(entry, "deps", [task] + existing)
|
||||
|
||||
def deltask(task, d):
|
||||
task = ensure_task_prefix(task)
|
||||
if task[:3] != "do_":
|
||||
task = "do_" + task
|
||||
|
||||
bbtasks = d.getVar('__BBTASKS', False) or []
|
||||
if task in bbtasks:
|
||||
@@ -1028,9 +1058,3 @@ def tasksbetween(task_start, task_end, d):
|
||||
chain.pop()
|
||||
follow_chain(task_start, task_end)
|
||||
return outtasks
|
||||
|
||||
def listtasks(d):
|
||||
"""
|
||||
Return the list of tasks in the current recipe.
|
||||
"""
|
||||
return tuple(d.getVar('__BBTASKS', False) or ())
|
||||
|
||||
@@ -24,11 +24,10 @@ from collections.abc import Mapping
|
||||
import bb.utils
|
||||
from bb import PrefixLoggerAdapter
|
||||
import re
|
||||
import shutil
|
||||
|
||||
logger = logging.getLogger("BitBake.Cache")
|
||||
|
||||
__cache_version__ = "156"
|
||||
__cache_version__ = "154"
|
||||
|
||||
def getCacheFile(path, filename, mc, data_hash):
|
||||
mcspec = ''
|
||||
@@ -105,7 +104,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
|
||||
self.tasks = metadata.getVar('__BBTASKS', False)
|
||||
|
||||
self.basetaskhashes = metadata.getVar('__siggen_basehashes', False) or {}
|
||||
self.basetaskhashes = self.taskvar('BB_BASEHASH', self.tasks, metadata)
|
||||
self.hashfilename = self.getvar('BB_HASHFILENAME', metadata)
|
||||
|
||||
self.task_deps = metadata.getVar('_task_deps', False) or {'tasks': [], 'parents': {}}
|
||||
@@ -216,7 +215,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
|
||||
# Collect files we may need for possible world-dep
|
||||
# calculations
|
||||
if not bb.utils.to_boolean(self.not_world):
|
||||
if not self.not_world:
|
||||
cachedata.possible_world.append(fn)
|
||||
#else:
|
||||
# logger.debug2("EXCLUDE FROM WORLD: %s", fn)
|
||||
@@ -238,113 +237,15 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
cachedata.fakerootlogs[fn] = self.fakerootlogs
|
||||
cachedata.extradepsfunc[fn] = self.extradepsfunc
|
||||
|
||||
|
||||
class SiggenRecipeInfo(RecipeInfoCommon):
|
||||
__slots__ = ()
|
||||
|
||||
classname = "SiggenRecipeInfo"
|
||||
cachefile = "bb_cache_" + classname +".dat"
|
||||
# we don't want to show this information in graph files so don't set cachefields
|
||||
#cachefields = []
|
||||
|
||||
def __init__(self, filename, metadata):
|
||||
self.siggen_gendeps = metadata.getVar("__siggen_gendeps", False)
|
||||
self.siggen_varvals = metadata.getVar("__siggen_varvals", False)
|
||||
self.siggen_taskdeps = metadata.getVar("__siggen_taskdeps", False)
|
||||
|
||||
@classmethod
|
||||
def init_cacheData(cls, cachedata):
|
||||
cachedata.siggen_taskdeps = {}
|
||||
cachedata.siggen_gendeps = {}
|
||||
cachedata.siggen_varvals = {}
|
||||
|
||||
def add_cacheData(self, cachedata, fn):
|
||||
cachedata.siggen_gendeps[fn] = self.siggen_gendeps
|
||||
cachedata.siggen_varvals[fn] = self.siggen_varvals
|
||||
cachedata.siggen_taskdeps[fn] = self.siggen_taskdeps
|
||||
|
||||
# The siggen variable data is large and impacts:
|
||||
# - bitbake's overall memory usage
|
||||
# - the amount of data sent over IPC between parsing processes and the server
|
||||
# - the size of the cache files on disk
|
||||
# - the size of "sigdata" hash information files on disk
|
||||
# The data consists of strings (some large) or frozenset lists of variables
|
||||
# As such, we a) deduplicate the data here and b) pass references to the object on second
|
||||
# access (e.g. over IPC or saving into pickle).
|
||||
|
||||
store = {}
|
||||
save_map = {}
|
||||
save_count = 1
|
||||
restore_map = {}
|
||||
restore_count = {}
|
||||
|
||||
@classmethod
|
||||
def reset(cls):
|
||||
# Needs to be called before starting new streamed data in a given process
|
||||
# (e.g. writing out the cache again)
|
||||
cls.save_map = {}
|
||||
cls.save_count = 1
|
||||
cls.restore_map = {}
|
||||
|
||||
@classmethod
|
||||
def _save(cls, deps):
|
||||
ret = []
|
||||
if not deps:
|
||||
return deps
|
||||
for dep in deps:
|
||||
fs = deps[dep]
|
||||
if fs is None:
|
||||
ret.append((dep, None, None))
|
||||
elif fs in cls.save_map:
|
||||
ret.append((dep, None, cls.save_map[fs]))
|
||||
else:
|
||||
cls.save_map[fs] = cls.save_count
|
||||
ret.append((dep, fs, cls.save_count))
|
||||
cls.save_count = cls.save_count + 1
|
||||
return ret
|
||||
|
||||
@classmethod
|
||||
def _restore(cls, deps, pid):
|
||||
ret = {}
|
||||
if not deps:
|
||||
return deps
|
||||
if pid not in cls.restore_map:
|
||||
cls.restore_map[pid] = {}
|
||||
map = cls.restore_map[pid]
|
||||
for dep, fs, mapnum in deps:
|
||||
if fs is None and mapnum is None:
|
||||
ret[dep] = None
|
||||
elif fs is None:
|
||||
ret[dep] = map[mapnum]
|
||||
else:
|
||||
try:
|
||||
fs = cls.store[fs]
|
||||
except KeyError:
|
||||
cls.store[fs] = fs
|
||||
map[mapnum] = fs
|
||||
ret[dep] = fs
|
||||
return ret
|
||||
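# Illustrative only (not part of this change): identical frozenset values are
# serialised once by _save() and shared again by _restore(), e.g.
#
#   deps = {"do_compile": frozenset({"CC"}), "do_install": frozenset({"CC"})}
#   saved = SiggenRecipeInfo._save(deps)
#   # -> [("do_compile", frozenset({"CC"}), 1), ("do_install", None, 1)]
#   restored = SiggenRecipeInfo._restore(saved, os.getpid())
#   restored["do_compile"] is restored["do_install"]   # True: one shared object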
|
||||
def __getstate__(self):
|
||||
ret = {}
|
||||
for key in ["siggen_gendeps", "siggen_taskdeps", "siggen_varvals"]:
|
||||
ret[key] = self._save(self.__dict__[key])
|
||||
ret['pid'] = os.getpid()
|
||||
return ret
|
||||
|
||||
def __setstate__(self, state):
|
||||
pid = state['pid']
|
||||
for key in ["siggen_gendeps", "siggen_taskdeps", "siggen_varvals"]:
|
||||
setattr(self, key, self._restore(state[key], pid))
|
||||
|
||||
|
||||
def virtualfn2realfn(virtualfn):
|
||||
"""
|
||||
Convert a virtual file name to a real one + the associated subclass keyword
|
||||
"""
|
||||
mc = ""
|
||||
if virtualfn.startswith('mc:') and virtualfn.count(':') >= 2:
|
||||
(_, mc, virtualfn) = virtualfn.split(':', 2)
|
||||
elems = virtualfn.split(':')
|
||||
mc = elems[1]
|
||||
virtualfn = ":".join(elems[2:])
|
||||
|
||||
fn = virtualfn
|
||||
cls = ""
|
||||
@@ -367,7 +268,7 @@ def realfn2virtual(realfn, cls, mc):
|
||||
|
||||
def variant2virtual(realfn, variant):
|
||||
"""
|
||||
Convert a real filename + a variant to a virtual filename
|
||||
Convert a real filename + the associated subclass keyword to a virtual filename
|
||||
"""
|
||||
if variant == "":
|
||||
return realfn
|
||||
@@ -378,18 +279,75 @@ def variant2virtual(realfn, variant):
|
||||
return "mc:" + elems[1] + ":" + realfn
|
||||
return "virtual:" + variant + ":" + realfn
|
||||
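# Illustrative only (not part of this change): how real and virtual recipe
# file names map onto each other, assuming a recipe at /path/foo.bb:
#
#   variant2virtual("/path/foo.bb", "")        -> "/path/foo.bb"
#   variant2virtual("/path/foo.bb", "native")  -> "virtual:native:/path/foo.bb"
#   virtualfn2realfn("virtual:native:/path/foo.bb")
#       -> ("/path/foo.bb", "native", "")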
|
||||
#
|
||||
# Cooker calls cacheValid on its recipe list, then either calls loadCached
|
||||
# from its main thread or parse from separate processes to generate an up to
|
||||
# date cache
|
||||
#
|
||||
class Cache(object):
|
||||
def parse_recipe(bb_data, bbfile, appends, mc=''):
|
||||
"""
|
||||
Parse a recipe
|
||||
"""
|
||||
|
||||
bb_data.setVar("__BBMULTICONFIG", mc)
|
||||
|
||||
bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
|
||||
bb.parse.cached_mtime_noerror(bbfile_loc)
|
||||
|
||||
if appends:
|
||||
bb_data.setVar('__BBAPPEND', " ".join(appends))
|
||||
bb_data = bb.parse.handle(bbfile, bb_data)
|
||||
return bb_data
|
||||
|
||||
|
||||
class NoCache(object):
|
||||
|
||||
def __init__(self, databuilder):
|
||||
self.databuilder = databuilder
|
||||
self.data = databuilder.data
|
||||
|
||||
def loadDataFull(self, virtualfn, appends):
|
||||
"""
|
||||
Return a complete set of data for fn.
|
||||
To do this, we need to parse the file.
|
||||
"""
|
||||
logger.debug("Parsing %s (full)" % virtualfn)
|
||||
(fn, virtual, mc) = virtualfn2realfn(virtualfn)
|
||||
bb_data = self.load_bbfile(virtualfn, appends, virtonly=True)
|
||||
return bb_data[virtual]
|
||||
|
||||
def load_bbfile(self, bbfile, appends, virtonly = False, mc=None):
|
||||
"""
|
||||
Load and parse one .bb build file
|
||||
Return the data and whether parsing resulted in the file being skipped
|
||||
"""
|
||||
|
||||
if virtonly:
|
||||
(bbfile, virtual, mc) = virtualfn2realfn(bbfile)
|
||||
bb_data = self.databuilder.mcdata[mc].createCopy()
|
||||
bb_data.setVar("__ONLYFINALISE", virtual or "default")
|
||||
datastores = parse_recipe(bb_data, bbfile, appends, mc)
|
||||
return datastores
|
||||
|
||||
if mc is not None:
|
||||
bb_data = self.databuilder.mcdata[mc].createCopy()
|
||||
return parse_recipe(bb_data, bbfile, appends, mc)
|
||||
|
||||
bb_data = self.data.createCopy()
|
||||
datastores = parse_recipe(bb_data, bbfile, appends)
|
||||
|
||||
for mc in self.databuilder.mcdata:
|
||||
if not mc:
|
||||
continue
|
||||
bb_data = self.databuilder.mcdata[mc].createCopy()
|
||||
newstores = parse_recipe(bb_data, bbfile, appends, mc)
|
||||
for ns in newstores:
|
||||
datastores["mc:%s:%s" % (mc, ns)] = newstores[ns]
|
||||
|
||||
return datastores
|
||||
|
||||
class Cache(NoCache):
|
||||
"""
|
||||
BitBake Cache implementation
|
||||
"""
|
||||
def __init__(self, databuilder, mc, data_hash, caches_array):
|
||||
self.databuilder = databuilder
|
||||
self.data = databuilder.data
|
||||
super().__init__(databuilder)
|
||||
data = databuilder.data
|
||||
|
||||
# Pass caches_array information into Cache Constructor
|
||||
# It will be used later for deciding whether we
|
||||
@@ -397,7 +355,7 @@ class Cache(object):
|
||||
self.mc = mc
|
||||
self.logger = PrefixLoggerAdapter("Cache: %s: " % (mc if mc else "default"), logger)
|
||||
self.caches_array = caches_array
|
||||
self.cachedir = self.data.getVar("CACHE")
|
||||
self.cachedir = data.getVar("CACHE")
|
||||
self.clean = set()
|
||||
self.checked = set()
|
||||
self.depends_cache = {}
|
||||
@@ -407,12 +365,20 @@ class Cache(object):
|
||||
self.filelist_regex = re.compile(r'(?:(?<=:True)|(?<=:False))\s+')
|
||||
|
||||
if self.cachedir in [None, '']:
|
||||
bb.fatal("Please ensure CACHE is set to the cache directory for BitBake to use")
|
||||
self.has_cache = False
|
||||
self.logger.info("Not using a cache. "
|
||||
"Set CACHE = <directory> to enable.")
|
||||
return
|
||||
|
||||
self.has_cache = True
|
||||
|
||||
def getCacheFile(self, cachefile):
|
||||
return getCacheFile(self.cachedir, cachefile, self.mc, self.data_hash)
|
||||
|
||||
def prepare_cache(self, progress):
|
||||
if not self.has_cache:
|
||||
return 0
|
||||
|
||||
loaded = 0
|
||||
|
||||
self.cachefile = self.getCacheFile("bb_cache.dat")
|
||||
@@ -441,7 +407,7 @@ class Cache(object):
|
||||
else:
|
||||
symlink = os.path.join(self.cachedir, "bb_cache.dat")
|
||||
|
||||
if os.path.exists(symlink) or os.path.islink(symlink):
|
||||
if os.path.exists(symlink):
|
||||
bb.utils.remove(symlink)
|
||||
try:
|
||||
os.symlink(os.path.basename(self.cachefile), symlink)
|
||||
@@ -451,6 +417,9 @@ class Cache(object):
|
||||
return loaded
|
||||
|
||||
def cachesize(self):
|
||||
if not self.has_cache:
|
||||
return 0
|
||||
|
||||
cachesize = 0
|
||||
for cache_class in self.caches_array:
|
||||
cachefile = self.getCacheFile(cache_class.cachefile)
|
||||
@@ -512,11 +481,11 @@ class Cache(object):
|
||||
|
||||
return len(self.depends_cache)
|
||||
|
||||
def parse(self, filename, appends, layername):
|
||||
def parse(self, filename, appends):
|
||||
"""Parse the specified filename, returning the recipe information"""
|
||||
self.logger.debug("Parsing %s", filename)
|
||||
infos = []
|
||||
datastores = self.databuilder.parseRecipeVariants(filename, appends, mc=self.mc, layername=layername)
|
||||
datastores = self.load_bbfile(filename, appends, mc=self.mc)
|
||||
depends = []
|
||||
variants = []
|
||||
# Process the "real" fn last so we can store variants list
|
||||
@@ -538,19 +507,43 @@ class Cache(object):
|
||||
|
||||
return infos
|
||||
|
||||
def loadCached(self, filename, appends):
|
||||
def load(self, filename, appends):
|
||||
"""Obtain the recipe information for the specified filename,
|
||||
using cached values.
|
||||
"""
|
||||
using cached values if available, otherwise parsing.
|
||||
|
||||
infos = []
|
||||
# info_array item is a list of [CoreRecipeInfo, XXXRecipeInfo]
|
||||
info_array = self.depends_cache[filename]
|
||||
for variant in info_array[0].variants:
|
||||
virtualfn = variant2virtual(filename, variant)
|
||||
infos.append((virtualfn, self.depends_cache[virtualfn]))
|
||||
Note that if it does parse to obtain the info, it will not
|
||||
automatically add the information to the cache or to your
|
||||
CacheData. Use the add or add_info method to do so after
|
||||
running this, or use loadData instead."""
|
||||
cached = self.cacheValid(filename, appends)
|
||||
if cached:
|
||||
infos = []
|
||||
# info_array item is a list of [CoreRecipeInfo, XXXRecipeInfo]
|
||||
info_array = self.depends_cache[filename]
|
||||
for variant in info_array[0].variants:
|
||||
virtualfn = variant2virtual(filename, variant)
|
||||
infos.append((virtualfn, self.depends_cache[virtualfn]))
|
||||
else:
|
||||
return self.parse(filename, appends, configdata, self.caches_array)
|
||||
|
||||
return infos
|
||||
return cached, infos
|
||||
|
||||
def loadData(self, fn, appends, cacheData):
|
||||
"""Load the recipe info for the specified filename,
|
||||
parsing and adding to the cache if necessary, and adding
|
||||
the recipe information to the supplied CacheData instance."""
|
||||
skipped, virtuals = 0, 0
|
||||
|
||||
cached, infos = self.load(fn, appends)
|
||||
for virtualfn, info_array in infos:
|
||||
if info_array[0].skipped:
|
||||
self.logger.debug("Skipping %s: %s", virtualfn, info_array[0].skipreason)
|
||||
skipped += 1
|
||||
else:
|
||||
self.add_info(virtualfn, info_array, cacheData, not cached)
|
||||
virtuals += 1
|
||||
|
||||
return cached, skipped, virtuals
|
||||
|
||||
def cacheValid(self, fn, appends):
|
||||
"""
|
||||
@@ -559,6 +552,10 @@ class Cache(object):
|
||||
"""
|
||||
if fn not in self.checked:
|
||||
self.cacheValidUpdate(fn, appends)
|
||||
|
||||
# Is cache enabled?
|
||||
if not self.has_cache:
|
||||
return False
|
||||
if fn in self.clean:
|
||||
return True
|
||||
return False
|
||||
@@ -568,6 +565,10 @@ class Cache(object):
|
||||
Is the cache valid for fn?
|
||||
Make thorough (slower) checks including timestamps.
|
||||
"""
|
||||
# Is cache enabled?
|
||||
if not self.has_cache:
|
||||
return False
|
||||
|
||||
self.checked.add(fn)
|
||||
|
||||
# File isn't in depends_cache
|
||||
@@ -618,7 +619,7 @@ class Cache(object):
|
||||
for f in flist:
|
||||
if not f:
|
||||
continue
|
||||
f, exist = f.rsplit(":", 1)
|
||||
f, exist = f.split(":")
|
||||
if (exist == "True" and not os.path.exists(f)) or (exist == "False" and os.path.exists(f)):
|
||||
self.logger.debug2("%s's file checksum list file %s changed",
|
||||
fn, f)
|
||||
@@ -674,6 +675,10 @@ class Cache(object):
|
||||
Save the cache
|
||||
Called from the parser when complete (or exiting)
|
||||
"""
|
||||
|
||||
if not self.has_cache:
|
||||
return
|
||||
|
||||
if self.cacheclean:
|
||||
self.logger.debug2("Cache is clean, not saving.")
|
||||
return
|
||||
@@ -694,7 +699,6 @@ class Cache(object):
|
||||
p.dump(info)
|
||||
|
||||
del self.depends_cache
|
||||
SiggenRecipeInfo.reset()
|
||||
|
||||
@staticmethod
|
||||
def mtime(cachefile):
|
||||
@@ -717,11 +721,26 @@ class Cache(object):
|
||||
if watcher:
|
||||
watcher(info_array[0].file_depends)
|
||||
|
||||
if not self.has_cache:
|
||||
return
|
||||
|
||||
if (info_array[0].skipped or 'SRCREVINACTION' not in info_array[0].pv) and not info_array[0].nocache:
|
||||
if parsed:
|
||||
self.cacheclean = False
|
||||
self.depends_cache[filename] = info_array
|
||||
|
||||
def add(self, file_name, data, cacheData, parsed=None):
|
||||
"""
|
||||
Save data we need into the cache
|
||||
"""
|
||||
|
||||
realfn = virtualfn2realfn(file_name)[0]
|
||||
|
||||
info_array = []
|
||||
for cache_class in self.caches_array:
|
||||
info_array.append(cache_class(realfn, data))
|
||||
self.add_info(file_name, info_array, cacheData, parsed)
|
||||
|
||||
class MulticonfigCache(Mapping):
|
||||
def __init__(self, databuilder, data_hash, caches_array):
|
||||
def progress(p):
|
||||
@@ -758,7 +777,6 @@ class MulticonfigCache(Mapping):
|
||||
loaded = 0
|
||||
|
||||
for c in self.__caches.values():
|
||||
SiggenRecipeInfo.reset()
|
||||
loaded += c.prepare_cache(progress)
|
||||
previous_progress = current_progress
|
||||
|
||||
@@ -779,6 +797,25 @@ class MulticonfigCache(Mapping):
|
||||
for k in self.__caches:
|
||||
yield k
|
||||
|
||||
def init(cooker):
|
||||
"""
|
||||
The Objective: Cache the minimum amount of data possible yet get to the
|
||||
stage of building packages (i.e. tryBuild) without reparsing any .bb files.
|
||||
|
||||
To do this, we intercept getVar calls and only cache the variables we see
|
||||
being accessed. We rely on the cache getVar calls being made for all
|
||||
variables bitbake might need to use to reach this stage. For each cached
|
||||
file we need to track:
|
||||
|
||||
* Its mtime
|
||||
* The mtimes of all its dependencies
|
||||
* Whether it caused a parse.SkipRecipe exception
|
||||
|
||||
Files causing parsing errors are evicted from the cache.
|
||||
|
||||
"""
|
||||
return Cache(cooker.configuration.data, cooker.configuration.data_hash)
|
||||
|
||||
|
||||
class CacheData(object):
|
||||
"""
|
||||
@@ -817,10 +854,11 @@ class MultiProcessCache(object):
|
||||
self.cachedata = self.create_cachedata()
|
||||
self.cachedata_extras = self.create_cachedata()
|
||||
|
||||
def init_cache(self, cachedir, cache_file_name=None):
|
||||
if not cachedir:
|
||||
def init_cache(self, d, cache_file_name=None):
|
||||
cachedir = (d.getVar("PERSISTENT_DIR") or
|
||||
d.getVar("CACHE"))
|
||||
if cachedir in [None, '']:
|
||||
return
|
||||
|
||||
bb.utils.mkdirhier(cachedir)
|
||||
self.cachefile = os.path.join(cachedir,
|
||||
cache_file_name or self.__class__.cache_file_name)
|
||||
@@ -847,24 +885,10 @@ class MultiProcessCache(object):
|
||||
data = [{}]
|
||||
return data
|
||||
|
||||
def clear_cache(self):
|
||||
if not self.cachefile:
|
||||
bb.fatal("Can't clear invalid cachefile")
|
||||
|
||||
self.cachedata = self.create_cachedata()
|
||||
self.cachedata_extras = self.create_cachedata()
|
||||
with bb.utils.fileslocked([self.cachefile + ".lock"]):
|
||||
bb.utils.remove(self.cachefile)
|
||||
bb.utils.remove(self.cachefile + "-*")
|
||||
|
||||
def save_extras(self):
|
||||
if not self.cachefile:
|
||||
return
|
||||
|
||||
have_data = any(self.cachedata_extras)
|
||||
if not have_data:
|
||||
return
|
||||
|
||||
glf = bb.utils.lockfile(self.cachefile + ".lock", shared=True)
|
||||
|
||||
i = os.getpid()
|
||||
@@ -899,8 +923,6 @@ class MultiProcessCache(object):
|
||||
|
||||
data = self.cachedata
|
||||
|
||||
have_data = False
|
||||
|
||||
for f in [y for y in os.listdir(os.path.dirname(self.cachefile)) if y.startswith(os.path.basename(self.cachefile) + '-')]:
|
||||
f = os.path.join(os.path.dirname(self.cachefile), f)
|
||||
try:
|
||||
@@ -915,14 +937,12 @@ class MultiProcessCache(object):
|
||||
os.unlink(f)
|
||||
continue
|
||||
|
||||
have_data = True
|
||||
self.merge_data(extradata, data)
|
||||
os.unlink(f)
|
||||
|
||||
if have_data:
|
||||
with open(self.cachefile, "wb") as f:
|
||||
p = pickle.Pickler(f, -1)
|
||||
p.dump([data, self.__class__.CACHE_VERSION])
|
||||
with open(self.cachefile, "wb") as f:
|
||||
p = pickle.Pickler(f, -1)
|
||||
p.dump([data, self.__class__.CACHE_VERSION])
|
||||
|
||||
bb.utils.unlockfile(glf)
|
||||
|
||||
@@ -978,11 +998,3 @@ class SimpleCache(object):
|
||||
p.dump([data, self.cacheversion])
|
||||
|
||||
bb.utils.unlockfile(glf)
|
||||
|
||||
def copyfile(self, target):
|
||||
if not self.cachefile:
|
||||
return
|
||||
|
||||
glf = bb.utils.lockfile(self.cachefile + ".lock")
|
||||
shutil.copy(self.cachefile, target)
|
||||
bb.utils.unlockfile(glf)
|
||||
|
||||
@@ -11,13 +11,10 @@ import os
|
||||
import stat
|
||||
import bb.utils
|
||||
import logging
|
||||
import re
|
||||
from bb.cache import MultiProcessCache
|
||||
|
||||
logger = logging.getLogger("BitBake.Cache")
|
||||
|
||||
filelist_regex = re.compile(r'(?:(?<=:True)|(?<=:False))\s+')
|
||||
|
||||
# mtime cache (non-persistent)
|
||||
# based upon the assumption that files do not change during a bitbake run
|
||||
class FileMtimeCache(object):
|
||||
@@ -112,12 +109,7 @@ class FileChecksumCache(MultiProcessCache):
|
||||
return dirchecksums
|
||||
|
||||
checksums = []
|
||||
for pth in filelist_regex.split(filelist):
|
||||
if not pth:
|
||||
continue
|
||||
pth = pth.strip()
|
||||
if not pth:
|
||||
continue
|
||||
for pth in filelist.split():
|
||||
exist = pth.split(":")[1]
|
||||
if exist == "False":
|
||||
continue
|
||||
@@ -142,28 +134,3 @@ class FileChecksumCache(MultiProcessCache):
|
||||
|
||||
checksums.sort(key=operator.itemgetter(1))
|
||||
return checksums
|
||||
|
||||
class RevisionsCache(MultiProcessCache):
|
||||
cache_file_name = "local_srcrevisions.dat"
|
||||
CACHE_VERSION = 1
|
||||
|
||||
def __init__(self):
|
||||
MultiProcessCache.__init__(self)
|
||||
|
||||
def get_revs(self):
|
||||
return self.cachedata[0]
|
||||
|
||||
def get_rev(self, k):
|
||||
if k in self.cachedata_extras[0]:
|
||||
return self.cachedata_extras[0][k]
|
||||
if k in self.cachedata[0]:
|
||||
return self.cachedata[0][k]
|
||||
return None
|
||||
|
||||
def set_rev(self, k, v):
|
||||
self.cachedata[0][k] = v
|
||||
self.cachedata_extras[0][k] = v
|
||||
|
||||
def merge_data(self, source, dest):
|
||||
for h in source[0]:
|
||||
dest[0][h] = source[0][h]
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -27,7 +25,6 @@ import ast
|
||||
import sys
|
||||
import codegen
|
||||
import logging
|
||||
import inspect
|
||||
import bb.pysh as pysh
|
||||
import bb.utils, bb.data
|
||||
import hashlib
|
||||
@@ -59,48 +56,10 @@ def check_indent(codestr):
|
||||
|
||||
return codestr
|
||||
|
||||
modulecode_deps = {}
|
||||
|
||||
def add_module_functions(fn, functions, namespace):
|
||||
import os
|
||||
fstat = os.stat(fn)
|
||||
fixedhash = fn + ":" + str(fstat.st_size) + ":" + str(fstat.st_mtime)
|
||||
for f in functions:
|
||||
name = "%s.%s" % (namespace, f)
|
||||
parser = PythonParser(name, logger)
|
||||
try:
|
||||
parser.parse_python(None, filename=fn, lineno=1, fixedhash=fixedhash+f)
|
||||
#bb.warn("Cached %s" % f)
|
||||
except KeyError:
|
||||
targetfn = inspect.getsourcefile(functions[f])
|
||||
if fn != targetfn:
|
||||
# Skip references to other modules outside this file
|
||||
#bb.warn("Skipping %s" % name)
|
||||
continue
|
||||
lines, lineno = inspect.getsourcelines(functions[f])
|
||||
src = "".join(lines)
|
||||
parser.parse_python(src, filename=fn, lineno=lineno, fixedhash=fixedhash+f)
|
||||
#bb.warn("Not cached %s" % f)
|
||||
execs = parser.execs.copy()
|
||||
# Expand internal module exec references
|
||||
for e in parser.execs:
|
||||
if e in functions:
|
||||
execs.remove(e)
|
||||
execs.add(namespace + "." + e)
|
||||
visitorcode = None
|
||||
if hasattr(functions[f], 'visitorcode'):
|
||||
visitorcode = getattr(functions[f], "visitorcode")
|
||||
modulecode_deps[name] = [parser.references.copy(), execs, parser.var_execs.copy(), parser.contains.copy(), parser.extra, visitorcode]
|
||||
#bb.warn("%s: %s\nRefs:%s Execs: %s %s %s" % (name, fn, parser.references, parser.execs, parser.var_execs, parser.contains))
|
||||
|
||||
def update_module_dependencies(d):
|
||||
for mod in modulecode_deps:
|
||||
excludes = set((d.getVarFlag(mod, "vardepsexclude") or "").split())
|
||||
if excludes:
|
||||
modulecode_deps[mod] = [modulecode_deps[mod][0] - excludes, modulecode_deps[mod][1] - excludes, modulecode_deps[mod][2] - excludes, modulecode_deps[mod][3], modulecode_deps[mod][4], modulecode_deps[mod][5]]
|
||||
|
||||
# A custom getstate/setstate using tuples is actually worth 15% cachesize by
# avoiding duplication of the attribute names!


class SetCache(object):
def __init__(self):
self.setcache = {}
@@ -120,22 +79,21 @@ class SetCache(object):
|
||||
codecache = SetCache()
|
||||
|
||||
class pythonCacheLine(object):
|
||||
def __init__(self, refs, execs, contains, extra):
|
||||
def __init__(self, refs, execs, contains):
|
||||
self.refs = codecache.internSet(refs)
|
||||
self.execs = codecache.internSet(execs)
|
||||
self.contains = {}
|
||||
for c in contains:
|
||||
self.contains[c] = codecache.internSet(contains[c])
|
||||
self.extra = extra
|
||||
|
||||
def __getstate__(self):
|
||||
return (self.refs, self.execs, self.contains, self.extra)
|
||||
return (self.refs, self.execs, self.contains)
|
||||
|
||||
def __setstate__(self, state):
|
||||
(refs, execs, contains, extra) = state
|
||||
self.__init__(refs, execs, contains, extra)
|
||||
(refs, execs, contains) = state
|
||||
self.__init__(refs, execs, contains)
|
||||
def __hash__(self):
|
||||
l = (hash(self.refs), hash(self.execs), hash(self.extra))
|
||||
l = (hash(self.refs), hash(self.execs))
|
||||
for c in sorted(self.contains.keys()):
|
||||
l = l + (c, hash(self.contains[c]))
|
||||
return hash(l)
|
||||
@@ -164,7 +122,7 @@ class CodeParserCache(MultiProcessCache):
|
||||
# so that an existing cache gets invalidated. Additionally you'll need
|
||||
# to increment __cache_version__ in cache.py in order to ensure that old
|
||||
# recipe caches don't trigger "Taskhash mismatch" errors.
|
||||
CACHE_VERSION = 14
|
||||
CACHE_VERSION = 11
|
||||
|
||||
def __init__(self):
|
||||
MultiProcessCache.__init__(self)
|
||||
@@ -178,8 +136,8 @@ class CodeParserCache(MultiProcessCache):
|
||||
self.pythoncachelines = {}
|
||||
self.shellcachelines = {}
|
||||
|
||||
def newPythonCacheLine(self, refs, execs, contains, extra):
|
||||
cacheline = pythonCacheLine(refs, execs, contains, extra)
|
||||
def newPythonCacheLine(self, refs, execs, contains):
|
||||
cacheline = pythonCacheLine(refs, execs, contains)
|
||||
h = hash(cacheline)
|
||||
if h in self.pythoncachelines:
|
||||
return self.pythoncachelines[h]
|
||||
@@ -194,12 +152,12 @@ class CodeParserCache(MultiProcessCache):
|
||||
self.shellcachelines[h] = cacheline
|
||||
return cacheline
|
||||
|
||||
def init_cache(self, cachedir):
|
||||
def init_cache(self, d):
|
||||
# Check if we already have the caches
|
||||
if self.pythoncache:
|
||||
return
|
||||
|
||||
MultiProcessCache.init_cache(self, cachedir)
|
||||
MultiProcessCache.init_cache(self, d)
|
||||
|
||||
# cachedata gets re-assigned in the parent
|
||||
self.pythoncache = self.cachedata[0]
|
||||
@@ -211,8 +169,8 @@ class CodeParserCache(MultiProcessCache):
|
||||
|
||||
codeparsercache = CodeParserCache()
|
||||
|
||||
def parser_cache_init(cachedir):
|
||||
codeparsercache.init_cache(cachedir)
|
||||
def parser_cache_init(d):
|
||||
codeparsercache.init_cache(d)
|
||||
|
||||
def parser_cache_save():
|
||||
codeparsercache.save_extras()
|
||||
@@ -264,28 +222,20 @@ class PythonParser():
|
||||
|
||||
def visit_Call(self, node):
|
||||
name = self.called_node_name(node.func)
|
||||
if name and name in modulecode_deps and modulecode_deps[name][5]:
|
||||
visitorcode = modulecode_deps[name][5]
|
||||
contains, execs, warn = visitorcode(name, node.args)
|
||||
for i in contains:
|
||||
self.contains[i] = contains[i]
|
||||
self.execs |= execs
|
||||
if warn:
|
||||
self.warn(node.func, warn)
|
||||
elif name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
|
||||
if isinstance(node.args[0], ast.Constant) and isinstance(node.args[0].value, str):
|
||||
varname = node.args[0].value
|
||||
if name in self.containsfuncs and isinstance(node.args[1], ast.Constant):
|
||||
if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
varname = node.args[0].s
|
||||
if name in self.containsfuncs and isinstance(node.args[1], ast.Str):
|
||||
if varname not in self.contains:
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname].add(node.args[1].value)
|
||||
elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Constant):
|
||||
self.contains[varname].add(node.args[1].s)
|
||||
elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Str):
|
||||
if varname not in self.contains:
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname].update(node.args[1].value.split())
|
||||
self.contains[varname].update(node.args[1].s.split())
|
||||
elif name.endswith(self.getvarflags):
|
||||
if isinstance(node.args[1], ast.Constant):
|
||||
self.references.add('%s[%s]' % (varname, node.args[1].value))
|
||||
if isinstance(node.args[1], ast.Str):
|
||||
self.references.add('%s[%s]' % (varname, node.args[1].s))
|
||||
else:
|
||||
self.warn(node.func, node.args[1])
|
||||
else:
|
||||
@@ -293,8 +243,8 @@ class PythonParser():
|
||||
else:
|
||||
self.warn(node.func, node.args[0])
|
||||
elif name and name.endswith(".expand"):
|
||||
if isinstance(node.args[0], ast.Constant):
|
||||
value = node.args[0].value
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
value = node.args[0].s
|
||||
d = bb.data.init()
|
||||
parser = d.expandWithRefs(value, self.name)
|
||||
self.references |= parser.references
|
||||
@@ -304,8 +254,8 @@ class PythonParser():
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname] |= parser.contains[varname]
|
||||
elif name in self.execfuncs:
|
||||
if isinstance(node.args[0], ast.Constant):
|
||||
self.var_execs.add(node.args[0].value)
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
self.var_execs.add(node.args[0].s)
|
||||
else:
|
||||
self.warn(node.func, node.args[0])
|
||||
elif name and isinstance(node.func, (ast.Name, ast.Attribute)):
|
||||
@@ -337,17 +287,11 @@ class PythonParser():
|
||||
self.unhandled_message = "in call of %s, argument '%s' is not a string literal"
|
||||
self.unhandled_message = "while parsing %s, %s" % (name, self.unhandled_message)
|
||||
|
||||
# For the python module code it is expensive to have the function text, so it
# uses a different fixedhash to cache against. We can take the hit on obtaining the
# text if it isn't in the cache.
def parse_python(self, node, lineno=0, filename="<string>", fixedhash=None):
|
||||
if not fixedhash and (not node or not node.strip()):
|
||||
def parse_python(self, node, lineno=0, filename="<string>"):
|
||||
if not node or not node.strip():
|
||||
return
|
||||
|
||||
if fixedhash:
|
||||
h = fixedhash
|
||||
else:
|
||||
h = bbhash(str(node))
|
||||
h = bbhash(str(node))
|
||||
|
||||
if h in codeparsercache.pythoncache:
|
||||
self.references = set(codeparsercache.pythoncache[h].refs)
|
||||
@@ -355,7 +299,6 @@ class PythonParser():
|
||||
self.contains = {}
|
||||
for i in codeparsercache.pythoncache[h].contains:
|
||||
self.contains[i] = set(codeparsercache.pythoncache[h].contains[i])
|
||||
self.extra = codeparsercache.pythoncache[h].extra
|
||||
return
|
||||
|
||||
if h in codeparsercache.pythoncacheextras:
|
||||
@@ -364,12 +307,8 @@ class PythonParser():
|
||||
self.contains = {}
|
||||
for i in codeparsercache.pythoncacheextras[h].contains:
|
||||
self.contains[i] = set(codeparsercache.pythoncacheextras[h].contains[i])
|
||||
self.extra = codeparsercache.pythoncacheextras[h].extra
|
||||
return
|
||||
|
||||
if fixedhash and not node:
|
||||
raise KeyError
|
||||
|
||||
# Need to parse so take the hit on the real log buffer
|
||||
self.log = BufferedLogger('BitBake.Data.PythonParser', logging.DEBUG, self._log)
|
||||
|
||||
@@ -383,11 +322,8 @@ class PythonParser():
|
||||
self.visit_Call(n)
|
||||
|
||||
self.execs.update(self.var_execs)
|
||||
self.extra = None
|
||||
if fixedhash:
|
||||
self.extra = bbhash(str(node))
|
||||
|
||||
codeparsercache.pythoncacheextras[h] = codeparsercache.newPythonCacheLine(self.references, self.execs, self.contains, self.extra)
|
||||
codeparsercache.pythoncacheextras[h] = codeparsercache.newPythonCacheLine(self.references, self.execs, self.contains)
|
||||
|
||||
class ShellParser():
|
||||
def __init__(self, name, log):
|
||||
@@ -506,34 +442,19 @@ class ShellParser():
|
||||
"""
|
||||
|
||||
words = list(words)
|
||||
for word in words:
|
||||
for word in list(words):
|
||||
wtree = pyshlex.make_wordtree(word[1])
|
||||
for part in wtree:
|
||||
if not isinstance(part, list):
|
||||
continue
|
||||
|
||||
candidates = [part]
|
||||
if part[0] in ('`', '$('):
|
||||
command = pyshlex.wordtree_as_string(part[1:-1])
|
||||
self._parse_shell(command)
|
||||
|
||||
# If command is of type:
#
# var="... $(cmd [...]) ..."
#
# Then iterate on what's between the quotes and if we find a
# list, make that what we check for below.
if len(part) >= 3 and part[0] == '"':
|
||||
for p in part[1:-1]:
|
||||
if isinstance(p, list):
|
||||
candidates.append(p)
|
||||
|
||||
for candidate in candidates:
|
||||
if len(candidate) >= 2:
|
||||
if candidate[0] in ('`', '$('):
|
||||
command = pyshlex.wordtree_as_string(candidate[1:-1])
|
||||
self._parse_shell(command)
|
||||
|
||||
if word[0] in ("cmd_name", "cmd_word"):
|
||||
if word in words:
|
||||
words.remove(word)
|
||||
if word[0] in ("cmd_name", "cmd_word"):
|
||||
if word in words:
|
||||
words.remove(word)
|
||||
|
||||
usetoken = False
|
||||
for word in words:
|
||||
|
||||
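To illustrate the quoted-command case handled above: an assignment like the one below hides a command substitution inside double quotes, and the nested command still needs to be scanned for executed programs. A rough standalone illustration (plain regex, not bitbake's pyshlex word trees; the command name is made up):

import re

assignment = 'IMAGE_ID="prefix-$(get_build_id --short)-suffix"'
for inner in re.findall(r'\$\(([^)]*)\)', assignment):
    # inner is the command substitution body, e.g. "get_build_id --short"
    print("nested command:", inner)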
@@ -24,7 +24,6 @@ import io
|
||||
import bb.event
|
||||
import bb.cooker
|
||||
import bb.remotedata
|
||||
import bb.parse
|
||||
|
||||
class DataStoreConnectionHandle(object):
|
||||
def __init__(self, dsindex=0):
|
||||
@@ -52,21 +51,20 @@ class Command:
|
||||
"""
|
||||
A queue of asynchronous commands for bitbake
|
||||
"""
|
||||
def __init__(self, cooker, process_server):
|
||||
def __init__(self, cooker):
|
||||
self.cooker = cooker
|
||||
self.cmds_sync = CommandsSync()
|
||||
self.cmds_async = CommandsAsync()
|
||||
self.remotedatastores = None
|
||||
|
||||
self.process_server = process_server
|
||||
# Access with locking using process_server.{get/set/clear}_async_cmd()
|
||||
# FIXME Add lock for this
|
||||
self.currentAsyncCommand = None
|
||||
|
||||
def runCommand(self, commandline, process_server, ro_only=False):
|
||||
def runCommand(self, commandline, ro_only = False):
|
||||
command = commandline.pop(0)
|
||||
|
||||
# Ensure cooker is ready for commands
|
||||
if command not in ["updateConfig", "setFeatures", "ping"]:
|
||||
if command != "updateConfig" and command != "setFeatures":
|
||||
try:
|
||||
self.cooker.init_configdata()
|
||||
if not self.remotedatastores:
|
||||
@@ -86,6 +84,7 @@ class Command:
|
||||
if not hasattr(command_method, 'readonly') or not getattr(command_method, 'readonly'):
|
||||
return None, "Not able to execute not readonly commands in readonly mode"
|
||||
try:
|
||||
self.cooker.process_inotify_updates()
|
||||
if getattr(command_method, 'needconfig', True):
|
||||
self.cooker.updateCacheSync()
|
||||
result = command_method(self, commandline)
|
||||
@@ -100,47 +99,51 @@ class Command:
|
||||
return None, traceback.format_exc()
|
||||
else:
|
||||
return result, None
|
||||
if self.currentAsyncCommand is not None:
|
||||
return None, "Busy (%s in progress)" % self.currentAsyncCommand[0]
|
||||
if command not in CommandsAsync.__dict__:
|
||||
return None, "No such command"
|
||||
if not process_server.set_async_cmd((command, commandline)):
|
||||
return None, "Busy (%s in progress)" % self.process_server.get_async_cmd()[0]
|
||||
self.cooker.idleCallBackRegister(self.runAsyncCommand, process_server)
|
||||
self.currentAsyncCommand = (command, commandline)
|
||||
self.cooker.idleCallBackRegister(self.cooker.runCommands, self.cooker)
|
||||
return True, None
|
||||
|
||||
def runAsyncCommand(self, _, process_server, halt):
|
||||
def runAsyncCommand(self):
|
||||
try:
|
||||
if self.cooker.state in (bb.cooker.State.ERROR, bb.cooker.State.SHUTDOWN, bb.cooker.State.FORCE_SHUTDOWN):
|
||||
self.cooker.process_inotify_updates()
|
||||
if self.cooker.state in (bb.cooker.state.error, bb.cooker.state.shutdown, bb.cooker.state.forceshutdown):
|
||||
# updateCache will trigger a shutdown of the parser
|
||||
# and then raise BBHandledException triggering an exit
|
||||
self.cooker.updateCache()
|
||||
return bb.server.process.idleFinish("Cooker in error state")
|
||||
cmd = process_server.get_async_cmd()
|
||||
if cmd is not None:
|
||||
(command, options) = cmd
|
||||
return False
|
||||
if self.currentAsyncCommand is not None:
|
||||
(command, options) = self.currentAsyncCommand
|
||||
commandmethod = getattr(CommandsAsync, command)
|
||||
needcache = getattr( commandmethod, "needcache" )
|
||||
if needcache and self.cooker.state != bb.cooker.State.RUNNING:
|
||||
if needcache and self.cooker.state != bb.cooker.state.running:
|
||||
self.cooker.updateCache()
|
||||
return True
|
||||
else:
|
||||
commandmethod(self.cmds_async, self, options)
|
||||
return False
|
||||
else:
|
||||
return bb.server.process.idleFinish("Nothing to do, no async command?")
|
||||
return False
|
||||
except KeyboardInterrupt as exc:
|
||||
return bb.server.process.idleFinish("Interrupted")
|
||||
self.finishAsyncCommand("Interrupted")
|
||||
return False
|
||||
except SystemExit as exc:
|
||||
arg = exc.args[0]
|
||||
if isinstance(arg, str):
|
||||
return bb.server.process.idleFinish(arg)
|
||||
self.finishAsyncCommand(arg)
|
||||
else:
|
||||
return bb.server.process.idleFinish("Exited with %s" % arg)
|
||||
self.finishAsyncCommand("Exited with %s" % arg)
|
||||
return False
|
||||
except Exception as exc:
|
||||
import traceback
|
||||
if isinstance(exc, bb.BBHandledException):
|
||||
return bb.server.process.idleFinish("")
|
||||
self.finishAsyncCommand("")
|
||||
else:
|
||||
return bb.server.process.idleFinish(traceback.format_exc())
|
||||
self.finishAsyncCommand(traceback.format_exc())
|
||||
return False
|
||||
|
||||
def finishAsyncCommand(self, msg=None, code=None):
|
||||
if msg or msg == "":
|
||||
@@ -149,8 +152,8 @@ class Command:
|
||||
bb.event.fire(CommandExit(code), self.cooker.data)
|
||||
else:
|
||||
bb.event.fire(CommandCompleted(), self.cooker.data)
|
||||
self.currentAsyncCommand = None
|
||||
self.cooker.finishcommand()
|
||||
self.process_server.clear_async_cmd()
|
||||
|
||||
def reset(self):
|
||||
if self.remotedatastores:
|
||||
@@ -163,14 +166,6 @@ class CommandsSync:
|
||||
These must not influence any running synchronous command.
|
||||
"""
|
||||
|
||||
def ping(self, command, params):
|
||||
"""
|
||||
Allow a UI to check the server is still alive
|
||||
"""
|
||||
return "Still alive!"
|
||||
ping.needconfig = False
|
||||
ping.readonly = True
|
||||
|
||||
def stateShutdown(self, command, params):
|
||||
"""
|
||||
Trigger cooker 'shutdown' mode
|
||||
@@ -308,11 +303,6 @@ class CommandsSync:
|
||||
return ret
|
||||
getLayerPriorities.readonly = True
|
||||
|
||||
def revalidateCaches(self, command, params):
|
||||
"""Called by UI clients when metadata may have changed"""
|
||||
command.cooker.revalidateCaches()
|
||||
revalidateCaches.needconfig = False
|
||||
|
||||
def getRecipes(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
@@ -421,30 +411,15 @@ class CommandsSync:
|
||||
return command.cooker.recipecaches[mc].pkg_dp
|
||||
getDefaultPreference.readonly = True
|
||||
|
||||
|
||||
def getSkippedRecipes(self, command, params):
"""
Get the map of skipped recipes for the specified multiconfig/mc name (`params[0]`).

Invoked by `bb.tinfoil.Tinfoil.get_skipped_recipes`

:param command: Internally used parameter.
:param params: Parameter array. params[0] is multiconfig/mc name. If not given, then default mc '' is assumed.
:return: Dict whose keys are virtualfns and values are `bb.cooker.SkippedPackage`
"""
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
|
||||
# Return list sorted by reverse priority order
|
||||
import bb.cache
|
||||
def sortkey(x):
|
||||
vfn, _ = x
|
||||
realfn, _, item_mc = bb.cache.virtualfn2realfn(vfn)
|
||||
return -command.cooker.collections[item_mc].calc_bbfile_priority(realfn)[0], vfn
|
||||
realfn, _, mc = bb.cache.virtualfn2realfn(vfn)
|
||||
return (-command.cooker.collections[mc].calc_bbfile_priority(realfn)[0], vfn)
|
||||
|
||||
skipdict = OrderedDict(sorted(command.cooker.skiplist_by_mc[mc].items(), key=sortkey))
|
||||
skipdict = OrderedDict(sorted(command.cooker.skiplist.items(), key=sortkey))
|
||||
return list(skipdict.items())
|
||||
getSkippedRecipes.readonly = True
|
||||
|
||||
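For context, the usual caller is tinfoil, roughly as sketched below (assumes an initialised build environment; exact keyword arguments may differ between releases):

import bb.tinfoil

with bb.tinfoil.Tinfoil() as tinfoil:
    tinfoil.prepare()
    # keys are virtual filenames, values describe why each recipe was skipped
    for virtualfn, skipped in tinfoil.get_skipped_recipes().items():
        print(virtualfn, getattr(skipped, "skipreason", ""))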
@@ -566,8 +541,8 @@ class CommandsSync:
|
||||
and return a datastore object representing the environment
|
||||
for the recipe.
|
||||
"""
|
||||
virtualfn = params[0]
|
||||
(fn, cls, mc) = bb.cache.virtualfn2realfn(virtualfn)
|
||||
fn = params[0]
|
||||
mc = bb.runqueue.mc_from_tid(fn)
|
||||
appends = params[1]
|
||||
appendlist = params[2]
|
||||
if len(params) > 3:
|
||||
@@ -582,7 +557,6 @@ class CommandsSync:
|
||||
appendfiles = command.cooker.collections[mc].get_file_appends(fn)
|
||||
else:
|
||||
appendfiles = []
|
||||
layername = command.cooker.collections[mc].calc_bbfile_priority(fn)[2]
|
||||
# We are calling bb.cache locally here rather than on the server,
|
||||
# but that's OK because it doesn't actually need anything from
|
||||
# the server barring the global datastore (which we have a remote
|
||||
@@ -590,21 +564,15 @@ class CommandsSync:
|
||||
if config_data:
|
||||
# We have to use a different function here if we're passing in a datastore
|
||||
# NOTE: we took a copy above, so we don't do it here again
|
||||
envdata = command.cooker.databuilder._parse_recipe(config_data, fn, appendfiles, mc, layername)[cls]
|
||||
envdata = bb.cache.parse_recipe(config_data, fn, appendfiles, mc)['']
|
||||
else:
|
||||
# Use the standard path
|
||||
envdata = command.cooker.databuilder.parseRecipe(virtualfn, appendfiles, layername)
|
||||
parser = bb.cache.NoCache(command.cooker.databuilder)
|
||||
envdata = parser.loadDataFull(fn, appendfiles)
|
||||
idx = command.remotedatastores.store(envdata)
|
||||
return DataStoreConnectionHandle(idx)
|
||||
parseRecipeFile.readonly = True
|
||||
|
||||
def finalizeData(self, command, params):
|
||||
newdata = command.cooker.data.createCopy()
|
||||
bb.data.expandKeys(newdata)
|
||||
bb.parse.ast.runAnonFuncs(newdata)
|
||||
idx = command.remotedatastores.store(newdata)
|
||||
return DataStoreConnectionHandle(idx)
|
||||
|
||||
class CommandsAsync:
|
||||
"""
|
||||
A class of asynchronous commands
|
||||
@@ -773,7 +741,7 @@ class CommandsAsync:
|
||||
"""
|
||||
event = params[0]
|
||||
bb.event.fire(eval(event), command.cooker.data)
|
||||
process_server.clear_async_cmd()
|
||||
command.currentAsyncCommand = None
|
||||
triggerEvent.needcache = False
|
||||
|
||||
def resetCooker(self, command, params):
|
||||
@@ -800,14 +768,7 @@ class CommandsAsync:
|
||||
(mc, pn) = bb.runqueue.split_mc(params[0])
|
||||
taskname = params[1]
|
||||
sigs = params[2]
|
||||
bb.siggen.check_siggen_version(bb.siggen)
|
||||
res = bb.siggen.find_siginfo(pn, taskname, sigs, command.cooker.databuilder.mcdata[mc])
|
||||
bb.event.fire(bb.event.FindSigInfoResult(res), command.cooker.databuilder.mcdata[mc])
|
||||
command.finishAsyncCommand()
|
||||
findSigInfo.needcache = False
|
||||
|
||||
def getTaskSignatures(self, command, params):
|
||||
res = command.cooker.getTaskSignatures(params[0], params[1])
|
||||
bb.event.fire(bb.event.GetTaskSignatureResult(res), command.cooker.data)
|
||||
command.finishAsyncCommand()
|
||||
getTaskSignatures.needcache = True
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
# Helper library to implement streaming compression and decompression using an
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -13,7 +11,7 @@ def open(*args, **kwargs):

class LZ4File(bb.compress._pipecompress.PipeFile):
def get_compress(self):
return ["lz4", "-z", "-c"]
return ["lz4c", "-z", "-c"]

def get_decompress(self):
return ["lz4", "-d", "-c"]
return ["lz4c", "-d", "-c"]

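Usage of these helpers is expected to mirror the standard gzip/lzma modules, along these lines (a sketch; assumes the corresponding compressor binary is on PATH and that the open() wrapper accepts text modes):

import bb.compress.lz4

with bb.compress.lz4.open("/tmp/example.lz4", "wt", encoding="utf-8") as f:
    f.write("streamed through an external lz4 process\n")

with bb.compress.lz4.open("/tmp/example.lz4", "rt", encoding="utf-8") as f:
    print(f.read())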
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,4 +1,3 @@
|
||||
|
||||
#
|
||||
# Copyright (C) 2003, 2004 Chris Larson
|
||||
# Copyright (C) 2003, 2004 Phil Blundell
|
||||
@@ -161,7 +160,12 @@ def catch_parse_error(func):
|
||||
def wrapped(fn, *args):
|
||||
try:
|
||||
return func(fn, *args)
|
||||
except Exception as exc:
|
||||
except IOError as exc:
|
||||
import traceback
|
||||
parselog.critical(traceback.format_exc())
|
||||
parselog.critical("Unable to parse %s: %s" % (fn, exc))
|
||||
raise bb.BBHandledException()
|
||||
except bb.data_smart.ExpansionError as exc:
|
||||
import traceback
|
||||
|
||||
bbdir = os.path.dirname(__file__) + os.sep
|
||||
@@ -173,11 +177,14 @@ def catch_parse_error(func):
|
||||
break
|
||||
parselog.critical("Unable to parse %s" % fn, exc_info=(exc_class, exc, tb))
|
||||
raise bb.BBHandledException()
|
||||
except bb.parse.ParseError as exc:
|
||||
parselog.critical(str(exc))
|
||||
raise bb.BBHandledException()
|
||||
return wrapped
|
||||
|
||||
@catch_parse_error
|
||||
def parse_config_file(fn, data, include=True):
|
||||
return bb.parse.handle(fn, data, include, baseconfig=True)
|
||||
return bb.parse.handle(fn, data, include)
|
||||
|
||||
@catch_parse_error
|
||||
def _inherit(bbclass, data):
|
||||
@@ -241,13 +248,12 @@ class CookerDataBuilder(object):
|
||||
for k in cookercfg.env:
|
||||
self.savedenv.setVar(k, cookercfg.env[k])
|
||||
if k in bb.data_smart.bitbake_renamed_vars:
|
||||
bb.error('Shell environment variable %s has been renamed to %s' % (k, bb.data_smart.bitbake_renamed_vars[k]))
|
||||
bb.error('Variable %s from the shell environment has been renamed to %s' % (k, bb.data_smart.bitbake_renamed_vars[k]))
|
||||
bb.fatal("Exiting to allow enviroment variables to be corrected")
|
||||
|
||||
filtered_keys = bb.utils.approved_variables()
|
||||
bb.data.inheritFromOS(self.basedata, self.savedenv, filtered_keys)
|
||||
self.basedata.setVar("BB_ORIGENV", self.savedenv)
|
||||
self.basedata.setVar("__bbclasstype", "global")
|
||||
|
||||
if worker:
|
||||
self.basedata.setVar("BB_WORKERCONTEXT", "1")
|
||||
@@ -255,22 +261,15 @@ class CookerDataBuilder(object):
|
||||
self.data = self.basedata
|
||||
self.mcdata = {}
|
||||
|
||||
def calc_datastore_hashes(self):
|
||||
data_hash = hashlib.sha256()
|
||||
data_hash.update(self.data.get_hash().encode('utf-8'))
|
||||
multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
|
||||
for config in multiconfig:
|
||||
data_hash.update(self.mcdata[config].get_hash().encode('utf-8'))
|
||||
self.data_hash = data_hash.hexdigest()
|
||||
|
||||
def parseBaseConfiguration(self, worker=False):
|
||||
mcdata = {}
|
||||
data_hash = hashlib.sha256()
|
||||
try:
|
||||
self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles)
|
||||
|
||||
servercontext = self.data.getVar("BB_WORKERCONTEXT", False) is None and not worker
|
||||
bb.fetch.fetcher_init(self.data, servercontext)
|
||||
if self.data.getVar("BB_WORKERCONTEXT", False) is None and not worker:
|
||||
bb.fetch.fetcher_init(self.data)
|
||||
bb.parse.init_parser(self.data)
|
||||
bb.codeparser.parser_cache_init(self.data)
|
||||
|
||||
bb.event.fire(bb.event.ConfigParsed(), self.data)
|
||||
|
||||
@@ -287,23 +286,30 @@ class CookerDataBuilder(object):
|
||||
bb.event.fire(bb.event.ConfigParsed(), self.data)
|
||||
|
||||
bb.parse.init_parser(self.data)
|
||||
mcdata[''] = self.data
|
||||
data_hash.update(self.data.get_hash().encode('utf-8'))
|
||||
self.mcdata[''] = self.data
|
||||
|
||||
multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
|
||||
for config in multiconfig:
|
||||
if config[0].isdigit():
|
||||
bb.fatal("Multiconfig name '%s' is invalid as multiconfigs cannot start with a digit" % config)
|
||||
parsed_mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
|
||||
bb.event.fire(bb.event.ConfigParsed(), parsed_mcdata)
|
||||
mcdata[config] = parsed_mcdata
|
||||
mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
|
||||
bb.event.fire(bb.event.ConfigParsed(), mcdata)
|
||||
self.mcdata[config] = mcdata
|
||||
data_hash.update(mcdata.get_hash().encode('utf-8'))
|
||||
if multiconfig:
|
||||
bb.event.fire(bb.event.MultiConfigParsed(mcdata), self.data)
|
||||
bb.event.fire(bb.event.MultiConfigParsed(self.mcdata), self.data)
|
||||
|
||||
self.data_hash = data_hash.hexdigest()
|
||||
except (SyntaxError, bb.BBHandledException):
|
||||
raise bb.BBHandledException()
|
||||
except bb.data_smart.ExpansionError as e:
|
||||
logger.error(str(e))
|
||||
raise bb.BBHandledException()
|
||||
except Exception:
|
||||
logger.exception("Error parsing configuration files")
|
||||
raise bb.BBHandledException()
|
||||
|
||||
bb.codeparser.update_module_dependencies(self.data)
|
||||
|
||||
# Handle obsolete variable names
|
||||
d = self.data
|
||||
@@ -324,24 +330,17 @@ class CookerDataBuilder(object):
|
||||
if issues:
|
||||
raise bb.BBHandledException()
|
||||
|
||||
for mc in mcdata:
|
||||
mcdata[mc].renameVar("__depends", "__base_depends")
|
||||
mcdata[mc].setVar("__bbclasstype", "recipe")
|
||||
|
||||
# Create a copy so we can reset at a later date when UIs disconnect
|
||||
self.mcorigdata = mcdata
|
||||
for mc in mcdata:
|
||||
self.mcdata[mc] = bb.data.createCopy(mcdata[mc])
|
||||
self.data = self.mcdata['']
|
||||
self.calc_datastore_hashes()
|
||||
self.origdata = self.data
|
||||
self.data = bb.data.createCopy(self.origdata)
|
||||
self.mcdata[''] = self.data
|
||||
|
||||
def reset(self):
|
||||
# We may not have run parseBaseConfiguration() yet
|
||||
if not hasattr(self, 'mcorigdata'):
|
||||
if not hasattr(self, 'origdata'):
|
||||
return
|
||||
for mc in self.mcorigdata:
|
||||
self.mcdata[mc] = bb.data.createCopy(self.mcorigdata[mc])
|
||||
self.data = self.mcdata['']
|
||||
self.data = bb.data.createCopy(self.origdata)
|
||||
self.mcdata[''] = self.data
|
||||
|
||||
def _findLayerConf(self, data):
|
||||
return findConfigFile("bblayers.conf", data)
|
||||
@@ -356,17 +355,12 @@ class CookerDataBuilder(object):
|
||||
|
||||
layerconf = self._findLayerConf(data)
|
||||
if layerconf:
|
||||
parselog.debug2("Found bblayers.conf (%s)", layerconf)
|
||||
parselog.debug(2, "Found bblayers.conf (%s)", layerconf)
|
||||
# By definition bblayers.conf is in conf/ of TOPDIR.
|
||||
# We may have been called with cwd somewhere else so reset TOPDIR
|
||||
data.setVar("TOPDIR", os.path.dirname(os.path.dirname(layerconf)))
|
||||
data = parse_config_file(layerconf, data)
|
||||
|
||||
if not data.getVar("BB_CACHEDIR"):
|
||||
data.setVar("BB_CACHEDIR", "${TOPDIR}/cache")
|
||||
|
||||
bb.codeparser.parser_cache_init(data.getVar("BB_CACHEDIR"))
|
||||
|
||||
layers = (data.getVar('BBLAYERS') or "").split()
|
||||
broken_layers = []
|
||||
|
||||
@@ -388,10 +382,8 @@ class CookerDataBuilder(object):
|
||||
parselog.critical("Please check BBLAYERS in %s" % (layerconf))
|
||||
raise bb.BBHandledException()
|
||||
|
||||
layerseries = None
|
||||
compat_entries = {}
|
||||
for layer in layers:
|
||||
parselog.debug2("Adding layer %s", layer)
|
||||
parselog.debug(2, "Adding layer %s", layer)
|
||||
if 'HOME' in approved and '~' in layer:
|
||||
layer = os.path.expanduser(layer)
|
||||
if layer.endswith('/'):
|
||||
@@ -402,27 +394,8 @@ class CookerDataBuilder(object):
|
||||
data.expandVarref('LAYERDIR')
|
||||
data.expandVarref('LAYERDIR_RE')
|
||||
|
||||
# Sadly we can't have nice things.
# Some layers think they're going to be 'clever' and copy the values from
# another layer, e.g. using ${LAYERSERIES_COMPAT_core}. The whole point of
# this mechanism is to make it clear which releases a layer supports and
# show when a layer master branch is bitrotting and is unmaintained.
# We therefore avoid people doing this here.
collections = (data.getVar('BBFILE_COLLECTIONS') or "").split()
|
||||
for c in collections:
|
||||
compat_entry = data.getVar("LAYERSERIES_COMPAT_%s" % c)
|
||||
if compat_entry:
|
||||
compat_entries[c] = set(compat_entry.split())
|
||||
data.delVar("LAYERSERIES_COMPAT_%s" % c)
|
||||
if not layerseries:
|
||||
layerseries = set((data.getVar("LAYERSERIES_CORENAMES") or "").split())
|
||||
if layerseries:
|
||||
data.delVar("LAYERSERIES_CORENAMES")
|
||||
|
||||
data.delVar('LAYERDIR_RE')
|
||||
data.delVar('LAYERDIR')
|
||||
for c in compat_entries:
|
||||
data.setVar("LAYERSERIES_COMPAT_%s" % c, " ".join(sorted(compat_entries[c])))
|
||||
|
||||
bbfiles_dynamic = (data.getVar('BBFILES_DYNAMIC') or "").split()
|
||||
collections = (data.getVar('BBFILE_COLLECTIONS') or "").split()
|
||||
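To make the LAYERSERIES_COMPAT point above concrete, a layer's conf/layer.conf is expected to state its own compatibility explicitly rather than copy another layer's value (layer and release names below are placeholders):

# Explicit declaration - makes it obvious which releases the layer supports
LAYERSERIES_COMPAT_mylayer = "styhead walnascar"

# Discouraged - hides whether the layer is actually maintained for a release
LAYERSERIES_COMPAT_mylayer = "${LAYERSERIES_COMPAT_core}"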
@@ -441,15 +414,13 @@ class CookerDataBuilder(object):
|
||||
if invalid:
|
||||
bb.fatal("BBFILES_DYNAMIC entries must be of the form {!}<collection name>:<filename pattern>, not:\n %s" % "\n ".join(invalid))
|
||||
|
||||
layerseries = set((data.getVar("LAYERSERIES_CORENAMES") or "").split())
|
||||
collections_tmp = collections[:]
|
||||
for c in collections:
|
||||
collections_tmp.remove(c)
|
||||
if c in collections_tmp:
|
||||
bb.fatal("Found duplicated BBFILE_COLLECTIONS '%s', check bblayers.conf or layer.conf to fix it." % c)
|
||||
|
||||
compat = set()
|
||||
if c in compat_entries:
|
||||
compat = compat_entries[c]
|
||||
compat = set((data.getVar("LAYERSERIES_COMPAT_%s" % c) or "").split())
|
||||
if compat and not layerseries:
|
||||
bb.fatal("No core layer found to work with layer '%s'. Missing entry in bblayers.conf?" % c)
|
||||
if compat and not (compat & layerseries):
|
||||
@@ -458,21 +429,16 @@ class CookerDataBuilder(object):
|
||||
elif not compat and not data.getVar("BB_WORKERCONTEXT"):
|
||||
bb.warn("Layer %s should set LAYERSERIES_COMPAT_%s in its conf/layer.conf file to list the core layer names it is compatible with." % (c, c))
|
||||
|
||||
data.setVar("LAYERSERIES_CORENAMES", " ".join(sorted(layerseries)))
|
||||
|
||||
if not data.getVar("BBPATH"):
|
||||
msg = "The BBPATH variable is not set"
|
||||
if not layerconf:
|
||||
msg += (" and bitbake did not find a conf/bblayers.conf file in"
|
||||
" the expected location.\nMaybe you accidentally"
|
||||
" invoked bitbake from the wrong directory?")
|
||||
bb.fatal(msg)
|
||||
raise SystemExit(msg)
|
||||
|
||||
if not data.getVar("TOPDIR"):
|
||||
data.setVar("TOPDIR", os.path.abspath(os.getcwd()))
|
||||
if not data.getVar("BB_CACHEDIR"):
|
||||
data.setVar("BB_CACHEDIR", "${TOPDIR}/cache")
|
||||
bb.codeparser.parser_cache_init(data.getVar("BB_CACHEDIR"))
|
||||
|
||||
data = parse_config_file(os.path.join("conf", "bitbake.conf"), data)
|
||||
|
||||
@@ -499,54 +465,3 @@ class CookerDataBuilder(object):
|
||||
|
||||
return data
|
||||
|
||||
@staticmethod
|
||||
def _parse_recipe(bb_data, bbfile, appends, mc, layername):
|
||||
bb_data.setVar("__BBMULTICONFIG", mc)
|
||||
bb_data.setVar("FILE_LAYERNAME", layername)
|
||||
|
||||
bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
|
||||
bb.parse.cached_mtime_noerror(bbfile_loc)
|
||||
|
||||
if appends:
|
||||
bb_data.setVar('__BBAPPEND', " ".join(appends))
|
||||
|
||||
return bb.parse.handle(bbfile, bb_data)
|
||||
|
||||
def parseRecipeVariants(self, bbfile, appends, virtonly=False, mc=None, layername=None):
|
||||
"""
|
||||
Load and parse one .bb build file
|
||||
Return the data and whether parsing resulted in the file being skipped
|
||||
"""
|
||||
|
||||
if virtonly:
|
||||
(bbfile, virtual, mc) = bb.cache.virtualfn2realfn(bbfile)
|
||||
bb_data = self.mcdata[mc].createCopy()
|
||||
bb_data.setVar("__ONLYFINALISE", virtual or "default")
|
||||
return self._parse_recipe(bb_data, bbfile, appends, mc, layername)
|
||||
|
||||
if mc is not None:
|
||||
bb_data = self.mcdata[mc].createCopy()
|
||||
return self._parse_recipe(bb_data, bbfile, appends, mc, layername)
|
||||
|
||||
bb_data = self.data.createCopy()
|
||||
datastores = self._parse_recipe(bb_data, bbfile, appends, '', layername)
|
||||
|
||||
for mc in self.mcdata:
|
||||
if not mc:
|
||||
continue
|
||||
bb_data = self.mcdata[mc].createCopy()
|
||||
newstores = self._parse_recipe(bb_data, bbfile, appends, mc, layername)
|
||||
for ns in newstores:
|
||||
datastores["mc:%s:%s" % (mc, ns)] = newstores[ns]
|
||||
|
||||
return datastores
|
||||
|
||||
def parseRecipe(self, virtualfn, appends, layername):
|
||||
"""
|
||||
Return a complete set of data for fn.
|
||||
To do this, we need to parse the file.
|
||||
"""
|
||||
logger.debug("Parsing %s (full)" % virtualfn)
|
||||
(fn, virtual, mc) = bb.cache.virtualfn2realfn(virtualfn)
|
||||
datastores = self.parseRecipeVariants(virtualfn, appends, virtonly=True, layername=layername)
|
||||
return datastores[virtual]
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -4,16 +4,14 @@ BitBake 'Data' implementations
Functions for interacting with the data structure used by the
BitBake build tools.

expandKeys and datastore iteration are the most expensive
operations. Updating overrides is now "on the fly" but still based
on the idea of the cookie monster introduced by zecke:
"At night the cookie monster came by and
The expandKeys and update_data are the most expensive
operations. At night the cookie monster came by and
suggested 'give me cookies on setting the variables and
things will work out'. Taking this suggestion into account
applying the skills from the not yet passed 'Entwurf und
Analyse von Algorithmen' lecture and the cookie
monster seems to be right. We will track setVar more carefully
to have faster datastore operations."
to have faster update_data and expandKeys operations.

This is a trade-off between speed and memory again but
the speed is more critical here.
@@ -28,6 +26,11 @@ the speed is more critical here.
|
||||
|
||||
import sys, os, re
|
||||
import hashlib
|
||||
if sys.argv[0][-5:] == "pydoc":
|
||||
path = os.path.dirname(os.path.dirname(sys.argv[1]))
|
||||
else:
|
||||
path = os.path.dirname(os.path.dirname(sys.argv[0]))
|
||||
sys.path.insert(0, path)
|
||||
from itertools import groupby
|
||||
|
||||
from bb import data_smart
|
||||
@@ -67,6 +70,10 @@ def keys(d):
|
||||
"""Return a list of keys in d"""
|
||||
return d.keys()
|
||||
|
||||
|
||||
__expand_var_regexp__ = re.compile(r"\${[^{}]+}")
|
||||
__expand_python_regexp__ = re.compile(r"\${@.+?}")
|
||||
|
||||
def expand(s, d, varname = None):
|
||||
"""Variable expansion using the data store"""
|
||||
return d.expand(s, varname)
|
||||
@@ -114,8 +121,8 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False):
|
||||
if d.getVarFlag(var, 'python', False) and func:
|
||||
return False
|
||||
|
||||
export = bb.utils.to_boolean(d.getVarFlag(var, "export"))
|
||||
unexport = bb.utils.to_boolean(d.getVarFlag(var, "unexport"))
|
||||
export = d.getVarFlag(var, "export", False)
|
||||
unexport = d.getVarFlag(var, "unexport", False)
|
||||
if not all and not export and not unexport and not func:
|
||||
return False
|
||||
|
||||
@@ -188,8 +195,8 @@ def emit_env(o=sys.__stdout__, d = init(), all=False):
|
||||
|
||||
def exported_keys(d):
|
||||
return (key for key in d.keys() if not key.startswith('__') and
|
||||
bb.utils.to_boolean(d.getVarFlag(key, 'export')) and
|
||||
not bb.utils.to_boolean(d.getVarFlag(key, 'unexport')))
|
||||
d.getVarFlag(key, 'export', False) and
|
||||
not d.getVarFlag(key, 'unexport', False))
|
||||
|
||||
def exported_vars(d):
|
||||
k = list(exported_keys(d))
|
||||
@@ -261,66 +268,60 @@ def emit_func_python(func, o=sys.__stdout__, d = init()):
|
||||
newdeps |= set((d.getVarFlag(dep, "vardeps") or "").split())
|
||||
newdeps -= seen
|
||||
|
||||
def build_dependencies(key, keys, mod_funcs, shelldeps, varflagsexcl, ignored_vars, d, codeparsedata):
|
||||
def handle_contains(value, contains, exclusions, d):
|
||||
newvalue = []
|
||||
if value:
|
||||
newvalue.append(str(value))
|
||||
for k in sorted(contains):
|
||||
if k in exclusions or k in ignored_vars:
|
||||
continue
|
||||
l = (d.getVar(k) or "").split()
|
||||
for item in sorted(contains[k]):
|
||||
for word in item.split():
|
||||
if not word in l:
|
||||
newvalue.append("\n%s{%s} = Unset" % (k, item))
|
||||
break
|
||||
else:
|
||||
newvalue.append("\n%s{%s} = Set" % (k, item))
|
||||
return "".join(newvalue)
|
||||
|
||||
def handle_remove(value, deps, removes, d):
|
||||
for r in sorted(removes):
|
||||
r2 = d.expandWithRefs(r, None)
|
||||
value += "\n_remove of %s" % r
|
||||
deps |= r2.references
|
||||
deps = deps | (keys & r2.execs)
|
||||
value = handle_contains(value, r2.contains, exclusions, d)
|
||||
return value
|
||||
def update_data(d):
|
||||
"""Performs final steps upon the datastore, including application of overrides"""
|
||||
d.finalize(parent = True)
|
||||
|
||||
def build_dependencies(key, keys, shelldeps, varflagsexcl, ignored_vars, d):
|
||||
deps = set()
|
||||
try:
|
||||
if key in mod_funcs:
|
||||
exclusions = set()
|
||||
moddep = bb.codeparser.modulecode_deps[key]
|
||||
value = handle_contains(moddep[4], moddep[3], exclusions, d)
|
||||
return frozenset((moddep[0] | keys & moddep[1]) - ignored_vars), value
|
||||
|
||||
if key[-1] == ']':
|
||||
vf = key[:-1].split('[')
|
||||
if vf[1] == "vardepvalueexclude":
|
||||
return deps, ""
|
||||
value, parser = d.getVarFlag(vf[0], vf[1], False, retparser=True)
|
||||
deps |= parser.references
|
||||
deps = deps | (keys & parser.execs)
|
||||
deps -= ignored_vars
|
||||
return frozenset(deps), value
|
||||
return deps, value
|
||||
varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", "vardepsexclude", "exports", "postfuncs", "prefuncs", "lineno", "filename"]) or {}
|
||||
vardeps = varflags.get("vardeps")
|
||||
exclusions = varflags.get("vardepsexclude", "").split()
|
||||
|
||||
def handle_contains(value, contains, exclusions, d):
|
||||
newvalue = []
|
||||
if value:
|
||||
newvalue.append(str(value))
|
||||
for k in sorted(contains):
|
||||
if k in exclusions or k in ignored_vars:
|
||||
continue
|
||||
l = (d.getVar(k) or "").split()
|
||||
for item in sorted(contains[k]):
|
||||
for word in item.split():
|
||||
if not word in l:
|
||||
newvalue.append("\n%s{%s} = Unset" % (k, item))
|
||||
break
|
||||
else:
|
||||
newvalue.append("\n%s{%s} = Set" % (k, item))
|
||||
return "".join(newvalue)
|
||||
|
||||
def handle_remove(value, deps, removes, d):
|
||||
for r in sorted(removes):
|
||||
r2 = d.expandWithRefs(r, None)
|
||||
value += "\n_remove of %s" % r
|
||||
deps |= r2.references
|
||||
deps = deps | (keys & r2.execs)
|
||||
return value
|
||||
|
||||
if "vardepvalue" in varflags:
|
||||
value = varflags.get("vardepvalue")
|
||||
elif varflags.get("func"):
|
||||
if varflags.get("python"):
|
||||
value = codeparsedata.getVarFlag(key, "_content", False)
|
||||
value = d.getVarFlag(key, "_content", False)
|
||||
parser = bb.codeparser.PythonParser(key, logger)
|
||||
parser.parse_python(value, filename=varflags.get("filename"), lineno=varflags.get("lineno"))
|
||||
deps = deps | parser.references
|
||||
deps = deps | (keys & parser.execs)
|
||||
value = handle_contains(value, parser.contains, exclusions, d)
|
||||
else:
|
||||
value, parsedvar = codeparsedata.getVarFlag(key, "_content", False, retparser=True)
|
||||
value, parsedvar = d.getVarFlag(key, "_content", False, retparser=True)
|
||||
parser = bb.codeparser.ShellParser(key, logger)
|
||||
parser.parse_shell(parsedvar.value)
|
||||
deps = deps | shelldeps
|
||||
@@ -362,43 +363,36 @@ def build_dependencies(key, keys, mod_funcs, shelldeps, varflagsexcl, ignored_va
|
||||
|
||||
deps |= set((vardeps or "").split())
|
||||
deps -= set(exclusions)
|
||||
deps -= ignored_vars
|
||||
except bb.parse.SkipRecipe:
|
||||
raise
|
||||
except Exception as e:
|
||||
bb.warn("Exception during build_dependencies for %s" % key)
|
||||
raise
|
||||
return frozenset(deps), value
|
||||
return deps, value
|
||||
#bb.note("Variable %s references %s and calls %s" % (key, str(deps), str(execs)))
|
||||
#d.setVarFlag(key, "vardeps", deps)
|
||||
|
||||
def generate_dependencies(d, ignored_vars):
|
||||
|
||||
mod_funcs = set(bb.codeparser.modulecode_deps.keys())
|
||||
keys = set(key for key in d if not key.startswith("__")) | mod_funcs
|
||||
shelldeps = set(key for key in d.getVar("__exportlist", False) if bb.utils.to_boolean(d.getVarFlag(key, "export")) and not bb.utils.to_boolean(d.getVarFlag(key, "unexport")))
|
||||
keys = set(key for key in d if not key.startswith("__"))
|
||||
shelldeps = set(key for key in d.getVar("__exportlist", False) if d.getVarFlag(key, "export", False) and not d.getVarFlag(key, "unexport", False))
|
||||
varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS')
|
||||
|
||||
codeparserd = d.createCopy()
|
||||
for forced in (d.getVar('BB_HASH_CODEPARSER_VALS') or "").split():
|
||||
key, value = forced.split("=", 1)
|
||||
codeparserd.setVar(key, value)
|
||||
|
||||
deps = {}
|
||||
values = {}
|
||||
|
||||
tasklist = d.getVar('__BBTASKS', False) or []
|
||||
for task in tasklist:
|
||||
deps[task], values[task] = build_dependencies(task, keys, mod_funcs, shelldeps, varflagsexcl, ignored_vars, d, codeparserd)
|
||||
deps[task], values[task] = build_dependencies(task, keys, shelldeps, varflagsexcl, ignored_vars, d)
|
||||
newdeps = deps[task]
|
||||
seen = set()
|
||||
while newdeps:
|
||||
nextdeps = newdeps
|
||||
nextdeps = newdeps - ignored_vars
|
||||
seen |= nextdeps
|
||||
newdeps = set()
|
||||
for dep in nextdeps:
|
||||
if dep not in deps:
|
||||
deps[dep], values[dep] = build_dependencies(dep, keys, mod_funcs, shelldeps, varflagsexcl, ignored_vars, d, codeparserd)
|
||||
deps[dep], values[dep] = build_dependencies(dep, keys, shelldeps, varflagsexcl, ignored_vars, d)
|
||||
newdeps |= deps[dep]
|
||||
newdeps -= seen
|
||||
#print "For %s: %s" % (task, str(deps[task]))
|
||||
@@ -417,6 +411,7 @@ def generate_dependency_hash(tasklist, gendeps, lookupcache, ignored_vars, fn):
|
||||
else:
|
||||
data = [data]
|
||||
|
||||
gendeps[task] -= ignored_vars
|
||||
newdeps = gendeps[task]
|
||||
seen = set()
|
||||
while newdeps:
|
||||
@@ -424,6 +419,9 @@ def generate_dependency_hash(tasklist, gendeps, lookupcache, ignored_vars, fn):
|
||||
seen |= nextdeps
|
||||
newdeps = set()
|
||||
for dep in nextdeps:
|
||||
if dep in ignored_vars:
|
||||
continue
|
||||
gendeps[dep] -= ignored_vars
|
||||
newdeps |= gendeps[dep]
|
||||
newdeps -= seen
|
||||
|
||||
@@ -435,13 +433,13 @@ def generate_dependency_hash(tasklist, gendeps, lookupcache, ignored_vars, fn):
|
||||
data.append(str(var))
|
||||
k = fn + ":" + task
|
||||
basehash[k] = hashlib.sha256("".join(data).encode("utf-8")).hexdigest()
|
||||
taskdeps[task] = frozenset(seen)
|
||||
taskdeps[task] = alldeps
|
||||
|
||||
return taskdeps, basehash
|
||||
|
||||
def inherits_class(klass, d):
|
||||
val = d.getVar('__inherit_cache', False) or []
|
||||
needle = '/%s.bbclass' % klass
|
||||
needle = os.path.join('classes', '%s.bbclass' % klass)
|
||||
for v in val:
|
||||
if v.endswith(needle):
|
||||
return True
|
||||
|
||||
@@ -16,10 +16,7 @@ BitBake build tools.
|
||||
#
|
||||
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
|
||||
|
||||
import builtins
|
||||
import copy
|
||||
import re
|
||||
import sys
|
||||
import copy, re, sys, traceback
|
||||
from collections.abc import MutableMapping
|
||||
import logging
|
||||
import hashlib
|
||||
@@ -32,7 +29,7 @@ logger = logging.getLogger("BitBake.Data")
__setvar_keyword__ = [":append", ":prepend", ":remove"]
__setvar_regexp__ = re.compile(r'(?P<base>.*?)(?P<keyword>:append|:prepend|:remove)(:(?P<add>[^A-Z]*))?$')
__expand_var_regexp__ = re.compile(r"\${[a-zA-Z0-9\-_+./~:]+?}")
__expand_python_regexp__ = re.compile(r"\${@(?:{.*?}|.)+?}")
__expand_python_regexp__ = re.compile(r"\${@.+?}")
__whitespace_split__ = re.compile(r'(\s)')
__override_regexp__ = re.compile(r'[a-z0-9]+')
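The difference between the two __expand_python_regexp__ variants shown above can be demonstrated in isolation (an illustrative snippet, not part of the diff):

import re

newer = re.compile(r"\${@(?:{.*?}|.)+?}")
older = re.compile(r"\${@.+?}")
s = "${@ {'x': 1}['x'] }"
print(newer.findall(s))  # matches the whole inline Python expression
print(older.findall(s))  # stops at the first '}', truncating the expression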
@@ -95,11 +92,10 @@ def infer_caller_details(loginfo, parent = False, varval = True):
|
||||
loginfo['func'] = func
|
||||
|
||||
class VariableParse:
|
||||
def __init__(self, varname, d, unexpanded_value = None, val = None):
|
||||
def __init__(self, varname, d, val = None):
|
||||
self.varname = varname
|
||||
self.d = d
|
||||
self.value = val
|
||||
self.unexpanded_value = unexpanded_value
|
||||
|
||||
self.references = set()
|
||||
self.execs = set()
|
||||
@@ -123,11 +119,6 @@ class VariableParse:
|
||||
else:
|
||||
code = match.group()[3:-1]
|
||||
|
||||
# Do not run code that contains one or more unexpanded variables
# instead return the code with the characters we removed put back
if __expand_var_regexp__.findall(code):
return "${@" + code + "}"

if self.varname:
|
||||
varname = 'Var <%s>' % self.varname
|
||||
else:
|
||||
@@ -153,21 +144,19 @@ class VariableParse:
|
||||
value = utils.better_eval(codeobj, DataContext(self.d), {'d' : self.d})
|
||||
return str(value)
|
||||
|
||||
class DataContext(dict):
|
||||
excluded = set([i for i in dir(builtins) if not i.startswith('_')] + ['oe'])
|
||||
|
||||
class DataContext(dict):
|
||||
def __init__(self, metadata, **kwargs):
|
||||
self.metadata = metadata
|
||||
dict.__init__(self, **kwargs)
|
||||
self['d'] = metadata
|
||||
self.context = set(bb.utils.get_context())
|
||||
|
||||
def __missing__(self, key):
|
||||
if key in self.excluded or key in self.context:
|
||||
# Skip commonly accessed invalid variables
|
||||
if key in ['bb', 'oe', 'int', 'bool', 'time', 'str', 'os']:
|
||||
raise KeyError(key)
|
||||
|
||||
value = self.metadata.getVar(key)
|
||||
if value is None:
|
||||
if value is None or self.metadata.getVarFlag(key, 'func', False):
|
||||
raise KeyError(key)
|
||||
else:
|
||||
return value
|
||||
@@ -272,9 +261,12 @@ class VariableHistory(object):
|
||||
return
|
||||
if 'op' not in loginfo or not loginfo['op']:
|
||||
loginfo['op'] = 'set'
|
||||
if 'detail' in loginfo:
|
||||
loginfo['detail'] = str(loginfo['detail'])
|
||||
if 'variable' not in loginfo or 'file' not in loginfo:
|
||||
raise ValueError("record() missing variable or file.")
|
||||
var = loginfo['variable']
|
||||
|
||||
if var not in self.variables:
|
||||
self.variables[var] = []
|
||||
if not isinstance(self.variables[var], list):
|
||||
@@ -333,8 +325,7 @@ class VariableHistory(object):
|
||||
flag = '[%s] ' % (event['flag'])
|
||||
else:
|
||||
flag = ''
|
||||
o.write("# %s %s:%s%s\n# %s\"%s\"\n" % \
|
||||
(event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', str(event['detail']))))
|
||||
o.write("# %s %s:%s%s\n# %s\"%s\"\n" % (event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', event['detail'])))
|
||||
if len(history) > 1:
|
||||
o.write("# pre-expansion value:\n")
|
||||
o.write('# "%s"\n' % (commentVal))
|
||||
@@ -388,7 +379,7 @@ class VariableHistory(object):
|
||||
if isset and event['op'] == 'set?':
|
||||
continue
|
||||
isset = True
|
||||
items = d.expand(str(event['detail'])).split()
|
||||
items = d.expand(event['detail']).split()
|
||||
for item in items:
|
||||
# This is a little crude but is belt-and-braces to avoid us
|
||||
# having to handle every possible operation type specifically
|
||||
@@ -451,9 +442,9 @@ class DataSmart(MutableMapping):
|
||||
def expandWithRefs(self, s, varname):
|
||||
|
||||
if not isinstance(s, str): # sanity check
|
||||
return VariableParse(varname, self, s, s)
|
||||
return VariableParse(varname, self, s)
|
||||
|
||||
varparse = VariableParse(varname, self, s)
|
||||
varparse = VariableParse(varname, self)
|
||||
|
||||
while s.find('${') != -1:
|
||||
olds = s
|
||||
@@ -485,19 +476,24 @@ class DataSmart(MutableMapping):
|
||||
def expand(self, s, varname = None):
|
||||
return self.expandWithRefs(s, varname).value
|
||||
|
||||
def finalize(self, parent = False):
|
||||
return
|
||||
|
||||
def internal_finalize(self, parent = False):
|
||||
"""Performs final steps upon the datastore, including application of overrides"""
|
||||
self.overrides = None
|
||||
|
||||
def need_overrides(self):
|
||||
if self.overrides is not None:
|
||||
return
|
||||
if self.inoverride:
|
||||
return
|
||||
overrride_stack = []
|
||||
for count in range(5):
|
||||
self.inoverride = True
|
||||
# Can end up here recursively so setup dummy values
|
||||
self.overrides = []
|
||||
self.overridesset = set()
|
||||
self.overrides = (self.getVar("OVERRIDES") or "").split(":") or []
|
||||
overrride_stack.append(self.overrides)
|
||||
self.overridesset = set(self.overrides)
|
||||
self.inoverride = False
|
||||
self.expand_cache = {}
|
||||
@@ -507,7 +503,7 @@ class DataSmart(MutableMapping):
|
||||
self.overrides = newoverrides
|
||||
self.overridesset = set(self.overrides)
|
||||
else:
|
||||
bb.fatal("Overrides could not be expanded into a stable state after 5 iterations, overrides must be being referenced by other overridden variables in some recursive fashion. Please provide your configuration to bitbake-devel so we can laugh, er, I mean try and understand how to make it work. The list of failing override expansions: %s" % "\n".join(str(s) for s in overrride_stack))
|
||||
bb.fatal("Overrides could not be expanded into a stable state after 5 iterations, overrides must be being referenced by other overridden variables in some recursive fashion. Please provide your configuration to bitbake-devel so we can laugh, er, I mean try and understand how to make it work.")
|
||||
|
||||
def initVar(self, var):
|
||||
self.expand_cache = {}
|
||||
@@ -518,18 +514,18 @@ class DataSmart(MutableMapping):
|
||||
dest = self.dict
|
||||
while dest:
|
||||
if var in dest:
|
||||
return dest[var]
|
||||
return dest[var], self.overridedata.get(var, None)
|
||||
|
||||
if "_data" not in dest:
|
||||
break
|
||||
dest = dest["_data"]
|
||||
return None
|
||||
return None, self.overridedata.get(var, None)
|
||||
|
||||
def _makeShadowCopy(self, var):
|
||||
if var in self.dict:
|
||||
return
|
||||
|
||||
local_var = self._findVar(var)
|
||||
local_var, _ = self._findVar(var)
|
||||
|
||||
if local_var:
|
||||
self.dict[var] = copy.copy(local_var)
|
||||
@@ -637,7 +633,7 @@ class DataSmart(MutableMapping):
|
||||
nextnew.update(vardata.references)
|
||||
nextnew.update(vardata.contains.keys())
|
||||
new = nextnew
|
||||
self.overrides = None
|
||||
self.internal_finalize(True)
|
||||
|
||||
def _setvar_update_overrides(self, var, **loginfo):
|
||||
# aka pay the cookie monster
|
||||
@@ -724,7 +720,7 @@ class DataSmart(MutableMapping):
|
||||
if ':' in var:
|
||||
override = var[var.rfind(':')+1:]
|
||||
shortvar = var[:var.rfind(':')]
|
||||
while override and __override_regexp__.match(override):
|
||||
while override and override.islower():
|
||||
try:
|
||||
if shortvar in self.overridedata:
|
||||
# Force CoW by recreating the list first
|
||||
@@ -779,18 +775,13 @@ class DataSmart(MutableMapping):
|
||||
return None
|
||||
cachename = var + "[" + flag + "]"
|
||||
|
||||
if not expand and retparser and cachename in self.expand_cache:
|
||||
return self.expand_cache[cachename].unexpanded_value, self.expand_cache[cachename]
|
||||
|
||||
if expand and cachename in self.expand_cache:
|
||||
return self.expand_cache[cachename].value
|
||||
|
||||
local_var = self._findVar(var)
|
||||
local_var, overridedata = self._findVar(var)
|
||||
value = None
|
||||
removes = set()
|
||||
if flag == "_content" and not parsing:
|
||||
overridedata = self.overridedata.get(var, None)
|
||||
if flag == "_content" and not parsing and overridedata is not None:
|
||||
if flag == "_content" and overridedata is not None and not parsing:
|
||||
match = False
|
||||
active = {}
|
||||
self.need_overrides()
|
||||
@@ -827,8 +818,6 @@ class DataSmart(MutableMapping):
|
||||
value = copy.copy(local_var[flag])
|
||||
elif flag == "_content" and "_defaultval" in local_var and not noweakdefault:
|
||||
value = copy.copy(local_var["_defaultval"])
|
||||
elif "_defaultval_flag_"+flag in local_var and not noweakdefault:
|
||||
value = copy.copy(local_var["_defaultval_flag_"+flag])
|
||||
|
||||
|
||||
if flag == "_content" and local_var is not None and ":append" in local_var and not parsing:
|
||||
@@ -907,7 +896,7 @@ class DataSmart(MutableMapping):
|
||||
def delVarFlag(self, var, flag, **loginfo):
|
||||
self.expand_cache = {}
|
||||
|
||||
local_var = self._findVar(var)
|
||||
local_var, _ = self._findVar(var)
|
||||
if not local_var:
|
||||
return
|
||||
if not var in self.dict:
|
||||
@@ -950,7 +939,7 @@ class DataSmart(MutableMapping):
|
||||
self.dict[var][i] = flags[i]
|
||||
|
||||
def getVarFlags(self, var, expand = False, internalflags=False):
|
||||
local_var = self._findVar(var)
|
||||
local_var, _ = self._findVar(var)
|
||||
flags = {}
|
||||
|
||||
if local_var:
|
||||
|
||||
@@ -19,6 +19,7 @@ import sys
import threading
import traceback

import bb.exceptions
import bb.utils

# This is the pid for which we should generate the event. This is set when
@@ -67,39 +68,29 @@ _catchall_handlers = {}
_eventfilter = None
_uiready = False
_thread_lock = threading.Lock()
_heartbeat_enabled = False
_should_exit = threading.Event()
_thread_lock_enabled = False

if hasattr(__builtins__, '__setitem__'):
builtins = __builtins__
else:
builtins = __builtins__.__dict__

def enable_threadlock():
# Always needed now
return
global _thread_lock_enabled
_thread_lock_enabled = True

def disable_threadlock():
# Always needed now
return

def enable_heartbeat():
global _heartbeat_enabled
_heartbeat_enabled = True

def disable_heartbeat():
global _heartbeat_enabled
_heartbeat_enabled = False

#
# In long running code, this function should be called periodically
# to check if we should exit due to an interruption (e.g. Ctrl+C from the UI)
#
def check_for_interrupts(d):
global _should_exit
if _should_exit.is_set():
bb.warn("Exiting due to interrupt.")
raise bb.BBHandledException()
global _thread_lock_enabled
_thread_lock_enabled = False

def execute_handler(name, handler, event, d):
event.data = d
addedd = False
if 'd' not in builtins:
builtins['d'] = d
addedd = True
try:
ret = handler(event, d)
ret = handler(event)
except (bb.parse.SkipRecipe, bb.BBHandledException):
raise
except Exception:
@@ -113,7 +104,8 @@ def execute_handler(name, handler, event, d):
raise
finally:
del event.data

if addedd:
del builtins['d']

def fire_class_handlers(event, d):
if isinstance(event, logging.LogRecord):
@@ -140,14 +132,8 @@ def print_ui_queue():
if not _uiready:
from bb.msg import BBLogFormatter
# Flush any existing buffered content
try:
sys.stdout.flush()
except:
pass
try:
sys.stderr.flush()
except:
pass
sys.stdout.flush()
sys.stderr.flush()
stdout = logging.StreamHandler(sys.stdout)
stderr = logging.StreamHandler(sys.stderr)
formatter = BBLogFormatter("%(levelname)s: %(message)s")
@@ -188,30 +174,36 @@ def print_ui_queue():

def fire_ui_handlers(event, d):
global _thread_lock
global _thread_lock_enabled

if not _uiready:
# No UI handlers registered yet, queue up the messages
ui_queue.append(event)
return

with bb.utils.lock_timeout(_thread_lock):
errors = []
for h in _ui_handlers:
#print "Sending event %s" % event
try:
if not _ui_logfilters[h].filter(event):
continue
# We use pickle here since it better handles object instances
# which xmlrpc's marshaller does not. Events *must* be serializable
# by pickle.
if hasattr(_ui_handlers[h].event, "sendpickle"):
_ui_handlers[h].event.sendpickle((pickle.dumps(event)))
else:
_ui_handlers[h].event.send(event)
except:
errors.append(h)
for h in errors:
del _ui_handlers[h]
if _thread_lock_enabled:
_thread_lock.acquire()

errors = []
for h in _ui_handlers:
#print "Sending event %s" % event
try:
if not _ui_logfilters[h].filter(event):
continue
# We use pickle here since it better handles object instances
# which xmlrpc's marshaller does not. Events *must* be serializable
# by pickle.
if hasattr(_ui_handlers[h].event, "sendpickle"):
_ui_handlers[h].event.sendpickle((pickle.dumps(event)))
else:
_ui_handlers[h].event.send(event)
except:
errors.append(h)
for h in errors:
del _ui_handlers[h]

if _thread_lock_enabled:
_thread_lock.release()

def fire(event, d):
"""Fire off an Event"""
@@ -255,16 +247,15 @@ def register(name, handler, mask=None, filename=None, lineno=None, data=None):
if handler is not None:
# handle string containing python code
if isinstance(handler, str):
tmp = "def %s(e, d):\n%s" % (name, handler)
# Inject empty lines to make code match lineno in filename
if lineno is not None:
tmp = "\n" * (lineno-1) + tmp
tmp = "def %s(e):\n%s" % (name, handler)
try:
code = bb.methodpool.compile_cache(tmp)
if not code:
if filename is None:
filename = "%s(e, d)" % name
filename = "%s(e)" % name
code = compile(tmp, filename, "exec", ast.PyCF_ONLY_AST)
if lineno is not None:
ast.increment_lineno(code, lineno-1)
code = compile(code, filename, "exec")
bb.methodpool.compile_cache_add(tmp, code)
except SyntaxError:
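The register() hunk above compiles string handlers into functions; on the newer side of this diff a handler takes a single event argument and reaches the datastore through event.data. A minimal sketch of registering a handler programmatically under that assumption (handler name and mask value are illustrative only, not taken from the diff):

    import bb
    import bb.event

    def my_build_started_handler(e):
        # 'e' is the event instance; execute_handler() assigns the datastore
        # to e.data before calling us, so no separate 'd' argument is needed
        bb.note("saw event: %s" % e.__class__.__name__)

    # mask limits delivery to the named event classes
    bb.event.register("my_build_started_handler", my_build_started_handler,
                      mask=["bb.event.BuildStarted"])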
@@ -326,23 +317,21 @@ def set_eventfilter(func):
_eventfilter = func

def register_UIHhandler(handler, mainui=False):
with bb.utils.lock_timeout(_thread_lock):
bb.event._ui_handler_seq = bb.event._ui_handler_seq + 1
_ui_handlers[_ui_handler_seq] = handler
level, debug_domains = bb.msg.constructLogOptions()
_ui_logfilters[_ui_handler_seq] = UIEventFilter(level, debug_domains)
if mainui:
global _uiready
_uiready = _ui_handler_seq
return _ui_handler_seq
bb.event._ui_handler_seq = bb.event._ui_handler_seq + 1
_ui_handlers[_ui_handler_seq] = handler
level, debug_domains = bb.msg.constructLogOptions()
_ui_logfilters[_ui_handler_seq] = UIEventFilter(level, debug_domains)
if mainui:
global _uiready
_uiready = _ui_handler_seq
return _ui_handler_seq

def unregister_UIHhandler(handlerNum, mainui=False):
if mainui:
global _uiready
_uiready = False
with bb.utils.lock_timeout(_thread_lock):
if handlerNum in _ui_handlers:
del _ui_handlers[handlerNum]
if handlerNum in _ui_handlers:
del _ui_handlers[handlerNum]
return

def get_uihandler():
@@ -758,7 +747,13 @@ class LogHandler(logging.Handler):

def emit(self, record):
if record.exc_info:
record.bb_exc_formatted = traceback.format_exception(*record.exc_info)
etype, value, tb = record.exc_info
if hasattr(tb, 'tb_next'):
tb = list(bb.exceptions.extract_traceback(tb, context=3))
# Need to turn the value into something the logging system can pickle
record.bb_exc_info = (etype, value, tb)
record.bb_exc_formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
value = str(value)
record.exc_info = None
fire(record, None)

@@ -850,19 +845,3 @@ class FindSigInfoResult(Event):
def __init__(self, result):
Event.__init__(self)
self.result = result

class GetTaskSignatureResult(Event):
"""
Event to return results from GetTaskSignatures command
"""
def __init__(self, sig):
Event.__init__(self)
self.sig = sig

class ParseError(Event):
"""
Event to indicate parse failed
"""
def __init__(self, msg):
super().__init__()
self._msg = msg
94
bitbake/lib/bb/exceptions.py
Normal file
@@ -0,0 +1,94 @@
#
# SPDX-License-Identifier: GPL-2.0-only
#

import inspect
import traceback
import bb.namedtuple_with_abc
from collections import namedtuple


class TracebackEntry(namedtuple.abc):
    """Pickleable representation of a traceback entry"""
    _fields = 'filename lineno function args code_context index'
    _header = '  File "{0.filename}", line {0.lineno}, in {0.function}{0.args}'

    def format(self, formatter=None):
        if not self.code_context:
            return self._header.format(self) + '\n'

        formatted = [self._header.format(self) + ':\n']

        for lineindex, line in enumerate(self.code_context):
            if formatter:
                line = formatter(line)

            if lineindex == self.index:
                formatted.append(' >%s' % line)
            else:
                formatted.append(' %s' % line)
        return formatted

    def __str__(self):
        return ''.join(self.format())

def _get_frame_args(frame):
    """Get the formatted arguments and class (if available) for a frame"""
    arginfo = inspect.getargvalues(frame)

    try:
        if not arginfo.args:
            return '', None
    # There have been reports from the field of python 2.6 which doesn't
    # return a namedtuple here but simply a tuple so fallback gracefully if
    # args isn't present.
    except AttributeError:
        return '', None

    firstarg = arginfo.args[0]
    if firstarg == 'self':
        self = arginfo.locals['self']
        cls = self.__class__.__name__

        arginfo.args.pop(0)
        del arginfo.locals['self']
    else:
        cls = None

    formatted = inspect.formatargvalues(*arginfo)
    return formatted, cls

def extract_traceback(tb, context=1):
    frames = inspect.getinnerframes(tb, context)
    for frame, filename, lineno, function, code_context, index in frames:
        formatted_args, cls = _get_frame_args(frame)
        if cls:
            function = '%s.%s' % (cls, function)
        yield TracebackEntry(filename, lineno, function, formatted_args,
                             code_context, index)

def format_extracted(extracted, formatter=None, limit=None):
    if limit:
        extracted = extracted[-limit:]

    formatted = []
    for tracebackinfo in extracted:
        formatted.extend(tracebackinfo.format(formatter))
    return formatted


def format_exception(etype, value, tb, context=1, limit=None, formatter=None):
    formatted = ['Traceback (most recent call last):\n']

    if hasattr(tb, 'tb_next'):
        tb = extract_traceback(tb, context)

    formatted.extend(format_extracted(tb, formatter, limit))
    formatted.extend(traceback.format_exception_only(etype, value))
    return formatted

def to_string(exc):
    if isinstance(exc, SystemExit):
        if not isinstance(exc.code, str):
            return 'Exited with "%d"' % exc.code
    return str(exc)
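The restored bb/exceptions.py module above builds pickleable TracebackEntry records from a live traceback. A small usage sketch, mirroring how the LogHandler.emit() hunk earlier uses it (the caller and the failing expression are hypothetical):

    import sys
    import bb.exceptions

    try:
        1 / 0
    except Exception:
        etype, value, tb = sys.exc_info()
        # context=3 keeps three lines of source around each frame, as emit() does
        entries = list(bb.exceptions.extract_traceback(tb, context=3))
        formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
        print("".join(formatted))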
@@ -23,18 +23,17 @@ import collections
|
||||
import subprocess
|
||||
import pickle
|
||||
import errno
|
||||
import bb.utils
|
||||
import bb.persist_data, bb.utils
|
||||
import bb.checksum
|
||||
import bb.process
|
||||
import bb.event
|
||||
|
||||
__version__ = "2"
|
||||
_checksum_cache = bb.checksum.FileChecksumCache()
|
||||
_revisions_cache = bb.checksum.RevisionsCache()
|
||||
|
||||
logger = logging.getLogger("BitBake.Fetcher")
|
||||
|
||||
CHECKSUM_LIST = [ "goh1", "md5", "sha256", "sha1", "sha384", "sha512" ]
|
||||
CHECKSUM_LIST = [ "md5", "sha256", "sha1", "sha384", "sha512" ]
|
||||
SHOWN_CHECKSUM_LIST = ["sha256"]
|
||||
|
||||
class BBFetchException(Exception):
|
||||
@@ -238,7 +237,7 @@ class URI(object):
|
||||
# to RFC compliant URL format. E.g.:
|
||||
# file://foo.diff -> file:foo.diff
|
||||
if urlp.scheme in self._netloc_forbidden:
|
||||
uri = re.sub(r"(?<=:)//(?!/)", "", uri, count=1)
|
||||
uri = re.sub("(?<=:)//(?!/)", "", uri, 1)
|
||||
reparse = 1
|
||||
|
||||
if reparse:
|
||||
@@ -291,12 +290,12 @@ class URI(object):
|
||||
|
||||
def _param_str_split(self, string, elmdelim, kvdelim="="):
|
||||
ret = collections.OrderedDict()
|
||||
for k, v in [x.split(kvdelim, 1) if kvdelim in x else (x, None) for x in string.split(elmdelim) if x]:
|
||||
for k, v in [x.split(kvdelim, 1) for x in string.split(elmdelim) if x]:
|
||||
ret[k] = v
|
||||
return ret
|
||||
|
||||
def _param_str_join(self, dict_, elmdelim, kvdelim="="):
|
||||
return elmdelim.join([kvdelim.join([k, v]) if v else k for k, v in dict_.items()])
|
||||
return elmdelim.join([kvdelim.join([k, v]) for k, v in dict_.items()])
|
||||
|
||||
@property
|
||||
def hostport(self):
|
||||
@@ -389,7 +388,7 @@ def decodeurl(url):
|
||||
if s:
|
||||
if not '=' in s:
|
||||
raise MalformedUrl(url, "The URL: '%s' is invalid: parameter %s does not specify a value (missing '=')" % (url, s))
|
||||
s1, s2 = s.split('=', 1)
|
||||
s1, s2 = s.split('=')
|
||||
p[s1] = s2
|
||||
|
||||
return type, host, urllib.parse.unquote(path), user, pswd, p
|
||||
@@ -461,7 +460,7 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
|
||||
for k in replacements:
|
||||
uri_replace_decoded[loc] = uri_replace_decoded[loc].replace(k, replacements[k])
|
||||
#bb.note("%s %s %s" % (regexp, uri_replace_decoded[loc], uri_decoded[loc]))
|
||||
result_decoded[loc] = re.sub(regexp, uri_replace_decoded[loc], uri_decoded[loc], count=1)
|
||||
result_decoded[loc] = re.sub(regexp, uri_replace_decoded[loc], uri_decoded[loc], 1)
|
||||
if loc == 2:
|
||||
# Handle path manipulations
|
||||
basename = None
|
||||
@@ -470,7 +469,6 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
|
||||
basename = os.path.basename(mirrortarball)
|
||||
# Kill parameters, they make no sense for mirror tarballs
|
||||
uri_decoded[5] = {}
|
||||
uri_find_decoded[5] = {}
|
||||
elif ud.localpath and ud.method.supports_checksum(ud):
|
||||
basename = os.path.basename(ud.localpath)
|
||||
if basename:
|
||||
@@ -494,23 +492,18 @@ methods = []
|
||||
urldata_cache = {}
|
||||
saved_headrevs = {}
|
||||
|
||||
def fetcher_init(d, servercontext=True):
|
||||
def fetcher_init(d):
|
||||
"""
|
||||
Called to initialize the fetchers once the configuration data is known.
|
||||
Calls before this must not hit the cache.
|
||||
"""
|
||||
|
||||
_checksum_cache.init_cache(d.getVar("BB_CACHEDIR"))
|
||||
_revisions_cache.init_cache(d.getVar("BB_CACHEDIR"))
|
||||
|
||||
if not servercontext:
|
||||
return
|
||||
|
||||
revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
|
||||
try:
|
||||
# fetcher_init is called multiple times, so make sure we only save the
|
||||
# revs the first time it is called.
|
||||
if not bb.fetch2.saved_headrevs:
|
||||
bb.fetch2.saved_headrevs = _revisions_cache.get_revs()
|
||||
bb.fetch2.saved_headrevs = dict(revs)
|
||||
except:
|
||||
pass
|
||||
|
||||
@@ -520,10 +513,11 @@ def fetcher_init(d, servercontext=True):
|
||||
logger.debug("Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
elif srcrev_policy == "clear":
|
||||
logger.debug("Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
_revisions_cache.clear_cache()
|
||||
revs.clear()
|
||||
else:
|
||||
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
|
||||
|
||||
_checksum_cache.init_cache(d)
|
||||
|
||||
for m in methods:
|
||||
if hasattr(m, "init"):
|
||||
@@ -531,11 +525,9 @@ def fetcher_init(d, servercontext=True):
|
||||
|
||||
def fetcher_parse_save():
|
||||
_checksum_cache.save_extras()
|
||||
_revisions_cache.save_extras()
|
||||
|
||||
def fetcher_parse_done():
|
||||
_checksum_cache.save_merge()
|
||||
_revisions_cache.save_merge()
|
||||
|
||||
def fetcher_compare_revisions(d):
|
||||
"""
|
||||
@@ -543,7 +535,7 @@ def fetcher_compare_revisions(d):
|
||||
when bitbake was started and return true if they have changed.
|
||||
"""
|
||||
|
||||
headrevs = _revisions_cache.get_revs()
|
||||
headrevs = dict(bb.persist_data.persist('BB_URI_HEADREVS', d))
|
||||
return headrevs != bb.fetch2.saved_headrevs
|
||||
|
||||
def mirror_from_string(data):
|
||||
@@ -553,7 +545,7 @@ def mirror_from_string(data):
|
||||
bb.warn('Invalid mirror data %s, should have paired members.' % data)
|
||||
return list(zip(*[iter(mirrors)]*2))
|
||||
|
||||
def verify_checksum(ud, d, precomputed={}, localpath=None, fatal_nochecksum=True):
|
||||
def verify_checksum(ud, d, precomputed={}):
|
||||
"""
|
||||
verify the MD5 and SHA256 checksum for downloaded src
|
||||
|
||||
@@ -567,19 +559,17 @@ def verify_checksum(ud, d, precomputed={}, localpath=None, fatal_nochecksum=True
|
||||
file against those in the recipe each time, rather than only after
|
||||
downloading. See https://bugzilla.yoctoproject.org/show_bug.cgi?id=5571.
|
||||
"""
|
||||
|
||||
if ud.ignore_checksums or not ud.method.supports_checksum(ud):
|
||||
return {}
|
||||
|
||||
if localpath is None:
|
||||
localpath = ud.localpath
|
||||
|
||||
def compute_checksum_info(checksum_id):
|
||||
checksum_name = getattr(ud, "%s_name" % checksum_id)
|
||||
|
||||
if checksum_id in precomputed:
|
||||
checksum_data = precomputed[checksum_id]
|
||||
else:
|
||||
checksum_data = getattr(bb.utils, "%s_file" % checksum_id)(localpath)
|
||||
checksum_data = getattr(bb.utils, "%s_file" % checksum_id)(ud.localpath)
|
||||
|
||||
checksum_expected = getattr(ud, "%s_expected" % checksum_id)
|
||||
|
||||
@@ -605,13 +595,17 @@ def verify_checksum(ud, d, precomputed={}, localpath=None, fatal_nochecksum=True
|
||||
checksum_lines = ["SRC_URI[%s] = \"%s\"" % (ci["name"], ci["data"])]
|
||||
|
||||
# If no checksum has been provided
|
||||
if fatal_nochecksum and ud.method.recommends_checksum(ud) and all(ci["expected"] is None for ci in checksum_infos):
|
||||
if ud.method.recommends_checksum(ud) and all(ci["expected"] is None for ci in checksum_infos):
|
||||
messages = []
|
||||
strict = d.getVar("BB_STRICT_CHECKSUM") or "0"
|
||||
|
||||
# If strict checking enabled and neither sum defined, raise error
|
||||
if strict == "1":
|
||||
raise NoChecksumError("\n".join(checksum_lines))
|
||||
messages.append("No checksum specified for '%s', please add at " \
|
||||
"least one to the recipe:" % ud.localpath)
|
||||
messages.extend(checksum_lines)
|
||||
logger.error("\n".join(messages))
|
||||
raise NoChecksumError("Missing SRC_URI checksum", ud.url)
|
||||
|
||||
bb.event.fire(MissingChecksumEvent(ud.url, **checksum_event), d)
|
||||
|
||||
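The verify_checksum() hunks above resolve each checksum id to a bb.utils helper by name before comparing against the recipe values. A short sketch of that dispatch, assuming a plain local file and the older checksum list shown in this diff (whether every newer id, such as goh1, has a matching bb.utils helper is not shown here):

    import bb.utils

    CHECKSUM_LIST = ["md5", "sha256", "sha1", "sha384", "sha512"]

    def compute_checksums(localpath):
        # mirrors compute_checksum_info(): "sha256" -> bb.utils.sha256_file(), etc.
        return {cid: getattr(bb.utils, "%s_file" % cid)(localpath)
                for cid in CHECKSUM_LIST}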
@@ -633,7 +627,7 @@ def verify_checksum(ud, d, precomputed={}, localpath=None, fatal_nochecksum=True
|
||||
for ci in checksum_infos:
|
||||
if ci["expected"] and ci["expected"] != ci["data"]:
|
||||
messages.append("File: '%s' has %s checksum '%s' when '%s' was " \
|
||||
"expected" % (localpath, ci["id"], ci["data"], ci["expected"]))
|
||||
"expected" % (ud.localpath, ci["id"], ci["data"], ci["expected"]))
|
||||
bad_checksum = ci["data"]
|
||||
|
||||
if bad_checksum:
|
||||
@@ -751,16 +745,13 @@ def subprocess_setup():
|
||||
# SIGPIPE errors are known issues with gzip/bash
|
||||
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
|
||||
|
||||
def mark_recipe_nocache(d):
|
||||
def get_autorev(d):
|
||||
# only not cache src rev in autorev case
|
||||
if d.getVar('BB_SRCREV_POLICY') != "cache":
|
||||
d.setVar('BB_DONT_CACHE', '1')
|
||||
|
||||
def get_autorev(d):
|
||||
mark_recipe_nocache(d)
|
||||
d.setVar("__BBAUTOREV_SEEN", True)
|
||||
return "AUTOINC"
|
||||
|
||||
def _get_srcrev(d, method_name='sortable_revision'):
|
||||
def get_srcrev(d, method_name='sortable_revision'):
|
||||
"""
|
||||
Return the revision string, usually for use in the version string (PV) of the current package
|
||||
Most packages usually only have one SCM so we just pass on the call.
|
||||
@@ -774,14 +765,13 @@ def _get_srcrev(d, method_name='sortable_revision'):
|
||||
that fetcher provides a method with the given name and the same signature as sortable_revision.
|
||||
"""
|
||||
|
||||
d.setVar("__BBSRCREV_SEEN", "1")
|
||||
d.setVar("__BBSEENSRCREV", "1")
|
||||
recursion = d.getVar("__BBINSRCREV")
|
||||
if recursion:
|
||||
raise FetchError("There are recursive references in fetcher variables, likely through SRC_URI")
|
||||
d.setVar("__BBINSRCREV", True)
|
||||
|
||||
scms = []
|
||||
revs = []
|
||||
fetcher = Fetch(d.getVar('SRC_URI').split(), d)
|
||||
urldata = fetcher.ud
|
||||
for u in urldata:
|
||||
@@ -789,19 +779,16 @@ def _get_srcrev(d, method_name='sortable_revision'):
|
||||
scms.append(u)
|
||||
|
||||
if not scms:
|
||||
d.delVar("__BBINSRCREV")
|
||||
return "", revs
|
||||
|
||||
raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI")
|
||||
|
||||
if len(scms) == 1 and len(urldata[scms[0]].names) == 1:
|
||||
autoinc, rev = getattr(urldata[scms[0]].method, method_name)(urldata[scms[0]], d, urldata[scms[0]].names[0])
|
||||
revs.append(rev)
|
||||
if len(rev) > 10:
|
||||
rev = rev[:10]
|
||||
d.delVar("__BBINSRCREV")
|
||||
if autoinc:
|
||||
return "AUTOINC+" + rev, revs
|
||||
return rev, revs
|
||||
return "AUTOINC+" + rev
|
||||
return rev
|
||||
|
||||
#
|
||||
# Multiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT
|
||||
@@ -817,7 +804,6 @@ def _get_srcrev(d, method_name='sortable_revision'):
|
||||
ud = urldata[scm]
|
||||
for name in ud.names:
|
||||
autoinc, rev = getattr(ud.method, method_name)(ud, d, name)
|
||||
revs.append(rev)
|
||||
seenautoinc = seenautoinc or autoinc
|
||||
if len(rev) > 10:
|
||||
rev = rev[:10]
|
||||
@@ -835,21 +821,7 @@ def _get_srcrev(d, method_name='sortable_revision'):
|
||||
format = "AUTOINC+" + format
|
||||
|
||||
d.delVar("__BBINSRCREV")
|
||||
return format, revs
|
||||
|
||||
def get_hashvalue(d, method_name='sortable_revision'):
|
||||
pkgv, revs = _get_srcrev(d, method_name=method_name)
|
||||
return " ".join(revs)
|
||||
|
||||
def get_pkgv_string(d, method_name='sortable_revision'):
|
||||
pkgv, revs = _get_srcrev(d, method_name=method_name)
|
||||
return pkgv
|
||||
|
||||
def get_srcrev(d, method_name='sortable_revision'):
|
||||
pkgv, revs = _get_srcrev(d, method_name=method_name)
|
||||
if not pkgv:
|
||||
raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI")
|
||||
return pkgv
|
||||
return format
|
||||
|
||||
def localpath(url, d):
|
||||
fetcher = bb.fetch2.Fetch([url], d)
|
||||
@@ -867,7 +839,6 @@ FETCH_EXPORT_VARS = ['HOME', 'PATH',
|
||||
'ALL_PROXY', 'all_proxy',
|
||||
'GIT_PROXY_COMMAND',
|
||||
'GIT_SSH',
|
||||
'GIT_SSH_COMMAND',
|
||||
'GIT_SSL_CAINFO',
|
||||
'GIT_SMART_HTTP',
|
||||
'SSH_AUTH_SOCK', 'SSH_AGENT_PID',
|
||||
@@ -875,18 +846,10 @@ FETCH_EXPORT_VARS = ['HOME', 'PATH',
|
||||
'DBUS_SESSION_BUS_ADDRESS',
|
||||
'P4CONFIG',
|
||||
'SSL_CERT_FILE',
|
||||
'NODE_EXTRA_CA_CERTS',
|
||||
'AWS_PROFILE',
|
||||
'AWS_ACCESS_KEY_ID',
|
||||
'AWS_SECRET_ACCESS_KEY',
|
||||
'AWS_ROLE_ARN',
|
||||
'AWS_WEB_IDENTITY_TOKEN_FILE',
|
||||
'AWS_DEFAULT_REGION',
|
||||
'AWS_SESSION_TOKEN',
|
||||
'GIT_CACHE_PATH',
|
||||
'REMOTE_CONTAINERS_IPC',
|
||||
'GITHUB_TOKEN',
|
||||
'SSL_CERT_DIR']
|
||||
'AWS_DEFAULT_REGION']
|
||||
|
||||
def get_fetcher_environment(d):
|
||||
newenv = {}
|
||||
@@ -951,10 +914,7 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
|
||||
elif e.stderr:
|
||||
output = "output:\n%s" % e.stderr
|
||||
else:
|
||||
if log:
|
||||
output = "see logfile for output"
|
||||
else:
|
||||
output = "no output"
|
||||
output = "no output"
|
||||
error_message = "Fetch command %s failed with exit code %s, %s" % (e.command, e.exitcode, output)
|
||||
except bb.process.CmdError as e:
|
||||
error_message = "Fetch command %s could not be run:\n%s" % (e.command, e.msg)
|
||||
@@ -1016,7 +976,6 @@ def build_mirroruris(origud, mirrors, ld):
|
||||
|
||||
try:
|
||||
newud = FetchData(newuri, ld)
|
||||
newud.ignore_checksums = True
|
||||
newud.setup_localpath(ld)
|
||||
except bb.fetch2.BBFetchException as e:
|
||||
logger.debug("Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
|
||||
@@ -1126,8 +1085,7 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):
|
||||
logger.debug("Mirror fetch failure for url %s (original url: %s)" % (ud.url, origud.url))
|
||||
logger.debug(str(e))
|
||||
try:
|
||||
if ud.method.cleanup_upon_failure():
|
||||
ud.method.clean(ud, ld)
|
||||
ud.method.clean(ud, ld)
|
||||
except UnboundLocalError:
|
||||
pass
|
||||
return False
|
||||
@@ -1138,8 +1096,6 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):
|
||||
|
||||
def ensure_symlink(target, link_name):
|
||||
if not os.path.exists(link_name):
|
||||
dirname = os.path.dirname(link_name)
|
||||
bb.utils.mkdirhier(dirname)
|
||||
if os.path.islink(link_name):
|
||||
# Broken symbolic link
|
||||
os.unlink(link_name)
|
||||
@@ -1252,7 +1208,6 @@ def srcrev_internal_helper(ud, d, name):
|
||||
if srcrev == "INVALID" or not srcrev:
|
||||
raise FetchError("Please set a valid SRCREV for url %s (possible key names are %s, or use a ;rev=X URL parameter)" % (str(attempts), ud.url), ud.url)
|
||||
if srcrev == "AUTOINC":
|
||||
d.setVar("__BBAUTOREV_ACTED_UPON", True)
|
||||
srcrev = ud.method.latest_revision(ud, d, name)
|
||||
|
||||
return srcrev
|
||||
@@ -1264,21 +1219,23 @@ def get_checksum_file_list(d):
|
||||
SRC_URI as a space-separated string
|
||||
"""
|
||||
fetch = Fetch([], d, cache = False, localonly = True)
|
||||
|
||||
dl_dir = d.getVar('DL_DIR')
|
||||
filelist = []
|
||||
for u in fetch.urls:
|
||||
ud = fetch.ud[u]
|
||||
|
||||
if ud and isinstance(ud.method, local.Local):
|
||||
found = False
|
||||
paths = ud.method.localfile_searchpaths(ud, d)
|
||||
paths = ud.method.localpaths(ud, d)
|
||||
for f in paths:
|
||||
pth = ud.decodedurl
|
||||
if os.path.exists(f):
|
||||
found = True
|
||||
if f.startswith(dl_dir):
|
||||
# The local fetcher's behaviour is to return a path under DL_DIR if it couldn't find the file anywhere else
|
||||
if os.path.exists(f):
|
||||
bb.warn("Getting checksum for %s SRC_URI entry %s: file not found except in DL_DIR" % (d.getVar('PN'), os.path.basename(f)))
|
||||
else:
|
||||
bb.warn("Unable to get checksum for %s SRC_URI entry %s: file could not be found" % (d.getVar('PN'), os.path.basename(f)))
|
||||
filelist.append(f + ":" + str(os.path.exists(f)))
|
||||
if not found:
|
||||
bb.fatal(("Unable to get checksum for %s SRC_URI entry %s: file could not be found"
|
||||
"\nThe following paths were searched:"
|
||||
"\n%s") % (d.getVar('PN'), os.path.basename(f), '\n'.join(paths)))
|
||||
|
||||
return " ".join(filelist)
|
||||
|
||||
@@ -1316,25 +1273,27 @@ class FetchData(object):
self.setup = False

def configure_checksum(checksum_id):
checksum_plain_name = "%ssum" % checksum_id
if "name" in self.parm:
checksum_name = "%s.%ssum" % (self.parm["name"], checksum_id)
else:
checksum_name = checksum_plain_name
checksum_name = "%ssum" % checksum_id

setattr(self, "%s_name" % checksum_id, checksum_name)

if checksum_name in self.parm:
checksum_expected = self.parm[checksum_name]
elif checksum_plain_name in self.parm:
checksum_expected = self.parm[checksum_plain_name]
checksum_name = checksum_plain_name
elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3", "az", "crate", "gs", "gomod", "npm"]:
elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3", "az"]:
checksum_expected = None
else:
checksum_expected = d.getVarFlag("SRC_URI", checksum_name)

setattr(self, "%s_name" % checksum_id, checksum_name)
setattr(self, "%s_expected" % checksum_id, checksum_expected)

for checksum_id in CHECKSUM_LIST:
configure_checksum(checksum_id)

self.ignore_checksums = False

self.names = self.parm.get("name",'default').split(',')

self.method = None
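The configure_checksum() hunk above derives the SRC_URI flag name from an optional name= parameter on the URL. A small illustrative helper showing the lookup order it implies (not part of the diff):

    def checksum_flag_names(parm, checksum_id):
        # a named SRC_URI entry first looks for "<name>.<id>sum",
        # then falls back to the plain "<id>sum" flag
        plain = "%ssum" % checksum_id
        if "name" in parm:
            return ["%s.%s" % (parm["name"], plain), plain]
        return [plain]

    # checksum_flag_names({"name": "tarball"}, "sha256")
    #   -> ['tarball.sha256sum', 'sha256sum']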
@@ -1356,11 +1315,6 @@ class FetchData(object):
|
||||
if hasattr(self.method, "urldata_init"):
|
||||
self.method.urldata_init(self, d)
|
||||
|
||||
for checksum_id in CHECKSUM_LIST:
|
||||
configure_checksum(checksum_id)
|
||||
|
||||
self.ignore_checksums = False
|
||||
|
||||
if "localpath" in self.parm:
|
||||
# if user sets localpath for file, use it instead.
|
||||
self.localpath = self.parm["localpath"]
|
||||
@@ -1440,9 +1394,6 @@ class FetchMethod(object):
|
||||
Is localpath something that can be represented by a checksum?
|
||||
"""
|
||||
|
||||
# We cannot compute checksums for None
|
||||
if urldata.localpath is None:
|
||||
return False
|
||||
# We cannot compute checksums for directories
|
||||
if os.path.isdir(urldata.localpath):
|
||||
return False
|
||||
@@ -1455,12 +1406,6 @@ class FetchMethod(object):
|
||||
"""
|
||||
return False
|
||||
|
||||
def cleanup_upon_failure(self):
|
||||
"""
|
||||
When a fetch fails, should clean() be called?
|
||||
"""
|
||||
return True
|
||||
|
||||
def verify_donestamp(self, ud, d):
|
||||
"""
|
||||
Verify the donestamp file
|
||||
@@ -1521,7 +1466,7 @@ class FetchMethod(object):
|
||||
(file, urldata.parm.get('unpack')))
|
||||
|
||||
base, ext = os.path.splitext(file)
|
||||
if ext in ['.gz', '.bz2', '.Z', '.xz', '.lz', '.zst']:
|
||||
if ext in ['.gz', '.bz2', '.Z', '.xz', '.lz']:
|
||||
efile = os.path.join(rootdir, os.path.basename(base))
|
||||
else:
|
||||
efile = file
|
||||
@@ -1603,7 +1548,6 @@ class FetchMethod(object):
|
||||
unpackdir = rootdir
|
||||
|
||||
if not unpack or not cmd:
|
||||
urldata.unpack_tracer.unpack("file-copy", unpackdir)
|
||||
# If file == dest, then avoid any copies, as we already put the file into dest!
|
||||
dest = os.path.join(unpackdir, os.path.basename(file))
|
||||
if file != dest and not (os.path.exists(dest) and os.path.samefile(file, dest)):
|
||||
@@ -1617,9 +1561,7 @@ class FetchMethod(object):
|
||||
if urlpath.find("/") != -1:
|
||||
destdir = urlpath.rsplit("/", 1)[0] + '/'
|
||||
bb.utils.mkdirhier("%s/%s" % (unpackdir, destdir))
|
||||
cmd = 'cp --force --preserve=timestamps --no-dereference --recursive -H "%s" "%s"' % (file, destdir)
|
||||
else:
|
||||
urldata.unpack_tracer.unpack("archive-extract", unpackdir)
|
||||
cmd = 'cp -fpPRH "%s" "%s"' % (file, destdir)
|
||||
|
||||
if not cmd:
|
||||
return
|
||||
@@ -1673,13 +1615,13 @@ class FetchMethod(object):
|
||||
if not hasattr(self, "_latest_revision"):
|
||||
raise ParameterError("The fetcher for this URL does not support _latest_revision", ud.url)
|
||||
|
||||
revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
|
||||
key = self.generate_revision_key(ud, d, name)
|
||||
|
||||
rev = _revisions_cache.get_rev(key)
|
||||
if rev is None:
|
||||
rev = self._latest_revision(ud, d, name)
|
||||
_revisions_cache.set_rev(key, rev)
|
||||
return rev
|
||||
try:
|
||||
return revs[key]
|
||||
except KeyError:
|
||||
revs[key] = rev = self._latest_revision(ud, d, name)
|
||||
return rev
|
||||
|
||||
def sortable_revision(self, ud, d, name):
|
||||
latest_rev = self._build_revision(ud, d, name)
|
||||
@@ -1711,55 +1653,6 @@ class FetchMethod(object):
|
||||
"""
|
||||
return []
|
||||
|
||||
|
||||
class DummyUnpackTracer(object):
|
||||
"""
|
||||
Abstract API definition for a class that traces unpacked source files back
|
||||
to their respective upstream SRC_URI entries, for software composition
|
||||
analysis, license compliance and detailed SBOM generation purposes.
|
||||
User may load their own unpack tracer class (instead of the dummy
|
||||
one) by setting the BB_UNPACK_TRACER_CLASS config parameter.
|
||||
"""
|
||||
def start(self, unpackdir, urldata_dict, d):
|
||||
"""
|
||||
Start tracing the core Fetch.unpack process, using an index to map
|
||||
unpacked files to each SRC_URI entry.
|
||||
This method is called by Fetch.unpack and it may receive nested calls by
|
||||
gitsm and npmsw fetchers, that expand SRC_URI entries by adding implicit
|
||||
URLs and by recursively calling Fetch.unpack from new (nested) Fetch
|
||||
instances.
|
||||
"""
|
||||
return
|
||||
def start_url(self, url):
|
||||
"""Start tracing url unpack process.
|
||||
This method is called by Fetch.unpack before the fetcher-specific unpack
|
||||
method starts, and it may receive nested calls by gitsm and npmsw
|
||||
fetchers.
|
||||
"""
|
||||
return
|
||||
def unpack(self, unpack_type, destdir):
|
||||
"""
|
||||
Set unpack_type and destdir for current url.
|
||||
This method is called by the fetcher-specific unpack method after url
|
||||
tracing started.
|
||||
"""
|
||||
return
|
||||
def finish_url(self, url):
|
||||
"""Finish tracing url unpack process and update the file index.
|
||||
This method is called by Fetch.unpack after the fetcher-specific unpack
|
||||
method finished its job, and it may receive nested calls by gitsm
|
||||
and npmsw fetchers.
|
||||
"""
|
||||
return
|
||||
def complete(self):
|
||||
"""
|
||||
Finish tracing the Fetch.unpack process, and check if all nested
|
||||
Fetch.unpack calls (if any) have been completed; if so, save collected
|
||||
metadata.
|
||||
"""
|
||||
return
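Per the docstrings above, a user-supplied tracer can replace the dummy class via BB_UNPACK_TRACER_CLASS. A minimal sketch of a custom tracer implementing the same hooks (module path and class name are hypothetical):

    class LoggingUnpackTracer:
        # loaded via e.g. BB_UNPACK_TRACER_CLASS = "mymod.LoggingUnpackTracer"
        def start(self, unpackdir, urldata_dict, d):
            self.unpackdir = unpackdir
        def start_url(self, url):
            self.current = url
        def unpack(self, unpack_type, destdir):
            print("unpacked %s via %s into %s" % (self.current, unpack_type, destdir))
        def finish_url(self, url):
            self.current = None
        def complete(self):
            pass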
|
||||
|
||||
|
||||
class Fetch(object):
|
||||
def __init__(self, urls, d, cache = True, localonly = False, connection_cache = None):
|
||||
if localonly and cache:
|
||||
@@ -1780,30 +1673,10 @@ class Fetch(object):
|
||||
if key in urldata_cache:
|
||||
self.ud = urldata_cache[key]
|
||||
|
||||
# the unpack_tracer object needs to be made available to possible nested
|
||||
# Fetch instances (when those are created by gitsm and npmsw fetchers)
|
||||
# so we set it as a global variable
|
||||
global unpack_tracer
|
||||
try:
|
||||
unpack_tracer
|
||||
except NameError:
|
||||
class_path = d.getVar("BB_UNPACK_TRACER_CLASS")
|
||||
if class_path:
|
||||
# use user-defined unpack tracer class
|
||||
import importlib
|
||||
module_name, _, class_name = class_path.rpartition(".")
|
||||
module = importlib.import_module(module_name)
|
||||
class_ = getattr(module, class_name)
|
||||
unpack_tracer = class_()
|
||||
else:
|
||||
# fall back to the dummy/abstract class
|
||||
unpack_tracer = DummyUnpackTracer()
|
||||
|
||||
for url in urls:
|
||||
if url not in self.ud:
|
||||
try:
|
||||
self.ud[url] = FetchData(url, d, localonly)
|
||||
self.ud[url].unpack_tracer = unpack_tracer
|
||||
except NonLocalMethod:
|
||||
if localonly:
|
||||
self.ud[url] = None
|
||||
@@ -1842,7 +1715,6 @@ class Fetch(object):
|
||||
network = self.d.getVar("BB_NO_NETWORK")
|
||||
premirroronly = bb.utils.to_boolean(self.d.getVar("BB_FETCH_PREMIRRORONLY"))
|
||||
|
||||
checksum_missing_messages = []
|
||||
for u in urls:
|
||||
ud = self.ud[u]
|
||||
ud.setup_localpath(self.d)
|
||||
@@ -1854,6 +1726,7 @@ class Fetch(object):
|
||||
|
||||
try:
|
||||
self.d.setVar("BB_NO_NETWORK", network)
|
||||
|
||||
if m.verify_donestamp(ud, self.d) and not m.need_update(ud, self.d):
|
||||
done = True
|
||||
elif m.try_premirror(ud, self.d):
|
||||
@@ -1906,7 +1779,7 @@ class Fetch(object):
|
||||
logger.debug(str(e))
|
||||
firsterr = e
|
||||
# Remove any incomplete fetch
|
||||
if not verified_stamp and m.cleanup_upon_failure():
|
||||
if not verified_stamp:
|
||||
m.clean(ud, self.d)
|
||||
logger.debug("Trying MIRRORS")
|
||||
mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
|
||||
@@ -1925,20 +1798,13 @@ class Fetch(object):
|
||||
raise ChecksumError("Stale Error Detected")
|
||||
|
||||
except BBFetchException as e:
|
||||
if isinstance(e, NoChecksumError):
|
||||
(message, _) = e.args
|
||||
checksum_missing_messages.append(message)
|
||||
continue
|
||||
elif isinstance(e, ChecksumError):
|
||||
if isinstance(e, ChecksumError):
|
||||
logger.error("Checksum failure fetching %s" % u)
|
||||
raise
|
||||
|
||||
finally:
|
||||
if ud.lockfile:
|
||||
bb.utils.unlockfile(lf)
|
||||
if checksum_missing_messages:
|
||||
logger.error("Missing SRC_URI checksum, please add those to the recipe: \n%s", "\n".join(checksum_missing_messages))
|
||||
raise BBFetchException("There was some missing checksums in the recipe")
|
||||
|
||||
def checkstatus(self, urls=None):
|
||||
"""
|
||||
@@ -1969,7 +1835,7 @@ class Fetch(object):
|
||||
ret = m.try_mirrors(self, ud, self.d, mirrors, True)
|
||||
|
||||
if not ret:
|
||||
raise FetchError("URL doesn't work", u)
|
||||
raise FetchError("URL %s doesn't work" % u, u)
|
||||
|
||||
def unpack(self, root, urls=None):
|
||||
"""
|
||||
@@ -1979,8 +1845,6 @@ class Fetch(object):
|
||||
if not urls:
|
||||
urls = self.urls
|
||||
|
||||
unpack_tracer.start(root, self.ud, self.d)
|
||||
|
||||
for u in urls:
|
||||
ud = self.ud[u]
|
||||
ud.setup_localpath(self.d)
|
||||
@@ -1988,15 +1852,11 @@ class Fetch(object):
|
||||
if ud.lockfile:
|
||||
lf = bb.utils.lockfile(ud.lockfile)
|
||||
|
||||
unpack_tracer.start_url(u)
|
||||
ud.method.unpack(ud, root, self.d)
|
||||
unpack_tracer.finish_url(u)
|
||||
|
||||
if ud.lockfile:
|
||||
bb.utils.unlockfile(lf)
|
||||
|
||||
unpack_tracer.complete()
|
||||
|
||||
def clean(self, urls=None):
|
||||
"""
|
||||
Clean files that the fetcher gets or places
|
||||
@@ -2098,8 +1958,6 @@ from . import npm
|
||||
from . import npmsw
|
||||
from . import az
|
||||
from . import crate
|
||||
from . import gcp
|
||||
from . import gomod
|
||||
|
||||
methods.append(local.Local())
|
||||
methods.append(wget.Wget())
|
||||
@@ -2121,6 +1979,3 @@ methods.append(npm.Npm())
|
||||
methods.append(npmsw.NpmShrinkWrap())
|
||||
methods.append(az.Az())
|
||||
methods.append(crate.Crate())
|
||||
methods.append(gcp.GCP())
|
||||
methods.append(gomod.GoMod())
|
||||
methods.append(gomod.GoModGit())
|
||||
|
||||
@@ -108,7 +108,7 @@ class ClearCase(FetchMethod):
ud.module.replace("/", "."),
ud.label.replace("/", "."))

ud.viewname = "%s-view%s" % (ud.identifier, d.getVar("DATETIME"))
ud.viewname = "%s-view%s" % (ud.identifier, d.getVar("DATETIME", d, True))
ud.csname = "%s-config-spec" % (ud.identifier)
ud.ccasedir = os.path.join(d.getVar("DL_DIR"), ud.type)
ud.viewdir = os.path.join(ud.ccasedir, ud.viewname)
@@ -196,7 +196,7 @@ class ClearCase(FetchMethod):

def need_update(self, ud, d):
if ("LATEST" in ud.label) or (ud.customspec and "LATEST" in ud.customspec):
ud.identifier += "-%s" % d.getVar("DATETIME")
ud.identifier += "-%s" % d.getVar("DATETIME",d, True)
return True
if os.path.exists(ud.localpath):
return False
@@ -13,6 +13,7 @@ BitBake 'Fetch' implementation for crates.io
import hashlib
import json
import os
import shutil
import subprocess
import bb
from bb.fetch2 import logger, subprocess_setup, UnpackError
@@ -33,7 +34,7 @@ class Crate(Wget):
return ud.type in ['crate']

def recommends_checksum(self, urldata):
return True
return False

def urldata_init(self, ud, d):
"""
@@ -56,26 +57,22 @@ class Crate(Wget):
if len(parts) < 5:
raise bb.fetch2.ParameterError("Invalid URL: Must be crate://HOST/NAME/VERSION", ud.url)

# version is expected to be the last token
# but ignore possible url parameters which will be used
# by the top fetcher class
version = parts[-1].split(";")[0]
# last field is version
version = parts[len(parts) - 1]
# second to last field is name
name = parts[-2]
name = parts[len(parts) - 2]
# host (this is to allow custom crate registries to be specified
host = '/'.join(parts[2:-2])
host = '/'.join(parts[2:len(parts) - 2])

# if using upstream just fix it up nicely
if host == 'crates.io':
host = 'crates.io/api/v1/crates'

ud.url = "https://%s/%s/%s/download" % (host, name, version)
ud.versionsurl = "https://%s/%s/versions" % (host, name)
ud.parm['downloadfilename'] = "%s-%s.crate" % (name, version)
if 'name' not in ud.parm:
ud.parm['name'] = '%s-%s' % (name, version)
ud.parm['name'] = name

logger.debug2("Fetching %s to %s" % (ud.url, ud.parm['downloadfilename']))
logger.debug(2, "Fetching %s to %s" % (ud.url, ud.parm['downloadfilename']))

def unpack(self, ud, rootdir, d):
"""
@@ -99,13 +96,11 @@ class Crate(Wget):
save_cwd = os.getcwd()
os.chdir(rootdir)

bp = d.getVar('BP')
if bp == ud.parm.get('name'):
pn = d.getVar('BPN')
if pn == ud.parm.get('name'):
cmd = "tar -xz --no-same-owner -f %s" % thefile
ud.unpack_tracer.unpack("crate-extract", rootdir)
else:
cargo_bitbake = self._cargo_bitbake_path(rootdir)
ud.unpack_tracer.unpack("cargo-extract", cargo_bitbake)

cmd = "tar -xz --no-same-owner -f %s -C %s" % (thefile, cargo_bitbake)

@@ -140,11 +135,3 @@ class Crate(Wget):
mdpath = os.path.join(bbpath, cratepath, mdfile)
with open(mdpath, "w") as f:
json.dump(metadata, f)

def latest_versionstring(self, ud, d):
from functools import cmp_to_key
json_data = json.loads(self._fetch_index(ud.versionsurl, ud, d))
versions = [(0, i["num"], "") for i in json_data["versions"]]
versions = sorted(versions, key=cmp_to_key(bb.utils.vercmp))

return (versions[-1][1], "")
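The crate urldata_init() hunk above rewrites crate://HOST/NAME/VERSION into a registry download URL and strips any ;key=value suffixes from the version field. An illustrative standalone rendering of that mapping (the example crate name is assumed, not taken from the diff):

    def crate_to_download_url(url):
        parts = url.split("/")
        version = parts[-1].split(";")[0]   # ignore ;param=value suffixes
        name = parts[-2]
        host = "/".join(parts[2:-2])
        if host == "crates.io":
            host = "crates.io/api/v1/crates"
        return "https://%s/%s/%s/download" % (host, name, version)

    # crate_to_download_url("crate://crates.io/glob/0.2.11")
    #   -> "https://crates.io/api/v1/crates/glob/0.2.11/download"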
@@ -1,102 +0,0 @@
"""
BitBake 'Fetch' implementation for Google Cloud Platform Storage.

Class for fetching files from Google Cloud Storage using the
Google Cloud Storage Python Client. The GCS Python Client must
be correctly installed, configured and authenticated prior to use.
Additionally, gsutil must also be installed.

"""

# Copyright (C) 2023, Snap Inc.
#
# Based in part on bb.fetch2.s3:
# Copyright (C) 2017 Andre McCurdy
#
# SPDX-License-Identifier: GPL-2.0-only
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import bb
import urllib.parse, urllib.error
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import logger

class GCP(FetchMethod):
    """
    Class to fetch urls via GCP's Python API.
    """
    def __init__(self):
        self.gcp_client = None

    def supports(self, ud, d):
        """
        Check to see if a given url can be fetched with GCP.
        """
        return ud.type in ['gs']

    def recommends_checksum(self, urldata):
        return True

    def urldata_init(self, ud, d):
        if 'downloadfilename' in ud.parm:
            ud.basename = ud.parm['downloadfilename']
        else:
            ud.basename = os.path.basename(ud.path)

        ud.localfile = d.expand(urllib.parse.unquote(ud.basename))

    def get_gcp_client(self):
        from google.cloud import storage
        self.gcp_client = storage.Client(project=None)

    def download(self, ud, d):
        """
        Fetch urls using the GCP API.
        Assumes localpath was called first.
        """
        from google.api_core.exceptions import NotFound
        logger.debug2(f"Trying to download gs://{ud.host}{ud.path} to {ud.localpath}")
        if self.gcp_client is None:
            self.get_gcp_client()

        bb.fetch2.check_network_access(d, "blob.download_to_filename", f"gs://{ud.host}{ud.path}")

        # Path sometimes has leading slash, so strip it
        path = ud.path.lstrip("/")
        blob = self.gcp_client.bucket(ud.host).blob(path)
        try:
            blob.download_to_filename(ud.localpath)
        except NotFound:
            raise FetchError("The GCP API threw a NotFound exception")

        # Additional sanity checks copied from the wget class (although there
        # are no known issues which mean these are required, treat the GCP API
        # tool with a little healthy suspicion).
        if not os.path.exists(ud.localpath):
            raise FetchError(f"The GCP API returned success for gs://{ud.host}{ud.path} but {ud.localpath} doesn't exist?!")

        if os.path.getsize(ud.localpath) == 0:
            os.remove(ud.localpath)
            raise FetchError(f"The downloaded file for gs://{ud.host}{ud.path} resulted in a zero size file?! Deleting and failing since this isn't right.")

        return True

    def checkstatus(self, fetch, ud, d):
        """
        Check the status of a URL.
        """
        logger.debug2(f"Checking status of gs://{ud.host}{ud.path}")
        if self.gcp_client is None:
            self.get_gcp_client()

        bb.fetch2.check_network_access(d, "gcp_client.bucket(ud.host).blob(path).exists()", f"gs://{ud.host}{ud.path}")

        # Path sometimes has leading slash, so strip it
        path = ud.path.lstrip("/")
        if self.gcp_client.bucket(ud.host).blob(path).exists() == False:
            raise FetchError(f"The GCP API reported that gs://{ud.host}{ud.path} does not exist")
        else:
            return True
@@ -44,27 +44,13 @@ Supported SRC_URI options are:

- nobranch
Don't check the SHA validation for branch. set this option for the recipe
referring to commit which is valid in any namespace (branch, tag, ...)
instead of branch.
referring to commit which is valid in tag instead of branch.
The default is "0", set nobranch=1 if needed.

- subpath
Limit the checkout to a specific subpath of the tree.
By default, checkout the whole tree, set subpath=<path> if needed

- destsuffix
The name of the path in which to place the checkout.
By default, the path is git/, set destsuffix=<suffix> if needed

- usehead
For local git:// urls to use the current branch HEAD as the revision for use with
AUTOREV. Implies nobranch.

- lfs
Enable the checkout to use LFS for large files. This will download all LFS files
in the download step, as the unpack step does not have network access.
The default is "1", set lfs=0 to skip.

"""
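The options documented above arrive as ;key=value parameters on the SRC_URI entry. A small sketch using bb.fetch2.decodeurl to show how such an entry is split out (the URL is an invented example, not taken from this diff):

    from bb.fetch2 import decodeurl

    uri = "git://git.example.com/repo.git;protocol=https;branch=main;nobranch=0;lfs=0;destsuffix=mysrc"
    scheme, host, path, user, pswd, params = decodeurl(uri)
    # params -> {'protocol': 'https', 'branch': 'main', 'nobranch': '0',
    #            'lfs': '0', 'destsuffix': 'mysrc'}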
# Copyright (C) 2005 Richard Purdie
|
||||
@@ -78,7 +64,6 @@ import fnmatch
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
import bb
|
||||
@@ -87,12 +72,8 @@ from contextlib import contextmanager
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import logger
|
||||
from bb.fetch2 import trusted_network
|
||||
|
||||
|
||||
sha1_re = re.compile(r'^[0-9a-f]{40}$')
|
||||
slash_re = re.compile(r"/+")
|
||||
|
||||
class GitProgressHandler(bb.progress.LineFilterProgressHandler):
|
||||
"""Extract progress information from git output"""
|
||||
def __init__(self, d):
|
||||
@@ -150,9 +131,6 @@ class Git(FetchMethod):
|
||||
def supports_checksum(self, urldata):
|
||||
return False
|
||||
|
||||
def cleanup_upon_failure(self):
|
||||
return False
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
"""
|
||||
init git specific variable within url data
|
||||
@@ -262,7 +240,7 @@ class Git(FetchMethod):
|
||||
for name in ud.names:
|
||||
ud.unresolvedrev[name] = 'HEAD'
|
||||
|
||||
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c gc.autoDetach=false -c core.pager=cat -c safe.bareRepository=all -c clone.defaultRemoteName=origin"
|
||||
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c core.fsyncobjectfiles=0 -c gc.autoDetach=false"
|
||||
|
||||
write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0"
|
||||
ud.write_tarballs = write_tarballs != "0" or ud.rebaseable
|
||||
@@ -271,20 +249,20 @@ class Git(FetchMethod):
|
||||
ud.setup_revisions(d)
|
||||
|
||||
for name in ud.names:
|
||||
# Ensure any revision that doesn't look like a SHA-1 is translated into one
|
||||
if not sha1_re.match(ud.revisions[name] or ''):
|
||||
# Ensure anything that doesn't look like a sha256 checksum/revision is translated into one
|
||||
if not ud.revisions[name] or len(ud.revisions[name]) != 40 or (False in [c in "abcdef0123456789" for c in ud.revisions[name]]):
|
||||
if ud.revisions[name]:
|
||||
ud.unresolvedrev[name] = ud.revisions[name]
|
||||
ud.revisions[name] = self.latest_revision(ud, d, name)
|
||||
|
||||
gitsrcname = '%s%s' % (ud.host.replace(':', '.'), ud.path.replace('/', '.').replace('*', '.').replace(' ','_').replace('(', '_').replace(')', '_'))
|
||||
gitsrcname = '%s%s' % (ud.host.replace(':', '.'), ud.path.replace('/', '.').replace('*', '.').replace(' ','_'))
|
||||
if gitsrcname.startswith('.'):
|
||||
gitsrcname = gitsrcname[1:]
|
||||
|
||||
# For a rebaseable git repo, it is necessary to keep a mirror tar ball
|
||||
# per revision, so that even if the revision disappears from the
|
||||
# for rebaseable git repo, it is necessary to keep mirror tar ball
|
||||
# per revision, so that even the revision disappears from the
|
||||
# upstream repo in the future, the mirror will remain intact and still
|
||||
# contain the revision
|
||||
# contains the revision
|
||||
if ud.rebaseable:
|
||||
for name in ud.names:
|
||||
gitsrcname = gitsrcname + '_' + ud.revisions[name]
|
||||
@@ -328,10 +306,7 @@ class Git(FetchMethod):
|
||||
return ud.clonedir
|
||||
|
||||
def need_update(self, ud, d):
|
||||
return self.clonedir_need_update(ud, d) \
|
||||
or self.shallow_tarball_need_update(ud) \
|
||||
or self.tarball_need_update(ud) \
|
||||
or self.lfs_need_update(ud, d)
|
||||
return self.clonedir_need_update(ud, d) or self.shallow_tarball_need_update(ud) or self.tarball_need_update(ud)
|
||||
|
||||
def clonedir_need_update(self, ud, d):
|
||||
if not os.path.exists(ud.clonedir):
|
||||
@@ -343,15 +318,6 @@ class Git(FetchMethod):
|
||||
return True
|
||||
return False
|
||||
|
||||
def lfs_need_update(self, ud, d):
|
||||
if self.clonedir_need_update(ud, d):
|
||||
return True
|
||||
|
||||
for name in ud.names:
|
||||
if not self._lfs_objects_downloaded(ud, d, name, ud.clonedir):
|
||||
return True
|
||||
return False
|
||||
|
||||
def clonedir_need_shallow_revs(self, ud, d):
|
||||
for rev in ud.shallow_revs:
|
||||
try:
|
||||
@@ -371,16 +337,6 @@ class Git(FetchMethod):
|
||||
# is not possible
|
||||
if bb.utils.to_boolean(d.getVar("BB_FETCH_PREMIRRORONLY")):
|
||||
return True
|
||||
# If the url is not in trusted network, that is, BB_NO_NETWORK is set to 0
|
||||
# and BB_ALLOWED_NETWORKS does not contain the host that ud.url uses, then
|
||||
# we need to try premirrors first as using upstream is destined to fail.
|
||||
if not trusted_network(d, ud.url):
|
||||
return True
|
||||
# the following check is to ensure incremental fetch in downloads, this is
|
||||
# because the premirror might be old and does not contain the new rev required,
|
||||
# and this will cause a total removal and new clone. So if we can reach to
|
||||
# network, we prefer upstream over premirror, though the premirror might contain
|
||||
# the new rev.
|
||||
if os.path.exists(ud.clonedir):
|
||||
return False
|
||||
return True
|
||||
@@ -394,54 +350,17 @@ class Git(FetchMethod):
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and self.need_update(ud, d):
|
||||
ud.localpath = ud.fullshallow
|
||||
return
|
||||
elif os.path.exists(ud.fullmirror) and self.need_update(ud, d):
|
||||
if not os.path.exists(ud.clonedir):
|
||||
bb.utils.mkdirhier(ud.clonedir)
|
||||
runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=ud.clonedir)
|
||||
else:
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
|
||||
runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=tmpdir)
|
||||
output = runfetchcmd("%s remote" % ud.basecmd, d, quiet=True, workdir=ud.clonedir)
|
||||
if 'mirror' in output:
|
||||
runfetchcmd("%s remote rm mirror" % ud.basecmd, d, workdir=ud.clonedir)
|
||||
runfetchcmd("%s remote add --mirror=fetch mirror %s" % (ud.basecmd, tmpdir), d, workdir=ud.clonedir)
|
||||
fetch_cmd = "LANG=C %s fetch -f --update-head-ok --progress mirror " % (ud.basecmd)
|
||||
runfetchcmd(fetch_cmd, d, workdir=ud.clonedir)
|
||||
elif os.path.exists(ud.fullmirror) and not os.path.exists(ud.clonedir):
|
||||
bb.utils.mkdirhier(ud.clonedir)
|
||||
runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=ud.clonedir)
|
||||
|
||||
repourl = self._get_repo_url(ud)
|
||||
|
||||
needs_clone = False
|
||||
if os.path.exists(ud.clonedir):
|
||||
# The directory may exist, but not be the top level of a bare git
|
||||
# repository in which case it needs to be deleted and re-cloned.
|
||||
try:
|
||||
# Since clones can be bare, use --absolute-git-dir instead of --show-toplevel
|
||||
output = runfetchcmd("LANG=C %s rev-parse --absolute-git-dir" % ud.basecmd, d, workdir=ud.clonedir)
|
||||
toplevel = output.rstrip()
|
||||
|
||||
if not bb.utils.path_is_descendant(toplevel, ud.clonedir):
|
||||
logger.warning("Top level directory '%s' is not a descendant of '%s'. Re-cloning", toplevel, ud.clonedir)
|
||||
needs_clone = True
|
||||
except bb.fetch2.FetchError as e:
|
||||
logger.warning("Unable to get top level for %s (not a git directory?): %s", ud.clonedir, e)
|
||||
needs_clone = True
|
||||
except FileNotFoundError as e:
|
||||
logger.warning("%s", e)
|
||||
needs_clone = True
|
||||
|
||||
if needs_clone:
|
||||
shutil.rmtree(ud.clonedir)
|
||||
else:
|
||||
needs_clone = True
|
||||
|
||||
# If the repo still doesn't exist, fallback to cloning it
|
||||
if needs_clone:
|
||||
# We do this since git will use a "-l" option automatically for local urls where possible,
|
||||
# but it doesn't work when git/objects is a symlink, only works when it is a directory.
|
||||
if not os.path.exists(ud.clonedir):
|
||||
# We do this since git will use a "-l" option automatically for local urls where possible
|
||||
if repourl.startswith("file://"):
|
||||
repourl_path = repourl[7:]
|
||||
objects = os.path.join(repourl_path, 'objects')
|
||||
if os.path.isdir(objects) and not os.path.islink(objects):
|
||||
repourl = repourl_path
|
||||
repourl = repourl[7:]
|
||||
clone_cmd = "LANG=C %s clone --bare --mirror %s %s --progress" % (ud.basecmd, shlex.quote(repourl), ud.clonedir)
|
||||
if ud.proto.lower() != 'file':
|
||||
bb.fetch2.check_network_access(d, clone_cmd, ud.url)
|
||||
@@ -455,11 +374,7 @@ class Git(FetchMethod):
|
||||
runfetchcmd("%s remote rm origin" % ud.basecmd, d, workdir=ud.clonedir)
|
||||
|
||||
runfetchcmd("%s remote add --mirror=fetch origin %s" % (ud.basecmd, shlex.quote(repourl)), d, workdir=ud.clonedir)
|
||||
|
||||
if ud.nobranch:
|
||||
fetch_cmd = "LANG=C %s fetch -f --progress %s refs/*:refs/*" % (ud.basecmd, shlex.quote(repourl))
|
||||
else:
|
||||
fetch_cmd = "LANG=C %s fetch -f --progress %s refs/heads/*:refs/heads/* refs/tags/*:refs/tags/*" % (ud.basecmd, shlex.quote(repourl))
|
||||
fetch_cmd = "LANG=C %s fetch -f --progress %s refs/*:refs/*" % (ud.basecmd, shlex.quote(repourl))
|
||||
if ud.proto.lower() != 'file':
|
||||
bb.fetch2.check_network_access(d, fetch_cmd, ud.url)
|
||||
progresshandler = GitProgressHandler(d)
|
||||
@@ -482,14 +397,15 @@ class Git(FetchMethod):
|
||||
if missing_rev:
|
||||
raise bb.fetch2.FetchError("Unable to find revision %s even from upstream" % missing_rev)
|
||||
|
||||
if self.lfs_need_update(ud, d):
|
||||
if self._contains_lfs(ud, d, ud.clonedir) and self._need_lfs(ud):
|
||||
# Unpack temporary working copy, use it to run 'git checkout' to force pre-fetching
|
||||
# of all LFS blobs needed at the srcrev.
|
||||
#
|
||||
# It would be nice to just do this inline here by running 'git-lfs fetch'
|
||||
# on the bare clonedir, but that operation requires a working copy on some
|
||||
# releases of Git LFS.
|
||||
with tempfile.TemporaryDirectory(dir=d.getVar('DL_DIR')) as tmpdir:
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
|
||||
try:
|
||||
# Do the checkout. This implicitly involves a Git LFS fetch.
|
||||
Git.unpack(self, ud, tmpdir, d)
|
||||
|
||||
@@ -505,8 +421,10 @@ class Git(FetchMethod):
|
||||
# Only do this if the unpack resulted in a .git/lfs directory being
|
||||
# created; this only happens if at least one blob needed to be
|
||||
# downloaded.
|
||||
if os.path.exists(os.path.join(ud.destdir, ".git", "lfs")):
|
||||
runfetchcmd("tar -cf - lfs | tar -xf - -C %s" % ud.clonedir, d, workdir="%s/.git" % ud.destdir)
|
||||
if os.path.exists(os.path.join(tmpdir, "git", ".git", "lfs")):
|
||||
runfetchcmd("tar -cf - lfs | tar -xf - -C %s" % ud.clonedir, d, workdir="%s/git/.git" % tmpdir)
|
||||
finally:
|
||||
bb.utils.remove(tmpdir, recurse=True)
|
||||
|
||||
def build_mirror_data(self, ud, d):
|
||||
|
||||
@@ -544,38 +462,25 @@ class Git(FetchMethod):
|
||||
|
||||
logger.info("Creating tarball of git repository")
|
||||
with create_atomic(ud.fullmirror) as tfile:
|
||||
mtime = runfetchcmd("{} log --all -1 --format=%cD".format(ud.basecmd), d,
|
||||
mtime = runfetchcmd("git log --all -1 --format=%cD", d,
|
||||
quiet=True, workdir=ud.clonedir)
|
||||
runfetchcmd("tar -czf %s --owner oe:0 --group oe:0 --mtime \"%s\" ."
|
||||
runfetchcmd("tar -czf %s --owner pokybuild --group users --mtime \"%s\" ."
|
||||
% (tfile, mtime), d, workdir=ud.clonedir)
|
||||
runfetchcmd("touch %s.done" % ud.fullmirror, d)
|
||||
|
||||
def clone_shallow_local(self, ud, dest, d):
|
||||
"""
|
||||
Shallow fetch from ud.clonedir (${DL_DIR}/git2/<gitrepo> by default):
|
||||
- For BB_GIT_SHALLOW_DEPTH: git fetch --depth <depth> rev
|
||||
- For BB_GIT_SHALLOW_REVS: git fetch --shallow-exclude=<revs> rev
|
||||
"""
|
||||
"""Clone the repo and make it shallow.
|
||||
|
||||
bb.utils.mkdirhier(dest)
|
||||
init_cmd = "%s init -q" % ud.basecmd
|
||||
if ud.bareclone:
|
||||
init_cmd += " --bare"
|
||||
runfetchcmd(init_cmd, d, workdir=dest)
|
||||
runfetchcmd("%s remote add origin %s" % (ud.basecmd, ud.clonedir), d, workdir=dest)
|
||||
|
||||
# Check the histories which should be excluded
|
||||
shallow_exclude = ''
|
||||
for revision in ud.shallow_revs:
|
||||
shallow_exclude += " --shallow-exclude=%s" % revision
|
||||
The upstream url of the new clone isn't set at this time, as it'll be
|
||||
set correctly when unpacked."""
|
||||
runfetchcmd("%s clone %s %s %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, dest), d)
|
||||
|
||||
to_parse, shallow_branches = [], []
|
||||
for name in ud.names:
|
||||
revision = ud.revisions[name]
|
||||
depth = ud.shallow_depths[name]
|
||||
|
||||
# The --depth and --shallow-exclude can't be used together
|
||||
if depth and shallow_exclude:
|
||||
raise bb.fetch2.FetchError("BB_GIT_SHALLOW_REVS is set, but BB_GIT_SHALLOW_DEPTH is not 0.")
|
||||
if depth:
|
||||
to_parse.append('%s~%d^{}' % (revision, depth - 1))
|
||||
|
||||
# For nobranch, we need a ref, otherwise the commits will be
|
||||
# removed, and for non-nobranch, we truncate the branch to our
|
||||
@@ -588,49 +493,36 @@ class Git(FetchMethod):
|
||||
else:
|
||||
ref = "refs/remotes/origin/%s" % branch
|
||||
|
||||
fetch_cmd = "%s fetch origin %s" % (ud.basecmd, revision)
|
||||
if depth:
|
||||
fetch_cmd += " --depth %s" % depth
|
||||
|
||||
if shallow_exclude:
|
||||
fetch_cmd += shallow_exclude
|
||||
|
||||
# Advertise the revision for lower version git such as 2.25.1:
|
||||
# error: Server does not allow request for unadvertised object.
|
||||
# The ud.clonedir is a local temporary dir, will be removed when
|
||||
# fetch is done, so we can do anything on it.
|
||||
adv_cmd = 'git branch -f advertise-%s %s' % (revision, revision)
|
||||
runfetchcmd(adv_cmd, d, workdir=ud.clonedir)
|
||||
|
||||
runfetchcmd(fetch_cmd, d, workdir=dest)
|
||||
shallow_branches.append(ref)
|
||||
runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest)
|
||||
|
||||
# Map srcrev+depths to revisions
|
||||
parsed_depths = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join(to_parse)), d, workdir=dest)
|
||||
|
||||
# Resolve specified revisions
|
||||
parsed_revs = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join('"%s^{}"' % r for r in ud.shallow_revs)), d, workdir=dest)
|
||||
shallow_revisions = parsed_depths.splitlines() + parsed_revs.splitlines()
|
||||
|
||||
# Apply extra ref wildcards
|
||||
all_refs_remote = runfetchcmd("%s ls-remote origin 'refs/*'" % ud.basecmd, \
|
||||
d, workdir=dest).splitlines()
|
||||
all_refs = []
|
||||
for line in all_refs_remote:
|
||||
all_refs.append(line.split()[-1])
|
||||
extra_refs = []
|
||||
all_refs = runfetchcmd('%s for-each-ref "--format=%%(refname)"' % ud.basecmd,
|
||||
d, workdir=dest).splitlines()
|
||||
for r in ud.shallow_extra_refs:
|
||||
if not ud.bareclone:
|
||||
r = r.replace('refs/heads/', 'refs/remotes/origin/')
|
||||
|
||||
if '*' in r:
|
||||
matches = filter(lambda a: fnmatch.fnmatchcase(a, r), all_refs)
|
||||
extra_refs.extend(matches)
|
||||
shallow_branches.extend(matches)
|
||||
else:
|
||||
extra_refs.append(r)
|
||||
shallow_branches.append(r)
|
||||
|
||||
for ref in extra_refs:
|
||||
ref_fetch = os.path.basename(ref)
|
||||
runfetchcmd("%s fetch origin --depth 1 %s" % (ud.basecmd, ref_fetch), d, workdir=dest)
|
||||
revision = runfetchcmd("%s rev-parse FETCH_HEAD" % ud.basecmd, d, workdir=dest)
|
||||
runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest)
|
||||
|
||||
# The url is local ud.clonedir, set it to upstream one
|
||||
repourl = self._get_repo_url(ud)
|
||||
runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, shlex.quote(repourl)), d, workdir=dest)
|
||||
# Make the repository shallow
|
||||
shallow_cmd = [self.make_shallow_path, '-s']
|
||||
for b in shallow_branches:
|
||||
shallow_cmd.append('-r')
|
||||
shallow_cmd.append(b)
|
||||
shallow_cmd.extend(shallow_revisions)
|
||||
runfetchcmd(subprocess.list2cmdline(shallow_cmd), d, workdir=dest)
|
||||
|
||||
def unpack(self, ud, destdir, d):
|
||||
""" unpack the downloaded src to destdir"""
|
||||
@@ -658,8 +550,6 @@ class Git(FetchMethod):
|
||||
destdir = ud.destdir = os.path.join(destdir, destsuffix)
|
||||
if os.path.exists(destdir):
|
||||
bb.utils.prunedir(destdir)
|
||||
if not ud.bareclone:
|
||||
ud.unpack_tracer.unpack("git", destdir)
|
||||
|
||||
need_lfs = self._need_lfs(ud)
|
||||
|
||||
@@ -669,12 +559,13 @@ class Git(FetchMethod):
|
||||
source_found = False
|
||||
source_error = []
|
||||
|
||||
clonedir_is_up_to_date = not self.clonedir_need_update(ud, d)
|
||||
if clonedir_is_up_to_date:
|
||||
runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, destdir), d)
|
||||
source_found = True
|
||||
else:
|
||||
source_error.append("clone directory not available or not up to date: " + ud.clonedir)
|
||||
if not source_found:
|
||||
clonedir_is_up_to_date = not self.clonedir_need_update(ud, d)
|
||||
if clonedir_is_up_to_date:
|
||||
runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, destdir), d)
|
||||
source_found = True
|
||||
else:
|
||||
source_error.append("clone directory not available or not up to date: " + ud.clonedir)
|
||||
|
||||
if not source_found:
|
||||
if ud.shallow:
|
||||
@@ -698,8 +589,6 @@ class Git(FetchMethod):
|
||||
raise bb.fetch2.FetchError("Repository %s has LFS content, install git-lfs on host to download (or set lfs=0 to ignore it)" % (repourl))
|
||||
elif not need_lfs:
|
||||
bb.note("Repository %s has LFS content but it is not being fetched" % (repourl))
|
||||
else:
|
||||
runfetchcmd("%s lfs install --local" % ud.basecmd, d, workdir=destdir)
|
||||
|
||||
if not ud.nocheckout:
|
||||
if subpath:
|
||||
@@ -727,13 +616,8 @@ class Git(FetchMethod):
|
||||
clonedir = os.path.realpath(ud.localpath)
|
||||
to_remove.append(clonedir)
|
||||
|
||||
# Remove shallow mirror tarball
|
||||
if ud.shallow:
|
||||
to_remove.append(ud.fullshallow)
|
||||
to_remove.append(ud.fullshallow + ".done")
|
||||
|
||||
for r in to_remove:
|
||||
if os.path.exists(r) or os.path.islink(r):
|
||||
if os.path.exists(r):
|
||||
bb.note('Removing %s' % r)
|
||||
bb.utils.remove(r, True)
|
||||
|
||||
@@ -756,35 +640,6 @@ class Git(FetchMethod):
|
||||
raise bb.fetch2.FetchError("The command '%s' gave output with more then 1 line unexpectedly, output: '%s'" % (cmd, output))
|
||||
return output.split()[0] != "0"
|
||||
|
||||
def _lfs_objects_downloaded(self, ud, d, name, wd):
"""
Verifies whether the LFS objects for requested revisions have already been downloaded
"""
# Bail out early if this repository doesn't use LFS
if not self._need_lfs(ud) or not self._contains_lfs(ud, d, wd):
return True

# The Git LFS specification ([1]) defines the LFS folder layout, so it should be safe to check for file
# existence.
# [1] https://github.com/git-lfs/git-lfs/blob/main/docs/spec.md#intercepting-git
cmd = "%s lfs ls-files -l %s" \
% (ud.basecmd, ud.revisions[name])
output = runfetchcmd(cmd, d, quiet=True, workdir=wd).rstrip()
# Do not do any further matching if no objects are managed by LFS
if not output:
return True

# Match all lines beginning with the hexadecimal OID
oid_regex = re.compile("^(([a-fA-F0-9]{2})([a-fA-F0-9]{2})[A-Fa-f0-9]+)")
for line in output.split("\n"):
oid = re.search(oid_regex, line)
if not oid:
bb.warn("git lfs ls-files output '%s' did not match expected format." % line)
if not os.path.exists(os.path.join(wd, "lfs", "objects", oid.group(2), oid.group(3), oid.group(1))):
return False

return True

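For reference, the per-object path checked above follows the Git LFS on-disk layout, lfs/objects/<oid[0:2]>/<oid[2:4]>/<oid>, inside the repository's git directory. A minimal sketch of that path construction, using a made-up OID purely for illustration:

    import os

    oid = "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2"  # made-up OID
    print(os.path.join("lfs", "objects", oid[0:2], oid[2:4], oid))
    # -> lfs/objects/f2/ca/f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2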
def _need_lfs(self, ud):
|
||||
return ud.parm.get("lfs", "1") == "1"
|
||||
|
||||
@@ -793,11 +648,13 @@ class Git(FetchMethod):
|
||||
Check if the repository has 'lfs' (large file) content
|
||||
"""
|
||||
|
||||
if ud.nobranch:
|
||||
# If no branch is specified, use the current git commit
|
||||
refname = self._build_revision(ud, d, ud.names[0])
|
||||
elif wd == ud.clonedir:
|
||||
# The bare clonedir doesn't use the remote names; it has the branch immediately.
|
||||
if not ud.nobranch:
|
||||
branchname = ud.branches[ud.names[0]]
|
||||
else:
|
||||
branchname = "master"
|
||||
|
||||
# The bare clonedir doesn't use the remote names; it has the branch immediately.
|
||||
if wd == ud.clonedir:
|
||||
refname = ud.branches[ud.names[0]]
|
||||
else:
|
||||
refname = "origin/%s" % ud.branches[ud.names[0]]
|
||||
@@ -840,6 +697,7 @@ class Git(FetchMethod):
|
||||
Return a unique key for the url
|
||||
"""
|
||||
# Collapse adjacent slashes
|
||||
slash_re = re.compile(r"/+")
|
||||
return "git:" + ud.host + slash_re.sub(".", ud.path) + ud.unresolvedrev[name]
|
||||
|
||||
def _lsremote(self, ud, d, search):
|
||||
@@ -872,11 +730,11 @@ class Git(FetchMethod):
|
||||
"""
|
||||
Compute the HEAD revision for the url
|
||||
"""
|
||||
if not d.getVar("__BBSRCREV_SEEN"):
|
||||
raise bb.fetch2.FetchError("Recipe uses a floating tag/branch '%s' for repo '%s' without a fixed SRCREV yet doesn't call bb.fetch2.get_srcrev() (use SRCPV in PV for OE)." % (ud.unresolvedrev[name], ud.host+ud.path))
|
||||
if not d.getVar("__BBSEENSRCREV"):
|
||||
raise bb.fetch2.FetchError("Recipe uses a floating tag/branch without a fixed SRCREV yet doesn't call bb.fetch2.get_srcrev() (use SRCPV in PV for OE).")
|
||||
|
||||
# Ensure we mark as not cached
|
||||
bb.fetch2.mark_recipe_nocache(d)
|
||||
bb.fetch2.get_autorev(d)
|
||||
|
||||
output = self._lsremote(ud, d, "")
|
||||
# Tags of the form ^{} may not work, need to fallback to other form
|
||||
@@ -902,42 +760,38 @@ class Git(FetchMethod):
|
||||
"""
|
||||
pupver = ('', '')
|
||||
|
||||
tagregex = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or r"(?P<pver>([0-9][\.|_]?)+)")
|
||||
try:
|
||||
output = self._lsremote(ud, d, "refs/tags/*")
|
||||
except (bb.fetch2.FetchError, bb.fetch2.NetworkAccess) as e:
|
||||
bb.note("Could not list remote: %s" % str(e))
|
||||
return pupver
|
||||
|
||||
rev_tag_re = re.compile(r"([0-9a-f]{40})\s+refs/tags/(.*)")
|
||||
pver_re = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or r"(?P<pver>([0-9][\.|_]?)+)")
|
||||
nonrel_re = re.compile(r"(alpha|beta|rc|final)+")
|
||||
|
||||
verstring = ""
|
||||
revision = ""
|
||||
for line in output.split("\n"):
|
||||
if not line:
|
||||
break
|
||||
|
||||
m = rev_tag_re.match(line)
|
||||
if not m:
|
||||
continue
|
||||
|
||||
(revision, tag) = m.groups()
|
||||
|
||||
tag_head = line.split("/")[-1]
|
||||
# Ignore non-released branches
|
||||
if nonrel_re.search(tag):
|
||||
m = re.search(r"(alpha|beta|rc|final)+", tag_head)
|
||||
if m:
|
||||
continue
|
||||
|
||||
# search for version in the line
|
||||
m = pver_re.search(tag)
|
||||
if not m:
|
||||
tag = tagregex.search(tag_head)
|
||||
if tag is None:
|
||||
continue
|
||||
|
||||
pver = m.group('pver').replace("_", ".")
|
||||
tag = tag.group('pver')
|
||||
tag = tag.replace("_", ".")
|
||||
|
||||
if verstring and bb.utils.vercmp(("0", pver, ""), ("0", verstring, "")) < 0:
|
||||
if verstring and bb.utils.vercmp(("0", tag, ""), ("0", verstring, "")) < 0:
|
||||
continue
|
||||
|
||||
verstring = pver
|
||||
verstring = tag
|
||||
revision = line.split()[0]
|
||||
pupver = (verstring, revision)
|
||||
|
||||
return pupver
|
||||
@@ -957,8 +811,9 @@ class Git(FetchMethod):
|
||||
commits = None
|
||||
else:
|
||||
if not os.path.exists(rev_file) or not os.path.getsize(rev_file):
|
||||
from pipes import quote
|
||||
commits = bb.fetch2.runfetchcmd(
|
||||
"git rev-list %s -- | wc -l" % shlex.quote(rev),
|
||||
"git rev-list %s -- | wc -l" % quote(rev),
|
||||
d, quiet=True).strip().lstrip('0')
|
||||
if commits:
|
||||
open(rev_file, "w").write("%d\n" % int(commits))
|
||||
|
||||
@@ -88,9 +88,9 @@ class GitSM(Git):
|
||||
subrevision[m] = module_hash.split()[2]
|
||||
|
||||
# Convert relative to absolute uri based on parent uri
|
||||
if uris[m].startswith('..') or uris[m].startswith('./'):
|
||||
if uris[m].startswith('..'):
|
||||
newud = copy.copy(ud)
|
||||
newud.path = os.path.normpath(os.path.join(newud.path, uris[m]))
|
||||
newud.path = os.path.realpath(os.path.join(newud.path, uris[m]))
|
||||
uris[m] = Git._get_repo_url(self, newud)
|
||||
|
||||
for module in submodules:
|
||||
@@ -115,21 +115,10 @@ class GitSM(Git):
|
||||
# This has to be a file reference
|
||||
proto = "file"
|
||||
url = "gitsm://" + uris[module]
|
||||
if url.endswith("{}{}".format(ud.host, ud.path)):
|
||||
raise bb.fetch2.FetchError("Submodule refers to the parent repository. This will cause deadlock situation in current version of Bitbake." \
|
||||
"Consider using git fetcher instead.")
|
||||
|
||||
url += ';protocol=%s' % proto
|
||||
url += ";name=%s" % module
|
||||
url += ";subpath=%s" % module
|
||||
url += ";nobranch=1"
|
||||
url += ";lfs=%s" % self._need_lfs(ud)
|
||||
# Note that adding "user=" here to give credentials to the
|
||||
# submodule is not supported. Since using SRC_URI to give git://
|
||||
# URL a password is not supported, one has to use one of the
# recommended ways (e.g. ~/.netrc or SSH config) which does specify
|
||||
# the user (See comment in git.py).
|
||||
# So, we will not take patches adding "user=" support here.
|
||||
|
||||
ld = d.createCopy()
|
||||
# Not necessary to set SRC_URI, since we're passing the URI to
|
||||
@@ -147,19 +136,6 @@ class GitSM(Git):
|
||||
|
||||
return submodules != []
|
||||
|
||||
def call_process_submodules(self, ud, d, extra_check, subfunc):
|
||||
# If we're using a shallow mirror tarball it needs to be
|
||||
# unpacked temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and extra_check:
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
try:
|
||||
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
|
||||
self.process_submodules(ud, tmpdir, subfunc, d)
|
||||
finally:
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, subfunc, d)
|
||||
|
||||
def need_update(self, ud, d):
|
||||
if Git.need_update(self, ud, d):
|
||||
return True
|
||||
@@ -177,7 +153,15 @@ class GitSM(Git):
|
||||
logger.error('gitsm: submodule update check failed: %s %s' % (type(e).__name__, str(e)))
|
||||
need_update_result = True
|
||||
|
||||
self.call_process_submodules(ud, d, not os.path.exists(ud.clonedir), need_update_submodule)
|
||||
# If we're using a shallow mirror tarball it needs to be unpacked
|
||||
# temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and not os.path.exists(ud.clonedir):
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
|
||||
self.process_submodules(ud, tmpdir, need_update_submodule, d)
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, need_update_submodule, d)
|
||||
|
||||
if need_update_list:
|
||||
logger.debug('gitsm: Submodules requiring update: %s' % (' '.join(need_update_list)))
|
||||
@@ -200,7 +184,16 @@ class GitSM(Git):
|
||||
raise
|
||||
|
||||
Git.download(self, ud, d)
|
||||
self.call_process_submodules(ud, d, self.need_update(ud, d), download_submodule)
|
||||
|
||||
# If we're using a shallow mirror tarball it needs to be unpacked
|
||||
# temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and self.need_update(ud, d):
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
|
||||
self.process_submodules(ud, tmpdir, download_submodule, d)
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, download_submodule, d)
|
||||
|
||||
def unpack(self, ud, destdir, d):
|
||||
def unpack_submodules(ud, url, module, modpath, workdir, d):
|
||||
@@ -214,10 +207,6 @@ class GitSM(Git):
|
||||
|
||||
try:
|
||||
newfetch = Fetch([url], d, cache=False)
|
||||
# modpath is needed by unpack tracer to calculate submodule
|
||||
# checkout dir
|
||||
new_ud = newfetch.ud[url]
|
||||
new_ud.modpath = modpath
|
||||
newfetch.unpack(root=os.path.dirname(os.path.join(repo_conf, 'modules', module)))
|
||||
except Exception as e:
|
||||
logger.error('gitsm: submodule unpack failed: %s %s' % (type(e).__name__, str(e)))
|
||||
@@ -243,25 +232,10 @@ class GitSM(Git):
|
||||
ret = self.process_submodules(ud, ud.destdir, unpack_submodules, d)
|
||||
|
||||
if not ud.bareclone and ret:
|
||||
# All submodules should already be downloaded and configured in the tree. This simply
|
||||
# sets up the configuration and checks out the files. The main project config should
|
||||
# remain unmodified, and no download from the internet should occur. As such, lfs smudge
|
||||
# should also be skipped as these files were already smudged in the fetch stage if lfs
|
||||
# was enabled.
|
||||
runfetchcmd("GIT_LFS_SKIP_SMUDGE=1 %s submodule update --recursive --no-fetch" % (ud.basecmd), d, quiet=True, workdir=ud.destdir)
|
||||
def clean(self, ud, d):
|
||||
def clean_submodule(ud, url, module, modpath, workdir, d):
|
||||
url += ";bareclone=1;nobranch=1"
|
||||
try:
|
||||
newfetch = Fetch([url], d, cache=False)
|
||||
newfetch.clean()
|
||||
except Exception as e:
|
||||
logger.warning('gitsm: submodule clean failed: %s %s' % (type(e).__name__, str(e)))
|
||||
|
||||
self.call_process_submodules(ud, d, True, clean_submodule)
|
||||
|
||||
# Clean top git dir
|
||||
Git.clean(self, ud, d)
|
||||
# All submodules should already be downloaded and configured in the tree. This simply sets
|
||||
# up the configuration and checks out the files. The main project config should remain
|
||||
# unmodified, and no download from the internet should occur.
|
||||
runfetchcmd("%s submodule update --recursive --no-fetch" % (ud.basecmd), d, quiet=True, workdir=ud.destdir)
|
||||
|
||||
def implicit_urldata(self, ud, d):
|
||||
import shutil, subprocess, tempfile
|
||||
@@ -272,6 +246,14 @@ class GitSM(Git):
|
||||
newfetch = Fetch([url], d, cache=False)
|
||||
urldata.extend(newfetch.expanded_urldata())
|
||||
|
||||
self.call_process_submodules(ud, d, ud.method.need_update(ud, d), add_submodule)
|
||||
# If we're using a shallow mirror tarball it needs to be unpacked
|
||||
# temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and ud.method.need_update(ud, d):
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
subprocess.check_call("tar -xzf %s" % ud.fullshallow, cwd=tmpdir, shell=True)
|
||||
self.process_submodules(ud, tmpdir, add_submodule, d)
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, add_submodule, d)
|
||||
|
||||
return urldata
|
||||
|
||||
@@ -1,265 +0,0 @@
"""
BitBake 'Fetch' implementation for Go modules

The gomod/gomodgit fetchers are used to download Go modules to the module cache
from a module proxy or directly from a version control repository.

Example SRC_URI:

SRC_URI += "gomod://golang.org/x/net;version=v0.9.0;sha256sum=..."
SRC_URI += "gomodgit://golang.org/x/net;version=v0.9.0;repo=go.googlesource.com/net;srcrev=..."

Required SRC_URI parameters:

- version
The version of the module.

Optional SRC_URI parameters:

- mod
Fetch and unpack the go.mod file only instead of the complete module.
The go command may need to download go.mod files for many different modules
when computing the build list, and go.mod files are much smaller than
module zip files.
The default is "0", set mod=1 for the go.mod file only.

- sha256sum
The checksum of the module zip file, or the go.mod file in case of fetching
only the go.mod file. Alternatively, set the SRC_URI variable flag for
"module@version.sha256sum".

- protocol
The method used when fetching directly from a version control repository.
The default is "https" for git.

- repo
The URL when fetching directly from a version control repository. Required
when the URL is different from the module path.

- srcrev
The revision identifier used when fetching directly from a version control
repository. Alternatively, set the SRCREV variable for "module@version".

- subdir
The module subdirectory when fetching directly from a version control
repository. Required when the module is not located in the root of the
repository.

Related variables:

- GO_MOD_PROXY
The module proxy used by the fetcher.

- GO_MOD_CACHE_DIR
The directory where the module cache is located.
This must match the exported GOMODCACHE variable for the go command to find
the downloaded modules.

See the Go modules reference, https://go.dev/ref/mod, for more information
about the module cache, module proxies and version control systems.
"""

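For the proxy-based gomod example above, the fetcher ends up populating the module cache roughly as follows (illustrative paths only, assuming the default GO_MOD_CACHE_DIR of "pkg/mod" and the default proxy; this mirrors the cache/download layout described in the class docstrings below):

    pkg/mod/cache/download/golang.org/x/net/@v/v0.9.0.zip
    pkg/mod/cache/download/golang.org/x/net/@v/v0.9.0.mod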
import hashlib
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import zipfile
|
||||
|
||||
import bb
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import MissingParameterError
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import subprocess_setup
|
||||
from bb.fetch2.git import Git
|
||||
from bb.fetch2.wget import Wget
|
||||
|
||||
|
||||
def escape(path):
"""Escape capital letters using exclamation points."""
return re.sub(r'([A-Z])', lambda m: '!' + m.group(1).lower(), path)


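A quick illustration (not part of the diff) of the case-encoding rule escape() implements, assuming the function above is in scope; the module paths below are made up for the example:

    # Capital letters become '!' followed by the lowercase letter.
    print(escape("github.com/BurntSushi/toml"))  # -> github.com/!burnt!sushi/toml
    print(escape("golang.org/x/net"))            # -> golang.org/x/net (nothing to escape)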
class GoMod(Wget):
|
||||
"""Class to fetch Go modules from a Go module proxy via wget"""
|
||||
|
||||
def supports(self, ud, d):
|
||||
"""Check to see if a given URL is for this fetcher."""
|
||||
return ud.type == 'gomod'
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
"""Set up to download the module from the module proxy.
|
||||
|
||||
Set up to download the module zip file to the module cache directory
|
||||
and unpack the go.mod file (unless downloading only the go.mod file):
|
||||
|
||||
cache/download/<module>/@v/<version>.zip: The module zip file.
|
||||
cache/download/<module>/@v/<version>.mod: The go.mod file.
|
||||
"""
|
||||
|
||||
proxy = d.getVar('GO_MOD_PROXY') or 'proxy.golang.org'
|
||||
moddir = d.getVar('GO_MOD_CACHE_DIR') or 'pkg/mod'
|
||||
|
||||
if 'version' not in ud.parm:
|
||||
raise MissingParameterError('version', ud.url)
|
||||
|
||||
module = ud.host
|
||||
if ud.path != '/':
|
||||
module += ud.path
|
||||
ud.parm['module'] = module
|
||||
|
||||
# Set URL and filename for wget download
|
||||
path = escape(module + '/@v/' + ud.parm['version'])
|
||||
if ud.parm.get('mod', '0') == '1':
|
||||
path += '.mod'
|
||||
else:
|
||||
path += '.zip'
|
||||
ud.parm['unpack'] = '0'
|
||||
ud.url = bb.fetch2.encodeurl(
|
||||
('https', proxy, '/' + path, None, None, None))
|
||||
ud.parm['downloadfilename'] = path
|
||||
|
||||
ud.parm['name'] = f"{module}@{ud.parm['version']}"
|
||||
|
||||
# Set subdir for unpack
|
||||
ud.parm['subdir'] = os.path.join(moddir, 'cache/download',
|
||||
os.path.dirname(path))
|
||||
|
||||
super().urldata_init(ud, d)
|
||||
|
||||
def unpack(self, ud, rootdir, d):
|
||||
"""Unpack the module in the module cache."""
|
||||
|
||||
# Unpack the module zip file or go.mod file
|
||||
super().unpack(ud, rootdir, d)
|
||||
|
||||
if ud.localpath.endswith('.zip'):
|
||||
# Unpack the go.mod file from the zip file
|
||||
module = ud.parm['module']
|
||||
unpackdir = os.path.join(rootdir, ud.parm['subdir'])
|
||||
name = os.path.basename(ud.localpath).rsplit('.', 1)[0] + '.mod'
|
||||
bb.note(f"Unpacking {name} to {unpackdir}/")
|
||||
with zipfile.ZipFile(ud.localpath) as zf:
|
||||
with open(os.path.join(unpackdir, name), mode='wb') as mf:
|
||||
try:
|
||||
f = module + '@' + ud.parm['version'] + '/go.mod'
|
||||
shutil.copyfileobj(zf.open(f), mf)
|
||||
except KeyError:
|
||||
# If the module does not have a go.mod file, synthesize
|
||||
# one containing only a module statement.
|
||||
mf.write(f'module {module}\n'.encode())
|
||||
|
||||
|
||||
class GoModGit(Git):
|
||||
"""Class to fetch Go modules directly from a git repository"""
|
||||
|
||||
def supports(self, ud, d):
|
||||
"""Check to see if a given URL is for this fetcher."""
|
||||
return ud.type == 'gomodgit'
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
"""Set up to download the module from the git repository.
|
||||
|
||||
Set up to download the git repository to the module cache directory and
|
||||
unpack the module zip file and the go.mod file:
|
||||
|
||||
cache/vcs/<hash>: The bare git repository.
|
||||
cache/download/<module>/@v/<version>.zip: The module zip file.
|
||||
cache/download/<module>/@v/<version>.mod: The go.mod file.
|
||||
"""
|
||||
|
||||
moddir = d.getVar('GO_MOD_CACHE_DIR') or 'pkg/mod'
|
||||
|
||||
if 'version' not in ud.parm:
|
||||
raise MissingParameterError('version', ud.url)
|
||||
|
||||
module = ud.host
|
||||
if ud.path != '/':
|
||||
module += ud.path
|
||||
ud.parm['module'] = module
|
||||
|
||||
# Set host, path and srcrev for git download
|
||||
if 'repo' in ud.parm:
|
||||
repo = ud.parm['repo']
|
||||
idx = repo.find('/')
|
||||
if idx != -1:
|
||||
ud.host = repo[:idx]
|
||||
ud.path = repo[idx:]
|
||||
else:
|
||||
ud.host = repo
|
||||
ud.path = ''
|
||||
if 'protocol' not in ud.parm:
|
||||
ud.parm['protocol'] = 'https'
|
||||
name = f"{module}@{ud.parm['version']}"
|
||||
ud.names = [name]
|
||||
srcrev = d.getVar('SRCREV_' + name)
|
||||
if srcrev:
|
||||
if 'srcrev' not in ud.parm:
|
||||
ud.parm['srcrev'] = srcrev
|
||||
else:
|
||||
if 'srcrev' in ud.parm:
|
||||
d.setVar('SRCREV_' + name, ud.parm['srcrev'])
|
||||
if 'branch' not in ud.parm:
|
||||
ud.parm['nobranch'] = '1'
|
||||
|
||||
# Set subpath, subdir and bareclone for git unpack
|
||||
if 'subdir' in ud.parm:
|
||||
ud.parm['subpath'] = ud.parm['subdir']
|
||||
key = f"git3:{ud.parm['protocol']}://{ud.host}{ud.path}".encode()
|
||||
ud.parm['key'] = key
|
||||
ud.parm['subdir'] = os.path.join(moddir, 'cache/vcs',
|
||||
hashlib.sha256(key).hexdigest())
|
||||
ud.parm['bareclone'] = '1'
|
||||
|
||||
super().urldata_init(ud, d)
|
||||
|
||||
def unpack(self, ud, rootdir, d):
|
||||
"""Unpack the module in the module cache."""
|
||||
|
||||
# Unpack the bare git repository
|
||||
super().unpack(ud, rootdir, d)
|
||||
|
||||
moddir = d.getVar('GO_MOD_CACHE_DIR') or 'pkg/mod'
|
||||
|
||||
# Create the info file
|
||||
module = ud.parm['module']
|
||||
repodir = os.path.join(rootdir, ud.parm['subdir'])
|
||||
with open(repodir + '.info', 'wb') as f:
|
||||
f.write(ud.parm['key'])
|
||||
|
||||
# Unpack the go.mod file from the repository
|
||||
unpackdir = os.path.join(rootdir, moddir, 'cache/download',
|
||||
escape(module), '@v')
|
||||
bb.utils.mkdirhier(unpackdir)
|
||||
srcrev = ud.parm['srcrev']
|
||||
version = ud.parm['version']
|
||||
escaped_version = escape(version)
|
||||
cmd = f"git ls-tree -r --name-only '{srcrev}'"
|
||||
if 'subpath' in ud.parm:
|
||||
cmd += f" '{ud.parm['subpath']}'"
|
||||
files = runfetchcmd(cmd, d, workdir=repodir).split()
|
||||
name = escaped_version + '.mod'
|
||||
bb.note(f"Unpacking {name} to {unpackdir}/")
|
||||
with open(os.path.join(unpackdir, name), mode='wb') as mf:
|
||||
f = 'go.mod'
|
||||
if 'subpath' in ud.parm:
|
||||
f = os.path.join(ud.parm['subpath'], f)
|
||||
if f in files:
|
||||
cmd = ['git', 'cat-file', 'blob', srcrev + ':' + f]
|
||||
subprocess.check_call(cmd, stdout=mf, cwd=repodir,
|
||||
preexec_fn=subprocess_setup)
|
||||
else:
|
||||
# If the module does not have a go.mod file, synthesize one
|
||||
# containing only a module statement.
|
||||
mf.write(f'module {module}\n'.encode())
|
||||
|
||||
# Synthesize the module zip file from the repository
|
||||
name = escaped_version + '.zip'
|
||||
bb.note(f"Unpacking {name} to {unpackdir}/")
|
||||
with zipfile.ZipFile(os.path.join(unpackdir, name), mode='w') as zf:
|
||||
prefix = module + '@' + version + '/'
|
||||
for f in files:
|
||||
cmd = ['git', 'cat-file', 'blob', srcrev + ':' + f]
|
||||
data = subprocess.check_output(cmd, cwd=repodir,
|
||||
preexec_fn=subprocess_setup)
|
||||
zf.writestr(prefix + f, data)
|
||||
@@ -242,7 +242,6 @@ class Hg(FetchMethod):
|
||||
revflag = "-r %s" % ud.revision
|
||||
subdir = ud.parm.get("destsuffix", ud.module)
|
||||
codir = "%s/%s" % (destdir, subdir)
|
||||
ud.unpack_tracer.unpack("hg", codir)
|
||||
|
||||
scmdata = ud.parm.get("scmdata", "")
|
||||
if scmdata != "nokeep":
|
||||
|
||||
@@ -41,9 +41,9 @@ class Local(FetchMethod):
|
||||
"""
|
||||
Return the local filename of a given url assuming a successful fetch.
|
||||
"""
|
||||
return self.localfile_searchpaths(urldata, d)[-1]
|
||||
return self.localpaths(urldata, d)[-1]
|
||||
|
||||
def localfile_searchpaths(self, urldata, d):
|
||||
def localpaths(self, urldata, d):
|
||||
"""
|
||||
Return the local filename of a given url assuming a successful fetch.
|
||||
"""
|
||||
@@ -51,14 +51,18 @@ class Local(FetchMethod):
|
||||
path = urldata.decodedurl
|
||||
newpath = path
|
||||
if path[0] == "/":
|
||||
logger.debug2("Using absolute %s" % (path))
|
||||
return [path]
|
||||
filespath = d.getVar('FILESPATH')
|
||||
if filespath:
|
||||
logger.debug2("Searching for %s in paths:\n %s" % (path, "\n ".join(filespath.split(":"))))
|
||||
newpath, hist = bb.utils.which(filespath, path, history=True)
|
||||
logger.debug2("Using %s for %s" % (newpath, path))
|
||||
searched.extend(hist)
|
||||
if not os.path.exists(newpath):
|
||||
dldirfile = os.path.join(d.getVar("DL_DIR"), path)
|
||||
logger.debug2("Defaulting to %s for %s" % (dldirfile, path))
|
||||
bb.utils.mkdirhier(os.path.dirname(dldirfile))
|
||||
searched.append(dldirfile)
|
||||
return searched
|
||||
return searched
|
||||
|
||||
def need_update(self, ud, d):
|
||||
@@ -74,7 +78,9 @@ class Local(FetchMethod):
|
||||
filespath = d.getVar('FILESPATH')
|
||||
if filespath:
|
||||
locations = filespath.split(":")
|
||||
msg = "Unable to find file " + urldata.url + " anywhere to download to " + urldata.localpath + ". The paths that were searched were:\n " + "\n ".join(locations)
|
||||
locations.append(d.getVar("DL_DIR"))
|
||||
|
||||
msg = "Unable to find file " + urldata.url + " anywhere. The paths that were searched were:\n " + "\n ".join(locations)
|
||||
raise FetchError(msg)
|
||||
|
||||
return True
|
||||
|
||||
@@ -42,15 +42,11 @@ from bb.utils import is_semver

def npm_package(package):
"""Convert the npm package name to remove unsupported characters"""
# For scoped package names ('@user/package') the '/' is replaced by a '-'.
# This is similar to what 'npm pack' does, but 'npm pack' also strips the
# leading '@', which can lead to ambiguous package names.
name = re.sub("/", "-", package)
name = name.lower()
name = re.sub(r"[^\-a-z0-9@]", "", name)
name = name.strip("-")
return name

# Scoped package names (with the @) use the same naming convention
# as the 'npm pack' command.
if package.startswith("@"):
return re.sub("/", "-", package[1:])
return package

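To make the behavioural difference between the two npm_package() variants above concrete, here is a hypothetical scoped package name run through each (example values, not from the diff):

    # Newer variant (keeps the leading '@', lowercases, strips unsupported characters):
    #   npm_package("@Types/Node")  -> "@types-node"
    # Older 'npm pack' style variant (strips the leading '@', keeps case):
    #   npm_package("@Types/Node")  -> "Types-Node"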
def npm_filename(package, version):
|
||||
"""Get the filename of a npm package"""
|
||||
@@ -107,7 +103,6 @@ class NpmEnvironment(object):
|
||||
"""Run npm command in a controlled environment"""
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
d = bb.data.createCopy(self.d)
|
||||
d.setVar("PATH", d.getVar("PATH")) # PATH might contain $HOME - evaluate it before patching
|
||||
d.setVar("HOME", tmpdir)
|
||||
|
||||
if not workdir:
|
||||
@@ -161,7 +156,7 @@ class Npm(FetchMethod):
|
||||
raise ParameterError("Invalid 'version' parameter", ud.url)
|
||||
|
||||
# Extract the 'registry' part of the url
|
||||
ud.registry = re.sub(r"^npm://", "https://", ud.url.split(";")[0])
|
||||
ud.registry = re.sub(r"^npm://", "http://", ud.url.split(";")[0])
|
||||
|
||||
# Using the 'downloadfilename' parameter as local filename
|
||||
# or the npm package name.
|
||||
@@ -299,7 +294,6 @@ class Npm(FetchMethod):
|
||||
destsuffix = ud.parm.get("destsuffix", "npm")
|
||||
destdir = os.path.join(rootdir, destsuffix)
|
||||
npm_unpack(ud.localpath, destdir, d)
|
||||
ud.unpack_tracer.unpack("npm", destdir)
|
||||
|
||||
def clean(self, ud, d):
|
||||
"""Clean any existing full or partial download"""
|
||||
|
||||
@@ -37,26 +37,24 @@ def foreach_dependencies(shrinkwrap, callback=None, dev=False):
"""
Run a callback for each dependency of a shrinkwrap file.
The callback uses the format:
callback(name, data, location)
callback(name, params, deptree)
with:
name = the package name (string)
data = the package data (dictionary)
location = the location of the package (string)
params = the package parameters (dictionary)
deptree = the package dependency tree (array of strings)
"""
packages = shrinkwrap.get("packages")
|
||||
if not packages:
|
||||
raise FetchError("Invalid shrinkwrap file format")
|
||||
def _walk_deps(deps, deptree):
|
||||
for name in deps:
|
||||
subtree = [*deptree, name]
|
||||
_walk_deps(deps[name].get("dependencies", {}), subtree)
|
||||
if callback is not None:
|
||||
if deps[name].get("dev", False) and not dev:
|
||||
continue
|
||||
elif deps[name].get("bundled", False):
|
||||
continue
|
||||
callback(name, deps[name], subtree)
|
||||
|
||||
for location, data in packages.items():
|
||||
# Skip empty main and local link target packages
|
||||
if not location.startswith('node_modules/'):
|
||||
continue
|
||||
elif not dev and data.get("dev", False):
|
||||
continue
|
||||
elif data.get("inBundle", False):
|
||||
continue
|
||||
name = location.split('node_modules/')[-1]
|
||||
callback(name, data, location)
|
||||
_walk_deps(shrinkwrap.get("dependencies", {}), [])
|
||||
|
||||
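A minimal sketch (not part of the diff) of a callback matching the newer callback(name, data, location) signature documented in the foreach_dependencies() docstring above, assuming that function is importable; the lockfile path is hypothetical:

    import json

    with open("npm-shrinkwrap.json") as f:  # hypothetical lockfile path
        shrinkwrap = json.load(f)

    def show_dep(name, data, location):
        # 'data' is the per-package dictionary from the lockfile's "packages" map
        print(name, data.get("version"), location)

    foreach_dependencies(shrinkwrap, callback=show_dep, dev=False)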
class NpmShrinkWrap(FetchMethod):
|
||||
"""Class to fetch all package from a shrinkwrap file"""
|
||||
@@ -77,24 +75,20 @@ class NpmShrinkWrap(FetchMethod):
|
||||
# Resolve the dependencies
|
||||
ud.deps = []
|
||||
|
||||
def _resolve_dependency(name, params, destsuffix):
|
||||
def _resolve_dependency(name, params, deptree):
|
||||
url = None
|
||||
localpath = None
|
||||
extrapaths = []
|
||||
destsubdirs = [os.path.join("node_modules", dep) for dep in deptree]
|
||||
destsuffix = os.path.join(*destsubdirs)
|
||||
unpack = True
|
||||
|
||||
integrity = params.get("integrity")
|
||||
resolved = params.get("resolved")
|
||||
version = params.get("version")
|
||||
link = params.get("link", False)
|
||||
|
||||
# Handle link sources
|
||||
if link:
|
||||
localpath = resolved
|
||||
unpack = False
|
||||
integrity = params.get("integrity", None)
|
||||
resolved = params.get("resolved", None)
|
||||
version = params.get("version", None)
|
||||
|
||||
# Handle registry sources
|
||||
elif version and is_semver(version) and integrity:
|
||||
if is_semver(version) and integrity:
|
||||
# Handle duplicate dependencies without url
|
||||
if not resolved:
|
||||
return
|
||||
@@ -122,10 +116,10 @@ class NpmShrinkWrap(FetchMethod):
|
||||
extrapaths.append(resolvefile)
|
||||
|
||||
# Handle http tarball sources
|
||||
elif resolved.startswith("http") and integrity:
|
||||
localfile = npm_localfile(os.path.basename(resolved))
|
||||
elif version.startswith("http") and integrity:
|
||||
localfile = npm_localfile(os.path.basename(version))
|
||||
|
||||
uri = URI(resolved)
|
||||
uri = URI(version)
|
||||
uri.params["downloadfilename"] = localfile
|
||||
|
||||
checksum_name, checksum_expected = npm_integrity(integrity)
|
||||
@@ -135,12 +129,10 @@ class NpmShrinkWrap(FetchMethod):
|
||||
|
||||
localpath = os.path.join(d.getVar("DL_DIR"), localfile)
|
||||
|
||||
# Handle local tarball sources
|
||||
elif resolved.startswith("file"):
|
||||
localpath = resolved[5:]
|
||||
|
||||
# Handle git sources
|
||||
elif resolved.startswith("git"):
|
||||
elif version.startswith("git"):
|
||||
if version.startswith("github:"):
|
||||
version = "git+https://github.com/" + version[len("github:"):]
|
||||
regex = re.compile(r"""
|
||||
^
|
||||
git\+
|
||||
@@ -152,26 +144,30 @@ class NpmShrinkWrap(FetchMethod):
|
||||
$
|
||||
""", re.VERBOSE)
|
||||
|
||||
match = regex.match(resolved)
|
||||
match = regex.match(version)
|
||||
|
||||
if not match:
|
||||
raise ParameterError("Invalid git url: %s" % resolved, ud.url)
|
||||
raise ParameterError("Invalid git url: %s" % version, ud.url)
|
||||
|
||||
groups = match.groupdict()
|
||||
|
||||
uri = URI("git://" + str(groups["url"]))
|
||||
uri.params["protocol"] = str(groups["protocol"])
|
||||
uri.params["rev"] = str(groups["rev"])
|
||||
uri.params["nobranch"] = "1"
|
||||
uri.params["destsuffix"] = destsuffix
|
||||
|
||||
url = str(uri)
|
||||
|
||||
# Handle local tarball and link sources
|
||||
elif version.startswith("file"):
|
||||
localpath = version[5:]
|
||||
if not version.endswith(".tgz"):
|
||||
unpack = False
|
||||
|
||||
else:
|
||||
raise ParameterError("Unsupported dependency: %s" % name, ud.url)
|
||||
|
||||
# name is needed by unpack tracer for module mapping
|
||||
ud.deps.append({
|
||||
"name": name,
|
||||
"url": url,
|
||||
"localpath": localpath,
|
||||
"extrapaths": extrapaths,
|
||||
@@ -197,23 +193,19 @@ class NpmShrinkWrap(FetchMethod):
|
||||
# This fetcher resolves multiple URIs from a shrinkwrap file and then
|
||||
# forwards it to a proxy fetcher. The management of the donestamp file,
|
||||
# the lockfile and the checksums are forwarded to the proxy fetcher.
|
||||
shrinkwrap_urls = [dep["url"] for dep in ud.deps if dep["url"]]
|
||||
if shrinkwrap_urls:
|
||||
ud.proxy = Fetch(shrinkwrap_urls, data)
|
||||
ud.proxy = Fetch([dep["url"] for dep in ud.deps if dep["url"]], data)
|
||||
ud.needdonestamp = False
|
||||
|
||||
@staticmethod
|
||||
def _foreach_proxy_method(ud, handle):
|
||||
returns = []
|
||||
# Check if there are dependencies before trying to fetch them
|
||||
if len(ud.deps) > 0:
|
||||
for proxy_url in ud.proxy.urls:
|
||||
proxy_ud = ud.proxy.ud[proxy_url]
|
||||
proxy_d = ud.proxy.d
|
||||
proxy_ud.setup_localpath(proxy_d)
|
||||
lf = lockfile(proxy_ud.lockfile)
|
||||
returns.append(handle(proxy_ud.method, proxy_ud, proxy_d))
|
||||
unlockfile(lf)
|
||||
for proxy_url in ud.proxy.urls:
|
||||
proxy_ud = ud.proxy.ud[proxy_url]
|
||||
proxy_d = ud.proxy.d
|
||||
proxy_ud.setup_localpath(proxy_d)
|
||||
lf = lockfile(proxy_ud.lockfile)
|
||||
returns.append(handle(proxy_ud.method, proxy_ud, proxy_d))
|
||||
unlockfile(lf)
|
||||
return returns
|
||||
|
||||
def verify_donestamp(self, ud, d):
|
||||
@@ -246,11 +238,10 @@ class NpmShrinkWrap(FetchMethod):
|
||||
|
||||
def unpack(self, ud, rootdir, d):
|
||||
"""Unpack the downloaded dependencies"""
|
||||
destdir = rootdir
|
||||
destdir = d.getVar("S")
|
||||
destsuffix = ud.parm.get("destsuffix")
|
||||
if destsuffix:
|
||||
destdir = os.path.join(rootdir, destsuffix)
|
||||
ud.unpack_tracer.unpack("npm-shrinkwrap", destdir)
|
||||
|
||||
bb.utils.mkdirhier(destdir)
|
||||
bb.utils.copyfile(ud.shrinkwrap_file,
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
"""
|
||||
@@ -11,7 +9,6 @@ Based on the svn "Fetch" implementation.
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import bb
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
@@ -39,7 +36,6 @@ class Osc(FetchMethod):
|
||||
# Create paths to osc checkouts
|
||||
oscdir = d.getVar("OSCDIR") or (d.getVar("DL_DIR") + "/osc")
|
||||
relpath = self._strip_leading_slashes(ud.path)
|
||||
ud.oscdir = oscdir
|
||||
ud.pkgdir = os.path.join(oscdir, ud.host)
|
||||
ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module)
|
||||
|
||||
@@ -47,13 +43,13 @@ class Osc(FetchMethod):
|
||||
ud.revision = ud.parm['rev']
|
||||
else:
|
||||
pv = d.getVar("PV", False)
|
||||
rev = bb.fetch2.srcrev_internal_helper(ud, d, '')
|
||||
rev = bb.fetch2.srcrev_internal_helper(ud, d)
|
||||
if rev:
|
||||
ud.revision = rev
|
||||
else:
|
||||
ud.revision = ""
|
||||
|
||||
ud.localfile = d.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), relpath.replace('/', '.'), ud.revision))
|
||||
ud.localfile = d.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision))
|
||||
|
||||
def _buildosccommand(self, ud, d, command):
|
||||
"""
|
||||
@@ -63,49 +59,26 @@ class Osc(FetchMethod):
|
||||
|
||||
basecmd = d.getVar("FETCHCMD_osc") or "/usr/bin/env osc"
|
||||
|
||||
proto = ud.parm.get('protocol', 'https')
|
||||
proto = ud.parm.get('protocol', 'ocs')
|
||||
|
||||
options = []
|
||||
|
||||
config = "-c %s" % self.generate_config(ud, d)
|
||||
|
||||
if getattr(ud, 'revision', ''):
|
||||
if ud.revision:
|
||||
options.append("-r %s" % ud.revision)
|
||||
|
||||
coroot = self._strip_leading_slashes(ud.path)
|
||||
|
||||
if command == "fetch":
|
||||
osccmd = "%s %s -A %s://%s co %s/%s %s" % (basecmd, config, proto, ud.host, coroot, ud.module, " ".join(options))
|
||||
osccmd = "%s %s co %s/%s %s" % (basecmd, config, coroot, ud.module, " ".join(options))
|
||||
elif command == "update":
|
||||
osccmd = "%s %s -A %s://%s up %s" % (basecmd, config, proto, ud.host, " ".join(options))
|
||||
elif command == "api_source":
|
||||
osccmd = "%s %s -A %s://%s api source/%s/%s" % (basecmd, config, proto, ud.host, coroot, ud.module)
|
||||
osccmd = "%s %s up %s" % (basecmd, config, " ".join(options))
|
||||
else:
|
||||
raise FetchError("Invalid osc command %s" % command, ud.url)
|
||||
|
||||
return osccmd
|
||||
|
||||
def _latest_revision(self, ud, d, name):
|
||||
"""
|
||||
Fetch latest revision for the given package
|
||||
"""
|
||||
api_source_cmd = self._buildosccommand(ud, d, "api_source")
|
||||
|
||||
output = runfetchcmd(api_source_cmd, d)
|
||||
match = re.match(r'<directory ?.* rev="(\d+)".*>', output)
|
||||
if match is None:
|
||||
raise FetchError("Unable to parse osc response", ud.url)
|
||||
return match.groups()[0]
|
||||
|
||||
def _revision_key(self, ud, d, name):
|
||||
"""
|
||||
Return a unique key for the url
|
||||
"""
|
||||
# Collapse adjacent slashes
|
||||
slash_re = re.compile(r"/+")
|
||||
rev = getattr(ud, 'revision', "latest")
|
||||
return "osc:%s%s.%s.%s" % (ud.host, slash_re.sub(".", ud.path), name, rev)
|
||||
|
||||
def download(self, ud, d):
|
||||
"""
|
||||
Fetch url
|
||||
@@ -113,7 +86,7 @@ class Osc(FetchMethod):
|
||||
|
||||
logger.debug2("Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
|
||||
if os.access(ud.moddir, os.R_OK):
|
||||
if os.access(os.path.join(d.getVar('OSCDIR'), ud.path, ud.module), os.R_OK):
|
||||
oscupdatecmd = self._buildosccommand(ud, d, "update")
|
||||
logger.info("Update "+ ud.url)
|
||||
# update sources there
|
||||
@@ -141,23 +114,20 @@ class Osc(FetchMethod):
|
||||
Generate a .oscrc to be used for this run.
|
||||
"""
|
||||
|
||||
config_path = os.path.join(ud.oscdir, "oscrc")
|
||||
if not os.path.exists(ud.oscdir):
|
||||
bb.utils.mkdirhier(ud.oscdir)
|
||||
|
||||
config_path = os.path.join(d.getVar('OSCDIR'), "oscrc")
|
||||
if (os.path.exists(config_path)):
|
||||
os.remove(config_path)
|
||||
|
||||
f = open(config_path, 'w')
|
||||
proto = ud.parm.get('protocol', 'https')
|
||||
f.write("[general]\n")
|
||||
f.write("apiurl = %s://%s\n" % (proto, ud.host))
|
||||
f.write("apisrv = %s\n" % ud.host)
|
||||
f.write("scheme = http\n")
|
||||
f.write("su-wrapper = su -c\n")
|
||||
f.write("build-root = %s\n" % d.getVar('WORKDIR'))
|
||||
f.write("urllist = %s\n" % d.getVar("OSCURLLIST"))
|
||||
f.write("extra-pkgs = gzip\n")
|
||||
f.write("\n")
|
||||
f.write("[%s://%s]\n" % (proto, ud.host))
|
||||
f.write("[%s]\n" % ud.host)
|
||||
f.write("user = %s\n" % ud.parm["user"])
|
||||
f.write("pass = %s\n" % ud.parm["pswd"])
|
||||
f.close()
|
||||
|
||||
@@ -103,7 +103,7 @@ class SFTP(FetchMethod):
|
||||
if path[:3] == '/~/':
|
||||
path = path[3:]
|
||||
|
||||
remote = '"%s%s:%s"' % (user, urlo.hostname, path)
|
||||
remote = '%s%s:%s' % (user, urlo.hostname, path)
|
||||
|
||||
cmd = '%s %s %s %s' % (basecmd, port, remote, lpath)
|
||||
|
||||
|
||||
@@ -32,7 +32,6 @@ IETF secsh internet draft:
|
||||
|
||||
import re, os
|
||||
from bb.fetch2 import check_network_access, FetchMethod, ParameterError, runfetchcmd
|
||||
import urllib
|
||||
|
||||
|
||||
__pattern__ = re.compile(r'''
|
||||
@@ -71,7 +70,6 @@ class SSH(FetchMethod):
|
||||
"git:// prefix with protocol=ssh", urldata.url)
|
||||
m = __pattern__.match(urldata.url)
|
||||
path = m.group('path')
|
||||
path = urllib.parse.unquote(path)
|
||||
host = m.group('host')
|
||||
urldata.localpath = os.path.join(d.getVar('DL_DIR'),
|
||||
os.path.basename(os.path.normpath(path)))
|
||||
@@ -101,7 +99,7 @@ class SSH(FetchMethod):
|
||||
|
||||
if path[0] != '~':
|
||||
path = '/%s' % path
|
||||
path = urllib.parse.unquote(path)
|
||||
path = path.replace("%3A", ":")
|
||||
|
||||
fr += ':%s' % path
|
||||
|
||||
@@ -141,7 +139,7 @@ class SSH(FetchMethod):
|
||||
|
||||
if path[0] != '~':
|
||||
path = '/%s' % path
|
||||
path = urllib.parse.unquote(path)
|
||||
path = path.replace("%3A", ":")
|
||||
|
||||
cmd = 'ssh -o BatchMode=true %s %s [ -f %s ]' % (
|
||||
portarg,
|
||||
@@ -150,6 +148,8 @@ class SSH(FetchMethod):
|
||||
)
|
||||
|
||||
check_network_access(d, cmd, urldata.url)
|
||||
runfetchcmd(cmd, d)
|
||||
|
||||
return True
|
||||
if runfetchcmd(cmd, d):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
@@ -210,6 +210,3 @@ class Svn(FetchMethod):
|
||||
|
||||
def _build_revision(self, ud, d):
|
||||
return ud.revision
|
||||
|
||||
def supports_checksum(self, urldata):
|
||||
return False
|
||||
|
||||
@@ -26,6 +26,7 @@ from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import logger
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.utils import export_proxies
|
||||
from bs4 import BeautifulSoup
|
||||
from bs4 import SoupStrainer
|
||||
|
||||
@@ -53,6 +54,11 @@ class WgetProgressHandler(bb.progress.LineFilterProgressHandler):
|
||||
class Wget(FetchMethod):
|
||||
"""Class to fetch urls via 'wget'"""
|
||||
|
||||
# CDNs like CloudFlare may do a 'browser integrity test' which can fail
|
||||
# with the standard wget/urllib User-Agent, so pretend to be a modern
|
||||
# browser.
|
||||
user_agent = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:84.0) Gecko/20100101 Firefox/84.0"
|
||||
|
||||
def check_certs(self, d):
|
||||
"""
|
||||
Should certificates be checked?
|
||||
@@ -82,10 +88,7 @@ class Wget(FetchMethod):
|
||||
if not ud.localfile:
|
||||
ud.localfile = d.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", "."))
|
||||
|
||||
self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 100"
|
||||
|
||||
if ud.type == 'ftp' or ud.type == 'ftps':
|
||||
self.basecmd += " --passive-ftp"
|
||||
self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30 --passive-ftp"
|
||||
|
||||
if not self.check_certs(d):
|
||||
self.basecmd += " --no-check-certificate"
|
||||
@@ -103,10 +106,10 @@ class Wget(FetchMethod):
|
||||
|
||||
fetchcmd = self.basecmd
|
||||
|
||||
dldir = os.path.realpath(d.getVar("DL_DIR"))
|
||||
localpath = os.path.join(dldir, ud.localfile) + ".tmp"
|
||||
bb.utils.mkdirhier(os.path.dirname(localpath))
|
||||
fetchcmd += " -O %s" % shlex.quote(localpath)
|
||||
if 'downloadfilename' in ud.parm:
|
||||
localpath = os.path.join(d.getVar("DL_DIR"), ud.localfile)
|
||||
bb.utils.mkdirhier(os.path.dirname(localpath))
|
||||
fetchcmd += " -O %s" % shlex.quote(localpath)
|
||||
|
||||
if ud.user and ud.pswd:
|
||||
fetchcmd += " --auth-no-challenge"
|
||||
@@ -124,30 +127,21 @@ class Wget(FetchMethod):
|
||||
uri = ud.url.split(";")[0]
|
||||
if os.path.exists(ud.localpath):
|
||||
# file exists, but we didnt complete it.. trying again..
|
||||
fetchcmd += " -c -P " + dldir + " '" + uri + "'"
|
||||
fetchcmd += d.expand(" -c -P ${DL_DIR} '%s'" % uri)
|
||||
else:
|
||||
fetchcmd += " -P " + dldir + " '" + uri + "'"
|
||||
fetchcmd += d.expand(" -P ${DL_DIR} '%s'" % uri)
|
||||
|
||||
self._runwget(ud, d, fetchcmd, False)
|
||||
|
||||
# Sanity check since wget can pretend it succeed when it didn't
|
||||
# Also, this used to happen if sourceforge sent us to the mirror page
|
||||
if not os.path.exists(localpath):
|
||||
raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, localpath), uri)
|
||||
if not os.path.exists(ud.localpath):
|
||||
raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, ud.localpath), uri)
|
||||
|
||||
if os.path.getsize(localpath) == 0:
|
||||
os.remove(localpath)
|
||||
if os.path.getsize(ud.localpath) == 0:
|
||||
os.remove(ud.localpath)
|
||||
raise FetchError("The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right." % (uri), uri)
|
||||
|
||||
# Try and verify any checksum now, meaning if it isn't correct, we don't remove the
|
||||
# original file, which might be a race (imagine two recipes referencing the same
|
||||
# source, one with an incorrect checksum)
|
||||
bb.fetch2.verify_checksum(ud, d, localpath=localpath, fatal_nochecksum=False)
|
||||
|
||||
# Remove the ".tmp" and move the file into position atomically
|
||||
# Our lock prevents multiple writers but mirroring code may grab incomplete files
|
||||
os.rename(localpath, localpath[:-4])
|
||||
|
||||
return True
|
||||
|
||||
def checkstatus(self, fetch, ud, d, try_again=True):
|
||||
@@ -239,12 +233,7 @@ class Wget(FetchMethod):
|
||||
fetch.connection_cache.remove_connection(h.host, h.port)
|
||||
raise urllib.error.URLError(err)
|
||||
else:
|
||||
try:
|
||||
r = h.getresponse()
|
||||
except TimeoutError as e:
|
||||
if fetch.connection_cache:
|
||||
fetch.connection_cache.remove_connection(h.host, h.port)
|
||||
raise TimeoutError(e)
|
||||
r = h.getresponse()
|
||||
|
||||
# Pick apart the HTTPResponse object to get the addinfourl
|
||||
# object initialized properly.
|
||||
@@ -305,45 +294,13 @@ class Wget(FetchMethod):
|
||||
|
||||
class FixedHTTPRedirectHandler(urllib.request.HTTPRedirectHandler):
"""
urllib2.HTTPRedirectHandler before 3.13 has two flaws:

It resets the method to GET on redirect when we want to follow
redirects using the original method (typically HEAD). This was fixed
in 759e8e7.

It also doesn't handle 308 (Permanent Redirect). This was fixed in
c379bc5.

Until we depend on Python 3.13 onwards, copy the redirect_request
method to fix these issues.
urllib2.HTTPRedirectHandler resets the method to GET on redirect,
when we want to follow redirects using the original method.
"""
def redirect_request(self, req, fp, code, msg, headers, newurl):
|
||||
m = req.get_method()
|
||||
if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
|
||||
or code in (301, 302, 303) and m == "POST")):
|
||||
raise urllib.HTTPError(req.full_url, code, msg, headers, fp)
|
||||
|
||||
# Strictly (according to RFC 2616), 301 or 302 in response to
|
||||
# a POST MUST NOT cause a redirection without confirmation
|
||||
# from the user (of urllib.request, in this case). In practice,
|
||||
# essentially all clients do redirect in this case, so we do
|
||||
# the same.
|
||||
|
||||
# Be conciliant with URIs containing a space. This is mainly
|
||||
# redundant with the more complete encoding done in http_error_302(),
|
||||
# but it is kept for compatibility with other callers.
|
||||
newurl = newurl.replace(' ', '%20')
|
||||
|
||||
CONTENT_HEADERS = ("content-length", "content-type")
|
||||
newheaders = {k: v for k, v in req.headers.items()
|
||||
if k.lower() not in CONTENT_HEADERS}
|
||||
return urllib.request.Request(newurl,
|
||||
method="HEAD" if m == "HEAD" else "GET",
|
||||
headers=newheaders,
|
||||
origin_req_host=req.origin_req_host,
|
||||
unverifiable=True)
|
||||
|
||||
http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302
|
||||
newreq = urllib.request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, newurl)
|
||||
newreq.get_method = req.get_method
|
||||
return newreq
|
||||
|
||||
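A hedged sketch of how a redirect handler like the one above is typically installed via build_opener() for a HEAD-style status check; the URL and timeout below are made up for illustration and are not taken from the diff:

    import urllib.request

    # Install the custom redirect handler so redirects keep the HEAD method.
    opener = urllib.request.build_opener(FixedHTTPRedirectHandler)
    req = urllib.request.Request("https://example.com/downloads/foo.tar.gz", method="HEAD")
    with opener.open(req, timeout=30) as response:
        print(response.status, response.url)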
# We need to update the environment here as both the proxy and HTTPS
|
||||
# handlers need variables set. The proxy needs http_proxy and friends to
|
||||
@@ -376,14 +333,13 @@ class Wget(FetchMethod):
|
||||
opener = urllib.request.build_opener(*handlers)
|
||||
|
||||
try:
|
||||
parts = urllib.parse.urlparse(ud.url.split(";")[0])
|
||||
uri = "{}://{}{}".format(parts.scheme, parts.netloc, parts.path)
|
||||
uri = ud.url.split(";")[0]
|
||||
r = urllib.request.Request(uri)
|
||||
r.get_method = lambda: "HEAD"
|
||||
# Some servers (FusionForge, as used on Alioth) require that the
|
||||
# optional Accept header is set.
|
||||
r.add_header("Accept", "*/*")
|
||||
r.add_header("User-Agent", "bitbake/{}".format(bb.__version__))
|
||||
r.add_header("User-Agent", self.user_agent)
|
||||
def add_basic_auth(login_str, request):
|
||||
'''Adds Basic auth to http request, pass in login:password as string'''
|
||||
import base64
|
||||
@@ -396,22 +352,29 @@ class Wget(FetchMethod):
|
||||
|
||||
try:
|
||||
import netrc
|
||||
auth_data = netrc.netrc().authenticators(urllib.parse.urlparse(uri).hostname)
|
||||
if auth_data:
|
||||
login, _, password = auth_data
|
||||
add_basic_auth("%s:%s" % (login, password), r)
|
||||
except (FileNotFoundError, netrc.NetrcParseError):
|
||||
n = netrc.netrc()
|
||||
login, unused, password = n.authenticators(urllib.parse.urlparse(uri).hostname)
|
||||
add_basic_auth("%s:%s" % (login, password), r)
|
||||
except (TypeError, ImportError, IOError, netrc.NetrcParseError):
|
||||
pass
|
||||
|
||||
with opener.open(r, timeout=100) as response:
|
||||
with opener.open(r, timeout=30) as response:
|
||||
pass
|
||||
except (urllib.error.URLError, ConnectionResetError, TimeoutError) as e:
|
||||
except urllib.error.URLError as e:
|
||||
if try_again:
|
||||
logger.debug2("checkstatus: trying again")
|
||||
return self.checkstatus(fetch, ud, d, False)
|
||||
else:
|
||||
# debug for now to avoid spamming the logs in e.g. remote sstate searches
|
||||
logger.debug2("checkstatus() urlopen failed for %s: %s" % (uri,e))
|
||||
logger.debug2("checkstatus() urlopen failed: %s" % e)
|
||||
return False
|
||||
except ConnectionResetError as e:
|
||||
if try_again:
|
||||
logger.debug2("checkstatus: trying again")
|
||||
return self.checkstatus(fetch, ud, d, False)
|
||||
else:
|
||||
# debug for now to avoid spamming the logs in e.g. remote sstate searches
|
||||
logger.debug2("checkstatus() urlopen failed: %s" % e)
|
||||
return False
|
||||
|
||||
return True
|
||||
@@ -490,7 +453,7 @@ class Wget(FetchMethod):
|
||||
f = tempfile.NamedTemporaryFile()
|
||||
with tempfile.TemporaryDirectory(prefix="wget-index-") as workdir, tempfile.NamedTemporaryFile(dir=workdir, prefix="wget-listing-") as f:
|
||||
fetchcmd = self.basecmd
|
||||
fetchcmd += " -O " + f.name + " '" + uri + "'"
|
||||
fetchcmd += " -O " + f.name + " --user-agent='" + self.user_agent + "' '" + uri + "'"
|
||||
try:
|
||||
self._runwget(ud, d, fetchcmd, True, workdir=workdir)
|
||||
fetchresult = f.read()
|
||||
@@ -673,10 +636,10 @@ class Wget(FetchMethod):
|
||||
# search for version matches on folders inside the path, like:
|
||||
# "5.7" in http://download.gnome.org/sources/${PN}/5.7/${PN}-${PV}.tar.gz
|
||||
dirver_regex = re.compile(r"(?P<dirver>[^/]*(\d+\.)*\d+([-_]r\d+)*)/")
|
||||
m = dirver_regex.findall(path)
|
||||
m = dirver_regex.search(path)
|
||||
if m:
|
||||
pn = d.getVar('PN')
|
||||
dirver = m[-1][0]
|
||||
dirver = m.group('dirver')
|
||||
|
||||
dirver_pn_regex = re.compile(r"%s\d?" % (re.escape(pn)))
|
||||
if not dirver_pn_regex.search(dirver):
|
||||
|
||||
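A quick illustration of the findall()/search() difference touched by this hunk, using a made-up download path with two version-like directories: findall() lets the code pick the last match, while search() only ever returns the first.

    import re

    dirver_regex = re.compile(r"(?P<dirver>[^/]*(\d+\.)*\d+([-_]r\d+)*)/")
    path = "/releases/1.0/libsoup/2.74/"   # hypothetical path

    all_matches = dirver_regex.findall(path)             # every version-like directory
    first_match = dirver_regex.search(path)              # only the first one

    print(all_matches[-1][0] if all_matches else None)           # -> "2.74"
    print(first_match.group('dirver') if first_match else None)  # -> "1.0"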
@@ -12,12 +12,11 @@
|
||||
import os
|
||||
import sys
|
||||
import logging
|
||||
import argparse
|
||||
import optparse
|
||||
import warnings
|
||||
import fcntl
|
||||
import time
|
||||
import traceback
|
||||
import datetime
|
||||
|
||||
import bb
|
||||
from bb import event
|
||||
@@ -44,18 +43,18 @@ def present_options(optionlist):
|
||||
else:
|
||||
return optionlist[0]
|
||||
|
||||
class BitbakeHelpFormatter(argparse.HelpFormatter):
|
||||
def _get_help_string(self, action):
|
||||
class BitbakeHelpFormatter(optparse.IndentedHelpFormatter):
|
||||
def format_option(self, option):
|
||||
# We need to do this here rather than in the text we supply to
|
||||
# add_option() because we don't want to call list_extension_modules()
|
||||
# on every execution (since it imports all of the modules)
|
||||
# Note also that we modify option.help rather than the returned text
|
||||
# - this is so that we don't have to re-format the text ourselves
|
||||
if action.dest == 'ui':
|
||||
if option.dest == 'ui':
|
||||
valid_uis = list_extension_modules(bb.ui, 'main')
|
||||
return action.help.replace('@CHOICES@', present_options(valid_uis))
|
||||
option.help = option.help.replace('@CHOICES@', present_options(valid_uis))
|
||||
|
||||
return action.help
|
||||
return optparse.IndentedHelpFormatter.format_option(self, option)
|
||||
|
||||
def list_extension_modules(pkg, checkattr):
|
||||
"""
|
||||
@@ -115,207 +114,180 @@ def _showwarning(message, category, filename, lineno, file=None, line=None):
|
||||
warnings.showwarning = _showwarning
|
||||
|
||||
def create_bitbake_parser():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="""\
|
||||
It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH which
|
||||
will provide the layer, BBFILES and other configuration information.
|
||||
""",
|
||||
formatter_class=BitbakeHelpFormatter,
|
||||
allow_abbrev=False,
|
||||
add_help=False, # help is manually added below in a specific argument group
|
||||
)
|
||||
parser = optparse.OptionParser(
|
||||
formatter=BitbakeHelpFormatter(),
|
||||
version="BitBake Build Tool Core version %s" % bb.__version__,
|
||||
usage="""%prog [options] [recipename/target recipe:do_task ...]
|
||||
|
||||
general_group = parser.add_argument_group('General options')
|
||||
task_group = parser.add_argument_group('Task control options')
|
||||
exec_group = parser.add_argument_group('Execution control options')
|
||||
logging_group = parser.add_argument_group('Logging/output control options')
|
||||
server_group = parser.add_argument_group('Server options')
|
||||
config_group = parser.add_argument_group('Configuration options')
|
||||
Executes the specified task (default is 'build') for a given set of target recipes (.bb files).
|
||||
It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH which
|
||||
will provide the layer, BBFILES and other configuration information.""")
|
||||
|
||||
general_group.add_argument("targets", nargs="*", metavar="recipename/target",
|
||||
help="Execute the specified task (default is 'build') for these target "
|
||||
"recipes (.bb files).")
|
||||
parser.add_option("-b", "--buildfile", action="store", dest="buildfile", default=None,
|
||||
help="Execute tasks from a specific .bb recipe directly. WARNING: Does "
|
||||
"not handle any dependencies from other recipes.")
|
||||
|
||||
general_group.add_argument("-s", "--show-versions", action="store_true",
|
||||
help="Show current and preferred versions of all recipes.")
|
||||
parser.add_option("-k", "--continue", action="store_false", dest="halt", default=True,
|
||||
help="Continue as much as possible after an error. While the target that "
|
||||
"failed and anything depending on it cannot be built, as much as "
|
||||
"possible will be built before stopping.")
|
||||
|
||||
general_group.add_argument("-e", "--environment", action="store_true",
|
||||
dest="show_environment",
|
||||
help="Show the global or per-recipe environment complete with information"
|
||||
" about where variables were set/changed.")
|
||||
parser.add_option("-f", "--force", action="store_true", dest="force", default=False,
|
||||
help="Force the specified targets/task to run (invalidating any "
|
||||
"existing stamp file).")
|
||||
|
||||
general_group.add_argument("-g", "--graphviz", action="store_true", dest="dot_graph",
|
||||
help="Save dependency tree information for the specified "
|
||||
"targets in the dot syntax.")
|
||||
parser.add_option("-c", "--cmd", action="store", dest="cmd",
|
||||
help="Specify the task to execute. The exact options available "
|
||||
"depend on the metadata. Some examples might be 'compile'"
|
||||
" or 'populate_sysroot' or 'listtasks' may give a list of "
|
||||
"the tasks available.")
|
||||
|
||||
parser.add_option("-C", "--clear-stamp", action="store", dest="invalidate_stamp",
|
||||
help="Invalidate the stamp for the specified task such as 'compile' "
|
||||
"and then run the default task for the specified target(s).")
|
||||
|
||||
parser.add_option("-r", "--read", action="append", dest="prefile", default=[],
|
||||
help="Read the specified file before bitbake.conf.")
|
||||
|
||||
parser.add_option("-R", "--postread", action="append", dest="postfile", default=[],
|
||||
help="Read the specified file after bitbake.conf.")
|
||||
|
||||
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
|
||||
help="Enable tracing of shell tasks (with 'set -x'). "
|
||||
"Also print bb.note(...) messages to stdout (in "
|
||||
"addition to writing them to ${T}/log.do_<task>).")
|
||||
|
||||
parser.add_option("-D", "--debug", action="count", dest="debug", default=0,
|
||||
help="Increase the debug level. You can specify this "
|
||||
"more than once. -D sets the debug level to 1, "
|
||||
"where only bb.debug(1, ...) messages are printed "
|
||||
"to stdout; -DD sets the debug level to 2, where "
|
||||
"both bb.debug(1, ...) and bb.debug(2, ...) "
|
||||
"messages are printed; etc. Without -D, no debug "
|
||||
"messages are printed. Note that -D only affects "
|
||||
"output to stdout. All debug messages are written "
|
||||
"to ${T}/log.do_taskname, regardless of the debug "
|
||||
"level.")
|
||||
|
||||
parser.add_option("-q", "--quiet", action="count", dest="quiet", default=0,
|
||||
help="Output less log message data to the terminal. You can specify this more than once.")
|
||||
|
||||
parser.add_option("-n", "--dry-run", action="store_true", dest="dry_run", default=False,
|
||||
help="Don't execute, just go through the motions.")
|
||||
|
||||
parser.add_option("-S", "--dump-signatures", action="append", dest="dump_signatures",
|
||||
default=[], metavar="SIGNATURE_HANDLER",
|
||||
help="Dump out the signature construction information, with no task "
|
||||
"execution. The SIGNATURE_HANDLER parameter is passed to the "
|
||||
"handler. Two common values are none and printdiff but the handler "
|
||||
"may define more/less. none means only dump the signature, printdiff"
|
||||
" means compare the dumped signature with the cached one.")
|
||||
|
||||
parser.add_option("-p", "--parse-only", action="store_true",
|
||||
dest="parse_only", default=False,
|
||||
help="Quit after parsing the BB recipes.")
|
||||
|
||||
parser.add_option("-s", "--show-versions", action="store_true",
|
||||
dest="show_versions", default=False,
|
||||
help="Show current and preferred versions of all recipes.")
|
||||
|
||||
parser.add_option("-e", "--environment", action="store_true",
|
||||
dest="show_environment", default=False,
|
||||
help="Show the global or per-recipe environment complete with information"
|
||||
" about where variables were set/changed.")
|
||||
|
||||
parser.add_option("-g", "--graphviz", action="store_true", dest="dot_graph", default=False,
|
||||
help="Save dependency tree information for the specified "
|
||||
"targets in the dot syntax.")
|
||||
|
||||
parser.add_option("-I", "--ignore-deps", action="append",
|
||||
dest="extra_assume_provided", default=[],
|
||||
help="Assume these dependencies don't exist and are already provided "
|
||||
"(equivalent to ASSUME_PROVIDED). Useful to make dependency "
|
||||
"graphs more appealing")
|
||||
|
||||
parser.add_option("-l", "--log-domains", action="append", dest="debug_domains", default=[],
|
||||
help="Show debug logging for the specified logging domains")
|
||||
|
||||
parser.add_option("-P", "--profile", action="store_true", dest="profile", default=False,
|
||||
help="Profile the command and save reports.")
|
||||
|
||||
# @CHOICES@ is substituted out by BitbakeHelpFormatter above
|
||||
general_group.add_argument("-u", "--ui",
|
||||
default=os.environ.get('BITBAKE_UI', 'knotty'),
|
||||
help="The user interface to use (@CHOICES@ - default %(default)s).")
|
||||
parser.add_option("-u", "--ui", action="store", dest="ui",
|
||||
default=os.environ.get('BITBAKE_UI', 'knotty'),
|
||||
help="The user interface to use (@CHOICES@ - default %default).")
|
||||
|
||||
general_group.add_argument("--version", action="store_true",
|
||||
help="Show programs version and exit.")
|
||||
parser.add_option("", "--token", action="store", dest="xmlrpctoken",
|
||||
default=os.environ.get("BBTOKEN"),
|
||||
help="Specify the connection token to be used when connecting "
|
||||
"to a remote server.")
|
||||
|
||||
general_group.add_argument('-h', '--help', action='help',
|
||||
help='Show this help message and exit.')
|
||||
parser.add_option("", "--revisions-changed", action="store_true",
|
||||
dest="revisions_changed", default=False,
|
||||
help="Set the exit code depending on whether upstream floating "
|
||||
"revisions have changed or not.")
|
||||
|
||||
parser.add_option("", "--server-only", action="store_true",
|
||||
dest="server_only", default=False,
|
||||
help="Run bitbake without a UI, only starting a server "
|
||||
"(cooker) process.")
|
||||
|
||||
task_group.add_argument("-f", "--force", action="store_true",
|
||||
help="Force the specified targets/task to run (invalidating any "
|
||||
"existing stamp file).")
|
||||
parser.add_option("-B", "--bind", action="store", dest="bind", default=False,
|
||||
help="The name/address for the bitbake xmlrpc server to bind to.")
|
||||
|
||||
task_group.add_argument("-c", "--cmd",
|
||||
help="Specify the task to execute. The exact options available "
|
||||
"depend on the metadata. Some examples might be 'compile'"
|
||||
" or 'populate_sysroot' or 'listtasks' may give a list of "
|
||||
"the tasks available.")
|
||||
parser.add_option("-T", "--idle-timeout", type=float, dest="server_timeout",
|
||||
default=os.getenv("BB_SERVER_TIMEOUT"),
|
||||
help="Set timeout to unload bitbake server due to inactivity, "
|
||||
"set to -1 means no unload, "
|
||||
"default: Environment variable BB_SERVER_TIMEOUT.")
|
||||
|
||||
task_group.add_argument("-C", "--clear-stamp", dest="invalidate_stamp",
|
||||
help="Invalidate the stamp for the specified task such as 'compile' "
|
||||
"and then run the default task for the specified target(s).")
|
||||
parser.add_option("", "--no-setscene", action="store_true",
|
||||
dest="nosetscene", default=False,
|
||||
help="Do not run any setscene tasks. sstate will be ignored and "
|
||||
"everything needed, built.")
|
||||
|
||||
task_group.add_argument("--runall", action="append", default=[],
|
||||
help="Run the specified task for any recipe in the taskgraph of the "
|
||||
"specified target (even if it wouldn't otherwise have run).")
|
||||
parser.add_option("", "--skip-setscene", action="store_true",
|
||||
dest="skipsetscene", default=False,
|
||||
help="Skip setscene tasks if they would be executed. Tasks previously "
|
||||
"restored from sstate will be kept, unlike --no-setscene")
|
||||
|
||||
task_group.add_argument("--runonly", action="append",
|
||||
help="Run only the specified task within the taskgraph of the "
|
||||
"specified targets (and any task dependencies those tasks may have).")
|
||||
parser.add_option("", "--setscene-only", action="store_true",
|
||||
dest="setsceneonly", default=False,
|
||||
help="Only run setscene tasks, don't run any real tasks.")
|
||||
|
||||
task_group.add_argument("--no-setscene", action="store_true",
|
||||
dest="nosetscene",
|
||||
help="Do not run any setscene tasks. sstate will be ignored and "
|
||||
"everything needed, built.")
|
||||
parser.add_option("", "--remote-server", action="store", dest="remote_server",
|
||||
default=os.environ.get("BBSERVER"),
|
||||
help="Connect to the specified server.")
|
||||
|
||||
task_group.add_argument("--skip-setscene", action="store_true",
|
||||
dest="skipsetscene",
|
||||
help="Skip setscene tasks if they would be executed. Tasks previously "
|
||||
"restored from sstate will be kept, unlike --no-setscene.")
|
||||
parser.add_option("-m", "--kill-server", action="store_true",
|
||||
dest="kill_server", default=False,
|
||||
help="Terminate any running bitbake server.")
|
||||
|
||||
task_group.add_argument("--setscene-only", action="store_true",
|
||||
dest="setsceneonly",
|
||||
help="Only run setscene tasks, don't run any real tasks.")
|
||||
parser.add_option("", "--observe-only", action="store_true",
|
||||
dest="observe_only", default=False,
|
||||
help="Connect to a server as an observing-only client.")
|
||||
|
||||
parser.add_option("", "--status-only", action="store_true",
|
||||
dest="status_only", default=False,
|
||||
help="Check the status of the remote bitbake server.")
|
||||
|
||||
exec_group.add_argument("-n", "--dry-run", action="store_true",
|
||||
help="Don't execute, just go through the motions.")
|
||||
parser.add_option("-w", "--write-log", action="store", dest="writeeventlog",
|
||||
default=os.environ.get("BBEVENTLOG"),
|
||||
help="Writes the event log of the build to a bitbake event json file. "
|
||||
"Use '' (empty string) to assign the name automatically.")
|
||||
|
||||
exec_group.add_argument("-p", "--parse-only", action="store_true",
|
||||
help="Quit after parsing the BB recipes.")
|
||||
|
||||
exec_group.add_argument("-k", "--continue", action="store_false", dest="halt",
|
||||
help="Continue as much as possible after an error. While the target that "
|
||||
"failed and anything depending on it cannot be built, as much as "
|
||||
"possible will be built before stopping.")
|
||||
|
||||
exec_group.add_argument("-P", "--profile", action="store_true",
|
||||
help="Profile the command and save reports.")
|
||||
|
||||
exec_group.add_argument("-S", "--dump-signatures", action="append",
|
||||
default=[], metavar="SIGNATURE_HANDLER",
|
||||
help="Dump out the signature construction information, with no task "
|
||||
"execution. The SIGNATURE_HANDLER parameter is passed to the "
|
||||
"handler. Two common values are none and printdiff but the handler "
|
||||
"may define more/less. none means only dump the signature, printdiff"
|
||||
" means recursively compare the dumped signature with the most recent"
|
||||
" one in a local build or sstate cache (can be used to find out why tasks re-run"
|
||||
" when that is not expected)")
|
||||
|
||||
exec_group.add_argument("--revisions-changed", action="store_true",
|
||||
help="Set the exit code depending on whether upstream floating "
|
||||
"revisions have changed or not.")
|
||||
|
||||
exec_group.add_argument("-b", "--buildfile",
|
||||
help="Execute tasks from a specific .bb recipe directly. WARNING: Does "
|
||||
"not handle any dependencies from other recipes.")
|
||||
|
||||
logging_group.add_argument("-D", "--debug", action="count", default=0,
|
||||
help="Increase the debug level. You can specify this "
|
||||
"more than once. -D sets the debug level to 1, "
|
||||
"where only bb.debug(1, ...) messages are printed "
|
||||
"to stdout; -DD sets the debug level to 2, where "
|
||||
"both bb.debug(1, ...) and bb.debug(2, ...) "
|
||||
"messages are printed; etc. Without -D, no debug "
|
||||
"messages are printed. Note that -D only affects "
|
||||
"output to stdout. All debug messages are written "
|
||||
"to ${T}/log.do_taskname, regardless of the debug "
|
||||
"level.")
|
||||
|
||||
logging_group.add_argument("-l", "--log-domains", action="append", dest="debug_domains",
|
||||
default=[],
|
||||
help="Show debug logging for the specified logging domains.")
|
||||
|
||||
logging_group.add_argument("-v", "--verbose", action="store_true",
|
||||
help="Enable tracing of shell tasks (with 'set -x'). "
|
||||
"Also print bb.note(...) messages to stdout (in "
|
||||
"addition to writing them to ${T}/log.do_<task>).")
|
||||
|
||||
logging_group.add_argument("-q", "--quiet", action="count", default=0,
|
||||
help="Output less log message data to the terminal. You can specify this "
|
||||
"more than once.")
|
||||
|
||||
logging_group.add_argument("-w", "--write-log", dest="writeeventlog",
|
||||
default=os.environ.get("BBEVENTLOG"),
|
||||
help="Writes the event log of the build to a bitbake event json file. "
|
||||
"Use '' (empty string) to assign the name automatically.")
|
||||
|
||||
|
||||
server_group.add_argument("-B", "--bind", default=False,
|
||||
help="The name/address for the bitbake xmlrpc server to bind to.")
|
||||
|
||||
server_group.add_argument("-T", "--idle-timeout", type=float, dest="server_timeout",
|
||||
default=os.getenv("BB_SERVER_TIMEOUT"),
|
||||
help="Set timeout to unload bitbake server due to inactivity, "
|
||||
"set to -1 means no unload, "
|
||||
"default: Environment variable BB_SERVER_TIMEOUT.")
|
||||
|
||||
server_group.add_argument("--remote-server",
|
||||
default=os.environ.get("BBSERVER"),
|
||||
help="Connect to the specified server.")
|
||||
|
||||
server_group.add_argument("-m", "--kill-server", action="store_true",
|
||||
help="Terminate any running bitbake server.")
|
||||
|
||||
server_group.add_argument("--token", dest="xmlrpctoken",
|
||||
default=os.environ.get("BBTOKEN"),
|
||||
help="Specify the connection token to be used when connecting "
|
||||
"to a remote server.")
|
||||
|
||||
server_group.add_argument("--observe-only", action="store_true",
|
||||
help="Connect to a server as an observing-only client.")
|
||||
|
||||
server_group.add_argument("--status-only", action="store_true",
|
||||
help="Check the status of the remote bitbake server.")
|
||||
|
||||
server_group.add_argument("--server-only", action="store_true",
|
||||
help="Run bitbake without a UI, only starting a server "
|
||||
"(cooker) process.")
|
||||
|
||||
|
||||
config_group.add_argument("-r", "--read", action="append", dest="prefile", default=[],
|
||||
help="Read the specified file before bitbake.conf.")
|
||||
|
||||
config_group.add_argument("-R", "--postread", action="append", dest="postfile", default=[],
|
||||
help="Read the specified file after bitbake.conf.")
|
||||
|
||||
|
||||
config_group.add_argument("-I", "--ignore-deps", action="append",
|
||||
dest="extra_assume_provided", default=[],
|
||||
help="Assume these dependencies don't exist and are already provided "
|
||||
"(equivalent to ASSUME_PROVIDED). Useful to make dependency "
|
||||
"graphs more appealing.")
|
||||
parser.add_option("", "--runall", action="append", dest="runall",
|
||||
help="Run the specified task for any recipe in the taskgraph of the specified target (even if it wouldn't otherwise have run).")
|
||||
|
||||
parser.add_option("", "--runonly", action="append", dest="runonly",
|
||||
help="Run only the specified task within the taskgraph of the specified targets (and any task dependencies those tasks may have).")
|
||||
return parser
|
||||
|
||||
|
||||
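To illustrate the argparse pattern this conversion relies on (argument groups plus a help formatter that expands an @CHOICES@ placeholder, with parse_intermixed_args accepting targets mixed with options), here is a condensed, hypothetical sketch rather than the real bitbake parser; the option set and UI names are examples only.

    import argparse

    class ChoicesHelpFormatter(argparse.HelpFormatter):
        def _get_help_string(self, action):
            # Substitute the placeholder lazily, only when help text is rendered.
            if action.dest == 'ui':
                return action.help.replace('@CHOICES@', 'knotty, ncurses, taskexp')
            return action.help

    parser = argparse.ArgumentParser(formatter_class=ChoicesHelpFormatter,
                                     allow_abbrev=False)
    general = parser.add_argument_group('General options')
    general.add_argument('targets', nargs='*', metavar='recipename/target',
                         help="Recipes to build.")
    general.add_argument('-u', '--ui', default='knotty',
                         help="The user interface to use (@CHOICES@ - default %(default)s).")

    options = parser.parse_intermixed_args(['-u', 'knotty', 'core-image-minimal'])
    print(options.targets, options.ui)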
class BitBakeConfigParameters(cookerdata.ConfigParameters):
|
||||
def parseCommandLine(self, argv=sys.argv):
|
||||
parser = create_bitbake_parser()
|
||||
options = parser.parse_intermixed_args(argv[1:])
|
||||
|
||||
if options.version:
|
||||
print("BitBake Build Tool Core version %s" % bb.__version__)
|
||||
sys.exit(0)
|
||||
options, targets = parser.parse_args(argv)
|
||||
|
||||
if options.quiet and options.verbose:
|
||||
parser.error("options --quiet and --verbose are mutually exclusive")
|
||||
@@ -347,7 +319,7 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
|
||||
else:
|
||||
options.xmlrpcinterface = (None, 0)
|
||||
|
||||
return options, options.targets
|
||||
return options, targets[1:]
|
||||
|
||||
|
||||
def bitbake_main(configParams, configuration):
|
||||
@@ -412,9 +384,6 @@ def bitbake_main(configParams, configuration):
|
||||
|
||||
return 1
|
||||
|
||||
def timestamp():
|
||||
return datetime.datetime.now().strftime('%H:%M:%S.%f')
|
||||
|
||||
def setup_bitbake(configParams, extrafeatures=None):
|
||||
# Ensure logging messages get sent to the UI as events
|
||||
handler = bb.event.LogHandler()
|
||||
@@ -422,11 +391,6 @@ def setup_bitbake(configParams, extrafeatures=None):
|
||||
# In status only mode there are no logs and no UI
|
||||
logger.addHandler(handler)
|
||||
|
||||
if configParams.dump_signatures:
|
||||
if extrafeatures is None:
|
||||
extrafeatures = []
|
||||
extrafeatures.append(bb.cooker.CookerFeatures.RECIPE_SIGGEN_INFO)
|
||||
|
||||
if configParams.server_only:
|
||||
featureset = []
|
||||
ui_module = None
|
||||
@@ -454,7 +418,7 @@ def setup_bitbake(configParams, extrafeatures=None):
|
||||
retries = 8
|
||||
while retries:
|
||||
try:
|
||||
topdir, lock, lockfile = lockBitbake()
|
||||
topdir, lock = lockBitbake()
|
||||
sockname = topdir + "/bitbake.sock"
|
||||
if lock:
|
||||
if configParams.status_only or configParams.kill_server:
|
||||
@@ -465,22 +429,18 @@ def setup_bitbake(configParams, extrafeatures=None):
|
||||
logger.info("Starting bitbake server...")
|
||||
# Clear the event queue since we already displayed messages
|
||||
bb.event.ui_queue = []
|
||||
server = bb.server.process.BitBakeServer(lock, sockname, featureset, configParams.server_timeout, configParams.xmlrpcinterface, configParams.profile)
|
||||
server = bb.server.process.BitBakeServer(lock, sockname, featureset, configParams.server_timeout, configParams.xmlrpcinterface)
|
||||
|
||||
else:
|
||||
logger.info("Reconnecting to bitbake server...")
|
||||
if not os.path.exists(sockname):
|
||||
logger.info("Previous bitbake instance shutting down?, waiting to retry... (%s)" % timestamp())
|
||||
procs = bb.server.process.get_lockfile_process_msg(lockfile)
|
||||
if procs:
|
||||
logger.info("Processes holding bitbake.lock (missing socket %s):\n%s" % (sockname, procs))
|
||||
logger.info("Directory listing: %s" % (str(os.listdir(topdir))))
|
||||
logger.info("Previous bitbake instance shutting down?, waiting to retry...")
|
||||
i = 0
|
||||
lock = None
|
||||
# Wait for 5s or until we can get the lock
|
||||
while not lock and i < 50:
|
||||
time.sleep(0.1)
|
||||
_, lock, _ = lockBitbake()
|
||||
_, lock = lockBitbake()
|
||||
i += 1
|
||||
if lock:
|
||||
bb.utils.unlockfile(lock)
|
||||
@@ -499,9 +459,9 @@ def setup_bitbake(configParams, extrafeatures=None):
|
||||
retries -= 1
|
||||
tryno = 8 - retries
|
||||
if isinstance(e, (bb.server.process.ProcessTimeout, BrokenPipeError, EOFError, SystemExit)):
|
||||
logger.info("Retrying server connection (#%d)... (%s)" % (tryno, timestamp()))
|
||||
logger.info("Retrying server connection (#%d)..." % tryno)
|
||||
else:
|
||||
logger.info("Retrying server connection (#%d)... (%s, %s)" % (tryno, traceback.format_exc(), timestamp()))
|
||||
logger.info("Retrying server connection (#%d)... (%s)" % (tryno, traceback.format_exc()))
|
||||
|
||||
if not retries:
|
||||
bb.fatal("Unable to connect to bitbake server, or start one (server startup failures would be in bitbake-cookerdaemon.log).")
|
||||
@@ -530,5 +490,5 @@ def lockBitbake():
|
||||
bb.error("Unable to find conf/bblayers.conf or conf/bitbake.conf. BBPATH is unset and/or not in a build directory?")
|
||||
raise BBMainFatal
|
||||
lockfile = topdir + "/bitbake.lock"
|
||||
return topdir, bb.utils.lockfile(lockfile, False, False), lockfile
|
||||
return topdir, bb.utils.lockfile(lockfile, False, False)
|
||||
|
||||
|
||||
@@ -234,10 +234,9 @@ class diskMonitor:
freeInode = st.f_favail

if minInode and freeInode < minInode:
# Some filesystems use dynamic inodes so can't run out.
# This is reported by the inode count being 0 (btrfs) or the free
# inode count being -1 (cephfs).
if st.f_files == 0 or st.f_favail == -1:
# Some filesystems use dynamic inodes so can't run out
# (e.g. btrfs). This is reported by the inode count being 0.
if st.f_files == 0:
self.devDict[k][2] = None
continue
# Always show warning, the self.checked would always be False if the action is WARN
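A hedged sketch of the inode check being adjusted here: filesystems with dynamic inodes are skipped, keyed off f_files == 0 (btrfs) or f_favail == -1 (cephfs). The directory and watermark below are placeholders.

    import os

    st = os.statvfs(".")                       # substitute the build directory here
    if st.f_files == 0 or st.f_favail == -1:
        min_free_inodes = None                 # dynamic inodes: nothing to monitor
    else:
        min_free_inodes = 100 * 1024           # example watermark
        if st.f_favail < min_free_inodes:
            print("WARNING: the build directory is running low on inodes")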
@@ -89,6 +89,10 @@ class BBLogFormatter(logging.Formatter):
|
||||
msg = logging.Formatter.format(self, record)
|
||||
if hasattr(record, 'bb_exc_formatted'):
|
||||
msg += '\n' + ''.join(record.bb_exc_formatted)
|
||||
elif hasattr(record, 'bb_exc_info'):
|
||||
etype, value, tb = record.bb_exc_info
|
||||
formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
|
||||
msg += '\n' + ''.join(formatted)
|
||||
return msg
|
||||
|
||||
def colorize(self, record):
|
||||
@@ -129,6 +133,7 @@ class LogFilterShowOnce(logging.Filter):
|
||||
self.seen_errors = set()
|
||||
|
||||
def filter(self, record):
|
||||
msg = record.msg
|
||||
if record.levelno == bb.msg.BBLogFormatter.WARNONCE:
|
||||
if record.msg in self.seen_warnings:
|
||||
return False
|
||||
@@ -226,7 +231,7 @@ def logger_create(name, output=sys.stderr, level=logging.INFO, preserve_handlers
|
||||
console = logging.StreamHandler(output)
|
||||
console.addFilter(bb.msg.LogFilterShowOnce())
|
||||
format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
|
||||
if color == 'always' or (color == 'auto' and output.isatty() and os.environ.get('NO_COLOR', '') == ''):
|
||||
if color == 'always' or (color == 'auto' and output.isatty()):
|
||||
format.enable_color()
|
||||
console.setFormatter(format)
|
||||
if preserve_handlers:
|
||||
|
||||
@@ -49,32 +49,20 @@ class SkipPackage(SkipRecipe):
|
||||
__mtime_cache = {}
|
||||
def cached_mtime(f):
|
||||
if f not in __mtime_cache:
|
||||
res = os.stat(f)
|
||||
__mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
|
||||
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
|
||||
return __mtime_cache[f]
|
||||
|
||||
def cached_mtime_noerror(f):
|
||||
if f not in __mtime_cache:
|
||||
try:
|
||||
res = os.stat(f)
|
||||
__mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
|
||||
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
|
||||
except OSError:
|
||||
return 0
|
||||
return __mtime_cache[f]
|
||||
|
||||
def check_mtime(f, mtime):
|
||||
try:
|
||||
res = os.stat(f)
|
||||
current_mtime = (res.st_mtime_ns, res.st_size, res.st_ino)
|
||||
__mtime_cache[f] = current_mtime
|
||||
except OSError:
|
||||
current_mtime = 0
|
||||
return current_mtime == mtime
|
||||
|
||||
def update_mtime(f):
|
||||
try:
|
||||
res = os.stat(f)
|
||||
__mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
|
||||
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
|
||||
except OSError:
|
||||
if f in __mtime_cache:
|
||||
del __mtime_cache[f]
|
||||
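A small sketch of the cache-key idea in this hunk: caching the tuple (st_mtime_ns, st_size, st_ino) instead of a bare st_mtime, so rewrites that preserve the timestamp are still noticed. The file used below is just a convenient example.

    import os

    __mtime_cache = {}

    def update_mtime(f):
        res = os.stat(f)
        __mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
        return __mtime_cache[f]

    def check_mtime(f, cached):
        try:
            res = os.stat(f)
            current = (res.st_mtime_ns, res.st_size, res.st_ino)
        except OSError:
            current = 0
        return current == cached

    stamp = update_mtime(__file__)
    if not check_mtime(__file__, stamp):
        print("file changed since it was cached")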
@@ -111,12 +99,12 @@ def supports(fn, data):
|
||||
return 1
|
||||
return 0
|
||||
|
||||
def handle(fn, data, include=0, baseconfig=False):
|
||||
def handle(fn, data, include = 0):
|
||||
"""Call the handler that is appropriate for this file"""
|
||||
for h in handlers:
|
||||
if h['supports'](fn, data):
|
||||
with data.inchistory.include(fn):
|
||||
return h['handle'](fn, data, include, baseconfig)
|
||||
return h['handle'](fn, data, include)
|
||||
raise ParseError("not a BitBake file", fn)
|
||||
|
||||
def init(fn, data):
|
||||
|
||||
@@ -9,7 +9,6 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import sys
|
||||
import bb
|
||||
from bb import methodpool
|
||||
from bb.parse import logger
|
||||
@@ -43,21 +42,6 @@ class IncludeNode(AstNode):
|
||||
else:
|
||||
bb.parse.ConfHandler.include(self.filename, s, self.lineno, data, False)
|
||||
|
||||
class IncludeAllNode(AstNode):
|
||||
def __init__(self, filename, lineno, what_file):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
self.what_file = what_file
|
||||
|
||||
def eval(self, data):
|
||||
"""
|
||||
Include the file and evaluate the statements
|
||||
"""
|
||||
s = data.expand(self.what_file)
|
||||
logger.debug2("CONF %s:%s: including %s", self.filename, self.lineno, s)
|
||||
|
||||
for path in data.getVar("BBPATH").split(":"):
|
||||
bb.parse.ConfHandler.include(self.filename, os.path.join(path, s), self.lineno, data, False)
|
||||
|
||||
class ExportNode(AstNode):
|
||||
def __init__(self, filename, lineno, var):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
@@ -152,10 +136,7 @@ class DataNode(AstNode):
|
||||
|
||||
flag = None
|
||||
if 'flag' in groupd and groupd['flag'] is not None:
|
||||
if groupd["lazyques"]:
|
||||
flag = "_defaultval_flag_"+groupd['flag']
|
||||
else:
|
||||
flag = groupd['flag']
|
||||
flag = groupd['flag']
|
||||
elif groupd["lazyques"]:
|
||||
flag = "_defaultval"
|
||||
|
||||
@@ -229,12 +210,10 @@ class ExportFuncsNode(AstNode):
|
||||
|
||||
def eval(self, data):
|
||||
|
||||
sentinel = " # Export function set\n"
|
||||
for func in self.n:
|
||||
calledfunc = self.classname + "_" + func
|
||||
|
||||
basevar = data.getVar(func, False)
|
||||
if basevar and sentinel not in basevar:
|
||||
if data.getVar(func, False) and not data.getVarFlag(func, 'export_func', False):
|
||||
continue
|
||||
|
||||
if data.getVar(func, False):
|
||||
@@ -244,30 +223,29 @@ class ExportFuncsNode(AstNode):
|
||||
for flag in [ "func", "python" ]:
|
||||
if data.getVarFlag(calledfunc, flag, False):
|
||||
data.setVarFlag(func, flag, data.getVarFlag(calledfunc, flag, False))
|
||||
for flag in ["dirs", "cleandirs", "fakeroot"]:
|
||||
for flag in [ "dirs" ]:
|
||||
if data.getVarFlag(func, flag, False):
|
||||
data.setVarFlag(calledfunc, flag, data.getVarFlag(func, flag, False))
|
||||
data.setVarFlag(func, "filename", "autogenerated")
|
||||
data.setVarFlag(func, "lineno", 1)
|
||||
|
||||
if data.getVarFlag(calledfunc, "python", False):
|
||||
data.setVar(func, sentinel + " bb.build.exec_func('" + calledfunc + "', d)\n", parsing=True)
|
||||
data.setVar(func, " bb.build.exec_func('" + calledfunc + "', d)\n", parsing=True)
|
||||
else:
|
||||
if "-" in self.classname:
|
||||
bb.fatal("The classname %s contains a dash character and is calling an sh function %s using EXPORT_FUNCTIONS. Since a dash is illegal in sh function names, this cannot work, please rename the class or don't use EXPORT_FUNCTIONS." % (self.classname, calledfunc))
|
||||
data.setVar(func, sentinel + " " + calledfunc + "\n", parsing=True)
|
||||
data.setVar(func, " " + calledfunc + "\n", parsing=True)
|
||||
data.setVarFlag(func, 'export_func', '1')
|
||||
|
||||
class AddTaskNode(AstNode):
|
||||
def __init__(self, filename, lineno, tasks, before, after):
|
||||
def __init__(self, filename, lineno, func, before, after):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
self.tasks = tasks
|
||||
self.func = func
|
||||
self.before = before
|
||||
self.after = after
|
||||
|
||||
def eval(self, data):
|
||||
tasks = self.tasks.split()
|
||||
for task in tasks:
|
||||
bb.build.addtask(task, self.before, self.after, data)
|
||||
bb.build.addtask(self.func, self.before, self.after, data)
|
||||
|
||||
class DelTaskNode(AstNode):
|
||||
def __init__(self, filename, lineno, tasks):
|
||||
@@ -291,41 +269,6 @@ class BBHandlerNode(AstNode):
|
||||
data.setVarFlag(h, "handler", 1)
|
||||
data.setVar('__BBHANDLERS', bbhands)
|
||||
|
||||
class PyLibNode(AstNode):
|
||||
def __init__(self, filename, lineno, libdir, namespace):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
self.libdir = libdir
|
||||
self.namespace = namespace
|
||||
|
||||
def eval(self, data):
|
||||
global_mods = (data.getVar("BB_GLOBAL_PYMODULES") or "").split()
|
||||
for m in global_mods:
|
||||
if m not in bb.utils._context:
|
||||
bb.utils._context[m] = __import__(m)
|
||||
|
||||
libdir = data.expand(self.libdir)
|
||||
if libdir not in sys.path:
|
||||
sys.path.append(libdir)
|
||||
try:
|
||||
bb.utils._context[self.namespace] = __import__(self.namespace)
|
||||
toimport = getattr(bb.utils._context[self.namespace], "BBIMPORTS", [])
|
||||
for i in toimport:
|
||||
bb.utils._context[self.namespace] = __import__(self.namespace + "." + i)
|
||||
mod = getattr(bb.utils._context[self.namespace], i)
|
||||
fn = getattr(mod, "__file__")
|
||||
funcs = {}
|
||||
for f in dir(mod):
|
||||
if f.startswith("_"):
|
||||
continue
|
||||
fcall = getattr(mod, f)
|
||||
if not callable(fcall):
|
||||
continue
|
||||
funcs[f] = fcall
|
||||
bb.codeparser.add_module_functions(fn, funcs, "%s.%s" % (self.namespace, i))
|
||||
|
||||
except AttributeError as e:
|
||||
bb.error("Error importing OE modules: %s" % str(e))
|
||||
|
||||
class InheritNode(AstNode):
|
||||
def __init__(self, filename, lineno, classes):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
@@ -334,59 +277,9 @@ class InheritNode(AstNode):
|
||||
def eval(self, data):
|
||||
bb.parse.BBHandler.inherit(self.classes, self.filename, self.lineno, data)
|
||||
|
||||
class InheritDeferredNode(AstNode):
|
||||
def __init__(self, filename, lineno, classes):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
self.inherit = (classes, filename, lineno)
|
||||
|
||||
def eval(self, data):
|
||||
inherits = data.getVar('__BBDEFINHERITS', False) or []
|
||||
inherits.append(self.inherit)
|
||||
data.setVar('__BBDEFINHERITS', inherits)
|
||||
|
||||
class AddFragmentsNode(AstNode):
|
||||
def __init__(self, filename, lineno, fragments_path_prefix, fragments_variable, flagged_variables_list_variable):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
self.fragments_path_prefix = fragments_path_prefix
|
||||
self.fragments_variable = fragments_variable
|
||||
self.flagged_variables_list_variable = flagged_variables_list_variable
|
||||
|
||||
def eval(self, data):
|
||||
# No need to use mark_dependency since we would only match a fragment
|
||||
# from a specific layer and there can only be a single layer with a
|
||||
# given namespace.
|
||||
def find_fragment(layers, layerid, full_fragment_name):
|
||||
for layerpath in layers.split():
|
||||
candidate_fragment_path = os.path.join(layerpath, full_fragment_name)
|
||||
if os.path.exists(candidate_fragment_path) and bb.utils.get_file_layer(candidate_fragment_path, data) == layerid:
|
||||
return candidate_fragment_path
|
||||
return None
|
||||
|
||||
fragments = data.getVar(self.fragments_variable)
|
||||
layers = data.getVar('BBLAYERS')
|
||||
flagged_variables = data.getVar(self.flagged_variables_list_variable).split()
|
||||
|
||||
if not fragments:
|
||||
return
|
||||
for f in fragments.split():
|
||||
layerid, fragment_name = f.split('/', 1)
|
||||
full_fragment_name = data.expand("{}/{}.conf".format(self.fragments_path_prefix, fragment_name))
|
||||
fragment_path = find_fragment(layers, layerid, full_fragment_name)
|
||||
if fragment_path:
|
||||
bb.parse.ConfHandler.include(self.filename, fragment_path, self.lineno, data, "include fragment")
|
||||
for flagged_var in flagged_variables:
|
||||
val = data.getVar(flagged_var)
|
||||
data.setVarFlag(flagged_var, f, val)
|
||||
data.setVar(flagged_var, None)
|
||||
else:
|
||||
bb.error("Could not find fragment {} in enabled layers: {}".format(f, layers))
|
||||
|
||||
def handleInclude(statements, filename, lineno, m, force):
|
||||
statements.append(IncludeNode(filename, lineno, m.group(1), force))
|
||||
|
||||
def handleIncludeAll(statements, filename, lineno, m):
|
||||
statements.append(IncludeAllNode(filename, lineno, m.group(1)))
|
||||
|
||||
def handleExport(statements, filename, lineno, m):
|
||||
statements.append(ExportNode(filename, lineno, m.group(1)))
|
||||
|
||||
@@ -408,62 +301,35 @@ def handlePythonMethod(statements, filename, lineno, funcname, modulename, body)
|
||||
def handleExportFuncs(statements, filename, lineno, m, classname):
|
||||
statements.append(ExportFuncsNode(filename, lineno, m.group(1), classname))
|
||||
|
||||
def handleAddTask(statements, filename, lineno, tasks, before, after):
|
||||
statements.append(AddTaskNode(filename, lineno, tasks, before, after))
|
||||
def handleAddTask(statements, filename, lineno, m):
|
||||
func = m.group("func")
|
||||
before = m.group("before")
|
||||
after = m.group("after")
|
||||
if func is None:
|
||||
return
|
||||
|
||||
def handleDelTask(statements, filename, lineno, tasks):
|
||||
statements.append(DelTaskNode(filename, lineno, tasks))
|
||||
statements.append(AddTaskNode(filename, lineno, func, before, after))
|
||||
|
||||
def handleDelTask(statements, filename, lineno, m):
|
||||
func = m.group(1)
|
||||
if func is None:
|
||||
return
|
||||
|
||||
statements.append(DelTaskNode(filename, lineno, func))
|
||||
|
||||
def handleBBHandlers(statements, filename, lineno, m):
|
||||
statements.append(BBHandlerNode(filename, lineno, m.group(1)))
|
||||
|
||||
def handlePyLib(statements, filename, lineno, m):
|
||||
statements.append(PyLibNode(filename, lineno, m.group(1), m.group(2)))
|
||||
|
||||
def handleInherit(statements, filename, lineno, m):
|
||||
classes = m.group(1)
|
||||
statements.append(InheritNode(filename, lineno, classes))
|
||||
|
||||
def handleInheritDeferred(statements, filename, lineno, m):
|
||||
classes = m.group(1)
|
||||
statements.append(InheritDeferredNode(filename, lineno, classes))
|
||||
|
||||
def handleAddFragments(statements, filename, lineno, m):
|
||||
fragments_path_prefix = m.group(1)
|
||||
fragments_variable = m.group(2)
|
||||
flagged_variables_list_variable = m.group(3)
|
||||
statements.append(AddFragmentsNode(filename, lineno, fragments_path_prefix, fragments_variable, flagged_variables_list_variable))
|
||||
|
||||
def runAnonFuncs(d):
|
||||
code = []
|
||||
for funcname in d.getVar("__BBANONFUNCS", False) or []:
|
||||
code.append("%s(d)" % funcname)
|
||||
bb.utils.better_exec("\n".join(code), {"d": d})
|
||||
|
||||
# Handle recipe level PREFERRED_PROVIDERs
|
||||
def handleVirtRecipeProviders(tasklist, d):
|
||||
depends = (d.getVar("DEPENDS") or "").split()
|
||||
virtprovs = (d.getVar("BB_RECIPE_VIRTUAL_PROVIDERS") or "").split()
|
||||
newdeps = []
|
||||
for dep in depends:
|
||||
if dep in virtprovs:
|
||||
newdep = d.getVar("PREFERRED_PROVIDER_" + dep)
|
||||
if not newdep:
|
||||
bb.fatal("Error, recipe virtual provider PREFERRED_PROVIDER_%s not set" % dep)
|
||||
newdeps.append(newdep)
|
||||
else:
|
||||
newdeps.append(dep)
|
||||
d.setVar("DEPENDS", " ".join(newdeps))
|
||||
for task in tasklist:
|
||||
taskdeps = (d.getVarFlag(task, "depends") or "").split()
|
||||
remapped = []
|
||||
for entry in taskdeps:
|
||||
r, t = entry.split(":")
|
||||
if r in virtprovs:
|
||||
r = d.getVar("PREFERRED_PROVIDER_" + r)
|
||||
remapped.append("%s:%s" % (r, t))
|
||||
d.setVarFlag(task, "depends", " ".join(remapped))
|
||||
|
||||
def finalize(fn, d, variant = None):
|
||||
saved_handlers = bb.event.get_handlers().copy()
|
||||
try:
|
||||
@@ -489,16 +355,12 @@ def finalize(fn, d, variant = None):
|
||||
|
||||
tasklist = d.getVar('__BBTASKS', False) or []
|
||||
bb.event.fire(bb.event.RecipeTaskPreProcess(fn, list(tasklist)), d)
|
||||
handleVirtRecipeProviders(tasklist, d)
|
||||
bb.build.add_tasks(tasklist, d)
|
||||
|
||||
bb.parse.siggen.finalise(fn, d, variant)
|
||||
|
||||
d.setVar('BBINCLUDED', bb.parse.get_file_depends(d))
|
||||
|
||||
if d.getVar('__BBAUTOREV_SEEN') and d.getVar('__BBSRCREV_SEEN') and not d.getVar("__BBAUTOREV_ACTED_UPON"):
|
||||
bb.fatal("AUTOREV/SRCPV set too late for the fetcher to work properly, please set the variables earlier in parsing. Erroring instead of later obtuse build failures.")
|
||||
|
||||
bb.event.fire(bb.event.RecipeParsed(fn), d)
|
||||
finally:
|
||||
bb.event.set_handlers(saved_handlers)
|
||||
@@ -525,14 +387,6 @@ def multi_finalize(fn, d):
|
||||
logger.debug("Appending .bbappend file %s to %s", append, fn)
|
||||
bb.parse.BBHandler.handle(append, d, True)
|
||||
|
||||
while True:
|
||||
inherits = d.getVar('__BBDEFINHERITS', False) or []
|
||||
if not inherits:
|
||||
break
|
||||
inherit, filename, lineno = inherits.pop(0)
|
||||
d.setVar('__BBDEFINHERITS', inherits)
|
||||
bb.parse.BBHandler.inherit(inherit, filename, lineno, d, deferred=True)
|
||||
|
||||
onlyfinalise = d.getVar("__ONLYFINALISE", False)
|
||||
|
||||
safe_d = d
|
||||
|
||||
@@ -21,10 +21,9 @@ from .ConfHandler import include, init
|
||||
|
||||
__func_start_regexp__ = re.compile(r"(((?P<py>python(?=(\s|\()))|(?P<fr>fakeroot(?=\s)))\s*)*(?P<func>[\w\.\-\+\{\}\$:]+)?\s*\(\s*\)\s*{$" )
|
||||
__inherit_regexp__ = re.compile(r"inherit\s+(.+)" )
|
||||
__inherit_def_regexp__ = re.compile(r"inherit_defer\s+(.+)" )
|
||||
__export_func_regexp__ = re.compile(r"EXPORT_FUNCTIONS\s+(.+)" )
|
||||
__addtask_regexp__ = re.compile(r"addtask\s+([^#\n]+)(?P<comment>#.*|.*?)")
|
||||
__deltask_regexp__ = re.compile(r"deltask\s+([^#\n]+)(?P<comment>#.*|.*?)")
|
||||
__addtask_regexp__ = re.compile(r"addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
|
||||
__deltask_regexp__ = re.compile(r"deltask\s+(.+)")
|
||||
__addhandler_regexp__ = re.compile(r"addhandler\s+(.+)" )
|
||||
__def_regexp__ = re.compile(r"def\s+(\w+).*:" )
|
||||
__python_func_regexp__ = re.compile(r"(\s+.*)|(^$)|(^#)" )
|
||||
@@ -34,7 +33,6 @@ __infunc__ = []
|
||||
__inpython__ = False
|
||||
__body__ = []
|
||||
__classname__ = ""
|
||||
__residue__ = []
|
||||
|
||||
cached_statements = {}
|
||||
|
||||
@@ -42,46 +40,31 @@ def supports(fn, d):
|
||||
"""Return True if fn has a supported extension"""
|
||||
return os.path.splitext(fn)[-1] in [".bb", ".bbclass", ".inc"]
|
||||
|
||||
def inherit(files, fn, lineno, d, deferred=False):
|
||||
def inherit(files, fn, lineno, d):
|
||||
__inherit_cache = d.getVar('__inherit_cache', False) or []
|
||||
#if "${" in files and not deferred:
|
||||
# bb.warn("%s:%s has non deferred conditional inherit" % (fn, lineno))
|
||||
files = d.expand(files).split()
|
||||
for file in files:
|
||||
classtype = d.getVar("__bbclasstype", False)
|
||||
origfile = file
|
||||
for t in ["classes-" + classtype, "classes"]:
|
||||
file = origfile
|
||||
if not os.path.isabs(file) and not file.endswith(".bbclass"):
|
||||
file = os.path.join(t, '%s.bbclass' % file)
|
||||
if not os.path.isabs(file) and not file.endswith(".bbclass"):
|
||||
file = os.path.join('classes', '%s.bbclass' % file)
|
||||
|
||||
if not os.path.isabs(file):
|
||||
bbpath = d.getVar("BBPATH")
|
||||
abs_fn, attempts = bb.utils.which(bbpath, file, history=True)
|
||||
for af in attempts:
|
||||
if af != abs_fn:
|
||||
bb.parse.mark_dependency(d, af)
|
||||
if abs_fn:
|
||||
file = abs_fn
|
||||
|
||||
if os.path.exists(file):
|
||||
break
|
||||
|
||||
if not os.path.exists(file):
|
||||
raise ParseError("Could not inherit file %s" % (file), fn, lineno)
|
||||
if not os.path.isabs(file):
|
||||
bbpath = d.getVar("BBPATH")
|
||||
abs_fn, attempts = bb.utils.which(bbpath, file, history=True)
|
||||
for af in attempts:
|
||||
if af != abs_fn:
|
||||
bb.parse.mark_dependency(d, af)
|
||||
if abs_fn:
|
||||
file = abs_fn
|
||||
|
||||
if not file in __inherit_cache:
|
||||
logger.debug("Inheriting %s (from %s:%d)" % (file, fn, lineno))
|
||||
__inherit_cache.append( file )
|
||||
d.setVar('__inherit_cache', __inherit_cache)
|
||||
try:
|
||||
bb.parse.handle(file, d, True)
|
||||
except (IOError, OSError) as exc:
|
||||
raise ParseError("Could not inherit file %s: %s" % (fn, exc.strerror), fn, lineno)
|
||||
include(fn, file, lineno, d, "inherit")
|
||||
__inherit_cache = d.getVar('__inherit_cache', False) or []
|
||||
|
||||
def get_statements(filename, absolute_filename, base_name):
|
||||
global cached_statements, __residue__, __body__
|
||||
global cached_statements
|
||||
|
||||
try:
|
||||
return cached_statements[absolute_filename]
|
||||
@@ -101,17 +84,12 @@ def get_statements(filename, absolute_filename, base_name):
|
||||
# add a blank line to close out any python definition
|
||||
feeder(lineno, "", filename, base_name, statements, eof=True)
|
||||
|
||||
if __residue__:
|
||||
raise ParseError("Unparsed lines %s: %s" % (filename, str(__residue__)), filename, lineno)
|
||||
if __body__:
|
||||
raise ParseError("Unparsed lines from unclosed function %s: %s" % (filename, str(__body__)), filename, lineno)
|
||||
|
||||
if filename.endswith(".bbclass") or filename.endswith(".inc"):
|
||||
cached_statements[absolute_filename] = statements
|
||||
return statements
|
||||
|
||||
def handle(fn, d, include, baseconfig=False):
|
||||
global __infunc__, __body__, __residue__, __classname__
|
||||
def handle(fn, d, include):
|
||||
global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __residue__, __classname__
|
||||
__body__ = []
|
||||
__infunc__ = []
|
||||
__classname__ = ""
|
||||
@@ -163,7 +141,7 @@ def handle(fn, d, include, baseconfig=False):
|
||||
return d
|
||||
|
||||
def feeder(lineno, s, fn, root, statements, eof=False):
|
||||
global __inpython__, __infunc__, __body__, __residue__, __classname__
|
||||
global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__, __infunc__, __body__, bb, __residue__, __classname__
|
||||
|
||||
# Check tabs in python functions:
|
||||
# - def py_funcname(): covered by __inpython__
|
||||
@@ -200,10 +178,10 @@ def feeder(lineno, s, fn, root, statements, eof=False):
|
||||
|
||||
if s and s[0] == '#':
|
||||
if len(__residue__) != 0 and __residue__[0][0] != "#":
|
||||
bb.fatal("There is a comment on line %s of file %s:\n'''\n%s\n'''\nwhich is in the middle of a multiline expression. This syntax is invalid, please correct it." % (lineno, fn, s))
|
||||
bb.fatal("There is a comment on line %s of file %s (%s) which is in the middle of a multiline expression.\nBitbake used to ignore these but no longer does so, please fix your metadata as errors are likely as a result of this change." % (lineno, fn, s))
|
||||
|
||||
if len(__residue__) != 0 and __residue__[0][0] == "#" and (not s or s[0] != "#"):
|
||||
bb.fatal("There is a confusing multiline partially commented expression on line %s of file %s:\n%s\nPlease clarify whether this is all a comment or should be parsed." % (lineno - len(__residue__), fn, "\n".join(__residue__)))
|
||||
bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s))
|
||||
|
||||
if s and s[-1] == '\\':
|
||||
__residue__.append(s[:-1])
|
||||
@@ -239,38 +217,29 @@ def feeder(lineno, s, fn, root, statements, eof=False):
|
||||
|
||||
m = __addtask_regexp__.match(s)
|
||||
if m:
|
||||
after = ""
|
||||
before = ""
|
||||
if len(m.group().split()) == 2:
|
||||
# Check and warn for "addtask task1 task2"
|
||||
m2 = re.match(r"addtask\s+(?P<func>\w+)(?P<ignores>.*)", s)
|
||||
if m2 and m2.group('ignores'):
|
||||
logger.warning('addtask ignored: "%s"' % m2.group('ignores'))
|
||||
|
||||
# This code splits on 'before' and 'after' instead of on whitespace so we can defer
|
||||
# evaluation to as late as possible.
|
||||
tasks = m.group(1).split(" before ")[0].split(" after ")[0]
|
||||
|
||||
for exp in m.group(1).split(" before "):
|
||||
exp2 = exp.split(" after ")
|
||||
if len(exp2) > 1:
|
||||
after = after + " ".join(exp2[1:])
|
||||
|
||||
for exp in m.group(1).split(" after "):
|
||||
exp2 = exp.split(" before ")
|
||||
if len(exp2) > 1:
|
||||
before = before + " ".join(exp2[1:])
|
||||
|
||||
# Check and warn for having task with a keyword as part of task name
|
||||
# Check and warn for "addtask task1 before task2 before task3", the
|
||||
# similar to "after"
|
||||
taskexpression = s.split()
|
||||
for word in ('before', 'after'):
|
||||
if taskexpression.count(word) > 1:
|
||||
logger.warning("addtask contained multiple '%s' keywords, only one is supported" % word)
|
||||
|
||||
# Check and warn for having task with expression as part of task name
|
||||
for te in taskexpression:
|
||||
if any( ( "%s_" % keyword ) in te for keyword in bb.data_smart.__setvar_keyword__ ):
|
||||
raise ParseError("Task name '%s' contains a keyword which is not recommended/supported.\nPlease rename the task not to include the keyword.\n%s" % (te, ("\n".join(map(str, bb.data_smart.__setvar_keyword__)))), fn)
|
||||
|
||||
if tasks is not None:
|
||||
ast.handleAddTask(statements, fn, lineno, tasks, before, after)
|
||||
ast.handleAddTask(statements, fn, lineno, m)
|
||||
return
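As a worked example of the splitting done above, here is the same logic in condensed form on a hypothetical addtask line: the task list is whatever precedes the first "before"/"after", and the remainder feeds the two dependency strings.

    line = "addtask do_deploy after do_install before do_image"   # hypothetical line
    body = line.split(None, 1)[1]

    tasks = body.split(" before ")[0].split(" after ")[0]
    after = " ".join(exp.split(" after ", 1)[1]
                     for exp in body.split(" before ") if " after " in exp)
    before = " ".join(exp.split(" before ", 1)[1]
                      for exp in body.split(" after ") if " before " in exp)

    print(tasks)    # -> "do_deploy"
    print(after)    # -> "do_install"
    print(before)   # -> "do_image"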
|
||||
|
||||
m = __deltask_regexp__.match(s)
|
||||
if m:
|
||||
task = m.group(1)
|
||||
if task is not None:
|
||||
ast.handleDelTask(statements, fn, lineno, task)
|
||||
ast.handleDelTask(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
m = __addhandler_regexp__.match(s)
|
||||
@@ -283,12 +252,7 @@ def feeder(lineno, s, fn, root, statements, eof=False):
|
||||
ast.handleInherit(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
m = __inherit_def_regexp__.match(s)
|
||||
if m:
|
||||
ast.handleInheritDeferred(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
return ConfHandler.feeder(lineno, s, fn, statements, conffile=False)
|
||||
return ConfHandler.feeder(lineno, s, fn, statements)
|
||||
|
||||
# Add us to the handlers list
|
||||
from .. import handlers
|
||||
|
||||
@@ -21,7 +21,7 @@ __config_regexp__ = re.compile( r"""
|
||||
^
|
||||
(?P<exp>export\s+)?
|
||||
(?P<var>[a-zA-Z0-9\-_+.${}/~:]+?)
|
||||
(\[(?P<flag>[a-zA-Z0-9\-_+.][a-zA-Z0-9\-_+.@/]*)\])?
|
||||
(\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?
|
||||
|
||||
\s* (
|
||||
(?P<colon>:=) |
|
||||
@@ -43,12 +43,9 @@ __config_regexp__ = re.compile( r"""
|
||||
""", re.X)
|
||||
__include_regexp__ = re.compile( r"include\s+(.+)" )
|
||||
__require_regexp__ = re.compile( r"require\s+(.+)" )
|
||||
__includeall_regexp__ = re.compile( r"include_all\s+(.+)" )
|
||||
__export_regexp__ = re.compile( r"export\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
|
||||
__unset_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
|
||||
__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)\[([a-zA-Z0-9\-_+.][a-zA-Z0-9\-_+.@]+)\]$" )
|
||||
__addpylib_regexp__ = re.compile(r"addpylib\s+(.+)\s+(.+)" )
|
||||
__addfragments_regexp__ = re.compile(r"addfragments\s+(.+)\s+(.+)\s+(.+)" )
|
||||
__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)\[([a-zA-Z0-9\-_+.]+)\]$" )
|
||||
|
||||
def init(data):
|
||||
return
|
||||
@@ -105,12 +102,12 @@ def include_single_file(parentfn, fn, lineno, data, error_out):
|
||||
# We have an issue where a UI might want to enforce particular settings such as
|
||||
# an empty DISTRO variable. If configuration files do something like assigning
|
||||
# a weak default, it turns out to be very difficult to filter out these changes,
|
||||
# particularly when the weak default might appear half way though parsing a chain
|
||||
# particularly when the weak default might appear half way though parsing a chain
|
||||
# of configuration files. We therefore let the UIs hook into configuration file
|
||||
# parsing. This turns out to be a hard problem to solve any other way.
|
||||
confFilters = []
|
||||
|
||||
def handle(fn, data, include, baseconfig=False):
|
||||
def handle(fn, data, include):
|
||||
init(data)
|
||||
|
||||
if include == 0:
|
||||
@@ -128,26 +125,21 @@ def handle(fn, data, include, baseconfig=False):
|
||||
s = f.readline()
|
||||
if not s:
|
||||
break
|
||||
origlineno = lineno
|
||||
origline = s
|
||||
w = s.strip()
|
||||
# skip empty lines
|
||||
if not w:
|
||||
continue
|
||||
s = s.rstrip()
|
||||
while s[-1] == '\\':
|
||||
line = f.readline()
|
||||
origline += line
|
||||
s2 = line.rstrip()
|
||||
s2 = f.readline().rstrip()
|
||||
lineno = lineno + 1
|
||||
if (not s2 or s2 and s2[0] != "#") and s[0] == "#" :
|
||||
bb.fatal("There is a confusing multiline, partially commented expression starting on line %s of file %s:\n%s\nPlease clarify whether this is all a comment or should be parsed." % (origlineno, fn, origline))
|
||||
|
||||
bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s))
|
||||
s = s[:-1] + s2
|
||||
# skip comments
|
||||
if s[0] == '#':
|
||||
continue
|
||||
feeder(lineno, s, abs_fn, statements, baseconfig=baseconfig)
|
||||
feeder(lineno, s, abs_fn, statements)
|
||||
|
||||
# DONE WITH PARSING... time to evaluate
|
||||
data.setVar('FILE', abs_fn)
|
||||
@@ -155,14 +147,14 @@ def handle(fn, data, include, baseconfig=False):
|
||||
if oldfile:
|
||||
data.setVar('FILE', oldfile)
|
||||
|
||||
f.close()
|
||||
|
||||
for f in confFilters:
|
||||
f(fn, data)
|
||||
|
||||
return data
|
||||
|
||||
# baseconfig is set for the bblayers/layer.conf cookerdata config parsing
|
||||
# The function is also used by BBHandler, conffile would be False
|
||||
def feeder(lineno, s, fn, statements, baseconfig=False, conffile=True):
|
||||
def feeder(lineno, s, fn, statements):
|
||||
m = __config_regexp__.match(s)
|
||||
if m:
|
||||
groupd = m.groupdict()
|
||||
@@ -179,11 +171,6 @@ def feeder(lineno, s, fn, statements, baseconfig=False, conffile=True):
|
||||
ast.handleInclude(statements, fn, lineno, m, True)
|
||||
return
|
||||
|
||||
m = __includeall_regexp__.match(s)
|
||||
if m:
|
||||
ast.handleIncludeAll(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
m = __export_regexp__.match(s)
|
||||
if m:
|
||||
ast.handleExport(statements, fn, lineno, m)
|
||||
@@ -199,16 +186,6 @@ def feeder(lineno, s, fn, statements, baseconfig=False, conffile=True):
|
||||
ast.handleUnsetFlag(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
m = __addpylib_regexp__.match(s)
|
||||
if baseconfig and conffile and m:
|
||||
ast.handlePyLib(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
m = __addfragments_regexp__.match(s)
|
||||
if m:
|
||||
ast.handleAddFragments(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
raise ParseError("unparsed line: '%s'" % s, fn, lineno);
|
||||
|
||||
# Add us to the handlers list
|
||||
|
||||
252  bitbake/lib/bb/persist_data.py (Normal file)
@@ -0,0 +1,252 @@
|
||||
"""BitBake Persistent Data Store
|
||||
|
||||
Used to store data in a central location such that other threads/tasks can
|
||||
access them at some future date. Acts as a convenience wrapper around sqlite,
|
||||
currently, providing a key/value store accessed by 'domain'.
|
||||
"""
|
||||
|
||||
# Copyright (C) 2007 Richard Purdie
|
||||
# Copyright (C) 2010 Chris Larson <chris_larson@mentor.com>
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import collections
|
||||
import collections.abc
|
||||
import contextlib
|
||||
import functools
|
||||
import logging
|
||||
import os.path
|
||||
import sqlite3
|
||||
import sys
|
||||
from collections.abc import Mapping
|
||||
|
||||
sqlversion = sqlite3.sqlite_version_info
|
||||
if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
|
||||
raise Exception("sqlite3 version 3.3.0 or later is required.")
|
||||
|
||||
|
||||
logger = logging.getLogger("BitBake.PersistData")
|
||||
|
||||
@functools.total_ordering
|
||||
class SQLTable(collections.abc.MutableMapping):
|
||||
class _Decorators(object):
|
||||
@staticmethod
|
||||
def retry(*, reconnect=True):
|
||||
"""
|
||||
Decorator that restarts a function if a database locked sqlite
|
||||
exception occurs. If reconnect is True, the database connection
|
||||
will be closed and reopened each time a failure occurs
|
||||
"""
|
||||
def retry_wrapper(f):
|
||||
def wrap_func(self, *args, **kwargs):
|
||||
# Reconnect if necessary
|
||||
if self.connection is None and reconnect:
|
||||
self.reconnect()
|
||||
|
||||
count = 0
|
||||
while True:
|
||||
try:
|
||||
return f(self, *args, **kwargs)
|
||||
except sqlite3.OperationalError as exc:
|
||||
if count < 500 and ('is locked' in str(exc) or 'locking protocol' in str(exc)):
|
||||
count = count + 1
|
||||
if reconnect:
|
||||
self.reconnect()
|
||||
continue
|
||||
raise
|
||||
return wrap_func
|
||||
return retry_wrapper
|
||||
|
||||
@staticmethod
|
||||
def transaction(f):
|
||||
"""
|
||||
Decorator that starts a database transaction and creates a database
|
||||
cursor for performing queries. If no exception is thrown, the
|
||||
database results are committed. If an exception occurs, the database
|
||||
is rolled back. In all cases, the cursor is closed after the
|
||||
function ends.
|
||||
|
||||
Note that the cursor is passed as an extra argument to the function
|
||||
after `self` and before any of the normal arguments
|
||||
"""
|
||||
def wrap_func(self, *args, **kwargs):
|
||||
# Context manager will COMMIT the database on success,
|
||||
# or ROLLBACK on an exception
|
||||
with self.connection:
|
||||
# Automatically close the cursor when done
|
||||
with contextlib.closing(self.connection.cursor()) as cursor:
|
||||
return f(self, cursor, *args, **kwargs)
|
||||
return wrap_func
|
||||
|
||||
"""Object representing a table/domain in the database"""
|
||||
def __init__(self, cachefile, table):
|
||||
self.cachefile = cachefile
|
||||
self.table = table
|
||||
|
||||
self.connection = None
|
||||
self._execute_single("CREATE TABLE IF NOT EXISTS %s(key TEXT PRIMARY KEY NOT NULL, value TEXT);" % table)
|
||||
|
||||
@_Decorators.retry(reconnect=False)
|
||||
@_Decorators.transaction
|
||||
def _setup_database(self, cursor):
|
||||
cursor.execute("pragma synchronous = off;")
|
||||
# Enable WAL and keep the autocheckpoint length small (the default is
|
||||
# usually 1000). Persistent caches are usually read-mostly, so keeping
|
||||
# this short will keep readers running quickly
|
||||
cursor.execute("pragma journal_mode = WAL;")
|
||||
cursor.execute("pragma wal_autocheckpoint = 100;")
|
||||
|
||||
def reconnect(self):
|
||||
if self.connection is not None:
|
||||
self.connection.close()
|
||||
self.connection = sqlite3.connect(self.cachefile, timeout=5)
|
||||
self.connection.text_factory = str
|
||||
self._setup_database()
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def _execute_single(self, cursor, *query):
|
||||
"""
|
||||
Executes a single query and discards the results. This correctly closes
|
||||
the database cursor when finished
|
||||
"""
|
||||
cursor.execute(*query)
|
||||
|
||||
@_Decorators.retry()
|
||||
def _row_iter(self, f, *query):
|
||||
"""
|
||||
Helper function that returns a row iterator. Each time __next__ is
|
||||
called on the iterator, the provided function is evaluated to determine
|
||||
the return value
|
||||
"""
|
||||
class CursorIter(object):
|
||||
def __init__(self, cursor):
|
||||
self.cursor = cursor
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
row = self.cursor.fetchone()
|
||||
if row is None:
|
||||
self.cursor.close()
|
||||
raise StopIteration
|
||||
return f(row)
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, typ, value, traceback):
|
||||
self.cursor.close()
|
||||
return False
|
||||
|
||||
cursor = self.connection.cursor()
|
||||
try:
|
||||
cursor.execute(*query)
|
||||
return CursorIter(cursor)
|
||||
except:
|
||||
cursor.close()
|
||||
|
||||
def __enter__(self):
|
||||
self.connection.__enter__()
|
||||
return self
|
||||
|
||||
def __exit__(self, *excinfo):
|
||||
self.connection.__exit__(*excinfo)
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def __getitem__(self, cursor, key):
|
||||
cursor.execute("SELECT * from %s where key=?;" % self.table, [key])
|
||||
row = cursor.fetchone()
|
||||
if row is not None:
|
||||
return row[1]
|
||||
raise KeyError(key)
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def __delitem__(self, cursor, key):
|
||||
if key not in self:
|
||||
raise KeyError(key)
|
||||
cursor.execute("DELETE from %s where key=?;" % self.table, [key])
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def __setitem__(self, cursor, key, value):
|
||||
if not isinstance(key, str):
|
||||
raise TypeError('Only string keys are supported')
|
||||
elif not isinstance(value, str):
|
||||
raise TypeError('Only string values are supported')
|
||||
|
||||
# Ensure the entire transaction (including SELECT) executes under write lock
|
||||
cursor.execute("BEGIN EXCLUSIVE")
|
||||
|
||||
cursor.execute("SELECT * from %s where key=?;" % self.table, [key])
|
||||
row = cursor.fetchone()
|
||||
if row is not None:
|
||||
cursor.execute("UPDATE %s SET value=? WHERE key=?;" % self.table, [value, key])
|
||||
else:
|
||||
cursor.execute("INSERT into %s(key, value) values (?, ?);" % self.table, [key, value])
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def __contains__(self, cursor, key):
|
||||
cursor.execute('SELECT * from %s where key=?;' % self.table, [key])
|
||||
return cursor.fetchone() is not None
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def __len__(self, cursor):
|
||||
cursor.execute("SELECT COUNT(key) FROM %s;" % self.table)
|
||||
row = cursor.fetchone()
|
||||
if row is not None:
|
||||
return row[0]
|
||||
|
||||
def __iter__(self):
|
||||
return self._row_iter(lambda row: row[0], "SELECT key from %s;" % self.table)
|
||||
|
||||
def __lt__(self, other):
|
||||
if not isinstance(other, Mapping):
|
||||
raise NotImplemented
|
||||
|
||||
return len(self) < len(other)
|
||||
|
||||
def get_by_pattern(self, pattern):
|
||||
return self._row_iter(lambda row: row[1], "SELECT * FROM %s WHERE key LIKE ?;" %
|
||||
self.table, [pattern])
|
||||
|
||||
def values(self):
|
||||
return list(self.itervalues())
|
||||
|
||||
def itervalues(self):
|
||||
return self._row_iter(lambda row: row[0], "SELECT value FROM %s;" %
|
||||
self.table)
|
||||
|
||||
def items(self):
|
||||
return list(self.iteritems())
|
||||
|
||||
def iteritems(self):
|
||||
return self._row_iter(lambda row: (row[0], row[1]), "SELECT * FROM %s;" %
|
||||
self.table)
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def clear(self, cursor):
|
||||
cursor.execute("DELETE FROM %s;" % self.table)
|
||||
|
||||
def has_key(self, key):
|
||||
return key in self
|
||||
|
||||
def persist(domain, d):
|
||||
"""Convenience factory for SQLTable objects based upon metadata"""
|
||||
import bb.utils
|
||||
cachedir = (d.getVar("PERSISTENT_DIR") or
|
||||
d.getVar("CACHE"))
|
||||
if not cachedir:
|
||||
logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
|
||||
sys.exit(1)
|
||||
|
||||
bb.utils.mkdirhier(cachedir)
|
||||
cachefile = os.path.join(cachedir, "bb_persist_data.sqlite3")
|
||||
return SQLTable(cachefile, domain)
|
||||
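As described in its docstring, this module wraps sqlite in a dict-like key/value store keyed by "domain". A minimal usage sketch (not part of the diff), assuming the file above is importable as bb.persist_data:

# Minimal usage sketch.  SQLTable behaves like a dict of strings backed by an
# sqlite file, so values survive across separate bitbake invocations.
import os, tempfile
from bb.persist_data import SQLTable

cachefile = os.path.join(tempfile.mkdtemp(), "bb_persist_data.sqlite3")
table = SQLTable(cachefile, "BB_URI_HEADREVS")    # the "domain" maps to a table name

table["git://example.com/repo"] = "deadbeef"      # __setitem__ runs INSERT/UPDATE in a transaction
print(table["git://example.com/repo"])            # __getitem__ -> "deadbeef"
print("git://example.com/repo" in table)          # __contains__ -> True
print(len(table))                                 # __len__ -> 1

Within BitBake itself the persist(domain, d) factory is used instead; it derives the cache file location from PERSISTENT_DIR or CACHE in the datastore, as shown at the end of the file.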
@@ -1,6 +1,4 @@
#
# Copyright BitBake Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#

@@ -396,8 +396,8 @@ def getRuntimeProviders(dataCache, rdepend):
        return rproviders

    # Only search dynamic packages if we can't find anything in other variables
    for pat_key in dataCache.packages_dynamic:
        pattern = pat_key.replace(r'+', r"\+")
    for pattern in dataCache.packages_dynamic:
        pattern = pattern.replace(r'+', r"\+")
        if pattern in regexp_cache:
            regexp = regexp_cache[pattern]
        else:
@@ -408,7 +408,7 @@ def getRuntimeProviders(dataCache, rdepend):
                raise
            regexp_cache[pattern] = regexp
        if regexp.match(rdepend):
            rproviders += dataCache.packages_dynamic[pat_key]
            rproviders += dataCache.packages_dynamic[pattern]
            logger.debug("Assuming %s is a dynamic package, but it may not exist" % rdepend)

    return rproviders
File diff suppressed because it is too large
@@ -20,6 +20,7 @@ import os
|
||||
import sys
|
||||
import time
|
||||
import select
|
||||
import signal
|
||||
import socket
|
||||
import subprocess
|
||||
import errno
|
||||
@@ -28,7 +29,6 @@ import datetime
|
||||
import pickle
|
||||
import traceback
|
||||
import gc
|
||||
import stat
|
||||
import bb.server.xmlrpcserver
|
||||
from bb import daemonize
|
||||
from multiprocessing import queues
|
||||
@@ -38,46 +38,9 @@ logger = logging.getLogger('BitBake')
|
||||
class ProcessTimeout(SystemExit):
|
||||
pass
|
||||
|
||||
def currenttime():
    return datetime.datetime.now().strftime('%H:%M:%S.%f')

def serverlog(msg):
    print(str(os.getpid()) + " " + currenttime() + " " + msg)
    #Seems a flush here triggers filesystem sync like behaviour and long hangs in the server
    #sys.stdout.flush()

#
# When we have lockfile issues, try and find information about which process is
# using the lockfile
#
def get_lockfile_process_msg(lockfile):
    # Some systems may not have lsof available
    procs = None
    try:
        procs = subprocess.check_output(["lsof", '-w', lockfile], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        # File was deleted?
        pass
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
    if procs is None:
        # Fall back to fuser if lsof is unavailable
        try:
            procs = subprocess.check_output(["fuser", '-v', lockfile], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError:
            # File was deleted?
            pass
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
    if procs:
        return procs.decode("utf-8")
    return None

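The helper above shells out to lsof and falls back to fuser to report which processes still hold the lockfile. A usage sketch (not part of the diff; the path is hypothetical), mirroring how the server uses it when shutdown stalls:

# Usage sketch: report anything still holding the build directory's bitbake.lock.
lockfile = "/path/to/builddir/bitbake.lock"   # hypothetical path

msg = get_lockfile_process_msg(lockfile)
if msg:
    serverlog("Delaying shutdown, lock is held by:\n" + msg)
else:
    serverlog("Nothing visible is holding %s" % lockfile)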
class idleFinish():
|
||||
def __init__(self, msg):
|
||||
self.msg = msg
|
||||
print(str(os.getpid()) + " " + datetime.datetime.now().strftime('%H:%M:%S.%f') + " " + msg)
|
||||
sys.stdout.flush()
|
||||
|
||||
class ProcessServer():
|
||||
profile_filename = "profile.log"
|
||||
@@ -96,19 +59,12 @@ class ProcessServer():
|
||||
self.maxuiwait = 30
|
||||
self.xmlrpc = False
|
||||
|
||||
self.idle = None
|
||||
# Need a lock for _idlefuns changes
|
||||
self._idlefuns = {}
|
||||
self._idlefuncsLock = threading.Lock()
|
||||
self.idle_cond = threading.Condition(self._idlefuncsLock)
|
||||
|
||||
self.bitbake_lock = lock
|
||||
self.bitbake_lock_name = lockname
|
||||
self.sock = sock
|
||||
self.sockname = sockname
|
||||
# It is possible the directory may be renamed. Cache the inode of the socket file
|
||||
# so we can tell if things changed.
|
||||
self.sockinode = os.stat(self.sockname)[stat.ST_INO]
|
||||
|
||||
self.server_timeout = server_timeout
|
||||
self.timeout = self.server_timeout
|
||||
@@ -117,9 +73,7 @@ class ProcessServer():
|
||||
def register_idle_function(self, function, data):
|
||||
"""Register a function to be called while the server is idle"""
|
||||
assert hasattr(function, '__call__')
|
||||
with bb.utils.lock_timeout(self._idlefuncsLock):
|
||||
self._idlefuns[function] = data
|
||||
serverlog("Registering idle function %s" % str(function))
|
||||
self._idlefuns[function] = data
|
||||
|
||||
def run(self):
|
||||
|
||||
@@ -158,31 +112,6 @@ class ProcessServer():
|
||||
|
||||
return ret
|
||||
|
||||
def _idle_check(self):
|
||||
return len(self._idlefuns) == 0 and self.cooker.command.currentAsyncCommand is None
|
||||
|
||||
def wait_for_idle(self, timeout=30):
|
||||
# Wait for the idle loop to have cleared
|
||||
with bb.utils.lock_timeout(self._idlefuncsLock):
|
||||
return self.idle_cond.wait_for(self._idle_check, timeout) is not False
|
||||
|
||||
def set_async_cmd(self, cmd):
|
||||
with bb.utils.lock_timeout(self._idlefuncsLock):
|
||||
ret = self.idle_cond.wait_for(self._idle_check, 30)
|
||||
if ret is False:
|
||||
return False
|
||||
self.cooker.command.currentAsyncCommand = cmd
|
||||
return True
|
||||
|
||||
def clear_async_cmd(self):
|
||||
with bb.utils.lock_timeout(self._idlefuncsLock):
|
||||
self.cooker.command.currentAsyncCommand = None
|
||||
self.idle_cond.notify_all()
|
||||
|
||||
def get_async_cmd(self):
|
||||
with bb.utils.lock_timeout(self._idlefuncsLock):
|
||||
return self.cooker.command.currentAsyncCommand
|
||||
|
||||
def main(self):
|
||||
self.cooker.pre_serve()
|
||||
|
||||
@@ -197,19 +126,14 @@ class ProcessServer():
|
||||
fds.append(self.xmlrpc)
|
||||
seendata = False
|
||||
serverlog("Entering server connection loop")
|
||||
serverlog("Lockfile is: %s\nSocket is %s (%s)" % (self.bitbake_lock_name, self.sockname, os.path.exists(self.sockname)))
|
||||
|
||||
def disconnect_client(self, fds):
|
||||
serverlog("Disconnecting Client (socket: %s)" % os.path.exists(self.sockname))
|
||||
serverlog("Disconnecting Client")
|
||||
if self.controllersock:
|
||||
fds.remove(self.controllersock)
|
||||
self.controllersock.close()
|
||||
self.controllersock = False
|
||||
if self.haveui:
|
||||
# Wait for the idle loop to have cleared (30s max)
|
||||
if not self.wait_for_idle(30):
|
||||
serverlog("Idle loop didn't finish queued commands after 30s, exiting.")
|
||||
self.quit = True
|
||||
fds.remove(self.command_channel)
|
||||
bb.event.unregister_UIHhandler(self.event_handle, True)
|
||||
self.command_channel_reply.writer.close()
|
||||
@@ -221,7 +145,7 @@ class ProcessServer():
|
||||
self.cooker.clientComplete()
|
||||
self.haveui = False
|
||||
ready = select.select(fds,[],[],0)[0]
|
||||
if newconnections and not self.quit:
|
||||
if newconnections:
|
||||
serverlog("Starting new client")
|
||||
conn = newconnections.pop(-1)
|
||||
fds.append(conn)
|
||||
@@ -293,10 +217,8 @@ class ProcessServer():
|
||||
continue
|
||||
try:
|
||||
serverlog("Running command %s" % command)
|
||||
reply = self.cooker.command.runCommand(command, self)
|
||||
serverlog("Sending reply %s" % repr(reply))
|
||||
self.command_channel_reply.send(reply)
|
||||
serverlog("Command Completed (socket: %s)" % os.path.exists(self.sockname))
|
||||
self.command_channel_reply.send(self.cooker.command.runCommand(command))
|
||||
serverlog("Command Completed")
|
||||
except Exception as e:
|
||||
stack = traceback.format_exc()
|
||||
serverlog('Exception in server main event loop running command %s (%s)' % (command, stack))
|
||||
@@ -321,42 +243,18 @@ class ProcessServer():
|
||||
bb.warn('Ignoring invalid BB_SERVER_TIMEOUT=%s, must be a float specifying seconds.' % self.timeout)
|
||||
seendata = True
|
||||
|
||||
if not self.idle:
|
||||
self.idle = threading.Thread(target=self.idle_thread)
|
||||
self.idle.start()
|
||||
elif self.idle and not self.idle.is_alive():
|
||||
serverlog("Idle thread terminated, main thread exiting too")
|
||||
bb.error("Idle thread terminated, main thread exiting too")
|
||||
self.quit = True
|
||||
ready = self.idle_commands(.1, fds)
|
||||
|
||||
nextsleep = 1.0
|
||||
if self.xmlrpc:
|
||||
nextsleep = self.xmlrpc.get_timeout(nextsleep)
|
||||
try:
|
||||
ready = select.select(fds,[],[],nextsleep)[0]
|
||||
except InterruptedError:
|
||||
# Ignore EINTR
|
||||
ready = []
|
||||
|
||||
if self.idle:
|
||||
self.idle.join()
|
||||
|
||||
serverlog("Exiting (socket: %s)" % os.path.exists(self.sockname))
|
||||
serverlog("Exiting")
|
||||
# Remove the socket file so we don't get any more connections to avoid races
|
||||
# The build directory could have been renamed so if the file isn't the one we created
|
||||
# we shouldn't delete it.
|
||||
try:
|
||||
sockinode = os.stat(self.sockname)[stat.ST_INO]
|
||||
if sockinode == self.sockinode:
|
||||
os.unlink(self.sockname)
|
||||
else:
|
||||
serverlog("bitbake.sock inode mismatch (%s vs %s), not deleting." % (sockinode, self.sockinode))
|
||||
except Exception as err:
|
||||
serverlog("Removing socket file '%s' failed (%s)" % (self.sockname, err))
|
||||
os.unlink(self.sockname)
|
||||
except:
|
||||
pass
|
||||
self.sock.close()
|
||||
|
||||
try:
|
||||
self.cooker.shutdown(True, idle=False)
|
||||
self.cooker.shutdown(True)
|
||||
self.cooker.notifier.stop()
|
||||
self.cooker.confignotifier.stop()
|
||||
except:
|
||||
@@ -382,21 +280,20 @@ class ProcessServer():
|
||||
except FileNotFoundError:
|
||||
return None
|
||||
|
||||
lockcontents = get_lock_contents(lockfile)
|
||||
serverlog("Original lockfile contents: " + str(lockcontents))
|
||||
|
||||
lock.close()
|
||||
lock = None
|
||||
|
||||
while not lock:
|
||||
i = 0
|
||||
lock = None
|
||||
if not os.path.exists(os.path.basename(lockfile)):
|
||||
serverlog("Lockfile directory gone, exiting.")
|
||||
return
|
||||
|
||||
while not lock and i < 30:
|
||||
lock = bb.utils.lockfile(lockfile, shared=False, retry=False, block=False)
|
||||
if not lock:
|
||||
newlockcontents = get_lock_contents(lockfile)
|
||||
if not newlockcontents[0].startswith([f"{os.getpid()}\n", f"{os.getpid()} "]):
|
||||
if newlockcontents != lockcontents:
|
||||
# A new server was started, the lockfile contents changed, we can exit
|
||||
serverlog("Lockfile now contains different contents, exiting: " + str(newlockcontents))
|
||||
return
|
||||
@@ -410,95 +307,92 @@ class ProcessServer():
|
||||
return
|
||||
|
||||
if not lock:
|
||||
procs = get_lockfile_process_msg(lockfile)
|
||||
# Some systems may not have lsof available
|
||||
procs = None
|
||||
try:
|
||||
procs = subprocess.check_output(["lsof", '-w', lockfile], stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError:
|
||||
# File was deleted?
|
||||
continue
|
||||
except OSError as e:
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
if procs is None:
|
||||
# Fall back to fuser if lsof is unavailable
|
||||
try:
|
||||
procs = subprocess.check_output(["fuser", '-v', lockfile], stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError:
|
||||
# File was deleted?
|
||||
continue
|
||||
except OSError as e:
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
|
||||
msg = ["Delaying shutdown due to active processes which appear to be holding bitbake.lock"]
|
||||
if procs:
|
||||
msg.append(":\n%s" % procs)
|
||||
msg.append(":\n%s" % str(procs.decode("utf-8")))
|
||||
serverlog("".join(msg))
|
||||
|
||||
def idle_thread(self):
|
||||
if self.cooker.configuration.profile:
|
||||
try:
|
||||
import cProfile as profile
|
||||
except:
|
||||
import profile
|
||||
prof = profile.Profile()
|
||||
|
||||
ret = profile.Profile.runcall(prof, self.idle_thread_internal)
|
||||
|
||||
prof.dump_stats("profile-mainloop.log")
|
||||
bb.utils.process_profilelog("profile-mainloop.log")
|
||||
serverlog("Raw profiling information saved to profile-mainloop.log and processed statistics to profile-mainloop.log.processed")
|
||||
else:
|
||||
self.idle_thread_internal()
|
||||
|
||||
def idle_thread_internal(self):
|
||||
def remove_idle_func(function):
|
||||
with bb.utils.lock_timeout(self._idlefuncsLock):
|
||||
del self._idlefuns[function]
|
||||
self.idle_cond.notify_all()
|
||||
|
||||
while not self.quit:
|
||||
nextsleep = 1.0
|
||||
def idle_commands(self, delay, fds=None):
|
||||
nextsleep = delay
|
||||
if not fds:
|
||||
fds = []
|
||||
|
||||
with bb.utils.lock_timeout(self._idlefuncsLock):
|
||||
items = list(self._idlefuns.items())
|
||||
for function, data in list(self._idlefuns.items()):
|
||||
try:
|
||||
retval = function(self, data, False)
|
||||
if retval is False:
|
||||
del self._idlefuns[function]
|
||||
nextsleep = None
|
||||
elif retval is True:
|
||||
nextsleep = None
|
||||
elif isinstance(retval, float) and nextsleep:
|
||||
if (retval < nextsleep):
|
||||
nextsleep = retval
|
||||
elif nextsleep is None:
|
||||
continue
|
||||
else:
|
||||
fds = fds + retval
|
||||
except SystemExit:
|
||||
raise
|
||||
except Exception as exc:
|
||||
if not isinstance(exc, bb.BBHandledException):
|
||||
logger.exception('Running idle function')
|
||||
del self._idlefuns[function]
|
||||
self.quit = True
|
||||
|
||||
for function, data in items:
|
||||
# Create new heartbeat event?
|
||||
now = time.time()
|
||||
if now >= self.next_heartbeat:
|
||||
# We might have missed heartbeats. Just trigger once in
|
||||
# that case and continue after the usual delay.
|
||||
self.next_heartbeat += self.heartbeat_seconds
|
||||
if self.next_heartbeat <= now:
|
||||
self.next_heartbeat = now + self.heartbeat_seconds
|
||||
if hasattr(self.cooker, "data"):
|
||||
heartbeat = bb.event.HeartbeatEvent(now)
|
||||
try:
|
||||
retval = function(self, data, False)
|
||||
if isinstance(retval, idleFinish):
|
||||
serverlog("Removing idle function %s at idleFinish" % str(function))
|
||||
remove_idle_func(function)
|
||||
self.cooker.command.finishAsyncCommand(retval.msg)
|
||||
nextsleep = None
|
||||
elif retval is False:
|
||||
serverlog("Removing idle function %s" % str(function))
|
||||
remove_idle_func(function)
|
||||
nextsleep = None
|
||||
elif retval is True:
|
||||
nextsleep = None
|
||||
elif isinstance(retval, float) and nextsleep:
|
||||
if (retval < nextsleep):
|
||||
nextsleep = retval
|
||||
elif nextsleep is None:
|
||||
continue
|
||||
else:
|
||||
fds = fds + retval
|
||||
except SystemExit:
|
||||
raise
|
||||
bb.event.fire(heartbeat, self.cooker.data)
|
||||
except Exception as exc:
|
||||
if not isinstance(exc, bb.BBHandledException):
|
||||
logger.exception('Running idle function')
|
||||
remove_idle_func(function)
|
||||
serverlog("Exception %s broke the idle_thread, exiting" % traceback.format_exc())
|
||||
logger.exception('Running heartbeat function')
|
||||
self.quit = True
|
||||
if nextsleep and now + nextsleep > self.next_heartbeat:
|
||||
# Shorten timeout so that we we wake up in time for
|
||||
# the heartbeat.
|
||||
nextsleep = self.next_heartbeat - now
|
||||
|
||||
# Create new heartbeat event?
|
||||
now = time.time()
|
||||
if items and bb.event._heartbeat_enabled and now >= self.next_heartbeat:
|
||||
# We might have missed heartbeats. Just trigger once in
|
||||
# that case and continue after the usual delay.
|
||||
self.next_heartbeat += self.heartbeat_seconds
|
||||
if self.next_heartbeat <= now:
|
||||
self.next_heartbeat = now + self.heartbeat_seconds
|
||||
if hasattr(self.cooker, "data"):
|
||||
heartbeat = bb.event.HeartbeatEvent(now)
|
||||
try:
|
||||
bb.event.fire(heartbeat, self.cooker.data)
|
||||
except Exception as exc:
|
||||
if not isinstance(exc, bb.BBHandledException):
|
||||
logger.exception('Running heartbeat function')
|
||||
serverlog("Exception %s broke in idle_thread, exiting" % traceback.format_exc())
|
||||
self.quit = True
|
||||
if nextsleep and bb.event._heartbeat_enabled and now + nextsleep > self.next_heartbeat:
|
||||
# Shorten timeout so that we we wake up in time for
|
||||
# the heartbeat.
|
||||
nextsleep = self.next_heartbeat - now
|
||||
if nextsleep is not None:
|
||||
if self.xmlrpc:
|
||||
nextsleep = self.xmlrpc.get_timeout(nextsleep)
|
||||
try:
|
||||
return select.select(fds,[],[],nextsleep)[0]
|
||||
except InterruptedError:
|
||||
# Ignore EINTR
|
||||
return []
|
||||
else:
|
||||
return select.select(fds,[],[],0)[0]
|
||||
|
||||
if nextsleep is not None:
|
||||
select.select(fds,[],[],nextsleep)[0]
|
||||
|
||||
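The idle loop above fires a HeartbeatEvent every heartbeat_seconds; if several beats were missed it triggers only once and re-bases the next deadline, and it shortens the select() timeout so the loop wakes in time for the next beat. A standalone sketch of that scheduling arithmetic (not part of the diff, function name invented here):

# Standalone sketch of the heartbeat scheduling: fire once even if beats were
# missed, re-base the next deadline, and never sleep past it.
import time

def next_heartbeat_and_sleep(next_heartbeat, heartbeat_seconds, nextsleep, now=None):
    now = time.time() if now is None else now
    fire = now >= next_heartbeat
    if fire:
        next_heartbeat += heartbeat_seconds
        if next_heartbeat <= now:
            # We missed one or more beats; don't try to replay them all
            next_heartbeat = now + heartbeat_seconds
    if nextsleep and now + nextsleep > next_heartbeat:
        # Shorten the poll timeout so we wake for the next beat
        nextsleep = next_heartbeat - now
    return fire, next_heartbeat, nextsleep

# A 1s poll with a 10s heartbeat that is 25s overdue fires exactly once:
print(next_heartbeat_and_sleep(100.0, 10.0, 1.0, now=125.0))   # (True, 135.0, 1.0)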
class ServerCommunicator():
|
||||
def __init__(self, connection, recv):
|
||||
@@ -506,18 +400,12 @@ class ServerCommunicator():
|
||||
self.recv = recv
|
||||
|
||||
def runCommand(self, command):
|
||||
try:
|
||||
self.connection.send(command)
|
||||
except BrokenPipeError as e:
|
||||
raise BrokenPipeError("bitbake-server might have died or been forcibly stopped, ie. OOM killed") from e
|
||||
self.connection.send(command)
|
||||
if not self.recv.poll(30):
|
||||
logger.info("No reply from server in 30s (for command %s at %s)" % (command[0], currenttime()))
|
||||
logger.info("No reply from server in 30s")
|
||||
if not self.recv.poll(30):
|
||||
raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server (60s at %s)" % currenttime())
|
||||
try:
|
||||
ret, exc = self.recv.get()
|
||||
except EOFError as e:
|
||||
raise EOFError("bitbake-server might have died or been forcibly stopped, ie. OOM killed") from e
|
||||
raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server (60s)")
|
||||
ret, exc = self.recv.get()
|
||||
# Should probably turn all exceptions in exc back into exceptions?
|
||||
# For now, at least handle BBHandledException
|
||||
if exc and ("BBHandledException" in exc or "SystemExit" in exc):
|
||||
@@ -550,7 +438,6 @@ class BitBakeProcessServerConnection(object):
|
||||
self.socket_connection = sock
|
||||
|
||||
def terminate(self):
|
||||
self.events.close()
|
||||
self.socket_connection.close()
|
||||
self.connection.connection.close()
|
||||
self.connection.recv.close()
|
||||
@@ -561,14 +448,13 @@ start_log_datetime_format = '%Y-%m-%d %H:%M:%S.%f'
|
||||
|
||||
class BitBakeServer(object):
|
||||
|
||||
def __init__(self, lock, sockname, featureset, server_timeout, xmlrpcinterface, profile):
|
||||
def __init__(self, lock, sockname, featureset, server_timeout, xmlrpcinterface):
|
||||
|
||||
self.server_timeout = server_timeout
|
||||
self.xmlrpcinterface = xmlrpcinterface
|
||||
self.featureset = featureset
|
||||
self.sockname = sockname
|
||||
self.bitbake_lock = lock
|
||||
self.profile = profile
|
||||
self.readypipe, self.readypipein = os.pipe()
|
||||
|
||||
# Place the log in the builddirectory alongside the lock file
|
||||
@@ -632,9 +518,9 @@ class BitBakeServer(object):
|
||||
os.set_inheritable(self.bitbake_lock.fileno(), True)
|
||||
os.set_inheritable(self.readypipein, True)
|
||||
serverscript = os.path.realpath(os.path.dirname(__file__) + "/../../../bin/bitbake-server")
|
||||
os.execl(sys.executable, sys.executable, serverscript, "decafbad", str(self.bitbake_lock.fileno()), str(self.readypipein), self.logfile, self.bitbake_lock.name, self.sockname, str(self.server_timeout or 0), str(int(self.profile)), str(self.xmlrpcinterface[0]), str(self.xmlrpcinterface[1]))
|
||||
os.execl(sys.executable, "bitbake-server", serverscript, "decafbad", str(self.bitbake_lock.fileno()), str(self.readypipein), self.logfile, self.bitbake_lock.name, self.sockname, str(self.server_timeout or 0), str(self.xmlrpcinterface[0]), str(self.xmlrpcinterface[1]))
|
||||
|
||||
def execServer(lockfd, readypipeinfd, lockname, sockname, server_timeout, xmlrpcinterface, profile):
|
||||
def execServer(lockfd, readypipeinfd, lockname, sockname, server_timeout, xmlrpcinterface):
|
||||
|
||||
import bb.cookerdata
|
||||
import bb.cooker
|
||||
@@ -646,7 +532,6 @@ def execServer(lockfd, readypipeinfd, lockname, sockname, server_timeout, xmlrpc
|
||||
|
||||
# Create server control socket
|
||||
if os.path.exists(sockname):
|
||||
serverlog("WARNING: removing existing socket file '%s'" % sockname)
|
||||
os.unlink(sockname)
|
||||
|
||||
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
||||
@@ -663,8 +548,7 @@ def execServer(lockfd, readypipeinfd, lockname, sockname, server_timeout, xmlrpc
|
||||
writer = ConnectionWriter(readypipeinfd)
|
||||
try:
|
||||
featureset = []
|
||||
cooker = bb.cooker.BBCooker(featureset, server)
|
||||
cooker.configuration.profile = profile
|
||||
cooker = bb.cooker.BBCooker(featureset, server.register_idle_function)
|
||||
except bb.BBHandledException:
|
||||
return None
|
||||
writer.send("r")
|
||||
@@ -779,18 +663,23 @@ class BBUIEventQueue:
|
||||
self.reader = ConnectionReader(readfd)
|
||||
|
||||
self.t = threading.Thread()
|
||||
self.t.daemon = True
|
||||
self.t.run = self.startCallbackHandler
|
||||
self.t.start()
|
||||
|
||||
def getEvent(self):
|
||||
with bb.utils.lock_timeout(self.eventQueueLock):
|
||||
if len(self.eventQueue) == 0:
|
||||
return None
|
||||
self.eventQueueLock.acquire()
|
||||
|
||||
item = self.eventQueue.pop(0)
|
||||
if len(self.eventQueue) == 0:
|
||||
self.eventQueueNotify.clear()
|
||||
if len(self.eventQueue) == 0:
|
||||
self.eventQueueLock.release()
|
||||
return None
|
||||
|
||||
item = self.eventQueue.pop(0)
|
||||
|
||||
if len(self.eventQueue) == 0:
|
||||
self.eventQueueNotify.clear()
|
||||
|
||||
self.eventQueueLock.release()
|
||||
return item
|
||||
|
||||
def waitEvent(self, delay):
|
||||
@@ -798,9 +687,10 @@ class BBUIEventQueue:
|
||||
return self.getEvent()
|
||||
|
||||
def queue_event(self, event):
|
||||
with bb.utils.lock_timeout(self.eventQueueLock):
|
||||
self.eventQueue.append(event)
|
||||
self.eventQueueNotify.set()
|
||||
self.eventQueueLock.acquire()
|
||||
self.eventQueue.append(event)
|
||||
self.eventQueueNotify.set()
|
||||
self.eventQueueLock.release()
|
||||
|
||||
def send_event(self, event):
|
||||
self.queue_event(pickle.loads(event))
|
||||
@@ -809,17 +699,13 @@ class BBUIEventQueue:
|
||||
bb.utils.set_process_name("UIEventQueue")
|
||||
while True:
|
||||
try:
|
||||
ready = self.reader.wait(0.25)
|
||||
if ready:
|
||||
event = self.reader.get()
|
||||
self.queue_event(event)
|
||||
except (EOFError, OSError, TypeError):
|
||||
self.reader.wait()
|
||||
event = self.reader.get()
|
||||
self.queue_event(event)
|
||||
except EOFError:
|
||||
# Easiest way to exit is to close the file descriptor to cause an exit
|
||||
break
|
||||
|
||||
def close(self):
|
||||
self.reader.close()
|
||||
self.t.join()
|
||||
|
||||
class ConnectionReader(object):
|
||||
|
||||
@@ -834,7 +720,7 @@ class ConnectionReader(object):
|
||||
return self.reader.poll(timeout)
|
||||
|
||||
def get(self):
|
||||
with bb.utils.lock_timeout(self.rlock):
|
||||
with self.rlock:
|
||||
res = self.reader.recv_bytes()
|
||||
return multiprocessing.reduction.ForkingPickler.loads(res)
|
||||
|
||||
@@ -855,7 +741,7 @@ class ConnectionWriter(object):
|
||||
|
||||
def _send(self, obj):
|
||||
gc.disable()
|
||||
with bb.utils.lock_timeout(self.wlock):
|
||||
with self.wlock:
|
||||
self.writer.send_bytes(obj)
|
||||
gc.enable()
|
||||
|
||||
@@ -868,13 +754,11 @@ class ConnectionWriter(object):
|
||||
# pthread_sigmask block/unblock would be nice but doesn't work, https://bugs.python.org/issue47139
|
||||
process = multiprocessing.current_process()
|
||||
if process and hasattr(process, "queue_signals"):
|
||||
with bb.utils.lock_timeout(process.signal_threadlock):
|
||||
with process.signal_threadlock:
|
||||
process.queue_signals = True
|
||||
self._send(obj)
|
||||
process.queue_signals = False
|
||||
|
||||
while len(process.signal_received) > 0:
|
||||
sig = process.signal_received.pop()
|
||||
for sig in process.signal_received.pop():
|
||||
process.handle_sig(sig, None)
|
||||
else:
|
||||
self._send(obj)
|
||||
|
||||
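ConnectionWriter.send() above defers signal handling while the pipe lock is held: the process's handler queues signals that arrive during the send and they are replayed afterwards, since (per the comment) pthread_sigmask block/unblock does not work here. A standalone sketch of that defer/replay pattern (not part of the diff):

# Sketch of "queue signals while a lock is held, replay them afterwards".
import signal, threading

pending = []
queueing = False
lock = threading.Lock()

def handler(sig, frame):
    if queueing:
        pending.append(sig)          # defer while the critical section runs
    else:
        print("handling signal", sig)

signal.signal(signal.SIGTERM, handler)

def critical_section():
    global queueing
    with lock:
        queueing = True
        try:
            pass                     # e.g. writer.send_bytes(...) would go here
        finally:
            queueing = False
    while pending:
        handler(pending.pop(), None) # replay anything that arrived meanwhile

critical_section()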
@@ -14,8 +14,6 @@ from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
|
||||
import bb.server.xmlrpcclient
|
||||
|
||||
import bb
|
||||
import bb.cooker
|
||||
import bb.event
|
||||
|
||||
# This request handler checks if the request has a "Bitbake-token" header
|
||||
# field (this comes from the client side) and compares it with its internal
|
||||
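The comment describes a request handler that compares a "Bitbake-token" header sent by the client against the token the server holds. A rough, hypothetical sketch of that idea (not the actual BitBake class; the class name, report_401 helper and server.connection_token attribute are assumptions for illustration):

# Hypothetical sketch: reject XML-RPC requests whose "Bitbake-token" header
# doesn't match the token the server handed out.
from xmlrpc.server import SimpleXMLRPCRequestHandler

class TokenCheckingRequestHandler(SimpleXMLRPCRequestHandler):
    def do_POST(self):
        token = self.headers.get("Bitbake-token", "")
        if token != getattr(self.server, "connection_token", None):   # attribute name assumed
            self.report_401()
            return
        return super().do_POST()

    def report_401(self):
        # Minimal "not authorised" reply
        self.send_response(401)
        self.end_headers()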
@@ -56,7 +54,7 @@ class BitBakeXMLRPCServer(SimpleXMLRPCServer):
|
||||
|
||||
def __init__(self, interface, cooker, parent):
|
||||
# Use auto port configuration
|
||||
if interface[1] == -1:
|
||||
if (interface[1] == -1):
|
||||
interface = (interface[0], 0)
|
||||
SimpleXMLRPCServer.__init__(self, interface,
|
||||
requestHandler=BitBakeXMLRPCRequestHandler,
|
||||
@@ -89,12 +87,11 @@ class BitBakeXMLRPCServer(SimpleXMLRPCServer):
|
||||
def handle_requests(self):
|
||||
self._handle_request_noblock()
|
||||
|
||||
class BitBakeXMLRPCServerCommands:
|
||||
class BitBakeXMLRPCServerCommands():
|
||||
|
||||
def __init__(self, server):
|
||||
self.server = server
|
||||
self.has_client = False
|
||||
self.event_handle = None
|
||||
|
||||
def registerEventHandler(self, host, port):
|
||||
"""
|
||||
@@ -103,8 +100,8 @@ class BitBakeXMLRPCServerCommands:
|
||||
s, t = bb.server.xmlrpcclient._create_server(host, port)
|
||||
|
||||
# we don't allow connections if the cooker is running
|
||||
if self.server.cooker.state in [bb.cooker.State.PARSING, bb.cooker.State.RUNNING]:
|
||||
return None, f"Cooker is busy: {self.server.cooker.state.name}"
|
||||
if (self.server.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]):
|
||||
return None, "Cooker is busy: %s" % bb.cooker.state.get_name(self.server.cooker.state)
|
||||
|
||||
self.event_handle = bb.event.register_UIHhandler(s, True)
|
||||
return self.event_handle, 'OK'
|
||||
@@ -121,7 +118,7 @@ class BitBakeXMLRPCServerCommands:
|
||||
"""
|
||||
Run a cooker command on the server
|
||||
"""
|
||||
return self.server.cooker.command.runCommand(command, self.server.parent, self.server.readonly)
|
||||
return self.server.cooker.command.runCommand(command, self.server.readonly)
|
||||
|
||||
def getEventHandle(self):
|
||||
return self.event_handle
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -14,8 +12,6 @@ import bb.data
|
||||
import difflib
|
||||
import simplediff
|
||||
import json
|
||||
import types
|
||||
from contextlib import contextmanager
|
||||
import bb.compress.zstd
|
||||
from bb.checksum import FileChecksumCache
|
||||
from bb import runqueue
|
||||
@@ -25,33 +21,15 @@ import hashserv.client
|
||||
logger = logging.getLogger('BitBake.SigGen')
|
||||
hashequiv_logger = logging.getLogger('BitBake.SigGen.HashEquiv')
|
||||
|
||||
#find_siginfo and find_siginfo_version are set by the metadata siggen
|
||||
# The minimum version of the find_siginfo function we need
|
||||
find_siginfo_minversion = 2
|
||||
|
||||
HASHSERV_ENVVARS = [
|
||||
"SSL_CERT_DIR",
|
||||
"SSL_CERT_FILE",
|
||||
"NO_PROXY",
|
||||
"HTTPS_PROXY",
|
||||
"HTTP_PROXY"
|
||||
]
|
||||
|
||||
def check_siggen_version(siggen):
|
||||
if not hasattr(siggen, "find_siginfo_version"):
|
||||
bb.fatal("Siggen from metadata (OE-Core?) is too old, please update it (no version found)")
|
||||
if siggen.find_siginfo_version < siggen.find_siginfo_minversion:
|
||||
bb.fatal("Siggen from metadata (OE-Core?) is too old, please update it (%s vs %s)" % (siggen.find_siginfo_version, siggen.find_siginfo_minversion))
|
||||
|
||||
class SetEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, set) or isinstance(obj, frozenset):
        if isinstance(obj, set):
            return dict(_set_object=list(sorted(obj)))
        return json.JSONEncoder.default(self, obj)

def SetDecoder(dct):
    if '_set_object' in dct:
        return frozenset(dct['_set_object'])
        return set(dct['_set_object'])
    return dct

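SetEncoder and SetDecoder let sets survive a JSON round trip by wrapping them as {"_set_object": [...]}. A quick sanity check (not part of the diff), assuming the newer variant of the definitions above:

# Round-trip check for SetEncoder/SetDecoder.
import json

payload = {"deps": {"zlib", "openssl"}, "name": "curl"}
text = json.dumps(payload, sort_keys=True, cls=SetEncoder)
print(text)   # {"deps": {"_set_object": ["openssl", "zlib"]}, "name": "curl"}

restored = json.loads(text, object_hook=SetDecoder)
print(restored["deps"])   # comes back as a frozenset (the older code used a plain set)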
def init(d):
|
||||
@@ -62,6 +40,7 @@ def init(d):
|
||||
for sg in siggens:
|
||||
if desired == sg.name:
|
||||
return sg(d)
|
||||
break
|
||||
else:
|
||||
logger.error("Invalid signature generator '%s', using default 'noop'\n"
|
||||
"Available generators: %s", desired,
|
||||
@@ -73,6 +52,11 @@ class SignatureGenerator(object):
|
||||
"""
|
||||
name = "noop"
|
||||
|
||||
# If the derived class supports multiconfig datacaches, set this to True
|
||||
# The default is False for backward compatibility with derived signature
|
||||
# generators that do not understand multiconfig caches
|
||||
supports_multiconfig_datacaches = False
|
||||
|
||||
def __init__(self, data):
|
||||
self.basehash = {}
|
||||
self.taskhash = {}
|
||||
@@ -90,39 +74,9 @@ class SignatureGenerator(object):
|
||||
def postparsing_clean_cache(self):
|
||||
return
|
||||
|
||||
def setup_datacache(self, datacaches):
|
||||
self.datacaches = datacaches
|
||||
|
||||
def setup_datacache_from_datastore(self, mcfn, d):
|
||||
# In task context we have no cache so setup internal data structures
|
||||
# from the fully parsed data store provided
|
||||
|
||||
mc = d.getVar("__BBMULTICONFIG", False) or ""
|
||||
tasks = d.getVar('__BBTASKS', False)
|
||||
|
||||
self.datacaches = {}
|
||||
self.datacaches[mc] = types.SimpleNamespace()
|
||||
setattr(self.datacaches[mc], "stamp", {})
|
||||
self.datacaches[mc].stamp[mcfn] = d.getVar('STAMP')
|
||||
setattr(self.datacaches[mc], "stamp_extrainfo", {})
|
||||
self.datacaches[mc].stamp_extrainfo[mcfn] = {}
|
||||
for t in tasks:
|
||||
flag = d.getVarFlag(t, "stamp-extra-info")
|
||||
if flag:
|
||||
self.datacaches[mc].stamp_extrainfo[mcfn][t] = flag
|
||||
|
||||
def get_cached_unihash(self, tid):
|
||||
return None
|
||||
|
||||
def get_unihash(self, tid):
|
||||
unihash = self.get_cached_unihash(tid)
|
||||
if unihash:
|
||||
return unihash
|
||||
return self.taskhash[tid]
|
||||
|
||||
def get_unihashes(self, tids):
|
||||
return {tid: self.get_unihash(tid) for tid in tids}
|
||||
|
||||
def prep_taskhash(self, tid, deps, dataCaches):
|
||||
return
|
||||
|
||||
@@ -134,51 +88,17 @@ class SignatureGenerator(object):
|
||||
"""Write/update the file checksum cache onto disk"""
|
||||
return
|
||||
|
||||
def stampfile_base(self, mcfn):
|
||||
mc = bb.runqueue.mc_from_tid(mcfn)
|
||||
return self.datacaches[mc].stamp[mcfn]
|
||||
|
||||
def stampfile_mcfn(self, taskname, mcfn, extrainfo=True):
|
||||
mc = bb.runqueue.mc_from_tid(mcfn)
|
||||
stamp = self.datacaches[mc].stamp[mcfn]
|
||||
if not stamp:
|
||||
return
|
||||
|
||||
stamp_extrainfo = ""
|
||||
if extrainfo:
|
||||
taskflagname = taskname
|
||||
if taskname.endswith("_setscene"):
|
||||
taskflagname = taskname.replace("_setscene", "")
|
||||
stamp_extrainfo = self.datacaches[mc].stamp_extrainfo[mcfn].get(taskflagname) or ""
|
||||
|
||||
return self.stampfile(stamp, mcfn, taskname, stamp_extrainfo)
|
||||
|
||||
def stampfile(self, stampbase, file_name, taskname, extrainfo):
|
||||
return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.')
|
||||
|
||||
def stampcleanmask_mcfn(self, taskname, mcfn):
|
||||
mc = bb.runqueue.mc_from_tid(mcfn)
|
||||
stamp = self.datacaches[mc].stamp[mcfn]
|
||||
if not stamp:
|
||||
return []
|
||||
|
||||
taskflagname = taskname
|
||||
if taskname.endswith("_setscene"):
|
||||
taskflagname = taskname.replace("_setscene", "")
|
||||
stamp_extrainfo = self.datacaches[mc].stamp_extrainfo[mcfn].get(taskflagname) or ""
|
||||
|
||||
return self.stampcleanmask(stamp, mcfn, taskname, stamp_extrainfo)
|
||||
|
||||
def stampcleanmask(self, stampbase, file_name, taskname, extrainfo):
|
||||
return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.')
|
||||
|
||||
def dump_sigtask(self, mcfn, task, stampbase, runtime):
|
||||
def dump_sigtask(self, fn, task, stampbase, runtime):
|
||||
return
|
||||
|
||||
def invalidate_task(self, task, mcfn):
|
||||
mc = bb.runqueue.mc_from_tid(mcfn)
|
||||
stamp = self.datacaches[mc].stamp[mcfn]
|
||||
bb.utils.remove(stamp)
|
||||
def invalidate_task(self, task, d, fn):
|
||||
bb.build.del_stamp(task, d, fn)
|
||||
|
||||
def dump_sigs(self, dataCache, options):
|
||||
return
|
||||
@@ -204,14 +124,41 @@ class SignatureGenerator(object):
|
||||
def set_setscene_tasks(self, setscene_tasks):
|
||||
return
|
||||
|
||||
    @classmethod
    def get_data_caches(cls, dataCaches, mc):
        """
        This function returns the datacaches that should be passed to signature
        generator functions. If the signature generator supports multiconfig
        caches, the entire dictionary of data caches is sent, otherwise a
        special proxy is sent that support both index access to all
        multiconfigs, and also direct access for the default multiconfig.

        The proxy class allows code in this class itself to always use
        multiconfig aware code (to ease maintenance), but derived classes that
        are unaware of multiconfig data caches can still access the default
        multiconfig as expected.

        Do not override this function in derived classes; it will be removed in
        the future when support for multiconfig data caches is mandatory
        """
        class DataCacheProxy(object):
            def __init__(self):
                pass

            def __getitem__(self, key):
                return dataCaches[key]

            def __getattr__(self, name):
                return getattr(dataCaches[mc], name)

        if cls.supports_multiconfig_datacaches:
            return dataCaches

        return DataCacheProxy()

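The proxy lets older, non-multiconfig-aware signature generators keep using attribute access for the default multiconfig while multiconfig-aware code indexes by multiconfig name. A sketch of the two access styles (not part of the diff), using stand-in objects instead of real bb.cache data caches and assuming the SignatureGenerator above is importable from bb.siggen:

# Stand-in objects illustrating what get_data_caches() reconciles.
import types
from bb.siggen import SignatureGenerator

dataCaches = {
    "": types.SimpleNamespace(pkg_fn={"/path/foo.bb": "foo"}),       # default multiconfig
    "mc1": types.SimpleNamespace(pkg_fn={"/path/bar.bb": "bar"}),    # named multiconfig
}

proxy = SignatureGenerator.get_data_caches(dataCaches, "")  # base class is not multiconfig-aware

print(proxy.pkg_fn["/path/foo.bb"])         # attribute access -> default mc ("foo")
print(proxy["mc1"].pkg_fn["/path/bar.bb"])  # index access still reaches other mcs ("bar")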
def exit(self):
|
||||
return
|
||||
|
||||
def build_pnid(mc, pn, taskname):
|
||||
if mc:
|
||||
return "mc:" + mc + ":" + pn + ":" + taskname
|
||||
return pn + ":" + taskname
|
||||
|
||||
class SignatureGeneratorBasic(SignatureGenerator):
|
||||
"""
|
||||
"""
|
||||
@@ -221,9 +168,12 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
self.basehash = {}
|
||||
self.taskhash = {}
|
||||
self.unihash = {}
|
||||
self.taskdeps = {}
|
||||
self.runtaskdeps = {}
|
||||
self.file_checksum_values = {}
|
||||
self.taints = {}
|
||||
self.gendeps = {}
|
||||
self.lookupcache = {}
|
||||
self.setscenetasks = set()
|
||||
self.basehash_ignore_vars = set((data.getVar("BB_BASEHASH_IGNORE_VARS") or "").split())
|
||||
self.taskhash_ignore_tasks = None
|
||||
@@ -247,15 +197,15 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
else:
|
||||
self.twl = None
|
||||
|
||||
def _build_data(self, mcfn, d):
|
||||
def _build_data(self, fn, d):
|
||||
|
||||
ignore_mismatch = ((d.getVar("BB_HASH_IGNORE_MISMATCH") or '') == '1')
|
||||
tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d, self.basehash_ignore_vars)
|
||||
|
||||
taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, self.basehash_ignore_vars, mcfn)
|
||||
taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, self.basehash_ignore_vars, fn)
|
||||
|
||||
for task in tasklist:
|
||||
tid = mcfn + ":" + task
|
||||
tid = fn + ":" + task
|
||||
if not ignore_mismatch and tid in self.basehash and self.basehash[tid] != basehash[tid]:
|
||||
bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (tid, self.basehash[tid], basehash[tid]))
|
||||
bb.error("The following commands may help:")
|
||||
@@ -266,7 +216,11 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
bb.error("%s -Sprintdiff\n" % cmd)
|
||||
self.basehash[tid] = basehash[tid]
|
||||
|
||||
return taskdeps, gendeps, lookupcache
|
||||
self.taskdeps[fn] = taskdeps
|
||||
self.gendeps[fn] = gendeps
|
||||
self.lookupcache[fn] = lookupcache
|
||||
|
||||
return taskdeps
|
||||
|
||||
def set_setscene_tasks(self, setscene_tasks):
|
||||
self.setscenetasks = set(setscene_tasks)
|
||||
@@ -274,42 +228,31 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
def finalise(self, fn, d, variant):
|
||||
|
||||
mc = d.getVar("__BBMULTICONFIG", False) or ""
|
||||
mcfn = fn
|
||||
if variant or mc:
|
||||
mcfn = bb.cache.realfn2virtual(fn, variant, mc)
|
||||
fn = bb.cache.realfn2virtual(fn, variant, mc)
|
||||
|
||||
try:
|
||||
taskdeps, gendeps, lookupcache = self._build_data(mcfn, d)
|
||||
taskdeps = self._build_data(fn, d)
|
||||
except bb.parse.SkipRecipe:
|
||||
raise
|
||||
except:
|
||||
bb.warn("Error during finalise of %s" % mcfn)
|
||||
bb.warn("Error during finalise of %s" % fn)
|
||||
raise
|
||||
|
||||
basehashes = {}
|
||||
for task in taskdeps:
|
||||
basehashes[task] = self.basehash[mcfn + ":" + task]
|
||||
|
||||
d.setVar("__siggen_basehashes", basehashes)
|
||||
d.setVar("__siggen_gendeps", gendeps)
|
||||
d.setVar("__siggen_varvals", lookupcache)
|
||||
d.setVar("__siggen_taskdeps", taskdeps)
|
||||
|
||||
#Slow but can be useful for debugging mismatched basehashes
|
||||
#self.setup_datacache_from_datastore(mcfn, d)
|
||||
#for task in taskdeps:
|
||||
# self.dump_sigtask(mcfn, task, d.getVar("STAMP"), False)
|
||||
#for task in self.taskdeps[fn]:
|
||||
# self.dump_sigtask(fn, task, d.getVar("STAMP"), False)
|
||||
|
||||
def setup_datacache_from_datastore(self, mcfn, d):
|
||||
super().setup_datacache_from_datastore(mcfn, d)
|
||||
for task in taskdeps:
|
||||
d.setVar("BB_BASEHASH:task-%s" % task, self.basehash[fn + ":" + task])
|
||||
|
||||
mc = bb.runqueue.mc_from_tid(mcfn)
|
||||
for attr in ["siggen_varvals", "siggen_taskdeps", "siggen_gendeps"]:
|
||||
if not hasattr(self.datacaches[mc], attr):
|
||||
setattr(self.datacaches[mc], attr, {})
|
||||
self.datacaches[mc].siggen_varvals[mcfn] = d.getVar("__siggen_varvals")
|
||||
self.datacaches[mc].siggen_taskdeps[mcfn] = d.getVar("__siggen_taskdeps")
|
||||
self.datacaches[mc].siggen_gendeps[mcfn] = d.getVar("__siggen_gendeps")
|
||||
def postparsing_clean_cache(self):
|
||||
#
|
||||
# After parsing we can remove some things from memory to reduce our memory footprint
|
||||
#
|
||||
self.gendeps = {}
|
||||
self.lookupcache = {}
|
||||
self.taskdeps = {}
|
||||
|
||||
def rundep_check(self, fn, recipename, task, dep, depname, dataCaches):
|
||||
# Return True if we should keep the dependency, False to drop it
|
||||
@@ -332,37 +275,38 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
|
||||
def prep_taskhash(self, tid, deps, dataCaches):
|
||||
|
||||
(mc, _, task, mcfn) = bb.runqueue.split_tid_mcfn(tid)
|
||||
(mc, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
|
||||
|
||||
self.basehash[tid] = dataCaches[mc].basetaskhash[tid]
|
||||
self.runtaskdeps[tid] = []
|
||||
self.file_checksum_values[tid] = []
|
||||
recipename = dataCaches[mc].pkg_fn[mcfn]
|
||||
recipename = dataCaches[mc].pkg_fn[fn]
|
||||
|
||||
self.tidtopn[tid] = recipename
|
||||
# save hashfn for deps into siginfo?
|
||||
for dep in deps:
|
||||
(depmc, _, deptask, depmcfn) = bb.runqueue.split_tid_mcfn(dep)
|
||||
dep_pn = dataCaches[depmc].pkg_fn[depmcfn]
|
||||
|
||||
if not self.rundep_check(mcfn, recipename, task, dep, dep_pn, dataCaches):
|
||||
for dep in sorted(deps, key=clean_basepath):
|
||||
(depmc, _, _, depmcfn) = bb.runqueue.split_tid_mcfn(dep)
|
||||
depname = dataCaches[depmc].pkg_fn[depmcfn]
|
||||
if not self.supports_multiconfig_datacaches and mc != depmc:
|
||||
# If the signature generator doesn't understand multiconfig
|
||||
# data caches, any dependency not in the same multiconfig must
|
||||
# be skipped for backward compatibility
|
||||
continue
|
||||
if not self.rundep_check(fn, recipename, task, dep, depname, dataCaches):
|
||||
continue
|
||||
|
||||
if dep not in self.taskhash:
|
||||
bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?" % dep)
|
||||
self.runtaskdeps[tid].append(dep)
|
||||
|
||||
dep_pnid = build_pnid(depmc, dep_pn, deptask)
|
||||
self.runtaskdeps[tid].append((dep_pnid, dep))
|
||||
|
||||
if task in dataCaches[mc].file_checksums[mcfn]:
|
||||
if task in dataCaches[mc].file_checksums[fn]:
|
||||
if self.checksum_cache:
|
||||
checksums = self.checksum_cache.get_checksums(dataCaches[mc].file_checksums[mcfn][task], recipename, self.localdirsexclude)
|
||||
checksums = self.checksum_cache.get_checksums(dataCaches[mc].file_checksums[fn][task], recipename, self.localdirsexclude)
|
||||
else:
|
||||
checksums = bb.fetch2.get_file_checksums(dataCaches[mc].file_checksums[mcfn][task], recipename, self.localdirsexclude)
|
||||
checksums = bb.fetch2.get_file_checksums(dataCaches[mc].file_checksums[fn][task], recipename, self.localdirsexclude)
|
||||
for (f,cs) in checksums:
|
||||
self.file_checksum_values[tid].append((f,cs))
|
||||
|
||||
taskdep = dataCaches[mc].task_deps[mcfn]
|
||||
taskdep = dataCaches[mc].task_deps[fn]
|
||||
if 'nostamp' in taskdep and task in taskdep['nostamp']:
|
||||
# Nostamp tasks need an implicit taint so that they force any dependent tasks to run
|
||||
if tid in self.taints and self.taints[tid].startswith("nostamp:"):
|
||||
@@ -373,30 +317,30 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
taint = str(uuid.uuid4())
|
||||
self.taints[tid] = "nostamp:" + taint
|
||||
|
||||
taint = self.read_taint(mcfn, task, dataCaches[mc].stamp[mcfn])
|
||||
taint = self.read_taint(fn, task, dataCaches[mc].stamp[fn])
|
||||
if taint:
|
||||
self.taints[tid] = taint
|
||||
logger.warning("%s is tainted from a forced run" % tid)
|
||||
|
||||
return set(dep for _, dep in self.runtaskdeps[tid])
|
||||
return
|
||||
|
||||
def get_taskhash(self, tid, deps, dataCaches):
|
||||
|
||||
data = self.basehash[tid]
|
||||
for dep in sorted(self.runtaskdeps[tid]):
|
||||
data += self.get_unihash(dep[1])
|
||||
for dep in self.runtaskdeps[tid]:
|
||||
data = data + self.get_unihash(dep)
|
||||
|
||||
for (f, cs) in sorted(self.file_checksum_values[tid], key=clean_checksum_file_path):
|
||||
for (f, cs) in self.file_checksum_values[tid]:
|
||||
if cs:
|
||||
if "/./" in f:
|
||||
data += "./" + f.split("/./")[1]
|
||||
data += cs
|
||||
data = data + "./" + f.split("/./")[1]
|
||||
data = data + cs
|
||||
|
||||
if tid in self.taints:
|
||||
if self.taints[tid].startswith("nostamp:"):
|
||||
data += self.taints[tid][8:]
|
||||
data = data + self.taints[tid][8:]
|
||||
else:
|
||||
data += self.taints[tid]
|
||||
data = data + self.taints[tid]
|
||||
|
||||
h = hashlib.sha256(data.encode("utf-8")).hexdigest()
|
||||
self.taskhash[tid] = h
|
||||
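As the hunk shows, the task hash is a sha256 over the recipe's basehash, the unihashes of its runtime dependencies, the relevant file checksums (paths normalised at the "/./" marker), and any taint. A standalone sketch of that composition (not part of the diff; the helper name and input values are made up):

# Sketch of how get_taskhash() composes a task hash from its inputs.
import hashlib

def compose_taskhash(basehash, dep_unihashes, file_checksums, taint=None):
    data = basehash
    for dep in sorted(dep_unihashes):
        data += dep_unihashes[dep]
    for path, cs in file_checksums:
        if cs:
            if "/./" in path:
                data += "./" + path.split("/./")[1]   # keep only the path below the "/./" marker
            data += cs
    if taint:
        data += taint[8:] if taint.startswith("nostamp:") else taint
    return hashlib.sha256(data.encode("utf-8")).hexdigest()

print(compose_taskhash("aabb", {"zlib:do_install": "1122"},
                       [("/work/./src/main.c", "ccdd")]))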
@@ -415,9 +359,9 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
def save_unitaskhashes(self):
|
||||
self.unihash_cache.save(self.unitaskhashes)
|
||||
|
||||
def dump_sigtask(self, mcfn, task, stampbase, runtime):
|
||||
tid = mcfn + ":" + task
|
||||
mc = bb.runqueue.mc_from_tid(mcfn)
|
||||
def dump_sigtask(self, fn, task, stampbase, runtime):
|
||||
|
||||
tid = fn + ":" + task
|
||||
referencestamp = stampbase
|
||||
if isinstance(runtime, str) and runtime.startswith("customfile"):
|
||||
sigfile = stampbase
|
||||
@@ -434,32 +378,32 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
data['task'] = task
|
||||
data['basehash_ignore_vars'] = self.basehash_ignore_vars
|
||||
data['taskhash_ignore_tasks'] = self.taskhash_ignore_tasks
|
||||
data['taskdeps'] = self.datacaches[mc].siggen_taskdeps[mcfn][task]
|
||||
data['taskdeps'] = self.taskdeps[fn][task]
|
||||
data['basehash'] = self.basehash[tid]
|
||||
data['gendeps'] = {}
|
||||
data['varvals'] = {}
|
||||
data['varvals'][task] = self.datacaches[mc].siggen_varvals[mcfn][task]
|
||||
for dep in self.datacaches[mc].siggen_taskdeps[mcfn][task]:
|
||||
data['varvals'][task] = self.lookupcache[fn][task]
|
||||
for dep in self.taskdeps[fn][task]:
|
||||
if dep in self.basehash_ignore_vars:
|
||||
continue
|
||||
data['gendeps'][dep] = self.datacaches[mc].siggen_gendeps[mcfn][dep]
|
||||
data['varvals'][dep] = self.datacaches[mc].siggen_varvals[mcfn][dep]
|
||||
data['gendeps'][dep] = self.gendeps[fn][dep]
|
||||
data['varvals'][dep] = self.lookupcache[fn][dep]
|
||||
|
||||
if runtime and tid in self.taskhash:
|
||||
data['runtaskdeps'] = [dep[0] for dep in sorted(self.runtaskdeps[tid])]
|
||||
data['runtaskdeps'] = self.runtaskdeps[tid]
|
||||
data['file_checksum_values'] = []
|
||||
for f,cs in sorted(self.file_checksum_values[tid], key=clean_checksum_file_path):
|
||||
for f,cs in self.file_checksum_values[tid]:
|
||||
if "/./" in f:
|
||||
data['file_checksum_values'].append(("./" + f.split("/./")[1], cs))
|
||||
else:
|
||||
data['file_checksum_values'].append((os.path.basename(f), cs))
|
||||
data['runtaskhashes'] = {}
|
||||
for dep in self.runtaskdeps[tid]:
|
||||
data['runtaskhashes'][dep[0]] = self.get_unihash(dep[1])
|
||||
for dep in data['runtaskdeps']:
|
||||
data['runtaskhashes'][dep] = self.get_unihash(dep)
|
||||
data['taskhash'] = self.taskhash[tid]
|
||||
data['unihash'] = self.get_unihash(tid)
|
||||
|
||||
taint = self.read_taint(mcfn, task, referencestamp)
|
||||
taint = self.read_taint(fn, task, referencestamp)
|
||||
if taint:
|
||||
data['taint'] = taint
|
||||
|
||||
@@ -476,7 +420,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[tid], tid))
|
||||
sigfile = sigfile.replace(self.taskhash[tid], computed_taskhash)
|
||||
|
||||
fd, tmpfile = bb.utils.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
|
||||
fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
|
||||
try:
|
||||
with bb.compress.zstd.open(fd, "wt", encoding="utf-8", num_threads=1) as f:
|
||||
json.dump(data, f, sort_keys=True, separators=(",", ":"), cls=SetEncoder)
|
||||
@@ -490,6 +434,18 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
pass
|
||||
raise err
|
||||
|
||||
def dump_sigfn(self, fn, dataCaches, options):
|
||||
if fn in self.taskdeps:
|
||||
for task in self.taskdeps[fn]:
|
||||
tid = fn + ":" + task
|
||||
mc = bb.runqueue.mc_from_tid(tid)
|
||||
if tid not in self.taskhash:
|
||||
continue
|
||||
if dataCaches[mc].basetaskhash[tid] != self.basehash[tid]:
|
||||
bb.error("Bitbake's cached basehash does not match the one we just generated (%s)!" % tid)
|
||||
bb.error("The mismatched hashes were %s and %s" % (dataCaches[mc].basetaskhash[tid], self.basehash[tid]))
|
||||
self.dump_sigtask(fn, task, dataCaches[mc].stamp[fn], True)
|
||||
|
||||
class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
|
||||
name = "basichash"
|
||||
|
||||
@@ -500,11 +456,11 @@ class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
|
||||
# If task is not in basehash, then error
|
||||
return self.basehash[tid]
|
||||
|
||||
def stampfile(self, stampbase, mcfn, taskname, extrainfo, clean=False):
|
||||
if taskname.endswith("_setscene"):
|
||||
tid = mcfn + ":" + taskname[:-9]
|
||||
def stampfile(self, stampbase, fn, taskname, extrainfo, clean=False):
|
||||
if taskname != "do_setscene" and taskname.endswith("_setscene"):
|
||||
tid = fn + ":" + taskname[:-9]
|
||||
else:
|
||||
tid = mcfn + ":" + taskname
|
||||
tid = fn + ":" + taskname
|
||||
if clean:
|
||||
h = "*"
|
||||
else:
|
||||
@@ -512,100 +468,42 @@ class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
|
||||
|
||||
return ("%s.%s.%s.%s" % (stampbase, taskname, h, extrainfo)).rstrip('.')
|
||||
|
||||
def stampcleanmask(self, stampbase, mcfn, taskname, extrainfo):
|
||||
return self.stampfile(stampbase, mcfn, taskname, extrainfo, clean=True)
|
||||
def stampcleanmask(self, stampbase, fn, taskname, extrainfo):
|
||||
return self.stampfile(stampbase, fn, taskname, extrainfo, clean=True)
|
||||
|
||||
def invalidate_task(self, task, mcfn):
|
||||
bb.note("Tainting hash to force rebuild of task %s, %s" % (mcfn, task))
|
||||
|
||||
mc = bb.runqueue.mc_from_tid(mcfn)
|
||||
stamp = self.datacaches[mc].stamp[mcfn]
|
||||
|
||||
taintfn = stamp + '.' + task + '.taint'
|
||||
|
||||
import uuid
|
||||
bb.utils.mkdirhier(os.path.dirname(taintfn))
|
||||
# The specific content of the taint file is not really important,
|
||||
# we just need it to be random, so a random UUID is used
|
||||
with open(taintfn, 'w') as taintf:
|
||||
taintf.write(str(uuid.uuid4()))
|
||||
def invalidate_task(self, task, d, fn):
|
||||
bb.note("Tainting hash to force rebuild of task %s, %s" % (fn, task))
|
||||
bb.build.write_taint(task, d, fn)
|
||||
|
||||
class SignatureGeneratorUniHashMixIn(object):
|
||||
def __init__(self, data):
|
||||
self.extramethod = {}
|
||||
# NOTE: The cache only tracks hashes that exist. Hashes that don't
|
||||
# exist are always queried from the server since it is possible for
|
||||
# hashes to appear over time, but much less likely for them to
|
||||
# disappear
|
||||
self.unihash_exists_cache = set()
|
||||
self.username = None
|
||||
self.password = None
|
||||
self.env = {}
|
||||
|
||||
origenv = data.getVar("BB_ORIGENV")
|
||||
for e in HASHSERV_ENVVARS:
|
||||
value = data.getVar(e)
|
||||
if not value and origenv:
|
||||
value = origenv.getVar(e)
|
||||
if value:
|
||||
self.env[e] = value
|
||||
super().__init__(data)
|
||||
|
||||
def get_taskdata(self):
|
||||
return (self.server, self.method, self.extramethod, self.username, self.password, self.env) + super().get_taskdata()
|
||||
return (self.server, self.method, self.extramethod) + super().get_taskdata()
|
||||
|
||||
def set_taskdata(self, data):
|
||||
self.server, self.method, self.extramethod, self.username, self.password, self.env = data[:6]
|
||||
super().set_taskdata(data[6:])
|
||||
self.server, self.method, self.extramethod = data[:3]
|
||||
super().set_taskdata(data[3:])
|
||||
|
||||
def get_hashserv_creds(self):
|
||||
if self.username and self.password:
|
||||
return {
|
||||
"username": self.username,
|
||||
"password": self.password,
|
||||
}
|
||||
|
||||
return {}
|
||||
|
||||
@contextmanager
|
||||
def _client_env(self):
|
||||
orig_env = os.environ.copy()
|
||||
try:
|
||||
for k, v in self.env.items():
|
||||
os.environ[k] = v
|
||||
|
||||
yield
|
||||
finally:
|
||||
for k, v in self.env.items():
|
||||
if k in orig_env:
|
||||
os.environ[k] = orig_env[k]
|
||||
else:
|
||||
del os.environ[k]
|
||||
|
||||
@contextmanager
|
||||
def client(self):
|
||||
with self._client_env():
|
||||
if getattr(self, '_client', None) is None:
|
||||
self._client = hashserv.create_client(self.server, **self.get_hashserv_creds())
|
||||
yield self._client
|
||||
if getattr(self, '_client', None) is None:
|
||||
self._client = hashserv.create_client(self.server)
|
||||
return self._client
|
||||
|
||||
def reset(self, data):
|
||||
self.__close_clients()
|
||||
if getattr(self, '_client', None) is not None:
|
||||
self._client.close()
|
||||
self._client = None
|
||||
return super().reset(data)
|
||||
|
||||
def exit(self):
|
||||
self.__close_clients()
|
||||
if getattr(self, '_client', None) is not None:
|
||||
self._client.close()
|
||||
self._client = None
|
||||
return super().exit()
|
||||
|
||||
def __close_clients(self):
|
||||
with self._client_env():
|
||||
if getattr(self, '_client', None) is not None:
|
||||
self._client.close()
|
||||
self._client = None
|
||||
if getattr(self, '_client_pool', None) is not None:
|
||||
self._client_pool.close()
|
||||
self._client_pool = None
|
||||
|
||||
def get_stampfile_hash(self, tid):
|
||||
if tid in self.taskhash:
|
||||
# If a unique hash is reported, use it as the stampfile hash. This
|
||||
@@ -637,7 +535,7 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
return None
|
||||
return unihash
|
||||
|
||||
def get_cached_unihash(self, tid):
|
||||
def get_unihash(self, tid):
|
||||
taskhash = self.taskhash[tid]
|
||||
|
||||
# If its not a setscene task we can return
|
||||
@@ -652,96 +550,40 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
self.unihash[tid] = unihash
|
||||
return unihash
|
||||
|
||||
return None
|
||||
# In the absence of being able to discover a unique hash from the
|
||||
# server, make it be equivalent to the taskhash. The unique "hash" only
|
||||
# really needs to be a unique string (not even necessarily a hash), but
|
||||
# making it match the taskhash has a few advantages:
|
||||
#
|
||||
# 1) All of the sstate code that assumes hashes can be the same
|
||||
# 2) It provides maximal compatibility with builders that don't use
|
||||
# an equivalency server
|
||||
# 3) The value is easy for multiple independent builders to derive the
|
||||
# same unique hash from the same input. This means that if the
|
||||
# independent builders find the same taskhash, but it isn't reported
|
||||
# to the server, there is a better chance that they will agree on
|
||||
# the unique hash.
|
||||
unihash = taskhash
|
||||
|
||||
def _get_method(self, tid):
|
||||
method = self.method
|
||||
if tid in self.extramethod:
|
||||
method = method + self.extramethod[tid]
|
||||
|
||||
return method
|
||||
|
||||
def unihashes_exist(self, query):
|
||||
if len(query) == 0:
|
||||
return {}
|
||||
|
||||
query_keys = []
|
||||
result = {}
|
||||
for key, unihash in query.items():
|
||||
if unihash in self.unihash_exists_cache:
|
||||
result[key] = True
|
||||
else:
|
||||
query_keys.append(key)
|
||||
|
||||
if query_keys:
|
||||
with self.client() as client:
|
||||
query_result = client.unihash_exists_batch(query[k] for k in query_keys)
|
||||
|
||||
for idx, key in enumerate(query_keys):
|
||||
exists = query_result[idx]
|
||||
if exists:
|
||||
self.unihash_exists_cache.add(query[key])
|
||||
result[key] = exists
|
||||
|
||||
return result
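The unihashes_exist() code just above batches the "does this unihash exist?" lookups into a single client call and keeps a positive-only in-memory cache, so hashes already known to exist are never re-queried. A minimal standalone sketch of that pattern, with StubClient and ExistsCache as hypothetical stand-ins for the real hash equivalence client:

# Illustrative sketch only; names are hypothetical, not BitBake API.
class StubClient:
    def __init__(self, known):
        self.known = set(known)

    def unihash_exists_batch(self, unihashes):
        # One round trip answers all queries instead of one per hash
        return [h in self.known for h in unihashes]

class ExistsCache:
    def __init__(self, client):
        self.client = client
        self.cache = set()   # only positive results are cached

    def unihashes_exist(self, query):
        result = {}
        misses = []
        for key, unihash in query.items():
            if unihash in self.cache:
                result[key] = True
            else:
                misses.append(key)
        if misses:
            answers = self.client.unihash_exists_batch(query[k] for k in misses)
            for key, exists in zip(misses, answers):
                if exists:
                    self.cache.add(query[key])
                result[key] = exists
        return result

if __name__ == "__main__":
    cache = ExistsCache(StubClient({"aaa", "bbb"}))
    print(cache.unihashes_exist({"t1": "aaa", "t2": "ccc"}))  # {'t1': True, 't2': False}
    print(cache.unihashes_exist({"t1": "aaa"}))               # answered from the cache, no client call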
|
||||
|
||||
def get_unihash(self, tid):
|
||||
return self.get_unihashes([tid])[tid]
|
||||
|
||||
def get_unihashes(self, tids):
|
||||
"""
|
||||
For a iterable of tids, returns a dictionary that maps each tid to a
|
||||
unihash
|
||||
"""
|
||||
result = {}
|
||||
query_tids = []
|
||||
|
||||
for tid in tids:
|
||||
unihash = self.get_cached_unihash(tid)
|
||||
if unihash:
|
||||
result[tid] = unihash
|
||||
else:
|
||||
query_tids.append(tid)
|
||||
|
||||
if query_tids:
|
||||
unihashes = []
|
||||
try:
|
||||
with self.client() as client:
|
||||
unihashes = client.get_unihash_batch((self._get_method(tid), self.taskhash[tid]) for tid in query_tids)
|
||||
except (ConnectionError, FileNotFoundError) as e:
|
||||
bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
|
||||
|
||||
for idx, tid in enumerate(query_tids):
|
||||
# In the absence of being able to discover a unique hash from the
|
||||
# server, make it be equivalent to the taskhash. The unique "hash" only
|
||||
# really needs to be a unique string (not even necessarily a hash), but
|
||||
# making it match the taskhash has a few advantages:
|
||||
#
|
||||
# 1) All of the sstate code that assumes hashes can be the same
|
||||
# 2) It provides maximal compatibility with builders that don't use
|
||||
# an equivalency server
|
||||
# 3) The value is easy for multiple independent builders to derive the
|
||||
# same unique hash from the same input. This means that if the
|
||||
# independent builders find the same taskhash, but it isn't reported
|
||||
# to the server, there is a better chance that they will agree on
|
||||
# the unique hash.
|
||||
taskhash = self.taskhash[tid]
|
||||
|
||||
if unihashes and unihashes[idx]:
|
||||
unihash = unihashes[idx]
|
||||
try:
|
||||
method = self.method
|
||||
if tid in self.extramethod:
|
||||
method = method + self.extramethod[tid]
|
||||
data = self.client().get_unihash(method, self.taskhash[tid])
|
||||
if data:
|
||||
unihash = data
|
||||
# A unique hash equal to the taskhash is not very interesting,
|
||||
# so it is reported it at debug level 2. If they differ, that
|
||||
# is much more interesting, so it is reported at debug level 1
|
||||
hashequiv_logger.bbdebug((1, 2)[unihash == taskhash], 'Found unihash %s in place of %s for %s from %s' % (unihash, taskhash, tid, self.server))
|
||||
hashequiv_logger.debug((1, 2)[unihash == taskhash], 'Found unihash %s in place of %s for %s from %s' % (unihash, taskhash, tid, self.server))
|
||||
else:
|
||||
hashequiv_logger.debug2('No reported unihash for %s:%s from %s' % (tid, taskhash, self.server))
|
||||
unihash = taskhash
|
||||
except ConnectionError as e:
|
||||
bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
|
||||
|
||||
self.set_unihash(tid, unihash)
|
||||
self.unihash[tid] = unihash
|
||||
result[tid] = unihash
|
||||
|
||||
return result
|
||||
self.set_unihash(tid, unihash)
|
||||
self.unihash[tid] = unihash
|
||||
return unihash
|
||||
|
||||
def report_unihash(self, path, task, d):
|
||||
import importlib
|
||||
@@ -750,8 +592,8 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
unihash = d.getVar('BB_UNIHASH')
|
||||
report_taskdata = d.getVar('SSTATE_HASHEQUIV_REPORT_TASKDATA') == '1'
|
||||
tempdir = d.getVar('T')
|
||||
mcfn = d.getVar('BB_FILENAME')
|
||||
tid = mcfn + ':do_' + task
|
||||
fn = d.getVar('BB_FILENAME')
|
||||
tid = fn + ':do_' + task
|
||||
key = tid + ':' + taskhash
|
||||
|
||||
if self.setscenetasks and tid not in self.setscenetasks:
|
||||
@@ -805,19 +647,17 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
if tid in self.extramethod:
|
||||
method = method + self.extramethod[tid]
|
||||
|
||||
with self.client() as client:
|
||||
data = client.report_unihash(taskhash, method, outhash, unihash, extra_data)
|
||||
|
||||
data = self.client().report_unihash(taskhash, method, outhash, unihash, extra_data)
|
||||
new_unihash = data['unihash']
|
||||
|
||||
if new_unihash != unihash:
|
||||
hashequiv_logger.debug('Task %s unihash changed %s -> %s by server %s' % (taskhash, unihash, new_unihash, self.server))
|
||||
bb.event.fire(bb.runqueue.taskUniHashUpdate(mcfn + ':do_' + task, new_unihash), d)
|
||||
bb.event.fire(bb.runqueue.taskUniHashUpdate(fn + ':do_' + task, new_unihash), d)
|
||||
self.set_unihash(tid, new_unihash)
|
||||
d.setVar('BB_UNIHASH', new_unihash)
|
||||
else:
|
||||
hashequiv_logger.debug('Reported task %s as unihash %s to %s' % (taskhash, unihash, self.server))
|
||||
except (ConnectionError, FileNotFoundError) as e:
|
||||
except ConnectionError as e:
|
||||
bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
|
||||
finally:
|
||||
if sigfile:
|
||||
@@ -838,9 +678,7 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
if tid in self.extramethod:
|
||||
method = method + self.extramethod[tid]
|
||||
|
||||
with self.client() as client:
|
||||
data = client.report_unihash_equiv(taskhash, method, wanted_unihash, extra_data)
|
||||
|
||||
data = self.client().report_unihash_equiv(taskhash, method, wanted_unihash, extra_data)
|
||||
hashequiv_logger.verbose('Reported task %s as unihash %s to %s (%s)' % (tid, wanted_unihash, self.server, str(data)))
|
||||
|
||||
if data is None:
|
||||
@@ -859,7 +697,7 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
# TODO: What to do here?
|
||||
hashequiv_logger.verbose('Task %s unihash reported as unwanted hash %s' % (tid, finalunihash))
|
||||
|
||||
except (ConnectionError, FileNotFoundError) as e:
|
||||
except ConnectionError as e:
|
||||
bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
|
||||
|
||||
return False
|
||||
@@ -874,18 +712,19 @@ class SignatureGeneratorTestEquivHash(SignatureGeneratorUniHashMixIn, SignatureG
|
||||
self.server = data.getVar('BB_HASHSERVE')
|
||||
self.method = "sstate_output_hash"
|
||||
|
||||
def clean_checksum_file_path(file_checksum_tuple):
|
||||
f, cs = file_checksum_tuple
|
||||
if "/./" in f:
|
||||
return "./" + f.split("/./")[1]
|
||||
return os.path.basename(f)
|
||||
#
|
||||
# Dummy class used for bitbake-selftest
|
||||
#
|
||||
class SignatureGeneratorTestMulticonfigDepends(SignatureGeneratorBasicHash):
|
||||
name = "TestMulticonfigDepends"
|
||||
supports_multiconfig_datacaches = True
|
||||
|
||||
def dump_this_task(outfile, d):
|
||||
import bb.parse
|
||||
mcfn = d.getVar("BB_FILENAME")
|
||||
fn = d.getVar("BB_FILENAME")
|
||||
task = "do_" + d.getVar("BB_CURRENTTASK")
|
||||
referencestamp = bb.parse.siggen.stampfile_base(mcfn)
|
||||
bb.parse.siggen.dump_sigtask(mcfn, task, outfile, "customfile:" + referencestamp)
|
||||
referencestamp = bb.build.stamp_internal(task, d, None, True)
|
||||
bb.parse.siggen.dump_sigtask(fn, task, outfile, "customfile:" + referencestamp)
|
||||
|
||||
def init_colors(enable_color):
|
||||
"""Initialise colour dict for passing to compare_sigfiles()"""
|
||||
@@ -938,6 +777,39 @@ def list_inline_diff(oldlist, newlist, colors=None):
|
||||
ret.append(item)
|
||||
return '[%s]' % (', '.join(ret))
|
||||
|
||||
def clean_basepath(basepath):
|
||||
basepath, dir, recipe_task = basepath.rsplit("/", 2)
|
||||
cleaned = dir + '/' + recipe_task
|
||||
|
||||
if basepath[0] == '/':
|
||||
return cleaned
|
||||
|
||||
if basepath.startswith("mc:") and basepath.count(':') >= 2:
|
||||
mc, mc_name, basepath = basepath.split(":", 2)
|
||||
mc_suffix = ':mc:' + mc_name
|
||||
else:
|
||||
mc_suffix = ''
|
||||
|
||||
# mc stuff now removed from basepath. Whatever was next, if present will be the first
|
||||
# suffix. ':/', recipe path start, marks the end of this. Something like
|
||||
# 'virtual:a[:b[:c]]:/path...' (b and c being optional)
|
||||
if basepath[0] != '/':
|
||||
cleaned += ':' + basepath.split(':/', 1)[0]
|
||||
|
||||
return cleaned + mc_suffix
|
||||
|
||||
def clean_basepaths(a):
|
||||
b = {}
|
||||
for x in a:
|
||||
b[clean_basepath(x)] = a[x]
|
||||
return b
|
||||
|
||||
def clean_basepaths_list(a):
|
||||
b = []
|
||||
for x in a:
|
||||
b.append(clean_basepath(x))
|
||||
return b
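For reference, a short worked example of what the clean_basepath() helper shown in this hunk produces for typical signature basepaths; the example paths are hypothetical and the function body is copied from the hunk above:

def clean_basepath(basepath):
    basepath, dir, recipe_task = basepath.rsplit("/", 2)
    cleaned = dir + '/' + recipe_task

    if basepath[0] == '/':
        return cleaned

    if basepath.startswith("mc:") and basepath.count(':') >= 2:
        mc, mc_name, basepath = basepath.split(":", 2)
        mc_suffix = ':mc:' + mc_name
    else:
        mc_suffix = ''

    # Whatever precedes the absolute recipe path (':/') is kept as a suffix,
    # e.g. 'virtual:native' for class-extended recipes.
    if basepath[0] != '/':
        cleaned += ':' + basepath.split(':/', 1)[0]

    return cleaned + mc_suffix

# Plain recipe path: only 'directory/recipe:task' is kept
print(clean_basepath("/home/user/poky/meta/recipes-core/busybox/busybox_1.36.1.bb:do_compile"))
# -> busybox/busybox_1.36.1.bb:do_compile

# Multiconfig and virtual class extension are folded into suffixes
print(clean_basepath("mc:mymc:virtual:native:/home/user/poky/meta/recipes-core/busybox/busybox_1.36.1.bb:do_compile"))
# -> busybox/busybox_1.36.1.bb:do_compile:virtual:native:mc:mymc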
|
||||
|
||||
# Handled renamed fields
|
||||
def handle_renames(data):
|
||||
if 'basewhitelist' in data:
|
||||
@@ -968,18 +840,10 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
formatparams.update(values)
|
||||
return formatstr.format(**formatparams)
|
||||
|
||||
try:
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
except (TypeError, OSError) as err:
|
||||
bb.error("Failed to open sigdata file '%s': %s" % (a, str(err)))
|
||||
raise err
|
||||
try:
|
||||
with bb.compress.zstd.open(b, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
b_data = json.load(f, object_hook=SetDecoder)
|
||||
except (TypeError, OSError) as err:
|
||||
bb.error("Failed to open sigdata file '%s': %s" % (b, str(err)))
|
||||
raise err
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
with bb.compress.zstd.open(b, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
b_data = json.load(f, object_hook=SetDecoder)
|
||||
|
||||
for data in [a_data, b_data]:
|
||||
handle_renames(data)
|
||||
@@ -1114,11 +978,11 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
a = a_data['runtaskdeps'][idx]
|
||||
b = b_data['runtaskdeps'][idx]
|
||||
if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b] and not collapsed:
|
||||
changed.append("%s with hash %s\n changed to\n%s with hash %s" % (a, a_data['runtaskhashes'][a], b, b_data['runtaskhashes'][b]))
|
||||
changed.append("%s with hash %s\n changed to\n%s with hash %s" % (clean_basepath(a), a_data['runtaskhashes'][a], clean_basepath(b), b_data['runtaskhashes'][b]))
|
||||
|
||||
if changed:
|
||||
clean_a = a_data['runtaskdeps']
|
||||
clean_b = b_data['runtaskdeps']
|
||||
clean_a = clean_basepaths_list(a_data['runtaskdeps'])
|
||||
clean_b = clean_basepaths_list(b_data['runtaskdeps'])
|
||||
if clean_a != clean_b:
|
||||
output.append(color_format("{color_title}runtaskdeps changed:{color_default}\n%s") % list_inline_diff(clean_a, clean_b, colors))
|
||||
else:
|
||||
@@ -1139,7 +1003,7 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
#output.append("Dependency on task %s was replaced by %s with same hash" % (dep, bdep))
|
||||
bdep_found = True
|
||||
if not bdep_found:
|
||||
output.append(color_format("{color_title}Dependency on task %s was added{color_default} with hash %s") % (dep, b[dep]))
|
||||
output.append(color_format("{color_title}Dependency on task %s was added{color_default} with hash %s") % (clean_basepath(dep), b[dep]))
|
||||
if removed:
|
||||
for dep in sorted(removed):
|
||||
adep_found = False
|
||||
@@ -1149,11 +1013,11 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
#output.append("Dependency on task %s was replaced by %s with same hash" % (adep, dep))
|
||||
adep_found = True
|
||||
if not adep_found:
|
||||
output.append(color_format("{color_title}Dependency on task %s was removed{color_default} with hash %s") % (dep, a[dep]))
|
||||
output.append(color_format("{color_title}Dependency on task %s was removed{color_default} with hash %s") % (clean_basepath(dep), a[dep]))
|
||||
if changed:
|
||||
for dep in sorted(changed):
|
||||
if not collapsed:
|
||||
output.append(color_format("{color_title}Hash for task dependency %s changed{color_default} from %s to %s") % (dep, a[dep], b[dep]))
|
||||
output.append(color_format("{color_title}Hash for task dependency %s changed{color_default} from %s to %s") % (clean_basepath(dep), a[dep], b[dep]))
|
||||
if callable(recursecb):
|
||||
recout = recursecb(dep, a[dep], b[dep])
|
||||
if recout:
|
||||
@@ -1163,7 +1027,6 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
# If a dependent hash changed, might as well print the line above and then defer to the changes in
|
||||
# that hash since in all likelyhood, they're the same changes this task also saw.
|
||||
output = [output[-1]] + recout
|
||||
break
|
||||
|
||||
a_taint = a_data.get('taint', None)
|
||||
b_taint = b_data.get('taint', None)
|
||||
@@ -1185,7 +1048,7 @@ def calc_basehash(sigdata):
|
||||
basedata = ''
|
||||
|
||||
alldeps = sigdata['taskdeps']
|
||||
for dep in sorted(alldeps):
|
||||
for dep in alldeps:
|
||||
basedata = basedata + dep
|
||||
val = sigdata['varvals'][dep]
|
||||
if val is not None:
|
||||
@@ -1217,12 +1080,8 @@ def calc_taskhash(sigdata):
|
||||
def dump_sigfile(a):
|
||||
output = []
|
||||
|
||||
try:
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
except (TypeError, OSError) as err:
|
||||
bb.error("Failed to open sigdata file '%s': %s" % (a, str(err)))
|
||||
raise err
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
|
||||
handle_renames(a_data)
|
||||
|
||||
|
||||
@@ -44,7 +44,6 @@ class VariableReferenceTest(ReferenceTest):
|
||||
def parseExpression(self, exp):
|
||||
parsedvar = self.d.expandWithRefs(exp, None)
|
||||
self.references = parsedvar.references
|
||||
self.execs = parsedvar.execs
|
||||
|
||||
def test_simple_reference(self):
|
||||
self.setEmptyVars(["FOO"])
|
||||
@@ -62,11 +61,6 @@ class VariableReferenceTest(ReferenceTest):
|
||||
self.parseExpression("${@d.getVar('BAR') + 'foo'}")
|
||||
self.assertReferences(set(["BAR"]))
|
||||
|
||||
def test_python_exec_reference(self):
|
||||
self.parseExpression("${@eval('3 * 5')}")
|
||||
self.assertReferences(set())
|
||||
self.assertExecs(set(["eval"]))
|
||||
|
||||
class ShellReferenceTest(ReferenceTest):
|
||||
|
||||
def parseExpression(self, exp):
|
||||
@@ -106,46 +100,6 @@ ${D}${libdir}/pkgconfig/*.pc
|
||||
self.parseExpression("foo=$(echo bar)")
|
||||
self.assertExecs(set(["echo"]))
|
||||
|
||||
def test_assign_subshell_expansion_quotes(self):
|
||||
self.parseExpression('foo="$(echo bar)"')
|
||||
self.assertExecs(set(["echo"]))
|
||||
|
||||
def test_assign_subshell_expansion_nested(self):
|
||||
self.parseExpression('foo="$(func1 "$(func2 bar$(func3))")"')
|
||||
self.assertExecs(set(["func1", "func2", "func3"]))
|
||||
|
||||
def test_assign_subshell_expansion_multiple(self):
|
||||
self.parseExpression('foo="$(func1 "$(func2)") $(func3)"')
|
||||
self.assertExecs(set(["func1", "func2", "func3"]))
|
||||
|
||||
def test_assign_subshell_expansion_escaped_quotes(self):
|
||||
self.parseExpression('foo="\\"fo\\"o$(func1)"')
|
||||
self.assertExecs(set(["func1"]))
|
||||
|
||||
def test_assign_subshell_expansion_empty(self):
|
||||
self.parseExpression('foo="bar$()foo"')
|
||||
self.assertExecs(set())
|
||||
|
||||
def test_assign_subshell_backticks(self):
|
||||
self.parseExpression("foo=`echo bar`")
|
||||
self.assertExecs(set(["echo"]))
|
||||
|
||||
def test_assign_subshell_backticks_quotes(self):
|
||||
self.parseExpression('foo="`echo bar`"')
|
||||
self.assertExecs(set(["echo"]))
|
||||
|
||||
def test_assign_subshell_backticks_multiple(self):
|
||||
self.parseExpression('foo="`func1 bar` `func2`"')
|
||||
self.assertExecs(set(["func1", "func2"]))
|
||||
|
||||
def test_assign_subshell_backticks_escaped_quotes(self):
|
||||
self.parseExpression('foo="\\"fo\\"o`func1`"')
|
||||
self.assertExecs(set(["func1"]))
|
||||
|
||||
def test_assign_subshell_backticks_empty(self):
|
||||
self.parseExpression('foo="bar``foo"')
|
||||
self.assertExecs(set())
|
||||
|
||||
def test_shell_unexpanded(self):
|
||||
self.setEmptyVars(["QT_BASE_NAME"])
|
||||
self.parseExpression('echo "${QT_BASE_NAME}"')
|
||||
@@ -364,7 +318,7 @@ d.getVar(a(), False)
|
||||
"filename": "example.bb",
|
||||
})
|
||||
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
|
||||
self.assertEqual(deps, set(["somevar", "bar", "something", "inexpand", "test", "test2", "a"]))
|
||||
|
||||
@@ -411,7 +365,7 @@ esac
|
||||
self.d.setVarFlags("FOO", {"func": True})
|
||||
self.setEmptyVars(execs)
|
||||
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
|
||||
self.assertEqual(deps, set(["somevar", "inverted"] + execs))
|
||||
|
||||
@@ -421,7 +375,7 @@ esac
|
||||
self.d.setVar("FOO", "foo=oe_libinstall; eval $foo")
|
||||
self.d.setVarFlag("FOO", "vardeps", "oe_libinstall")
|
||||
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
|
||||
self.assertEqual(deps, set(["oe_libinstall"]))
|
||||
|
||||
@@ -430,7 +384,7 @@ esac
|
||||
self.d.setVar("FOO", "foo=oe_libinstall; eval $foo")
|
||||
self.d.setVarFlag("FOO", "vardeps", "${@'oe_libinstall'}")
|
||||
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
|
||||
self.assertEqual(deps, set(["oe_libinstall"]))
|
||||
|
||||
@@ -445,7 +399,7 @@ esac
|
||||
# Check dependencies
|
||||
self.d.setVar('ANOTHERVAR', expr)
|
||||
self.d.setVar('TESTVAR', 'anothervalue testval testval2')
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
self.assertEqual(sorted(values.splitlines()),
|
||||
sorted([expr,
|
||||
'TESTVAR{anothervalue} = Set',
|
||||
@@ -464,49 +418,23 @@ esac
|
||||
self.d.setVar('ANOTHERVAR', varval)
|
||||
self.d.setVar('TESTVAR', 'anothervalue testval testval2')
|
||||
self.d.setVar('TESTVAR2', 'testval3')
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), set(["TESTVAR"]), self.d, self.d)
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(["TESTVAR"]), self.d)
|
||||
self.assertEqual(sorted(values.splitlines()), sorted([varval]))
|
||||
self.assertEqual(deps, set(["TESTVAR2"]))
|
||||
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval3', 'anothervalue'])
|
||||
|
||||
# Check the vardepsexclude flag is handled by contains functionality
|
||||
self.d.setVarFlag('ANOTHERVAR', 'vardepsexclude', 'TESTVAR')
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
self.assertEqual(sorted(values.splitlines()), sorted([varval]))
|
||||
self.assertEqual(deps, set(["TESTVAR2"]))
|
||||
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval3', 'anothervalue'])
|
||||
|
||||
def test_contains_vardeps_override_operators(self):
|
||||
# Check override operators handle dependencies correctly with the contains functionality
|
||||
expr_plain = 'testval'
|
||||
expr_prepend = '${@bb.utils.filter("TESTVAR1", "testval1", d)} '
|
||||
expr_append = ' ${@bb.utils.filter("TESTVAR2", "testval2", d)}'
|
||||
expr_remove = '${@bb.utils.contains("TESTVAR3", "no-testval", "testval", "", d)}'
|
||||
# Check dependencies
|
||||
self.d.setVar('ANOTHERVAR', expr_plain)
|
||||
self.d.prependVar('ANOTHERVAR', expr_prepend)
|
||||
self.d.appendVar('ANOTHERVAR', expr_append)
|
||||
self.d.setVar('ANOTHERVAR:remove', expr_remove)
|
||||
self.d.setVar('TESTVAR1', 'blah')
|
||||
self.d.setVar('TESTVAR2', 'testval2')
|
||||
self.d.setVar('TESTVAR3', 'no-testval')
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
|
||||
self.assertEqual(sorted(values.splitlines()),
|
||||
sorted([
|
||||
expr_prepend + expr_plain + expr_append,
|
||||
'_remove of ' + expr_remove,
|
||||
'TESTVAR1{testval1} = Unset',
|
||||
'TESTVAR2{testval2} = Set',
|
||||
'TESTVAR3{no-testval} = Set',
|
||||
]))
|
||||
# Check final value
|
||||
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval2'])
|
||||
|
||||
#Currently no wildcard support
|
||||
#def test_vardeps_wildcards(self):
|
||||
# self.d.setVar("oe_libinstall", "echo test")
|
||||
# self.d.setVar("FOO", "foo=oe_libinstall; eval $foo")
|
||||
# self.d.setVarFlag("FOO", "vardeps", "oe_*")
|
||||
# self.assertEqual(deps, set(["oe_libinstall"]))
|
||||
# self.assertEquals(deps, set(["oe_libinstall"]))
|
||||
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ class ProgressWatcher:
|
||||
def __init__(self):
|
||||
self._reports = []
|
||||
|
||||
def handle_event(self, event, d):
|
||||
def handle_event(self, event):
|
||||
self._reports.append((event.progress, event.rate))
|
||||
|
||||
def reports(self):
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -66,8 +64,8 @@ class CompressionTests(object):
|
||||
|
||||
class LZ4Tests(CompressionTests, unittest.TestCase):
|
||||
def setUp(self):
|
||||
if shutil.which("lz4") is None:
|
||||
self.skipTest("'lz4' not found")
|
||||
if shutil.which("lz4c") is None:
|
||||
self.skipTest("'lz4c' not found")
|
||||
super().setUp()
|
||||
|
||||
@contextlib.contextmanager
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
#
|
||||
# BitBake Tests for cooker.py
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -60,15 +60,6 @@ class DataExpansions(unittest.TestCase):
|
||||
val = self.d.expand("${@5*12}")
|
||||
self.assertEqual(str(val), "60")
|
||||
|
||||
def test_python_snippet_w_dict(self):
|
||||
val = self.d.expand("${@{ 'green': 1, 'blue': 2 }['green']}")
|
||||
self.assertEqual(str(val), "1")
|
||||
|
||||
def test_python_unexpanded_multi(self):
|
||||
self.d.setVar("bar", "${unsetvar}")
|
||||
val = self.d.expand("${@2*2},${foo},${@d.getVar('foo') + ' ${bar}'},${foo}")
|
||||
self.assertEqual(str(val), "4,value_of_foo,${@d.getVar('foo') + ' ${unsetvar}'},value_of_foo")
|
||||
|
||||
def test_expand_in_python_snippet(self):
|
||||
val = self.d.expand("${@'boo ' + '${foo}'}")
|
||||
self.assertEqual(str(val), "boo value_of_foo")
|
||||
@@ -77,18 +68,6 @@ class DataExpansions(unittest.TestCase):
|
||||
val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
|
||||
self.assertEqual(str(val), "value_of_foo value_of_bar")
|
||||
|
||||
def test_python_snippet_function_reference(self):
|
||||
self.d.setVar("TESTVAL", "testvalue")
|
||||
self.d.setVar("testfunc", 'd.getVar("TESTVAL")')
|
||||
context = bb.utils.get_context()
|
||||
context["testfunc"] = lambda d: d.getVar("TESTVAL")
|
||||
val = self.d.expand("${@testfunc(d)}")
|
||||
self.assertEqual(str(val), "testvalue")
|
||||
|
||||
def test_python_snippet_builtin_metadata(self):
|
||||
self.d.setVar("eval", "INVALID")
|
||||
self.d.expand("${@eval('3')}")
|
||||
|
||||
def test_python_unexpanded(self):
|
||||
self.d.setVar("bar", "${unsetvar}")
|
||||
val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
|
||||
@@ -395,16 +374,6 @@ class TestOverrides(unittest.TestCase):
|
||||
self.d.setVar("OVERRIDES", "foo:bar:some_val")
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue3")
|
||||
|
||||
# Test an override with _<numeric> in it based on a real world OE issue
|
||||
def test_underscore_override_2(self):
|
||||
self.d.setVar("TARGET_ARCH", "x86_64")
|
||||
self.d.setVar("PN", "test-${TARGET_ARCH}")
|
||||
self.d.setVar("VERSION", "1")
|
||||
self.d.setVar("VERSION:pn-test-${TARGET_ARCH}", "2")
|
||||
self.d.setVar("OVERRIDES", "pn-${PN}")
|
||||
bb.data.expandKeys(self.d)
|
||||
self.assertEqual(self.d.getVar("VERSION"), "2")
|
||||
|
||||
def test_remove_with_override(self):
|
||||
self.d.setVar("TEST:bar", "testvalue2")
|
||||
self.d.setVar("TEST:some_val", "testvalue3 testvalue5")
|
||||
@@ -426,6 +395,16 @@ class TestOverrides(unittest.TestCase):
|
||||
self.d.setVar("TEST:bar:append", "testvalue2")
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue2")
|
||||
|
||||
# Test an override with _<numeric> in it based on a real world OE issue
|
||||
def test_underscore_override(self):
|
||||
self.d.setVar("TARGET_ARCH", "x86_64")
|
||||
self.d.setVar("PN", "test-${TARGET_ARCH}")
|
||||
self.d.setVar("VERSION", "1")
|
||||
self.d.setVar("VERSION:pn-test-${TARGET_ARCH}", "2")
|
||||
self.d.setVar("OVERRIDES", "pn-${PN}")
|
||||
bb.data.expandKeys(self.d)
|
||||
self.assertEqual(self.d.getVar("VERSION"), "2")
|
||||
|
||||
def test_append_and_unused_override(self):
|
||||
# Had a bug where an unused override append could return "" instead of None
|
||||
self.d.setVar("BAR:append:unusedoverride", "testvalue2")
|
||||
|
||||
@@ -13,7 +13,6 @@ import pickle
|
||||
import threading
|
||||
import time
|
||||
import unittest
|
||||
import tempfile
|
||||
from unittest.mock import Mock
|
||||
from unittest.mock import call
|
||||
|
||||
@@ -158,7 +157,7 @@ class EventHandlingTest(unittest.TestCase):
|
||||
self._test_process.event_handler,
|
||||
event,
|
||||
None)
|
||||
self._test_process.event_handler.assert_called_once_with(event, None)
|
||||
self._test_process.event_handler.assert_called_once_with(event)
|
||||
|
||||
def test_fire_class_handlers(self):
|
||||
""" Test fire_class_handlers method """
|
||||
@@ -176,10 +175,10 @@ class EventHandlingTest(unittest.TestCase):
|
||||
bb.event.fire_class_handlers(event1, None)
|
||||
bb.event.fire_class_handlers(event2, None)
|
||||
bb.event.fire_class_handlers(event2, None)
|
||||
expected_event_handler1 = [call(event1, None)]
|
||||
expected_event_handler2 = [call(event1, None),
|
||||
call(event2, None),
|
||||
call(event2, None)]
|
||||
expected_event_handler1 = [call(event1)]
|
||||
expected_event_handler2 = [call(event1),
|
||||
call(event2),
|
||||
call(event2)]
|
||||
self.assertEqual(self._test_process.event_handler1.call_args_list,
|
||||
expected_event_handler1)
|
||||
self.assertEqual(self._test_process.event_handler2.call_args_list,
|
||||
@@ -206,7 +205,7 @@ class EventHandlingTest(unittest.TestCase):
|
||||
bb.event.fire_class_handlers(event2, None)
|
||||
bb.event.fire_class_handlers(event2, None)
|
||||
expected_event_handler1 = []
|
||||
expected_event_handler2 = [call(event1, None)]
|
||||
expected_event_handler2 = [call(event1)]
|
||||
self.assertEqual(self._test_process.event_handler1.call_args_list,
|
||||
expected_event_handler1)
|
||||
self.assertEqual(self._test_process.event_handler2.call_args_list,
|
||||
@@ -224,7 +223,7 @@ class EventHandlingTest(unittest.TestCase):
|
||||
self.assertEqual(result, bb.event.Registered)
|
||||
bb.event.fire_class_handlers(event1, None)
|
||||
bb.event.fire_class_handlers(event2, None)
|
||||
expected = [call(event1, None), call(event2, None)]
|
||||
expected = [call(event1), call(event2)]
|
||||
self.assertEqual(self._test_process.event_handler1.call_args_list,
|
||||
expected)
|
||||
|
||||
@@ -238,7 +237,7 @@ class EventHandlingTest(unittest.TestCase):
|
||||
self.assertEqual(result, bb.event.Registered)
|
||||
bb.event.fire_class_handlers(event1, None)
|
||||
bb.event.fire_class_handlers(event2, None)
|
||||
expected = [call(event1, None), call(event2, None), call(event1, None)]
|
||||
expected = [call(event1), call(event2), call(event1)]
|
||||
self.assertEqual(self._test_process.event_handler1.call_args_list,
|
||||
expected)
|
||||
|
||||
@@ -252,7 +251,7 @@ class EventHandlingTest(unittest.TestCase):
|
||||
self.assertEqual(result, bb.event.Registered)
|
||||
bb.event.fire_class_handlers(event1, None)
|
||||
bb.event.fire_class_handlers(event2, None)
|
||||
expected = [call(event1,None), call(event2, None), call(event1, None), call(event2, None)]
|
||||
expected = [call(event1), call(event2), call(event1), call(event2)]
|
||||
self.assertEqual(self._test_process.event_handler1.call_args_list,
|
||||
expected)
|
||||
|
||||
@@ -360,10 +359,9 @@ class EventHandlingTest(unittest.TestCase):
|
||||
|
||||
event1 = bb.event.ConfigParsed()
|
||||
bb.event.fire(event1, None)
|
||||
expected = [call(event1, None)]
|
||||
expected = [call(event1)]
|
||||
self.assertEqual(self._test_process.event_handler1.call_args_list,
|
||||
expected)
|
||||
expected = [call(event1)]
|
||||
self.assertEqual(self._test_ui1.event.send.call_args_list,
|
||||
expected)
|
||||
|
||||
@@ -452,9 +450,10 @@ class EventHandlingTest(unittest.TestCase):
|
||||
and disable threadlocks tests """
|
||||
bb.event.fire(bb.event.OperationStarted(), None)
|
||||
|
||||
def test_event_threadlock(self):
|
||||
def test_enable_threadlock(self):
|
||||
""" Test enable_threadlock method """
|
||||
self._set_threadlock_test_mockups()
|
||||
bb.event.enable_threadlock()
|
||||
self._set_and_run_threadlock_test_workers()
|
||||
# Calls to UI handlers should be in order as all the registered
|
||||
# handlers for the event coming from the first worker should be
|
||||
@@ -462,6 +461,20 @@ class EventHandlingTest(unittest.TestCase):
|
||||
self.assertEqual(self._threadlock_test_calls,
|
||||
["w1_ui1", "w1_ui2", "w2_ui1", "w2_ui2"])
|
||||
|
||||
|
||||
def test_disable_threadlock(self):
|
||||
""" Test disable_threadlock method """
|
||||
self._set_threadlock_test_mockups()
|
||||
bb.event.disable_threadlock()
|
||||
self._set_and_run_threadlock_test_workers()
|
||||
# Calls to UI handlers should be intertwined together. Thanks to the
|
||||
# delay in the registered handlers for the event coming from the first
|
||||
# worker, the event coming from the second worker starts being
|
||||
# processed before finishing handling the first worker event.
|
||||
self.assertEqual(self._threadlock_test_calls,
|
||||
["w1_ui1", "w2_ui1", "w1_ui2", "w2_ui2"])
|
||||
|
||||
|
||||
class EventClassesTest(unittest.TestCase):
|
||||
""" Event classes test class """
|
||||
|
||||
@@ -469,8 +482,6 @@ class EventClassesTest(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
bb.event.worker_pid = EventClassesTest._worker_pid
|
||||
self.d = bb.data.init()
|
||||
bb.parse.siggen = bb.siggen.init(self.d)
|
||||
|
||||
def test_Event(self):
|
||||
""" Test the Event base class """
|
||||
@@ -953,24 +964,3 @@ class EventClassesTest(unittest.TestCase):
|
||||
event = bb.event.FindSigInfoResult(result)
|
||||
self.assertEqual(event.result, result)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_lineno_in_eventhandler(self):
|
||||
# The error lineno is 5, not 4 since the first line is '\n'
|
||||
error_line = """
|
||||
# Comment line1
|
||||
# Comment line2
|
||||
python test_lineno_in_eventhandler() {
|
||||
This is an error line
|
||||
}
|
||||
addhandler test_lineno_in_eventhandler
|
||||
test_lineno_in_eventhandler[eventmask] = "bb.event.ConfigParsed"
|
||||
"""
|
||||
|
||||
with self.assertLogs() as logs:
|
||||
f = tempfile.NamedTemporaryFile(suffix = '.bb')
|
||||
f.write(bytes(error_line, "utf-8"))
|
||||
f.flush()
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
output = "".join(logs.output)
|
||||
self.assertTrue(" line 5\n" in output)
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
<!DOCTYPE html><html><head><meta http-equiv="content-type" content="text/html; charset=utf-8"><meta name="viewport" content="width=device-width"><style type="text/css">body,html {background:#fff;font-family:"Bitstream Vera Sans","Lucida Grande","Lucida Sans Unicode",Lucidux,Verdana,Lucida,sans-serif;}tr:nth-child(even) {background:#f4f4f4;}th,td {padding:0.1em 0.5em;}th {text-align:left;font-weight:bold;background:#eee;border-bottom:1px solid #aaa;}#list {border:1px solid #aaa;width:100%;}a {color:#a33;}a:hover {color:#e33;}</style>
|
||||
|
||||
<title>Index of /sources/libxml2/2.10/</title>
|
||||
</head><body><h1>Index of /sources/libxml2/2.10/</h1>
|
||||
<table id="list"><thead><tr><th style="width:55%"><a href="?C=N&O=A">File Name</a> <a href="?C=N&O=D"> ↓ </a></th><th style="width:20%"><a href="?C=S&O=A">File Size</a> <a href="?C=S&O=D"> ↓ </a></th><th style="width:25%"><a href="?C=M&O=A">Date</a> <a href="?C=M&O=D"> ↓ </a></th></tr></thead>
|
||||
<tbody><tr><td class="link"><a href="../">Parent directory/</a></td><td class="size">-</td><td class="date">-</td></tr>
|
||||
<tr><td class="link"><a href="LATEST-IS-2.10.3" title="LATEST-IS-2.10.3">LATEST-IS-2.10.3</a></td><td class="size">2.5 MiB</td><td class="date">2022-Oct-14 12:55</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.10.0.news" title="libxml2-2.10.0.news">libxml2-2.10.0.news</a></td><td class="size">7.1 KiB</td><td class="date">2022-Aug-17 11:55</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.10.0.sha256sum" title="libxml2-2.10.0.sha256sum">libxml2-2.10.0.sha256sum</a></td><td class="size">174 B</td><td class="date">2022-Aug-17 11:55</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.10.0.tar.xz" title="libxml2-2.10.0.tar.xz">libxml2-2.10.0.tar.xz</a></td><td class="size">2.6 MiB</td><td class="date">2022-Aug-17 11:55</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.10.1.news" title="libxml2-2.10.1.news">libxml2-2.10.1.news</a></td><td class="size">455 B</td><td class="date">2022-Aug-25 11:33</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.10.1.sha256sum" title="libxml2-2.10.1.sha256sum">libxml2-2.10.1.sha256sum</a></td><td class="size">174 B</td><td class="date">2022-Aug-25 11:33</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.10.1.tar.xz" title="libxml2-2.10.1.tar.xz">libxml2-2.10.1.tar.xz</a></td><td class="size">2.6 MiB</td><td class="date">2022-Aug-25 11:33</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.10.2.news" title="libxml2-2.10.2.news">libxml2-2.10.2.news</a></td><td class="size">309 B</td><td class="date">2022-Aug-29 14:56</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.10.2.sha256sum" title="libxml2-2.10.2.sha256sum">libxml2-2.10.2.sha256sum</a></td><td class="size">174 B</td><td class="date">2022-Aug-29 14:56</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.10.2.tar.xz" title="libxml2-2.10.2.tar.xz">libxml2-2.10.2.tar.xz</a></td><td class="size">2.5 MiB</td><td class="date">2022-Aug-29 14:56</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.10.3.news" title="libxml2-2.10.3.news">libxml2-2.10.3.news</a></td><td class="size">294 B</td><td class="date">2022-Oct-14 12:55</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.10.3.sha256sum" title="libxml2-2.10.3.sha256sum">libxml2-2.10.3.sha256sum</a></td><td class="size">174 B</td><td class="date">2022-Oct-14 12:55</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.10.3.tar.xz" title="libxml2-2.10.3.tar.xz">libxml2-2.10.3.tar.xz</a></td><td class="size">2.5 MiB</td><td class="date">2022-Oct-14 12:55</td></tr>
|
||||
</tbody></table></body></html>
|
||||
@@ -1,40 +0,0 @@
|
||||
<!DOCTYPE html><html><head><meta http-equiv="content-type" content="text/html; charset=utf-8"><meta name="viewport" content="width=device-width"><style type="text/css">body,html {background:#fff;font-family:"Bitstream Vera Sans","Lucida Grande","Lucida Sans Unicode",Lucidux,Verdana,Lucida,sans-serif;}tr:nth-child(even) {background:#f4f4f4;}th,td {padding:0.1em 0.5em;}th {text-align:left;font-weight:bold;background:#eee;border-bottom:1px solid #aaa;}#list {border:1px solid #aaa;width:100%;}a {color:#a33;}a:hover {color:#e33;}</style>
|
||||
|
||||
<title>Index of /sources/libxml2/2.9/</title>
|
||||
</head><body><h1>Index of /sources/libxml2/2.9/</h1>
|
||||
<table id="list"><thead><tr><th style="width:55%"><a href="?C=N&O=A">File Name</a> <a href="?C=N&O=D"> ↓ </a></th><th style="width:20%"><a href="?C=S&O=A">File Size</a> <a href="?C=S&O=D"> ↓ </a></th><th style="width:25%"><a href="?C=M&O=A">Date</a> <a href="?C=M&O=D"> ↓ </a></th></tr></thead>
|
||||
<tbody><tr><td class="link"><a href="../">Parent directory/</a></td><td class="size">-</td><td class="date">-</td></tr>
|
||||
<tr><td class="link"><a href="LATEST-IS-2.9.14" title="LATEST-IS-2.9.14">LATEST-IS-2.9.14</a></td><td class="size">3.0 MiB</td><td class="date">2022-May-02 12:03</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.0.sha256sum" title="libxml2-2.9.0.sha256sum">libxml2-2.9.0.sha256sum</a></td><td class="size">87 B</td><td class="date">2022-Feb-14 18:27</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.0.tar.xz" title="libxml2-2.9.0.tar.xz">libxml2-2.9.0.tar.xz</a></td><td class="size">3.0 MiB</td><td class="date">2022-Feb-14 18:27</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.1.sha256sum" title="libxml2-2.9.1.sha256sum">libxml2-2.9.1.sha256sum</a></td><td class="size">87 B</td><td class="date">2022-Feb-14 18:28</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.1.tar.xz" title="libxml2-2.9.1.tar.xz">libxml2-2.9.1.tar.xz</a></td><td class="size">3.0 MiB</td><td class="date">2022-Feb-14 18:28</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.10.sha256sum" title="libxml2-2.9.10.sha256sum">libxml2-2.9.10.sha256sum</a></td><td class="size">88 B</td><td class="date">2022-Feb-14 18:42</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.10.tar.xz" title="libxml2-2.9.10.tar.xz">libxml2-2.9.10.tar.xz</a></td><td class="size">3.2 MiB</td><td class="date">2022-Feb-14 18:42</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.11.sha256sum" title="libxml2-2.9.11.sha256sum">libxml2-2.9.11.sha256sum</a></td><td class="size">88 B</td><td class="date">2022-Feb-14 18:43</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.11.tar.xz" title="libxml2-2.9.11.tar.xz">libxml2-2.9.11.tar.xz</a></td><td class="size">3.2 MiB</td><td class="date">2022-Feb-14 18:43</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.12.sha256sum" title="libxml2-2.9.12.sha256sum">libxml2-2.9.12.sha256sum</a></td><td class="size">88 B</td><td class="date">2022-Feb-14 18:45</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.12.tar.xz" title="libxml2-2.9.12.tar.xz">libxml2-2.9.12.tar.xz</a></td><td class="size">3.2 MiB</td><td class="date">2022-Feb-14 18:45</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.13.news" title="libxml2-2.9.13.news">libxml2-2.9.13.news</a></td><td class="size">26.6 KiB</td><td class="date">2022-Feb-20 12:42</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.13.sha256sum" title="libxml2-2.9.13.sha256sum">libxml2-2.9.13.sha256sum</a></td><td class="size">174 B</td><td class="date">2022-Feb-20 12:42</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.13.tar.xz" title="libxml2-2.9.13.tar.xz">libxml2-2.9.13.tar.xz</a></td><td class="size">3.1 MiB</td><td class="date">2022-Feb-20 12:42</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.14.news" title="libxml2-2.9.14.news">libxml2-2.9.14.news</a></td><td class="size">1.0 KiB</td><td class="date">2022-May-02 12:03</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.14.sha256sum" title="libxml2-2.9.14.sha256sum">libxml2-2.9.14.sha256sum</a></td><td class="size">174 B</td><td class="date">2022-May-02 12:03</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.14.tar.xz" title="libxml2-2.9.14.tar.xz">libxml2-2.9.14.tar.xz</a></td><td class="size">3.0 MiB</td><td class="date">2022-May-02 12:03</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.2.sha256sum" title="libxml2-2.9.2.sha256sum">libxml2-2.9.2.sha256sum</a></td><td class="size">87 B</td><td class="date">2022-Feb-14 18:30</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.2.tar.xz" title="libxml2-2.9.2.tar.xz">libxml2-2.9.2.tar.xz</a></td><td class="size">3.2 MiB</td><td class="date">2022-Feb-14 18:30</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.3.sha256sum" title="libxml2-2.9.3.sha256sum">libxml2-2.9.3.sha256sum</a></td><td class="size">87 B</td><td class="date">2022-Feb-14 18:31</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.3.tar.xz" title="libxml2-2.9.3.tar.xz">libxml2-2.9.3.tar.xz</a></td><td class="size">3.2 MiB</td><td class="date">2022-Feb-14 18:31</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.4.sha256sum" title="libxml2-2.9.4.sha256sum">libxml2-2.9.4.sha256sum</a></td><td class="size">87 B</td><td class="date">2022-Feb-14 18:33</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.4.tar.xz" title="libxml2-2.9.4.tar.xz">libxml2-2.9.4.tar.xz</a></td><td class="size">2.9 MiB</td><td class="date">2022-Feb-14 18:33</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.5.sha256sum" title="libxml2-2.9.5.sha256sum">libxml2-2.9.5.sha256sum</a></td><td class="size">87 B</td><td class="date">2022-Feb-14 18:35</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.5.tar.xz" title="libxml2-2.9.5.tar.xz">libxml2-2.9.5.tar.xz</a></td><td class="size">3.0 MiB</td><td class="date">2022-Feb-14 18:35</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.6.sha256sum" title="libxml2-2.9.6.sha256sum">libxml2-2.9.6.sha256sum</a></td><td class="size">87 B</td><td class="date">2022-Feb-14 18:36</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.6.tar.xz" title="libxml2-2.9.6.tar.xz">libxml2-2.9.6.tar.xz</a></td><td class="size">3.0 MiB</td><td class="date">2022-Feb-14 18:36</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.7.sha256sum" title="libxml2-2.9.7.sha256sum">libxml2-2.9.7.sha256sum</a></td><td class="size">87 B</td><td class="date">2022-Feb-14 18:37</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.7.tar.xz" title="libxml2-2.9.7.tar.xz">libxml2-2.9.7.tar.xz</a></td><td class="size">3.0 MiB</td><td class="date">2022-Feb-14 18:37</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.8.sha256sum" title="libxml2-2.9.8.sha256sum">libxml2-2.9.8.sha256sum</a></td><td class="size">87 B</td><td class="date">2022-Feb-14 18:39</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.8.tar.xz" title="libxml2-2.9.8.tar.xz">libxml2-2.9.8.tar.xz</a></td><td class="size">3.0 MiB</td><td class="date">2022-Feb-14 18:39</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.9.sha256sum" title="libxml2-2.9.9.sha256sum">libxml2-2.9.9.sha256sum</a></td><td class="size">87 B</td><td class="date">2022-Feb-14 18:40</td></tr>
|
||||
<tr><td class="link"><a href="libxml2-2.9.9.tar.xz" title="libxml2-2.9.9.tar.xz">libxml2-2.9.9.tar.xz</a></td><td class="size">3.0 MiB</td><td class="date">2022-Feb-14 18:40</td></tr>
|
||||
</tbody></table></body></html>
|
||||
@@ -1,19 +0,0 @@
|
||||
<!DOCTYPE html><html><head><meta http-equiv="content-type" content="text/html; charset=utf-8"><meta name="viewport" content="width=device-width"><style type="text/css">body,html {background:#fff;font-family:"Bitstream Vera Sans","Lucida Grande","Lucida Sans Unicode",Lucidux,Verdana,Lucida,sans-serif;}tr:nth-child(even) {background:#f4f4f4;}th,td {padding:0.1em 0.5em;}th {text-align:left;font-weight:bold;background:#eee;border-bottom:1px solid #aaa;}#list {border:1px solid #aaa;width:100%;}a {color:#a33;}a:hover {color:#e33;}</style>
|
||||
|
||||
<title>Index of /sources/libxml2/</title>
|
||||
</head><body><h1>Index of /sources/libxml2/</h1>
|
||||
<table id="list"><thead><tr><th style="width:55%"><a href="?C=N&O=A">File Name</a> <a href="?C=N&O=D"> ↓ </a></th><th style="width:20%"><a href="?C=S&O=A">File Size</a> <a href="?C=S&O=D"> ↓ </a></th><th style="width:25%"><a href="?C=M&O=A">Date</a> <a href="?C=M&O=D"> ↓ </a></th></tr></thead>
|
||||
<tbody><tr><td class="link"><a href="../">Parent directory/</a></td><td class="size">-</td><td class="date">-</td></tr>
|
||||
<tr><td class="link"><a href="2.0/" title="2.0">2.0/</a></td><td class="size">-</td><td class="date">2009-Jul-14 13:04</td></tr>
|
||||
<tr><td class="link"><a href="2.1/" title="2.1">2.1/</a></td><td class="size">-</td><td class="date">2009-Jul-14 13:04</td></tr>
|
||||
<tr><td class="link"><a href="2.10/" title="2.10">2.10/</a></td><td class="size">-</td><td class="date">2022-Oct-14 12:55</td></tr>
|
||||
<tr><td class="link"><a href="2.2/" title="2.2">2.2/</a></td><td class="size">-</td><td class="date">2009-Jul-14 13:04</td></tr>
|
||||
<tr><td class="link"><a href="2.3/" title="2.3">2.3/</a></td><td class="size">-</td><td class="date">2009-Jul-14 13:05</td></tr>
|
||||
<tr><td class="link"><a href="2.4/" title="2.4">2.4/</a></td><td class="size">-</td><td class="date">2009-Jul-14 13:05</td></tr>
|
||||
<tr><td class="link"><a href="2.5/" title="2.5">2.5/</a></td><td class="size">-</td><td class="date">2009-Jul-14 13:05</td></tr>
|
||||
<tr><td class="link"><a href="2.6/" title="2.6">2.6/</a></td><td class="size">-</td><td class="date">2009-Jul-14 13:05</td></tr>
|
||||
<tr><td class="link"><a href="2.7/" title="2.7">2.7/</a></td><td class="size">-</td><td class="date">2022-Feb-14 18:24</td></tr>
|
||||
<tr><td class="link"><a href="2.8/" title="2.8">2.8/</a></td><td class="size">-</td><td class="date">2022-Feb-14 18:26</td></tr>
|
||||
<tr><td class="link"><a href="2.9/" title="2.9">2.9/</a></td><td class="size">-</td><td class="date">2022-May-02 12:04</td></tr>
|
||||
<tr><td class="link"><a href="cache.json" title="cache.json">cache.json</a></td><td class="size">22.8 KiB</td><td class="date">2022-Oct-14 12:55</td></tr>
|
||||
</tbody></table></body></html>
|
||||
File diff suppressed because it is too large
@@ -75,59 +75,6 @@ unset B[flag]
|
||||
self.assertEqual(d.getVarFlag("A","flag"), None)
|
||||
self.assertEqual(d.getVar("B"), "2")
|
||||
|
||||
defaulttest = """
|
||||
A = "set value"
|
||||
A ??= "default value"
|
||||
|
||||
A[flag_set_vs_question] = "set flag"
|
||||
A[flag_set_vs_question] ?= "question flag"
|
||||
|
||||
A[flag_set_vs_default] = "set flag"
|
||||
A[flag_set_vs_default] ??= "default flag"
|
||||
|
||||
A[flag_question] ?= "question flag"
|
||||
|
||||
A[flag_default] ??= "default flag"
|
||||
|
||||
A[flag_question_vs_default] ?= "question flag"
|
||||
A[flag_question_vs_default] ??= "default flag"
|
||||
|
||||
A[flag_default_vs_question] ??= "default flag"
|
||||
A[flag_default_vs_question] ?= "question flag"
|
||||
|
||||
A[flag_set_question_default] = "set flag"
|
||||
A[flag_set_question_default] ?= "question flag"
|
||||
A[flag_set_question_default] ??= "default flag"
|
||||
|
||||
A[flag_set_default_question] = "set flag"
|
||||
A[flag_set_default_question] ??= "default flag"
|
||||
A[flag_set_default_question] ?= "question flag"
|
||||
|
||||
A[flag_set_twice] = "set flag first"
|
||||
A[flag_set_twice] = "set flag second"
|
||||
|
||||
A[flag_question_twice] ?= "question flag first"
|
||||
A[flag_question_twice] ?= "question flag second"
|
||||
|
||||
A[flag_default_twice] ??= "default flag first"
|
||||
A[flag_default_twice] ??= "default flag second"
|
||||
"""
|
||||
def test_parse_defaulttest(self):
|
||||
f = self.parsehelper(self.defaulttest)
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
self.assertEqual(d.getVar("A"), "set value")
|
||||
self.assertEqual(d.getVarFlag("A","flag_set_vs_question"), "set flag")
|
||||
self.assertEqual(d.getVarFlag("A","flag_set_vs_default"), "set flag")
|
||||
self.assertEqual(d.getVarFlag("A","flag_question"), "question flag")
|
||||
self.assertEqual(d.getVarFlag("A","flag_default"), "default flag")
|
||||
self.assertEqual(d.getVarFlag("A","flag_question_vs_default"), "question flag")
|
||||
self.assertEqual(d.getVarFlag("A","flag_default_vs_question"), "question flag")
|
||||
self.assertEqual(d.getVarFlag("A","flag_set_question_default"), "set flag")
|
||||
self.assertEqual(d.getVarFlag("A","flag_set_default_question"), "set flag")
|
||||
self.assertEqual(d.getVarFlag("A","flag_set_twice"), "set flag second")
|
||||
self.assertEqual(d.getVarFlag("A","flag_question_twice"), "question flag first")
|
||||
self.assertEqual(d.getVarFlag("A","flag_default_twice"), "default flag second")
|
||||
|
||||
exporttest = """
|
||||
A = "a"
|
||||
export B = "b"
|
||||
@@ -172,7 +119,7 @@ EXTRA_OECONF:class-target = "b"
|
||||
EXTRA_OECONF:append = " c"
|
||||
"""
|
||||
|
||||
def test_parse_overrides2(self):
|
||||
def test_parse_overrides(self):
|
||||
f = self.parsehelper(self.overridetest2)
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
d.appendVar("EXTRA_OECONF", " d")
|
||||
@@ -217,7 +164,6 @@ python () {
|
||||
# become unset/disappear.
|
||||
#
|
||||
def test_parse_classextend_contamination(self):
|
||||
self.d.setVar("__bbclasstype", "recipe")
|
||||
cls = self.parsehelper(self.classextend_bbclass, suffix=".bbclass")
|
||||
#clsname = os.path.basename(cls.name).replace(".bbclass", "")
|
||||
self.classextend = self.classextend.replace("###CLASS###", cls.name)
|
||||
@@ -230,19 +176,7 @@ python () {
|
||||
|
||||
addtask_deltask = """
|
||||
addtask do_patch after do_foo after do_unpack before do_configure before do_compile
|
||||
addtask do_fetch2 do_patch2
|
||||
|
||||
addtask do_myplaintask
|
||||
addtask do_myplaintask2
|
||||
deltask do_myplaintask2
|
||||
addtask do_mytask# comment
|
||||
addtask do_mytask2 # comment2
|
||||
addtask do_mytask3
|
||||
deltask do_mytask3# comment
|
||||
deltask do_mytask4 # comment2
|
||||
|
||||
# Ensure a missing task prefix on after works
|
||||
addtask do_mytask5 after mytask
|
||||
addtask do_fetch do_patch
|
||||
|
||||
MYVAR = "do_patch"
|
||||
EMPTYVAR = ""
|
||||
@@ -250,154 +184,13 @@ deltask do_fetch ${MYVAR} ${EMPTYVAR}
|
||||
deltask ${EMPTYVAR}
|
||||
"""
|
||||
def test_parse_addtask_deltask(self):
|
||||
|
||||
import sys
|
||||
f = self.parsehelper(self.addtask_deltask)
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
self.assertSequenceEqual(['do_fetch2', 'do_patch2', 'do_myplaintask', 'do_mytask', 'do_mytask2', 'do_mytask5'], bb.build.listtasks(d))
|
||||
self.assertEqual(['do_mytask'], d.getVarFlag("do_mytask5", "deps"))
|
||||
|
||||
broken_multiline_comment = """
|
||||
# First line of comment \\
|
||||
# Second line of comment \\
|
||||
|
||||
"""
|
||||
def test_parse_broken_multiline_comment(self):
|
||||
f = self.parsehelper(self.broken_multiline_comment)
|
||||
with self.assertRaises(bb.BBHandledException):
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
|
||||
comment_in_var = """
|
||||
VAR = " \\
|
||||
SOMEVAL \\
|
||||
# some comment \\
|
||||
SOMEOTHERVAL \\
|
||||
"
|
||||
"""
|
||||
def test_parse_comment_in_var(self):
|
||||
f = self.parsehelper(self.comment_in_var)
|
||||
with self.assertRaises(bb.BBHandledException):
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
|
||||
at_sign_in_var_flag = """
|
||||
A[flag@.service] = "nonet"
|
||||
B[flag@.target] = "ntb"
|
||||
C[f] = "flag"
|
||||
|
||||
unset A[flag@.service]
|
||||
"""
|
||||
def test_parse_at_sign_in_var_flag(self):
|
||||
f = self.parsehelper(self.at_sign_in_var_flag)
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
self.assertEqual(d.getVar("A"), None)
|
||||
self.assertEqual(d.getVar("B"), None)
|
||||
self.assertEqual(d.getVarFlag("A","flag@.service"), None)
|
||||
self.assertEqual(d.getVarFlag("B","flag@.target"), "ntb")
|
||||
self.assertEqual(d.getVarFlag("C","f"), "flag")
|
||||
|
||||
def test_parse_invalid_at_sign_in_var_flag(self):
|
||||
invalid_at_sign = self.at_sign_in_var_flag.replace("B[f", "B[@f")
|
||||
f = self.parsehelper(invalid_at_sign)
|
||||
with self.assertRaises(bb.parse.ParseError):
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
export_function_recipe = """
|
||||
inherit someclass
|
||||
"""
|
||||
|
||||
export_function_recipe2 = """
|
||||
inherit someclass
|
||||
|
||||
do_compile () {
|
||||
false
|
||||
}
|
||||
|
||||
python do_compilepython () {
|
||||
bb.note("Something else")
|
||||
}
|
||||
|
||||
"""
|
||||
export_function_class = """
|
||||
someclass_do_compile() {
|
||||
true
|
||||
}
|
||||
|
||||
python someclass_do_compilepython () {
|
||||
bb.note("Something")
|
||||
}
|
||||
|
||||
EXPORT_FUNCTIONS do_compile do_compilepython
|
||||
"""
|
||||
|
||||
export_function_class2 = """
|
||||
secondclass_do_compile() {
|
||||
true
|
||||
}
|
||||
|
||||
python secondclass_do_compilepython () {
|
||||
bb.note("Something")
|
||||
}
|
||||
|
||||
EXPORT_FUNCTIONS do_compile do_compilepython
|
||||
"""
|
||||
|
||||
def test_parse_export_functions(self):
|
||||
def check_function_flags(d):
|
||||
self.assertEqual(d.getVarFlag("do_compile", "func"), 1)
|
||||
self.assertEqual(d.getVarFlag("do_compilepython", "func"), 1)
|
||||
self.assertEqual(d.getVarFlag("do_compile", "python"), None)
|
||||
self.assertEqual(d.getVarFlag("do_compilepython", "python"), "1")
|
||||
|
||||
with tempfile.TemporaryDirectory() as tempdir:
|
||||
self.d.setVar("__bbclasstype", "recipe")
|
||||
recipename = tempdir + "/recipe.bb"
|
||||
os.makedirs(tempdir + "/classes")
|
||||
with open(tempdir + "/classes/someclass.bbclass", "w") as f:
|
||||
f.write(self.export_function_class)
|
||||
f.flush()
|
||||
with open(tempdir + "/classes/secondclass.bbclass", "w") as f:
|
||||
f.write(self.export_function_class2)
|
||||
f.flush()
|
||||
|
||||
with open(recipename, "w") as f:
|
||||
f.write(self.export_function_recipe)
|
||||
f.flush()
|
||||
os.chdir(tempdir)
|
||||
d = bb.parse.handle(recipename, bb.data.createCopy(self.d))['']
|
||||
self.assertIn("someclass_do_compile", d.getVar("do_compile"))
|
||||
self.assertIn("someclass_do_compilepython", d.getVar("do_compilepython"))
|
||||
check_function_flags(d)
|
||||
|
||||
recipename2 = tempdir + "/recipe2.bb"
|
||||
with open(recipename2, "w") as f:
|
||||
f.write(self.export_function_recipe2)
|
||||
f.flush()
|
||||
|
||||
d = bb.parse.handle(recipename2, bb.data.createCopy(self.d))['']
|
||||
self.assertNotIn("someclass_do_compile", d.getVar("do_compile"))
|
||||
self.assertNotIn("someclass_do_compilepython", d.getVar("do_compilepython"))
|
||||
self.assertIn("false", d.getVar("do_compile"))
|
||||
self.assertIn("else", d.getVar("do_compilepython"))
|
||||
check_function_flags(d)
|
||||
|
||||
with open(recipename, "a+") as f:
|
||||
f.write("\ninherit secondclass\n")
|
||||
f.flush()
|
||||
with open(recipename2, "a+") as f:
|
||||
f.write("\ninherit secondclass\n")
|
||||
f.flush()
|
||||
|
||||
d = bb.parse.handle(recipename, bb.data.createCopy(self.d))['']
|
||||
self.assertIn("secondclass_do_compile", d.getVar("do_compile"))
|
||||
self.assertIn("secondclass_do_compilepython", d.getVar("do_compilepython"))
|
||||
check_function_flags(d)
|
||||
|
||||
d = bb.parse.handle(recipename2, bb.data.createCopy(self.d))['']
|
||||
self.assertNotIn("someclass_do_compile", d.getVar("do_compile"))
|
||||
self.assertNotIn("someclass_do_compilepython", d.getVar("do_compilepython"))
|
||||
self.assertIn("false", d.getVar("do_compile"))
|
||||
self.assertIn("else", d.getVar("do_compilepython"))
|
||||
check_function_flags(d)
|
||||
stdout = sys.stdout.getvalue()
|
||||
self.assertTrue("addtask contained multiple 'before' keywords" in stdout)
|
||||
self.assertTrue("addtask contained multiple 'after' keywords" in stdout)
|
||||
self.assertTrue('addtask ignored: " do_patch"' in stdout)
|
||||
#self.assertTrue('dependent task do_foo for do_patch does not exist' in stdout)
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff