Mirror of https://git.yoctoproject.org/poky, synced 2026-02-20 08:29:42 +01:00

Compare commits: yocto-5.0. ... langdale-4 (77 commits)
SHA1: d3cda9a3e0, 80d22fc07f, c35857bd24, d8917f76bc, d5add7c5b7, e65081b949, 027ec0ecf5, 54fb46c66e,
63e80a0233, c689d5d4e3, 2ac597044a, 79434a17eb, 25f355e0ef, 186d179614, 975e3fb53c, e881560619,
6054c58908, cb9d9fd076, 7b401c7540, 62c4b68a11, 1b5b1ba8fb, 6bded7cb12, d18ec217b3, 0771c25330,
f02d7f4547, d051fc188b, 22c5e7fa3e, cd873bc5de, 2f52d04e17, 3d58ac1ddd, e6428f3c1c, db48ca5830,
23cf93f091, 9921f0a250, cd89ca53ed, 290ce3525f, c3911e12f6, ad8bd886a4, b0bf1ab118, e57c26ea29,
baccaad9a0, c5c4cbb024, 394054d7ca, 27e0f91aaa, 079bb45350, 2dd06fb636, 8ed9ff8919, 86eaa373a7,
b0b966ad07, fc5bc29d1b, 8a3bbee311, b1b1c9232f, cc4b3a0040, 900420392d, 03f1b28c6d, 692a8ab550,
94270812fa, 570e56775b, 0dfef83aa5, adaa8ad2a5, 811f8a09eb, 72157834c6, 25cfdd66e4, 33711d546d,
1c94f9d64b, e6daf39c9b, 7ffb05dd16, 8074213da8, f435cff54a, a6586821f0, 0bc04f5e6d, 6b9db5a99b,
6672cbe670, c58059d282, ec08faf2e4, 7aa3ed5c37, b6d633e7f3
.gitignore (vendored): 7 changed lines
@@ -31,9 +31,4 @@ pull-*/
bitbake/lib/toaster/contrib/tts/backlog.txt
bitbake/lib/toaster/contrib/tts/log/*
bitbake/lib/toaster/contrib/tts/.cache/*
bitbake/lib/bb/tests/runqueue-tests/bitbake-cookerdaemon.log
_toaster_clones/
downloads/
sstate-cache/
toaster.sqlite
.vscode/
bitbake/lib/bb/tests/runqueue-tests/bitbake-cookerdaemon.log
Makefile (new file): 35 lines
@@ -0,0 +1,35 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = _build
DESTDIR = final

ifeq ($(shell if which $(SPHINXBUILD) >/dev/null 2>&1; then echo 1; else echo 0; fi),0)
$(error "The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed")
endif

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile.sphinx clean publish

publish: Makefile.sphinx html singlehtml
	rm -rf $(BUILDDIR)/$(DESTDIR)/
	mkdir -p $(BUILDDIR)/$(DESTDIR)/
	cp -r $(BUILDDIR)/html/* $(BUILDDIR)/$(DESTDIR)/
	cp $(BUILDDIR)/singlehtml/index.html $(BUILDDIR)/$(DESTDIR)/singleindex.html
	sed -i -e 's@index.html#@singleindex.html#@g' $(BUILDDIR)/$(DESTDIR)/singleindex.html

clean:
	@rm -rf $(BUILDDIR)

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile.sphinx
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
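Purely for orientation (and not part of the new file above), a wrapper Makefile like this is driven through the targets it defines plus the catch-all rule; the commands below assume sphinx-build and the referenced Makefile.sphinx and documentation sources are actually present:

```
# Any Sphinx builder name is forwarded to "sphinx-build -M <target> . _build"
make html

# Build html and singlehtml, then assemble the result under _build/final/
make publish

# Delete the _build output tree
make clean
```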
@@ -16,13 +16,9 @@ which can be found at:
Contributing
------------

Please refer to our contributor guide here: https://docs.yoctoproject.org/dev/contributor-guide/
for full details on how to submit changes.

As a quick guide, patches should be sent to openembedded-core@lists.openembedded.org
The git command to do that would be:

git send-email -M -1 --to openembedded-core@lists.openembedded.org
Please refer to
https://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
for guidelines on how to submit patches.

Mailing list:
SECURITY.md: 22 changed lines
@@ -1,22 +0,0 @@
How to Report a Potential Vulnerability?
========================================

If you would like to report a public issue (for example, one with a released
CVE number), please report it using the
[https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security Security Bugzilla]

If you are dealing with a not-yet released or urgent issue, please send a
message to security AT yoctoproject DOT org, including as many details as
possible: the layer or software module affected, the recipe and its version,
and any example code, if available.

Branches maintained with security fixes
---------------------------------------

See [https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS Stable release and LTS]
for detailed info regarding the policies and maintenance of Stable branches.

The [https://wiki.yoctoproject.org/wiki/Releases Release page] contains a list of all
releases of the Yocto Project. Versions in grey are no longer actively maintained with
security patches, but well-tested patches may still be accepted for them for
significant issues.
@@ -13,24 +13,19 @@ Bitbake plain documentation can be found under the doc directory or its integrat
html version at the Yocto Project website:
https://docs.yoctoproject.org

Bitbake requires Python version 3.8 or newer.

Contributing
------------

Please refer to our contributor guide here: https://docs.yoctoproject.org/contributor-guide/
for full details on how to submit changes.

As a quick guide, patches should be sent to bitbake-devel@lists.openembedded.org
The git command to do that would be:
Please refer to
https://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
for guidelines on how to submit patches, just note that the latter documentation is intended
for OpenEmbedded (and its core) not bitbake patches (bitbake-devel@lists.openembedded.org)
but in general main guidelines apply. Once the commit(s) have been created, the way to send
the patch is through git-send-email. For example, to send the last commit (HEAD) on current
branch, type:

git send-email -M -1 --to bitbake-devel@lists.openembedded.org

If you're sending a patch related to the BitBake manual, make sure you copy
the Yocto Project documentation mailing list:

git send-email -M -1 --to bitbake-devel@lists.openembedded.org --cc docs@lists.yoctoproject.org

Mailing list:

https://lists.openembedded.org/g/bitbake-devel
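As an illustration only (not part of the README text), sending a short series to the same list follows the pattern of the single-commit command above; --cover-letter and the HEAD~3 range are ordinary git send-email/format-patch options:

```
# Send the last three commits as a series with a cover letter
git send-email -M --cover-letter --to bitbake-devel@lists.openembedded.org HEAD~3
```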
@@ -39,25 +34,10 @@ Source code:

https://git.openembedded.org/bitbake/

Testing
-------
Testing:

Bitbake has a testsuite located in lib/bb/tests/ which aims to try and prevent regressions.
You can run this with "bitbake-selftest". In particular the fetcher is well covered since
it has so many corner cases. The datastore has many tests too. Testing with the testsuite is
recommended before submitting patches, particularly to the fetcher and datastore. We also
appreciate new test cases and may require them for more obscure issues.

To run the tests "zstd" and "git" must be installed.

The assumption is made that this testsuite is run from an initialized OpenEmbedded build
environment (i.e. `source oe-init-build-env` is used). If this is not the case, run the
testsuite as follows:

export PATH=$(pwd)/bin:$PATH
bin/bitbake-selftest

The testsuite can alternatively be executed using pytest, e.g. obtained from PyPI (in this
case, the PATH is configured automatically):

pytest
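As an illustrative aside (not part of the README text), the suite does not have to be run in full; bitbake-selftest accepts the usual unittest-style test names, so individual modules can be selected. The module names below are examples:

```
# Run only the fetcher tests, or only the datastore tests
bin/bitbake-selftest bb.tests.fetch
bin/bitbake-selftest bb.tests.data
```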
@@ -1,24 +0,0 @@
How to Report a Potential Vulnerability?
========================================

If you would like to report a public issue (for example, one with a released
CVE number), please report it using the
[https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security Security Bugzilla].
If you have a patch ready, submit it following the same procedure as any other
patch as described in README.md.

If you are dealing with a not-yet released or urgent issue, please send a
message to security AT yoctoproject DOT org, including as many details as
possible: the layer or software module affected, the recipe and its version,
and any example code, if available.

Branches maintained with security fixes
---------------------------------------

See [https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS Stable release and LTS]
for detailed info regarding the policies and maintenance of Stable branches.

The [https://wiki.yoctoproject.org/wiki/Releases Release page] contains a list of all
releases of the Yocto Project. Versions in grey are no longer actively maintained with
security patches, but well-tested patches may still be accepted for them for
significant issues.
@@ -25,9 +25,10 @@ except RuntimeError as exc:
from bb import cookerdata
from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException

bb.utils.check_system_locale()
if sys.getfilesystemencoding() != "utf-8":
sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")

__version__ = "2.8.0"
__version__ = "2.2.0"

if __name__ == "__main__":
if __version__ != bb.__version__:
@@ -72,16 +72,13 @@ def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
elif sig2 not in sigfiles:
logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig2))
sys.exit(1)
latestfiles = [sigfiles[sig1], sigfiles[sig2]]
else:
sigfiles = find_siginfo(bbhandler, pn, taskname)
latestsigs = sorted(sigfiles.keys(), key=lambda h: sigfiles[h]['time'])[-2:]
if not latestsigs:
filedates = find_siginfo(bbhandler, pn, taskname)
latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-2:]
if not latestfiles:
logger.error('No sigdata files found matching %s %s' % (pn, taskname))
sys.exit(1)
sig1 = latestsigs[0]
sig2 = latestsigs[1]

latestfiles = [sigfiles[sig1]['path'], sigfiles[sig2]['path']]

return latestfiles

@@ -99,7 +96,7 @@ def recursecb(key, hash1, hash2):
elif hash2 not in hashfiles:
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash2))
else:
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1]['path'], hashfiles[hash2]['path'], recursecb, color=color)
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, color=color)
for change in out2:
for line in change.splitlines():
recout.append(' ' + line)
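For context (not part of the diff), find_siginfo_task above serves the tool's usual command-line flow, which with default arguments compares the two most recent signature files for a task; the recipe and task names here are only examples:

```
# Compare the latest two sigdata/siginfo files for busybox's do_compile task
bitbake-diffsigs -t busybox do_compile
```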
@@ -25,36 +25,26 @@ if __name__ == "__main__":
parser.add_argument('-u', '--unexpand', help='Do not expand the value (with --value)', action="store_true")
parser.add_argument('-f', '--flag', help='Specify a variable flag to query (with --value)', default=None)
parser.add_argument('--value', help='Only report the value, no history and no variable name', action="store_true")
parser.add_argument('-q', '--quiet', help='Silence bitbake server logging', action="store_true")
parser.add_argument('--ignore-undefined', help='Suppress any errors related to undefined variables', action="store_true")
args = parser.parse_args()

if not args.value:
if args.unexpand:
sys.exit("--unexpand only makes sense with --value")
if args.unexpand and not args.value:
print("--unexpand only makes sense with --value")
sys.exit(1)

if args.flag:
sys.exit("--flag only makes sense with --value")
if args.flag and not args.value:
print("--flag only makes sense with --value")
sys.exit(1)

quiet = args.quiet or args.value
with bb.tinfoil.Tinfoil(tracking=True, setup_logging=not quiet) as tinfoil:
with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
if args.recipe:
tinfoil.prepare(quiet=3 if quiet else 2)
tinfoil.prepare(quiet=2)
d = tinfoil.parse_recipe(args.recipe)
else:
tinfoil.prepare(quiet=2, config_only=True)
d = tinfoil.config_data

value = None
if args.flag:
value = d.getVarFlag(args.variable, args.flag, expand=not args.unexpand)
if value is None and not args.ignore_undefined:
sys.exit(f"The flag '{args.flag}' is not defined for variable '{args.variable}'")
else:
value = d.getVar(args.variable, expand=not args.unexpand)
if value is None and not args.ignore_undefined:
sys.exit(f"The variable '{args.variable}' is not defined")
if args.value:
print(str(value if value is not None else ""))
print(str(d.getVarFlag(args.variable, args.flag, expand=(not args.unexpand))))
elif args.value:
print(str(d.getVar(args.variable, expand=(not args.unexpand))))
else:
bb.data.emit_var(args.variable, d=d, all=True)
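Again for context only (not part of the diff), the option handling above corresponds to invocations of this shape, run from an initialized build environment; the variable and recipe names are examples:

```
# Print just the expanded value of a configuration variable
bitbake-getvar --value MACHINE

# Print PV as seen by a specific recipe
bitbake-getvar -r busybox --value PV
```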
@@ -14,8 +14,6 @@ import sys
|
||||
import threading
|
||||
import time
|
||||
import warnings
|
||||
import netrc
|
||||
import json
|
||||
warnings.simplefilter("default")
|
||||
|
||||
try:
|
||||
@@ -38,42 +36,18 @@ except ImportError:
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib'))
|
||||
|
||||
import hashserv
|
||||
import bb.asyncrpc
|
||||
|
||||
DEFAULT_ADDRESS = 'unix://./hashserve.sock'
|
||||
METHOD = 'stress.test.method'
|
||||
|
||||
def print_user(u):
|
||||
print(f"Username: {u['username']}")
|
||||
if "permissions" in u:
|
||||
print("Permissions: " + " ".join(u["permissions"]))
|
||||
if "token" in u:
|
||||
print(f"Token: {u['token']}")
|
||||
|
||||
|
||||
def main():
|
||||
def handle_get(args, client):
|
||||
result = client.get_taskhash(args.method, args.taskhash, all_properties=True)
|
||||
if not result:
|
||||
return 0
|
||||
|
||||
print(json.dumps(result, sort_keys=True, indent=4))
|
||||
return 0
|
||||
|
||||
def handle_get_outhash(args, client):
|
||||
result = client.get_outhash(args.method, args.outhash, args.taskhash)
|
||||
if not result:
|
||||
return 0
|
||||
|
||||
print(json.dumps(result, sort_keys=True, indent=4))
|
||||
return 0
|
||||
|
||||
def handle_stats(args, client):
|
||||
if args.reset:
|
||||
s = client.reset_stats()
|
||||
else:
|
||||
s = client.get_stats()
|
||||
print(json.dumps(s, sort_keys=True, indent=4))
|
||||
pprint.pprint(s)
|
||||
return 0
|
||||
|
||||
def handle_stress(args, client):
|
||||
@@ -82,24 +56,25 @@ def main():
|
||||
nonlocal missed_hashes
|
||||
nonlocal max_time
|
||||
|
||||
with hashserv.create_client(args.address) as client:
|
||||
for i in range(args.requests):
|
||||
taskhash = hashlib.sha256()
|
||||
taskhash.update(args.taskhash_seed.encode('utf-8'))
|
||||
taskhash.update(str(i).encode('utf-8'))
|
||||
client = hashserv.create_client(args.address)
|
||||
|
||||
start_time = time.perf_counter()
|
||||
l = client.get_unihash(METHOD, taskhash.hexdigest())
|
||||
elapsed = time.perf_counter() - start_time
|
||||
for i in range(args.requests):
|
||||
taskhash = hashlib.sha256()
|
||||
taskhash.update(args.taskhash_seed.encode('utf-8'))
|
||||
taskhash.update(str(i).encode('utf-8'))
|
||||
|
||||
with lock:
|
||||
if l:
|
||||
found_hashes += 1
|
||||
else:
|
||||
missed_hashes += 1
|
||||
start_time = time.perf_counter()
|
||||
l = client.get_unihash(METHOD, taskhash.hexdigest())
|
||||
elapsed = time.perf_counter() - start_time
|
||||
|
||||
max_time = max(elapsed, max_time)
|
||||
pbar.update()
|
||||
with lock:
|
||||
if l:
|
||||
found_hashes += 1
|
||||
else:
|
||||
missed_hashes += 1
|
||||
|
||||
max_time = max(elapsed, max_time)
|
||||
pbar.update()
|
||||
|
||||
max_time = 0
|
||||
found_hashes = 0
|
||||
@@ -138,114 +113,12 @@ def main():
|
||||
with lock:
|
||||
pbar.update()
|
||||
|
||||
def handle_remove(args, client):
|
||||
where = {k: v for k, v in args.where}
|
||||
if where:
|
||||
result = client.remove(where)
|
||||
print("Removed %d row(s)" % (result["count"]))
|
||||
else:
|
||||
print("No query specified")
|
||||
|
||||
def handle_clean_unused(args, client):
|
||||
result = client.clean_unused(args.max_age)
|
||||
print("Removed %d rows" % (result["count"]))
|
||||
return 0
|
||||
|
||||
def handle_refresh_token(args, client):
|
||||
r = client.refresh_token(args.username)
|
||||
print_user(r)
|
||||
|
||||
def handle_set_user_permissions(args, client):
|
||||
r = client.set_user_perms(args.username, args.permissions)
|
||||
print_user(r)
|
||||
|
||||
def handle_get_user(args, client):
|
||||
r = client.get_user(args.username)
|
||||
print_user(r)
|
||||
|
||||
def handle_get_all_users(args, client):
|
||||
users = client.get_all_users()
|
||||
print("{username:20}| {permissions}".format(username="Username", permissions="Permissions"))
|
||||
print(("-" * 20) + "+" + ("-" * 20))
|
||||
for u in users:
|
||||
print("{username:20}| {permissions}".format(username=u["username"], permissions=" ".join(u["permissions"])))
|
||||
|
||||
def handle_new_user(args, client):
|
||||
r = client.new_user(args.username, args.permissions)
|
||||
print_user(r)
|
||||
|
||||
def handle_delete_user(args, client):
|
||||
r = client.delete_user(args.username)
|
||||
print_user(r)
|
||||
|
||||
def handle_get_db_usage(args, client):
|
||||
usage = client.get_db_usage()
|
||||
print(usage)
|
||||
tables = sorted(usage.keys())
|
||||
print("{name:20}| {rows:20}".format(name="Table name", rows="Rows"))
|
||||
print(("-" * 20) + "+" + ("-" * 20))
|
||||
for t in tables:
|
||||
print("{name:20}| {rows:<20}".format(name=t, rows=usage[t]["rows"]))
|
||||
print()
|
||||
|
||||
total_rows = sum(t["rows"] for t in usage.values())
|
||||
print(f"Total rows: {total_rows}")
|
||||
|
||||
def handle_get_db_query_columns(args, client):
|
||||
columns = client.get_db_query_columns()
|
||||
print("\n".join(sorted(columns)))
|
||||
|
||||
def handle_gc_status(args, client):
|
||||
result = client.gc_status()
|
||||
if not result["mark"]:
|
||||
print("No Garbage collection in progress")
|
||||
return 0
|
||||
|
||||
print("Current Mark: %s" % result["mark"])
|
||||
print("Total hashes to keep: %d" % result["keep"])
|
||||
print("Total hashes to remove: %s" % result["remove"])
|
||||
return 0
|
||||
|
||||
def handle_gc_mark(args, client):
|
||||
where = {k: v for k, v in args.where}
|
||||
result = client.gc_mark(args.mark, where)
|
||||
print("New hashes marked: %d" % result["count"])
|
||||
return 0
|
||||
|
||||
def handle_gc_sweep(args, client):
|
||||
result = client.gc_sweep(args.mark)
|
||||
print("Removed %d rows" % result["count"])
|
||||
return 0
|
||||
|
||||
def handle_unihash_exists(args, client):
|
||||
result = client.unihash_exists(args.unihash)
|
||||
if args.quiet:
|
||||
return 0 if result else 1
|
||||
|
||||
print("true" if result else "false")
|
||||
return 0
|
||||
|
||||
parser = argparse.ArgumentParser(description='Hash Equivalence Client')
|
||||
parser.add_argument('--address', default=DEFAULT_ADDRESS, help='Server address (default "%(default)s")')
|
||||
parser.add_argument('--log', default='WARNING', help='Set logging level')
|
||||
parser.add_argument('--login', '-l', metavar="USERNAME", help="Authenticate as USERNAME")
|
||||
parser.add_argument('--password', '-p', metavar="TOKEN", help="Authenticate using token TOKEN")
|
||||
parser.add_argument('--become', '-b', metavar="USERNAME", help="Impersonate user USERNAME (if allowed) when performing actions")
|
||||
parser.add_argument('--no-netrc', '-n', action="store_false", dest="netrc", help="Do not use .netrc")
|
||||
|
||||
subparsers = parser.add_subparsers()
|
||||
|
||||
get_parser = subparsers.add_parser('get', help="Get the unihash for a taskhash")
|
||||
get_parser.add_argument("method", help="Method to query")
|
||||
get_parser.add_argument("taskhash", help="Task hash to query")
|
||||
get_parser.set_defaults(func=handle_get)
|
||||
|
||||
get_outhash_parser = subparsers.add_parser('get-outhash', help="Get output hash information")
|
||||
get_outhash_parser.add_argument("method", help="Method to query")
|
||||
get_outhash_parser.add_argument("outhash", help="Output hash to query")
|
||||
get_outhash_parser.add_argument("taskhash", help="Task hash to query")
|
||||
get_outhash_parser.set_defaults(func=handle_get_outhash)
|
||||
|
||||
stats_parser = subparsers.add_parser('stats', help='Show server stats')
|
||||
stats_parser.add_argument('--reset', action='store_true',
|
||||
help='Reset server stats')
|
||||
@@ -264,64 +137,6 @@ def main():
|
||||
help='Include string in outhash')
|
||||
stress_parser.set_defaults(func=handle_stress)
|
||||
|
||||
remove_parser = subparsers.add_parser('remove', help="Remove hash entries")
|
||||
remove_parser.add_argument("--where", "-w", metavar="KEY VALUE", nargs=2, action="append", default=[],
|
||||
help="Remove entries from table where KEY == VALUE")
|
||||
remove_parser.set_defaults(func=handle_remove)
|
||||
|
||||
clean_unused_parser = subparsers.add_parser('clean-unused', help="Remove unused database entries")
|
||||
clean_unused_parser.add_argument("max_age", metavar="SECONDS", type=int, help="Remove unused entries older than SECONDS old")
|
||||
clean_unused_parser.set_defaults(func=handle_clean_unused)
|
||||
|
||||
refresh_token_parser = subparsers.add_parser('refresh-token', help="Refresh auth token")
|
||||
refresh_token_parser.add_argument("--username", "-u", help="Refresh the token for another user (if authorized)")
|
||||
refresh_token_parser.set_defaults(func=handle_refresh_token)
|
||||
|
||||
set_user_perms_parser = subparsers.add_parser('set-user-perms', help="Set new permissions for user")
|
||||
set_user_perms_parser.add_argument("--username", "-u", help="Username", required=True)
|
||||
set_user_perms_parser.add_argument("permissions", metavar="PERM", nargs="*", default=[], help="New permissions")
|
||||
set_user_perms_parser.set_defaults(func=handle_set_user_permissions)
|
||||
|
||||
get_user_parser = subparsers.add_parser('get-user', help="Get user")
|
||||
get_user_parser.add_argument("--username", "-u", help="Username")
|
||||
get_user_parser.set_defaults(func=handle_get_user)
|
||||
|
||||
get_all_users_parser = subparsers.add_parser('get-all-users', help="List all users")
|
||||
get_all_users_parser.set_defaults(func=handle_get_all_users)
|
||||
|
||||
new_user_parser = subparsers.add_parser('new-user', help="Create new user")
|
||||
new_user_parser.add_argument("--username", "-u", help="Username", required=True)
|
||||
new_user_parser.add_argument("permissions", metavar="PERM", nargs="*", default=[], help="New permissions")
|
||||
new_user_parser.set_defaults(func=handle_new_user)
|
||||
|
||||
delete_user_parser = subparsers.add_parser('delete-user', help="Delete user")
|
||||
delete_user_parser.add_argument("--username", "-u", help="Username", required=True)
|
||||
delete_user_parser.set_defaults(func=handle_delete_user)
|
||||
|
||||
db_usage_parser = subparsers.add_parser('get-db-usage', help="Database Usage")
|
||||
db_usage_parser.set_defaults(func=handle_get_db_usage)
|
||||
|
||||
db_query_columns_parser = subparsers.add_parser('get-db-query-columns', help="Show columns that can be used in database queries")
|
||||
db_query_columns_parser.set_defaults(func=handle_get_db_query_columns)
|
||||
|
||||
gc_status_parser = subparsers.add_parser("gc-status", help="Show garbage collection status")
|
||||
gc_status_parser.set_defaults(func=handle_gc_status)
|
||||
|
||||
gc_mark_parser = subparsers.add_parser('gc-mark', help="Mark hashes to be kept for garbage collection")
|
||||
gc_mark_parser.add_argument("mark", help="Mark for this garbage collection operation")
|
||||
gc_mark_parser.add_argument("--where", "-w", metavar="KEY VALUE", nargs=2, action="append", default=[],
|
||||
help="Keep entries in table where KEY == VALUE")
|
||||
gc_mark_parser.set_defaults(func=handle_gc_mark)
|
||||
|
||||
gc_sweep_parser = subparsers.add_parser('gc-sweep', help="Perform garbage collection and delete any entries that are not marked")
|
||||
gc_sweep_parser.add_argument("mark", help="Mark for this garbage collection operation")
|
||||
gc_sweep_parser.set_defaults(func=handle_gc_sweep)
|
||||
|
||||
unihash_exists_parser = subparsers.add_parser('unihash-exists', help="Check if a unihash is known to the server")
|
||||
unihash_exists_parser.add_argument("--quiet", action="store_true", help="Don't print status. Instead, exit with 0 if unihash exists and 1 if it does not")
|
||||
unihash_exists_parser.add_argument("unihash", help="Unihash to check")
|
||||
unihash_exists_parser.set_defaults(func=handle_unihash_exists)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
logger = logging.getLogger('hashserv')
|
||||
@@ -335,30 +150,11 @@ def main():
|
||||
console.setLevel(level)
|
||||
logger.addHandler(console)
|
||||
|
||||
login = args.login
|
||||
password = args.password
|
||||
|
||||
if login is None and args.netrc:
|
||||
try:
|
||||
n = netrc.netrc()
|
||||
auth = n.authenticators(args.address)
|
||||
if auth is not None:
|
||||
login, _, password = auth
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
except netrc.NetrcParseError as e:
|
||||
sys.stderr.write(f"Error parsing {e.filename}:{e.lineno}: {e.msg}\n")
|
||||
|
||||
func = getattr(args, 'func', None)
|
||||
if func:
|
||||
try:
|
||||
with hashserv.create_client(args.address, login, password) as client:
|
||||
if args.become:
|
||||
client.become_user(args.become)
|
||||
return func(args, client)
|
||||
except bb.asyncrpc.InvokeError as e:
|
||||
print(f"ERROR: {e}")
|
||||
return 1
|
||||
client = hashserv.create_client(args.address)
|
||||
|
||||
return func(args, client)
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
@@ -11,161 +11,56 @@ import logging
|
||||
import argparse
|
||||
import sqlite3
|
||||
import warnings
|
||||
|
||||
warnings.simplefilter("default")
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), "lib"))
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib'))
|
||||
|
||||
import hashserv
|
||||
from hashserv.server import DEFAULT_ANON_PERMS
|
||||
|
||||
VERSION = "1.0.0"
|
||||
|
||||
DEFAULT_BIND = "unix://./hashserve.sock"
|
||||
DEFAULT_BIND = 'unix://./hashserve.sock'
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Hash Equivalence Reference Server. Version=%s" % VERSION,
|
||||
formatter_class=argparse.RawTextHelpFormatter,
|
||||
epilog="""
|
||||
The bind address may take one of the following formats:
|
||||
unix://PATH - Bind to unix domain socket at PATH
|
||||
ws://ADDRESS:PORT - Bind to websocket on ADDRESS:PORT
|
||||
ADDRESS:PORT - Bind to raw TCP socket on ADDRESS:PORT
|
||||
parser = argparse.ArgumentParser(description='Hash Equivalence Reference Server. Version=%s' % VERSION,
|
||||
epilog='''The bind address is the path to a unix domain socket if it is
|
||||
prefixed with "unix://". Otherwise, it is an IP address
|
||||
and port in form ADDRESS:PORT. To bind to all addresses, leave
|
||||
the ADDRESS empty, e.g. "--bind :8686". To bind to a specific
|
||||
IPv6 address, enclose the address in "[]", e.g.
|
||||
"--bind [::1]:8686"'''
|
||||
)
|
||||
|
||||
To bind to all addresses, leave the ADDRESS empty, e.g. "--bind :8686" or
|
||||
"--bind ws://:8686". To bind to a specific IPv6 address, enclose the address in
|
||||
"[]", e.g. "--bind [::1]:8686" or "--bind ws://[::1]:8686"
|
||||
|
||||
Note that the default Anonymous permissions are designed to not break existing
|
||||
server instances when upgrading, but are not particularly secure defaults. If
|
||||
you want to use authentication, it is recommended that you use "--anon-perms
|
||||
@read" to only give anonymous users read access, or "--anon-perms @none" to
|
||||
give un-authenticated users no access at all.
|
||||
|
||||
Setting "--anon-perms @all" or "--anon-perms @user-admin" is not allowed, since
|
||||
this would allow anonymous users to manage all users accounts, which is a bad
|
||||
idea.
|
||||
|
||||
If you are using user authentication, you should run your server in websockets
|
||||
mode with an SSL terminating load balancer in front of it (as this server does
|
||||
not implement SSL). Otherwise all usernames and passwords will be transmitted
|
||||
in the clear. When configured this way, clients can connect using a secure
|
||||
websocket, as in "wss://SERVER:PORT"
|
||||
|
||||
The following permissions are supported by the server:
|
||||
|
||||
@none - No permissions
|
||||
@read - The ability to read equivalent hashes from the server
|
||||
@report - The ability to report equivalent hashes to the server
|
||||
@db-admin - Manage the hash database(s). This includes cleaning the
|
||||
database, removing hashes, etc.
|
||||
@user-admin - The ability to manage user accounts. This includes, creating
|
||||
users, deleting users, resetting login tokens, and assigning
|
||||
permissions.
|
||||
@all - All possible permissions, including any that may be added
|
||||
in the future
|
||||
""",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"-b",
|
||||
"--bind",
|
||||
default=os.environ.get("HASHSERVER_BIND", DEFAULT_BIND),
|
||||
help='Bind address (default $HASHSERVER_BIND, "%(default)s")',
|
||||
)
|
||||
parser.add_argument(
|
||||
"-d",
|
||||
"--database",
|
||||
default=os.environ.get("HASHSERVER_DB", "./hashserv.db"),
|
||||
help='Database file (default $HASHSERVER_DB, "%(default)s")',
|
||||
)
|
||||
parser.add_argument(
|
||||
"-l",
|
||||
"--log",
|
||||
default=os.environ.get("HASHSERVER_LOG_LEVEL", "WARNING"),
|
||||
help='Set logging level (default $HASHSERVER_LOG_LEVEL, "%(default)s")',
|
||||
)
|
||||
parser.add_argument(
|
||||
"-u",
|
||||
"--upstream",
|
||||
default=os.environ.get("HASHSERVER_UPSTREAM", None),
|
||||
help="Upstream hashserv to pull hashes from ($HASHSERVER_UPSTREAM)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-r",
|
||||
"--read-only",
|
||||
action="store_true",
|
||||
help="Disallow write operations from clients ($HASHSERVER_READ_ONLY)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--db-username",
|
||||
default=os.environ.get("HASHSERVER_DB_USERNAME", None),
|
||||
help="Database username ($HASHSERVER_DB_USERNAME)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--db-password",
|
||||
default=os.environ.get("HASHSERVER_DB_PASSWORD", None),
|
||||
help="Database password ($HASHSERVER_DB_PASSWORD)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--anon-perms",
|
||||
metavar="PERM[,PERM[,...]]",
|
||||
default=os.environ.get("HASHSERVER_ANON_PERMS", ",".join(DEFAULT_ANON_PERMS)),
|
||||
help='Permissions to give anonymous users (default $HASHSERVER_ANON_PERMS, "%(default)s")',
|
||||
)
|
||||
parser.add_argument(
|
||||
"--admin-user",
|
||||
default=os.environ.get("HASHSERVER_ADMIN_USER", None),
|
||||
help="Create default admin user with name ADMIN_USER ($HASHSERVER_ADMIN_USER)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--admin-password",
|
||||
default=os.environ.get("HASHSERVER_ADMIN_PASSWORD", None),
|
||||
help="Create default admin user with password ADMIN_PASSWORD ($HASHSERVER_ADMIN_PASSWORD)",
|
||||
)
|
||||
parser.add_argument('-b', '--bind', default=DEFAULT_BIND, help='Bind address (default "%(default)s")')
|
||||
parser.add_argument('-d', '--database', default='./hashserv.db', help='Database file (default "%(default)s")')
|
||||
parser.add_argument('-l', '--log', default='WARNING', help='Set logging level')
|
||||
parser.add_argument('-u', '--upstream', help='Upstream hashserv to pull hashes from')
|
||||
parser.add_argument('-r', '--read-only', action='store_true', help='Disallow write operations from clients')
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
logger = logging.getLogger("hashserv")
|
||||
logger = logging.getLogger('hashserv')
|
||||
|
||||
level = getattr(logging, args.log.upper(), None)
|
||||
if not isinstance(level, int):
|
||||
raise ValueError("Invalid log level: %s (Try ERROR/WARNING/INFO/DEBUG)" % args.log)
|
||||
raise ValueError('Invalid log level: %s' % args.log)
|
||||
|
||||
logger.setLevel(level)
|
||||
console = logging.StreamHandler()
|
||||
console.setLevel(level)
|
||||
logger.addHandler(console)
|
||||
|
||||
read_only = (os.environ.get("HASHSERVER_READ_ONLY", "0") == "1") or args.read_only
|
||||
if "," in args.anon_perms:
|
||||
anon_perms = args.anon_perms.split(",")
|
||||
else:
|
||||
anon_perms = args.anon_perms.split()
|
||||
|
||||
server = hashserv.create_server(
|
||||
args.bind,
|
||||
args.database,
|
||||
upstream=args.upstream,
|
||||
read_only=read_only,
|
||||
db_username=args.db_username,
|
||||
db_password=args.db_password,
|
||||
anon_perms=anon_perms,
|
||||
admin_username=args.admin_user,
|
||||
admin_password=args.admin_password,
|
||||
)
|
||||
server = hashserv.create_server(args.bind, args.database, upstream=args.upstream, read_only=args.read_only)
|
||||
server.serve_forever()
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if __name__ == '__main__':
|
||||
try:
|
||||
ret = main()
|
||||
except Exception:
|
||||
ret = 1
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
sys.exit(ret)
|
||||
|
||||
@@ -7,77 +7,49 @@
|
||||
|
||||
import os
|
||||
import sys,logging
|
||||
import argparse
|
||||
import optparse
|
||||
import warnings
|
||||
warnings.simplefilter("default")
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), "lib"))
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),'lib'))
|
||||
|
||||
import prserv
|
||||
import prserv.serv
|
||||
|
||||
VERSION = "1.1.0"
|
||||
__version__="1.0.0"
|
||||
|
||||
PRHOST_DEFAULT="0.0.0.0"
|
||||
PRHOST_DEFAULT='0.0.0.0'
|
||||
PRPORT_DEFAULT=8585
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="BitBake PR Server. Version=%s" % VERSION,
|
||||
formatter_class=argparse.RawTextHelpFormatter)
|
||||
parser = optparse.OptionParser(
|
||||
version="Bitbake PR Service Core version %s, %%prog version %s" % (prserv.__version__, __version__),
|
||||
usage = "%prog < --start | --stop > [options]")
|
||||
|
||||
parser.add_argument(
|
||||
"-f",
|
||||
"--file",
|
||||
default="prserv.sqlite3",
|
||||
help="database filename (default: prserv.sqlite3)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-l",
|
||||
"--log",
|
||||
default="prserv.log",
|
||||
help="log filename(default: prserv.log)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--loglevel",
|
||||
default="INFO",
|
||||
help="logging level, i.e. CRITICAL, ERROR, WARNING, INFO, DEBUG",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--start",
|
||||
action="store_true",
|
||||
help="start daemon",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--stop",
|
||||
action="store_true",
|
||||
help="stop daemon",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--host",
|
||||
help="ip address to bind",
|
||||
default=PRHOST_DEFAULT,
|
||||
)
|
||||
parser.add_argument(
|
||||
"--port",
|
||||
type=int,
|
||||
default=PRPORT_DEFAULT,
|
||||
help="port number (default: 8585)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-r",
|
||||
"--read-only",
|
||||
action="store_true",
|
||||
help="open database in read-only mode",
|
||||
)
|
||||
parser.add_option("-f", "--file", help="database filename(default: prserv.sqlite3)", action="store",
|
||||
dest="dbfile", type="string", default="prserv.sqlite3")
|
||||
parser.add_option("-l", "--log", help="log filename(default: prserv.log)", action="store",
|
||||
dest="logfile", type="string", default="prserv.log")
|
||||
parser.add_option("--loglevel", help="logging level, i.e. CRITICAL, ERROR, WARNING, INFO, DEBUG",
|
||||
action = "store", type="string", dest="loglevel", default = "INFO")
|
||||
parser.add_option("--start", help="start daemon",
|
||||
action="store_true", dest="start")
|
||||
parser.add_option("--stop", help="stop daemon",
|
||||
action="store_true", dest="stop")
|
||||
parser.add_option("--host", help="ip address to bind", action="store",
|
||||
dest="host", type="string", default=PRHOST_DEFAULT)
|
||||
parser.add_option("--port", help="port number(default: 8585)", action="store",
|
||||
dest="port", type="int", default=PRPORT_DEFAULT)
|
||||
parser.add_option("-r", "--read-only", help="open database in read-only mode",
|
||||
action="store_true")
|
||||
|
||||
args = parser.parse_args()
|
||||
prserv.init_logger(os.path.abspath(args.log), args.loglevel)
|
||||
options, args = parser.parse_args(sys.argv)
|
||||
prserv.init_logger(os.path.abspath(options.logfile),options.loglevel)
|
||||
|
||||
if args.start:
|
||||
ret=prserv.serv.start_daemon(args.file, args.host, args.port, os.path.abspath(args.log), args.read_only)
|
||||
elif args.stop:
|
||||
ret=prserv.serv.stop_daemon(args.host, args.port)
|
||||
if options.start:
|
||||
ret=prserv.serv.start_daemon(options.dbfile, options.host, options.port,os.path.abspath(options.logfile), options.read_only)
|
||||
elif options.stop:
|
||||
ret=prserv.serv.stop_daemon(options.host, options.port)
|
||||
else:
|
||||
ret=parser.print_help()
|
||||
return ret
|
||||
|
||||
@@ -12,12 +12,11 @@ warnings.simplefilter("default")
import logging
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))

import bb

bb.utils.check_system_locale()
if sys.getfilesystemencoding() != "utf-8":
sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")

# Users shouldn't be running this code directly
if len(sys.argv) != 11 or not sys.argv[1].startswith("decafbad"):
if len(sys.argv) != 10 or not sys.argv[1].startswith("decafbad"):
print("bitbake-server is meant for internal execution by bitbake itself, please don't use it standalone.")
sys.exit(1)

@@ -29,8 +28,7 @@ logfile = sys.argv[4]
lockname = sys.argv[5]
sockname = sys.argv[6]
timeout = float(sys.argv[7])
profile = bool(int(sys.argv[8]))
xmlrpcinterface = (sys.argv[9], int(sys.argv[10]))
xmlrpcinterface = (sys.argv[8], int(sys.argv[9]))
if xmlrpcinterface[0] == "None":
xmlrpcinterface = (None, xmlrpcinterface[1])

@@ -51,5 +49,5 @@ logger = logging.getLogger("BitBake")
handler = bb.event.LogHandler()
logger.addHandler(handler)

bb.server.process.execServer(lockfd, readypipeinfd, lockname, sockname, timeout, xmlrpcinterface, profile)
bb.server.process.execServer(lockfd, readypipeinfd, lockname, sockname, timeout, xmlrpcinterface)
@@ -24,7 +24,8 @@ import subprocess
|
||||
from multiprocessing import Lock
|
||||
from threading import Thread
|
||||
|
||||
bb.utils.check_system_locale()
|
||||
if sys.getfilesystemencoding() != "utf-8":
|
||||
sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
|
||||
|
||||
# Users shouldn't be running this code directly
|
||||
if len(sys.argv) != 2 or not sys.argv[1].startswith("decafbad"):
|
||||
@@ -91,19 +92,19 @@ def worker_fire_prepickled(event):
|
||||
worker_thread_exit = False
|
||||
|
||||
def worker_flush(worker_queue):
|
||||
worker_queue_int = bytearray()
|
||||
worker_queue_int = b""
|
||||
global worker_pipe, worker_thread_exit
|
||||
|
||||
while True:
|
||||
try:
|
||||
worker_queue_int.extend(worker_queue.get(True, 1))
|
||||
worker_queue_int = worker_queue_int + worker_queue.get(True, 1)
|
||||
except queue.Empty:
|
||||
pass
|
||||
while (worker_queue_int or not worker_queue.empty()):
|
||||
try:
|
||||
(_, ready, _) = select.select([], [worker_pipe], [], 1)
|
||||
if not worker_queue.empty():
|
||||
worker_queue_int.extend(worker_queue.get())
|
||||
worker_queue_int = worker_queue_int + worker_queue.get()
|
||||
written = os.write(worker_pipe, worker_queue_int)
|
||||
worker_queue_int = worker_queue_int[written:]
|
||||
except (IOError, OSError) as e:
|
||||
@@ -121,10 +122,11 @@ def worker_child_fire(event, d):
|
||||
|
||||
data = b"<event>" + pickle.dumps(event) + b"</event>"
|
||||
try:
|
||||
with bb.utils.lock_timeout(worker_pipe_lock):
|
||||
while(len(data)):
|
||||
written = worker_pipe.write(data)
|
||||
data = data[written:]
|
||||
worker_pipe_lock.acquire()
|
||||
while(len(data)):
|
||||
written = worker_pipe.write(data)
|
||||
data = data[written:]
|
||||
worker_pipe_lock.release()
|
||||
except IOError:
|
||||
sigterm_handler(None, None)
|
||||
raise
|
||||
@@ -143,17 +145,7 @@ def sigterm_handler(signum, frame):
|
||||
os.killpg(0, signal.SIGTERM)
|
||||
sys.exit()
|
||||
|
||||
def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
|
||||
|
||||
fn = runtask['fn']
|
||||
task = runtask['task']
|
||||
taskname = runtask['taskname']
|
||||
taskhash = runtask['taskhash']
|
||||
unihash = runtask['unihash']
|
||||
appends = runtask['appends']
|
||||
layername = runtask['layername']
|
||||
taskdepdata = runtask['taskdepdata']
|
||||
quieterrors = runtask['quieterrors']
|
||||
def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskhash, unihash, appends, taskdepdata, extraconfigdata, quieterrors=False, dry_run_exec=False):
|
||||
# We need to setup the environment BEFORE the fork, since
|
||||
# a fork() or exec*() activates PSEUDO...
|
||||
|
||||
@@ -165,7 +157,8 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
|
||||
uid = os.getuid()
|
||||
gid = os.getgid()
|
||||
|
||||
taskdep = runtask['taskdep']
|
||||
|
||||
taskdep = workerdata["taskdeps"][fn]
|
||||
if 'umask' in taskdep and taskname in taskdep['umask']:
|
||||
umask = taskdep['umask'][taskname]
|
||||
elif workerdata["umask"]:
|
||||
@@ -177,25 +170,25 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
|
||||
except TypeError:
|
||||
pass
|
||||
|
||||
dry_run = cfg.dry_run or runtask['dry_run']
|
||||
dry_run = cfg.dry_run or dry_run_exec
|
||||
|
||||
# We can't use the fakeroot environment in a dry run as it possibly hasn't been built
|
||||
if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not dry_run:
|
||||
fakeroot = True
|
||||
envvars = (runtask['fakerootenv'] or "").split()
|
||||
for key, value in (var.split('=',1) for var in envvars):
|
||||
envvars = (workerdata["fakerootenv"][fn] or "").split()
|
||||
for key, value in (var.split('=') for var in envvars):
|
||||
envbackup[key] = os.environ.get(key)
|
||||
os.environ[key] = value
|
||||
fakeenv[key] = value
|
||||
|
||||
fakedirs = (runtask['fakerootdirs'] or "").split()
|
||||
fakedirs = (workerdata["fakerootdirs"][fn] or "").split()
|
||||
for p in fakedirs:
|
||||
bb.utils.mkdirhier(p)
|
||||
logger.debug2('Running %s:%s under fakeroot, fakedirs: %s' %
|
||||
(fn, taskname, ', '.join(fakedirs)))
|
||||
else:
|
||||
envvars = (runtask['fakerootnoenv'] or "").split()
|
||||
for key, value in (var.split('=',1) for var in envvars):
|
||||
envvars = (workerdata["fakerootnoenv"][fn] or "").split()
|
||||
for key, value in (var.split('=') for var in envvars):
|
||||
envbackup[key] = os.environ.get(key)
|
||||
os.environ[key] = value
|
||||
fakeenv[key] = value
|
||||
@@ -237,16 +230,15 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
|
||||
# Let SIGHUP exit as SIGTERM
|
||||
signal.signal(signal.SIGHUP, sigterm_handler)
|
||||
|
||||
# No stdin & stdout
|
||||
# stdout is used as a status report channel and must not be used by child processes.
|
||||
dumbio = os.open(os.devnull, os.O_RDWR)
|
||||
os.dup2(dumbio, sys.stdin.fileno())
|
||||
os.dup2(dumbio, sys.stdout.fileno())
|
||||
# No stdin
|
||||
newsi = os.open(os.devnull, os.O_RDWR)
|
||||
os.dup2(newsi, sys.stdin.fileno())
|
||||
|
||||
if umask is not None:
|
||||
if umask:
|
||||
os.umask(umask)
|
||||
|
||||
try:
|
||||
bb_cache = bb.cache.NoCache(databuilder)
|
||||
(realfn, virtual, mc) = bb.cache.virtualfn2realfn(fn)
|
||||
the_data = databuilder.mcdata[mc]
|
||||
the_data.setVar("BB_WORKERCONTEXT", "1")
|
||||
@@ -265,14 +257,13 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
|
||||
bb.parse.siggen.set_taskhashes(workerdata["newhashes"])
|
||||
ret = 0
|
||||
|
||||
the_data = databuilder.parseRecipe(fn, appends, layername)
|
||||
the_data = bb_cache.loadDataFull(fn, appends)
|
||||
the_data.setVar('BB_TASKHASH', taskhash)
|
||||
the_data.setVar('BB_UNIHASH', unihash)
|
||||
bb.parse.siggen.setup_datacache_from_datastore(fn, the_data)
|
||||
|
||||
bb.utils.set_process_name("%s:%s" % (the_data.getVar("PN"), taskname.replace("do_", "")))
|
||||
|
||||
if not bb.utils.to_boolean(the_data.getVarFlag(taskname, 'network')):
|
||||
if not the_data.getVarFlag(taskname, 'network', False):
|
||||
if bb.utils.is_local_uid(uid):
|
||||
logger.debug("Attempting to disable network for %s" % taskname)
|
||||
bb.utils.disable_network(uid, gid)
|
||||
@@ -307,10 +298,6 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
|
||||
if not quieterrors:
|
||||
logger.critical(traceback.format_exc())
|
||||
os._exit(1)
|
||||
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
|
||||
try:
|
||||
if dry_run:
|
||||
return 0
|
||||
@@ -352,12 +339,12 @@ class runQueueWorkerPipe():
|
||||
if pipeout:
|
||||
pipeout.close()
|
||||
bb.utils.nonblockingfd(self.input)
|
||||
self.queue = bytearray()
|
||||
self.queue = b""
|
||||
|
||||
def read(self):
|
||||
start = len(self.queue)
|
||||
try:
|
||||
self.queue.extend(self.input.read(102400) or b"")
|
||||
self.queue = self.queue + (self.input.read(102400) or b"")
|
||||
except (OSError, IOError) as e:
|
||||
if e.errno != errno.EAGAIN:
|
||||
raise
|
||||
@@ -385,7 +372,7 @@ class BitbakeWorker(object):
|
||||
def __init__(self, din):
|
||||
self.input = din
|
||||
bb.utils.nonblockingfd(self.input)
|
||||
self.queue = bytearray()
|
||||
self.queue = b""
|
||||
self.cookercfg = None
|
||||
self.databuilder = None
|
||||
self.data = None
|
||||
@@ -419,7 +406,7 @@ class BitbakeWorker(object):
|
||||
if len(r) == 0:
|
||||
# EOF on pipe, server must have terminated
|
||||
self.sigterm_exception(signal.SIGTERM, None)
|
||||
self.queue.extend(r)
|
||||
self.queue = self.queue + r
|
||||
except (OSError, IOError):
|
||||
pass
|
||||
if len(self.queue):
|
||||
@@ -439,30 +426,18 @@ class BitbakeWorker(object):
|
||||
while self.process_waitpid():
|
||||
continue
|
||||
|
||||
|
||||
def handle_item(self, item, func):
|
||||
opening_tag = b"<" + item + b">"
|
||||
if not self.queue.startswith(opening_tag):
|
||||
return
|
||||
|
||||
tag_len = len(opening_tag)
|
||||
if len(self.queue) < tag_len + 4:
|
||||
# we need to receive more data
|
||||
return
|
||||
header = self.queue[tag_len:tag_len + 4]
|
||||
payload_len = int.from_bytes(header, 'big')
|
||||
# closing tag has length (tag_len + 1)
|
||||
if len(self.queue) < tag_len * 2 + 1 + payload_len:
|
||||
# we need to receive more data
|
||||
return
|
||||
|
||||
index = self.queue.find(b"</" + item + b">")
|
||||
if index != -1:
|
||||
try:
|
||||
func(self.queue[(tag_len + 4):index])
|
||||
except pickle.UnpicklingError:
|
||||
workerlog_write("Unable to unpickle data: %s\n" % ":".join("{:02x}".format(c) for c in self.queue))
|
||||
raise
|
||||
self.queue = self.queue[(index + len(b"</") + len(item) + len(b">")):]
|
||||
if self.queue.startswith(b"<" + item + b">"):
|
||||
index = self.queue.find(b"</" + item + b">")
|
||||
while index != -1:
|
||||
try:
|
||||
func(self.queue[(len(item) + 2):index])
|
||||
except pickle.UnpicklingError:
|
||||
workerlog_write("Unable to unpickle data: %s\n" % ":".join("{:02x}".format(c) for c in self.queue))
|
||||
raise
|
||||
self.queue = self.queue[(index + len(item) + 3):]
|
||||
index = self.queue.find(b"</" + item + b">")
|
||||
|
||||
def handle_cookercfg(self, data):
|
||||
self.cookercfg = pickle.loads(data)
|
||||
@@ -500,15 +475,11 @@ class BitbakeWorker(object):
|
||||
sys.exit(0)
|
||||
|
||||
def handle_runtask(self, data):
|
||||
runtask = pickle.loads(data)
|
||||
|
||||
fn = runtask['fn']
|
||||
task = runtask['task']
|
||||
taskname = runtask['taskname']
|
||||
|
||||
fn, task, taskname, taskhash, unihash, quieterrors, appends, taskdepdata, dry_run_exec = pickle.loads(data)
|
||||
workerlog_write("Handling runtask %s %s %s\n" % (task, fn, taskname))
|
||||
|
||||
pid, pipein, pipeout = fork_off_task(self.cookercfg, self.data, self.databuilder, self.workerdata, self.extraconfigdata, runtask)
|
||||
pid, pipein, pipeout = fork_off_task(self.cookercfg, self.data, self.databuilder, self.workerdata, fn, task, taskname, taskhash, unihash, appends, taskdepdata, self.extraconfigdata, quieterrors, dry_run_exec)
|
||||
|
||||
self.build_pids[pid] = task
|
||||
self.build_pipes[pid] = runQueueWorkerPipe(pipein, pipeout)
|
||||
|
||||
|
||||
@@ -24,17 +24,15 @@ warnings.simplefilter("default")
|
||||
version = 1.0
|
||||
|
||||
|
||||
git_cmd = ['git', '-c', 'safe.bareRepository=all']
|
||||
|
||||
def main():
|
||||
if sys.version_info < (3, 4, 0):
|
||||
sys.exit('Python 3.4 or greater is required')
|
||||
|
||||
git_dir = check_output(git_cmd + ['rev-parse', '--git-dir']).rstrip()
|
||||
git_dir = check_output(['git', 'rev-parse', '--git-dir']).rstrip()
|
||||
shallow_file = os.path.join(git_dir, 'shallow')
|
||||
if os.path.exists(shallow_file):
|
||||
try:
|
||||
check_output(git_cmd + ['fetch', '--unshallow'])
|
||||
check_output(['git', 'fetch', '--unshallow'])
|
||||
except subprocess.CalledProcessError:
|
||||
try:
|
||||
os.unlink(shallow_file)
|
||||
@@ -43,21 +41,21 @@ def main():
|
||||
raise
|
||||
|
||||
args = process_args()
|
||||
revs = check_output(git_cmd + ['rev-list'] + args.revisions).splitlines()
|
||||
revs = check_output(['git', 'rev-list'] + args.revisions).splitlines()
|
||||
|
||||
make_shallow(shallow_file, args.revisions, args.refs)
|
||||
|
||||
ref_revs = check_output(git_cmd + ['rev-list'] + args.refs).splitlines()
|
||||
ref_revs = check_output(['git', 'rev-list'] + args.refs).splitlines()
|
||||
remaining_history = set(revs) & set(ref_revs)
|
||||
for rev in remaining_history:
|
||||
if check_output(git_cmd + ['rev-parse', '{}^@'.format(rev)]):
|
||||
if check_output(['git', 'rev-parse', '{}^@'.format(rev)]):
|
||||
sys.exit('Error: %s was not made shallow' % rev)
|
||||
|
||||
filter_refs(args.refs)
|
||||
|
||||
if args.shrink:
|
||||
shrink_repo(git_dir)
|
||||
subprocess.check_call(git_cmd + ['fsck', '--unreachable'])
|
||||
subprocess.check_call(['git', 'fsck', '--unreachable'])
|
||||
|
||||
|
||||
def process_args():
|
||||
@@ -74,12 +72,12 @@ def process_args():
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.refs:
|
||||
args.refs = check_output(git_cmd + ['rev-parse', '--symbolic-full-name'] + args.refs).splitlines()
|
||||
args.refs = check_output(['git', 'rev-parse', '--symbolic-full-name'] + args.refs).splitlines()
|
||||
else:
|
||||
args.refs = get_all_refs(lambda r, t, tt: t == 'commit' or tt == 'commit')
|
||||
|
||||
args.refs = list(filter(lambda r: not r.endswith('/HEAD'), args.refs))
|
||||
args.revisions = check_output(git_cmd + ['rev-parse'] + ['%s^{}' % i for i in args.revisions]).splitlines()
|
||||
args.revisions = check_output(['git', 'rev-parse'] + ['%s^{}' % i for i in args.revisions]).splitlines()
|
||||
return args
|
||||
|
||||
|
||||
@@ -97,7 +95,7 @@ def make_shallow(shallow_file, revisions, refs):
|
||||
|
||||
def get_all_refs(ref_filter=None):
|
||||
"""Return all the existing refs in this repository, optionally filtering the refs."""
|
||||
ref_output = check_output(git_cmd + ['for-each-ref', '--format=%(refname)\t%(objecttype)\t%(*objecttype)'])
|
||||
ref_output = check_output(['git', 'for-each-ref', '--format=%(refname)\t%(objecttype)\t%(*objecttype)'])
|
||||
ref_split = [tuple(iter_extend(l.rsplit('\t'), 3)) for l in ref_output.splitlines()]
|
||||
if ref_filter:
|
||||
ref_split = (e for e in ref_split if ref_filter(*e))
|
||||
@@ -115,7 +113,7 @@ def filter_refs(refs):
|
||||
all_refs = get_all_refs()
|
||||
to_remove = set(all_refs) - set(refs)
|
||||
if to_remove:
|
||||
check_output(['xargs', '-0', '-n', '1'] + git_cmd + ['update-ref', '-d', '--no-deref'],
|
||||
check_output(['xargs', '-0', '-n', '1', 'git', 'update-ref', '-d', '--no-deref'],
|
||||
input=''.join(l + '\0' for l in to_remove))
|
||||
|
||||
|
||||
@@ -128,7 +126,7 @@ def follow_history_intersections(revisions, refs):
|
||||
if rev in seen:
|
||||
continue
|
||||
|
||||
parents = check_output(git_cmd + ['rev-parse', '%s^@' % rev]).splitlines()
|
||||
parents = check_output(['git', 'rev-parse', '%s^@' % rev]).splitlines()
|
||||
|
||||
yield rev
|
||||
seen.add(rev)
|
||||
@@ -136,12 +134,12 @@ def follow_history_intersections(revisions, refs):
|
||||
if not parents:
|
||||
continue
|
||||
|
||||
check_refs = check_output(git_cmd + ['merge-base', '--independent'] + sorted(refs)).splitlines()
|
||||
check_refs = check_output(['git', 'merge-base', '--independent'] + sorted(refs)).splitlines()
|
||||
for parent in parents:
|
||||
for ref in check_refs:
|
||||
print("Checking %s vs %s" % (parent, ref))
|
||||
try:
|
||||
merge_base = check_output(git_cmd + ['merge-base', parent, ref]).rstrip()
|
||||
merge_base = check_output(['git', 'merge-base', parent, ref]).rstrip()
|
||||
except subprocess.CalledProcessError:
|
||||
continue
|
||||
else:
|
||||
@@ -161,14 +159,14 @@ def iter_except(func, exception, start=None):
|
||||
|
||||
def shrink_repo(git_dir):
|
||||
"""Shrink the newly shallow repository, removing the unreachable objects."""
|
||||
subprocess.check_call(git_cmd + ['reflog', 'expire', '--expire-unreachable=now', '--all'])
|
||||
subprocess.check_call(git_cmd + ['repack', '-ad'])
|
||||
subprocess.check_call(['git', 'reflog', 'expire', '--expire-unreachable=now', '--all'])
|
||||
subprocess.check_call(['git', 'repack', '-ad'])
|
||||
try:
|
||||
os.unlink(os.path.join(git_dir, 'objects', 'info', 'alternates'))
|
||||
except OSError as exc:
|
||||
if exc.errno != errno.ENOENT:
|
||||
raise
|
||||
subprocess.check_call(git_cmd + ['prune', '--expire', 'now'])
|
||||
subprocess.check_call(['git', 'prune', '--expire', 'now'])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -84,7 +84,7 @@ webserverStartAll()
echo "Starting webserver..."

$MANAGE runserver --noreload "$ADDR_PORT" \
</dev/null >>${TOASTER_LOGS_DIR}/web.log 2>&1 \
</dev/null >>${BUILDDIR}/toaster_web.log 2>&1 \
& echo $! >${BUILDDIR}/.toastermain.pid

sleep 1
@@ -181,14 +181,6 @@ WEBSERVER=1
export TOASTER_BUILDSERVER=1
ADDR_PORT="localhost:8000"
TOASTERDIR=`dirname $BUILDDIR`
# ${BUILDDIR}/toaster_logs/ became the default location for toaster logs
# This is needed for implemented django-log-viewer: https://pypi.org/project/django-log-viewer/
# If the directory does not exist, create it.
TOASTER_LOGS_DIR="${BUILDDIR}/toaster_logs/"
if [ ! -d $TOASTER_LOGS_DIR ]
then
mkdir $TOASTER_LOGS_DIR
fi
unset CMD
for param in $*; do
case $param in
@@ -307,7 +299,7 @@ case $CMD in
export BITBAKE_UI='toasterui'
if [ $TOASTER_BUILDSERVER -eq 1 ] ; then
$MANAGE runbuilds \
</dev/null >>${TOASTER_LOGS_DIR}/toaster_runbuilds.log 2>&1 \
</dev/null >>${BUILDDIR}/toaster_runbuilds.log 2>&1 \
& echo $! >${BUILDDIR}/.runbuilds.pid
else
echo "Toaster build server not started."
@@ -30,23 +30,79 @@ sys.path.insert(0, join(dirname(dirname(abspath(__file__))), 'lib'))
|
||||
|
||||
import bb.cooker
|
||||
from bb.ui import toasterui
|
||||
from bb.ui import eventreplay
|
||||
|
||||
class EventPlayer:
|
||||
"""Emulate a connection to a bitbake server."""
|
||||
|
||||
def __init__(self, eventfile, variables):
|
||||
self.eventfile = eventfile
|
||||
self.variables = variables
|
||||
self.eventmask = []
|
||||
|
||||
def waitEvent(self, _timeout):
|
||||
"""Read event from the file."""
|
||||
line = self.eventfile.readline().strip()
|
||||
if not line:
|
||||
return
|
||||
try:
|
||||
event_str = json.loads(line)['vars'].encode('utf-8')
|
||||
event = pickle.loads(codecs.decode(event_str, 'base64'))
|
||||
event_name = "%s.%s" % (event.__module__, event.__class__.__name__)
|
||||
if event_name not in self.eventmask:
|
||||
return
|
||||
return event
|
||||
except ValueError as err:
|
||||
print("Failed loading ", line)
|
||||
raise err
|
||||
|
||||
def runCommand(self, command_line):
|
||||
"""Emulate running a command on the server."""
|
||||
name = command_line[0]
|
||||
|
||||
if name == "getVariable":
|
||||
var_name = command_line[1]
|
||||
variable = self.variables.get(var_name)
|
||||
if variable:
|
||||
return variable['v'], None
|
||||
return None, "Missing variable %s" % var_name
|
||||
|
||||
elif name == "getAllKeysWithFlags":
|
||||
dump = {}
|
||||
flaglist = command_line[1]
|
||||
for key, val in self.variables.items():
|
||||
try:
|
||||
if not key.startswith("__"):
|
||||
dump[key] = {
|
||||
'v': val['v'],
|
||||
'history' : val['history'],
|
||||
}
|
||||
for flag in flaglist:
|
||||
dump[key][flag] = val[flag]
|
||||
except Exception as err:
|
||||
print(err)
|
||||
return (dump, None)
|
||||
|
||||
elif name == 'setEventMask':
|
||||
self.eventmask = command_line[-1]
|
||||
return True, None
|
||||
|
||||
else:
|
||||
raise Exception("Command %s not implemented" % command_line[0])
|
||||
|
||||
def getEventHandle(self):
|
||||
"""
|
||||
This method is called by toasterui.
|
||||
The return value is passed to self.runCommand but not used there.
|
||||
"""
|
||||
pass
|
||||
|
||||
def main(argv):
|
||||
with open(argv[-1]) as eventfile:
|
||||
# load variables from the first line
|
||||
variables = None
|
||||
while line := eventfile.readline().strip():
|
||||
try:
|
||||
variables = json.loads(line)['allvariables']
|
||||
break
|
||||
except (KeyError, json.JSONDecodeError):
|
||||
continue
|
||||
if not variables:
|
||||
sys.exit("Cannot find allvariables entry in event log file %s" % argv[-1])
|
||||
eventfile.seek(0)
|
||||
variables = json.loads(eventfile.readline().strip())['allvariables']
|
||||
|
||||
params = namedtuple('ConfigParams', ['observe_only'])(True)
|
||||
player = eventreplay.EventPlayer(eventfile, variables)
|
||||
player = EventPlayer(eventfile, variables)
|
||||
|
||||
return toasterui.main(player, player, params)
|
||||
|
||||
|
||||
@@ -40,7 +40,7 @@ set cpo&vim
|
||||
|
||||
let s:maxoff = 50 " maximum number of lines to look backwards for ()
|
||||
|
||||
function! GetBBPythonIndent(lnum)
|
||||
function GetPythonIndent(lnum)
|
||||
|
||||
" If this line is explicitly joined: If the previous line was also joined,
|
||||
" line it up with that one, otherwise add two 'shiftwidth'
|
||||
@@ -257,7 +257,7 @@ let b:did_indent = 1
|
||||
setlocal indentkeys+=0\"
|
||||
|
||||
|
||||
function! BitbakeIndent(lnum)
|
||||
function BitbakeIndent(lnum)
|
||||
if !has('syntax_items')
|
||||
return -1
|
||||
endif
|
||||
@@ -315,7 +315,7 @@ function! BitbakeIndent(lnum)
|
||||
endif
|
||||
|
||||
if index(["bbPyDefRegion", "bbPyFuncRegion"], name) != -1
|
||||
let ret = GetBBPythonIndent(a:lnum)
|
||||
let ret = GetPythonIndent(a:lnum)
|
||||
" Should normally always be indented by at least one shiftwidth; but allow
|
||||
" return of -1 (defer to autoindent) or -2 (force indent to 0)
|
||||
if ret == 0
|
||||
|
||||
@@ -63,14 +63,13 @@ syn region bbVarFlagFlag matchgroup=bbArrayBrackets start="\[" end="\]\s*
|
||||
|
||||
" Includes and requires
|
||||
syn keyword bbInclude inherit include require contained
|
||||
syn match bbIncludeRest ".*$" contained contains=bbString,bbVarDeref,bbVarPyValue
|
||||
syn match bbIncludeRest ".*$" contained contains=bbString,bbVarDeref
|
||||
syn match bbIncludeLine "^\(inherit\|include\|require\)\s\+" contains=bbInclude nextgroup=bbIncludeRest
|
||||
|
||||
" Add taks and similar
|
||||
syn keyword bbStatement addtask deltask addhandler after before EXPORT_FUNCTIONS contained
|
||||
syn match bbStatementRest /[^\\]*$/ skipwhite contained contains=bbStatement,bbVarDeref,bbVarPyValue
|
||||
syn region bbStatementRestCont start=/.*\\$/ end=/^[^\\]*$/ contained contains=bbStatement,bbVarDeref,bbVarPyValue,bbContinue keepend
|
||||
syn match bbStatementLine "^\(addtask\|deltask\|addhandler\|after\|before\|EXPORT_FUNCTIONS\)\s\+" contains=bbStatement nextgroup=bbStatementRest,bbStatementRestCont
|
||||
syn match bbStatementRest ".*$" skipwhite contained contains=bbStatement
|
||||
syn match bbStatementLine "^\(addtask\|deltask\|addhandler\|after\|before\|EXPORT_FUNCTIONS\)\s\+" contains=bbStatement nextgroup=bbStatementRest
|
||||
|
||||
" OE Important Functions
|
||||
syn keyword bbOEFunctions do_fetch do_unpack do_patch do_configure do_compile do_stage do_install do_package contained
|
||||
@@ -123,7 +122,6 @@ hi def link bbPyFlag Type
|
||||
hi def link bbPyDef Statement
|
||||
hi def link bbStatement Statement
|
||||
hi def link bbStatementRest Identifier
|
||||
hi def link bbStatementRestCont Identifier
|
||||
hi def link bbOEFunctions Special
|
||||
hi def link bbVarPyValue PreProc
|
||||
hi def link bbOverrideOperator Operator
|
||||
|
||||
@@ -47,8 +47,8 @@ To install all required packages run:
|
||||
|
||||
To build the documentation locally, run:
|
||||
|
||||
$ cd doc
|
||||
$ make html
|
||||
$ cd documentation
|
||||
$ make -f Makefile.sphinx html
|
||||
|
||||
The resulting HTML index page will be _build/html/index.html, and you
|
||||
can browse your own copy of the locally generated documentation with
|
||||
|
||||
9
bitbake/doc/_templates/footer.html
vendored
@@ -1,9 +0,0 @@
|
||||
<footer>
|
||||
<hr/>
|
||||
<div role="contentinfo">
|
||||
<p>© Copyright {{ copyright }}
|
||||
<br>Last updated on {{ last_updated }} from the <a href="https://git.openembedded.org/bitbake/">bitbake</a> git repository.
|
||||
</p>
|
||||
</div>
|
||||
</footer>
|
||||
|
||||
@@ -552,8 +552,8 @@ through dependency chains are more complex and are generally
|
||||
accomplished with a Python function. The code in
|
||||
``meta/lib/oe/sstatesig.py`` shows two examples of this and also
|
||||
illustrates how you can insert your own policy into the system if so
|
||||
desired. This file defines the basic signature generator
|
||||
OpenEmbedded-Core uses: "OEBasicHash". By default, there
|
||||
desired. This file defines the two basic signature generators
|
||||
OpenEmbedded-Core uses: "OEBasic" and "OEBasicHash". By default, there
|
||||
is a dummy "noop" signature handler enabled in BitBake. This means that
|
||||
behavior is unchanged from previous versions. ``OE-Core`` uses the
|
||||
"OEBasicHash" signature handler by default through this setting in the
|
||||
@@ -561,13 +561,14 @@ behavior is unchanged from previous versions. ``OE-Core`` uses the
|
||||
|
||||
BB_SIGNATURE_HANDLER ?= "OEBasicHash"
|
||||
|
||||
The main feature of the "OEBasicHash" :term:`BB_SIGNATURE_HANDLER` is that
|
||||
it adds the task hash to the stamp files. Thanks to this, any metadata
|
||||
change will change the task hash, automatically causing the task to be run
|
||||
again. This removes the need to bump :term:`PR` values, and changes to
|
||||
metadata automatically ripple across the build.
|
||||
The "OEBasicHash" :term:`BB_SIGNATURE_HANDLER` is the same as the "OEBasic"
|
||||
version but adds the task hash to the stamp files. This results in any
|
||||
metadata change that changes the task hash, automatically causing the
|
||||
task to be run again. This removes the need to bump
|
||||
:term:`PR` values, and changes to metadata automatically
|
||||
ripple across the build.
|
||||
|
||||
It is also worth noting that the end result of signature
|
||||
It is also worth noting that the end result of these signature
|
||||
generators is to make some dependency and hash information available to
|
||||
the build. This information includes:
|
||||
|
||||
@@ -586,11 +587,10 @@ or possibly those defined in the metadata/signature handler itself. The
|
||||
simplest parameter to pass is "none", which causes a set of signature
|
||||
information to be written out into ``STAMPS_DIR`` corresponding to the
|
||||
targets specified. The other currently available parameter is
|
||||
"printdiff", which causes BitBake to try to establish the most recent
|
||||
"printdiff", which causes BitBake to try to establish the closest
|
||||
signature match it can (e.g. in the sstate cache) and then run
|
||||
compare the matched signatures to determine the stamps and delta
|
||||
where these two stamp trees diverge. This can be used to determine why
|
||||
tasks need to be re-run in situations where that is not expected.
|
||||
``bitbake-diffsigs`` over the matches to determine the stamps and delta
|
||||
where these two stamp trees diverge.
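As a usage sketch (assuming BitBake's standard ``-S``/``--dump-signatures`` command line option and a placeholder target name), the two parameters are passed like this::

   $ bitbake -S none <target>
   $ bitbake -S printdiff <target>

The first form simply writes the signature data out, while the second additionally reports where the stamp trees diverge.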
|
||||
.. note::
|
||||
|
||||
@@ -657,7 +657,7 @@ builds are when execute, bitbake also supports user defined
|
||||
configuration of the `Python
|
||||
logging <https://docs.python.org/3/library/logging.html>`__ facilities
|
||||
through the :term:`BB_LOGCONFIG` variable. This
|
||||
variable defines a JSON or YAML `logging
|
||||
variable defines a json or yaml `logging
|
||||
configuration <https://docs.python.org/3/library/logging.config.html>`__
|
||||
that will be intelligently merged into the default configuration. The
|
||||
logging configuration is merged using the following rules:
|
||||
@@ -691,9 +691,9 @@ logging configuration is merged using the following rules:
|
||||
adds a filter called ``BitBake.defaultFilter``, both filters will be
|
||||
applied to the logger
|
||||
|
||||
As a first example, you can create a ``hashequiv.json`` user logging
|
||||
configuration file to log all Hash Equivalence related messages of ``VERBOSE``
|
||||
or higher priority to a file called ``hashequiv.log``::
|
||||
As an example, consider the following user logging configuration file
|
||||
which logs all Hash Equivalence related messages of VERBOSE or higher to
|
||||
a file called ``hashequiv.log`` ::
|
||||
|
||||
{
|
||||
"version": 1,
|
||||
@@ -722,40 +722,3 @@ or higher priority to a file called ``hashequiv.log``::
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Then set the :term:`BB_LOGCONFIG` variable in ``conf/local.conf``::
|
||||
|
||||
BB_LOGCONFIG = "hashequiv.json"
|
||||
|
||||
Another example is this ``warn.json`` file to log all ``WARNING`` and
|
||||
higher priority messages to a ``warn.log`` file::
|
||||
|
||||
{
|
||||
"version": 1,
|
||||
"formatters": {
|
||||
"warnlogFormatter": {
|
||||
"()": "bb.msg.BBLogFormatter",
|
||||
"format": "%(levelname)s: %(message)s"
|
||||
}
|
||||
},
|
||||
|
||||
"handlers": {
|
||||
"warnlog": {
|
||||
"class": "logging.FileHandler",
|
||||
"formatter": "warnlogFormatter",
|
||||
"level": "WARNING",
|
||||
"filename": "warn.log"
|
||||
}
|
||||
},
|
||||
|
||||
"loggers": {
|
||||
"BitBake": {
|
||||
"handlers": ["warnlog"]
|
||||
}
|
||||
},
|
||||
|
||||
"@disable_existing_loggers": false
|
||||
}
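As with the ``hashequiv.json`` example above, this file only takes effect once it is named in :term:`BB_LOGCONFIG`, for example in ``conf/local.conf``::

   BB_LOGCONFIG = "warn.json"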
|
||||
Note that BitBake's helper classes for structured logging are implemented in
|
||||
``lib/bb/msg.py``.
|
||||
|
||||
@@ -424,8 +424,8 @@ This fetcher supports the following parameters:
|
||||
|
||||
- *"nobranch":* Tells the fetcher to not check the SHA validation for
|
||||
the branch when set to "1". The default is "0". Set this option for
|
||||
the recipe that refers to the commit that is valid for any namespace
|
||||
(branch, tag, ...) instead of the branch.
|
||||
the recipe that refers to the commit that is valid for a tag instead
|
||||
of the branch.
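A hedged sketch of how this can look in a recipe (the repository URL and revision are placeholders, not taken from this manual)::

   SRC_URI = "git://git.example.com/foo.git;protocol=https;nobranch=1"
   SRCREV = "<commit hash reachable only through a tag>"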
|
||||
- *"bareclone":* Tells the fetcher to clone a bare clone into the
|
||||
destination directory without checking out a working tree. Only the
|
||||
@@ -476,14 +476,6 @@ Here are some example URLs::
|
||||
easy to share metadata without removing passwords. SSH keys, ``~/.netrc``
|
||||
and ``~/.ssh/config`` files can be used as alternatives.
|
||||
|
||||
Using tags with the git fetcher may cause surprising behaviour. Bitbake needs to
|
||||
resolve the tag to a specific revision and to do that, it has to connect to and use
|
||||
the upstream repository. This is because the revision the tags point at can change and
|
||||
we've seen cases of this happening in well known public repositories. This can mean
|
||||
many more network connections than expected and recipes may be reparsed at every build.
|
||||
Source mirrors will also be bypassed as the upstream repository is the only source
|
||||
of truth to resolve the revision accurately. For these reasons, whilst the fetcher
|
||||
can support tags, we recommend being specific about revisions in recipes.
|
||||
|
||||
.. _gitsm-fetcher:
|
||||
|
||||
@@ -696,41 +688,6 @@ Here is an example URL::
|
||||
|
||||
It can also be used when setting mirror definitions using the :term:`PREMIRRORS` variable.
|
||||
|
||||
.. _gcp-fetcher:
|
||||
|
||||
GCP Fetcher (``gs://``)
|
||||
--------------------------
|
||||
|
||||
This submodule fetches data from a
|
||||
`Google Cloud Storage Bucket <https://cloud.google.com/storage/docs/buckets>`__.
|
||||
It uses the `Google Cloud Storage Python Client <https://cloud.google.com/python/docs/reference/storage/latest>`__
|
||||
to check the status of objects in the bucket and download them.
|
||||
The use of the Python client makes it substantially faster than using command
|
||||
line tools such as gsutil.
|
||||
|
||||
The fetcher requires the Google Cloud Storage Python Client to be installed, along
|
||||
with the gsutil tool.
|
||||
|
||||
The fetcher requires that the machine has valid credentials for accessing the
|
||||
chosen bucket. Instructions for authentication can be found in the
|
||||
`Google Cloud documentation <https://cloud.google.com/docs/authentication/provide-credentials-adc#local-dev>`__.
|
||||
|
||||
If it is used from the OpenEmbedded build system, the fetcher can be used for
|
||||
fetching sstate artifacts from a GCS bucket by specifying the
|
||||
``SSTATE_MIRRORS`` variable as shown below::
|
||||
|
||||
SSTATE_MIRRORS ?= "\
|
||||
file://.* gs://<bucket name>/PATH \
|
||||
"
|
||||
|
||||
The fetcher can also be used in recipes::
|
||||
|
||||
SRC_URI = "gs://<bucket name>/<foo_container>/<bar_file>"
|
||||
|
||||
However, the checksum of the file should also be provided::
|
||||
|
||||
SRC_URI[sha256sum] = "<sha256 string>"
|
||||
|
||||
.. _crate-fetcher:
|
||||
|
||||
Crate Fetcher (``crate://``)
|
||||
@@ -783,7 +740,7 @@ Here is an example URL with both fetchers::
|
||||
"
|
||||
|
||||
See :yocto_docs:`Creating Node Package Manager (NPM) Packages
|
||||
</dev-manual/packages.html#creating-node-package-manager-npm-packages>`
|
||||
</dev-manual/common-tasks.html#creating-node-package-manager-npm-packages>`
|
||||
in the Yocto Project manual for details about using
|
||||
:yocto_docs:`devtool <https://docs.yoctoproject.org/ref-manual/devtool-reference.html>`
|
||||
to automatically create a recipe from an NPM URL.
|
||||
@@ -820,7 +777,7 @@ the package which has such dependencies, for example::
|
||||
Such a file can automatically be generated using
|
||||
:yocto_docs:`devtool <https://docs.yoctoproject.org/ref-manual/devtool-reference.html>`
|
||||
as described in the :yocto_docs:`Creating Node Package Manager (NPM) Packages
|
||||
</dev-manual/packages.html#creating-node-package-manager-npm-packages>`
|
||||
</dev-manual/common-tasks.html#creating-node-package-manager-npm-packages>`
|
||||
section of the Yocto Project.
|
||||
|
||||
Other Fetchers
|
||||
@@ -834,8 +791,6 @@ Fetch submodules also exist for the following:
|
||||
|
||||
- OSC (``osc://``)
|
||||
|
||||
- S3 (``s3://``)
|
||||
|
||||
- Secure FTP (``sftp://``)
|
||||
|
||||
- Secure Shell (``ssh://``)
|
||||
|
||||
@@ -18,32 +18,28 @@ it.
|
||||
Obtaining BitBake
|
||||
=================
|
||||
|
||||
See the :ref:`bitbake-user-manual/bitbake-user-manual-intro:obtaining bitbake` section for
|
||||
See the :ref:`bitbake-user-manual/bitbake-user-manual-hello:obtaining bitbake` section for
|
||||
information on how to obtain BitBake. Once you have the source code on
|
||||
your machine, the BitBake directory appears as follows::
|
||||
|
||||
$ ls -al
|
||||
total 108
|
||||
drwxr-xr-x 9 fawkh 10000 4096 feb 24 12:10 .
|
||||
drwx------ 36 fawkh 10000 4096 mar 2 17:00 ..
|
||||
-rw-r--r-- 1 fawkh 10000 365 feb 24 12:10 AUTHORS
|
||||
drwxr-xr-x 2 fawkh 10000 4096 feb 24 12:10 bin
|
||||
-rw-r--r-- 1 fawkh 10000 16501 feb 24 12:10 ChangeLog
|
||||
drwxr-xr-x 2 fawkh 10000 4096 feb 24 12:10 classes
|
||||
drwxr-xr-x 2 fawkh 10000 4096 feb 24 12:10 conf
|
||||
drwxr-xr-x 5 fawkh 10000 4096 feb 24 12:10 contrib
|
||||
drwxr-xr-x 6 fawkh 10000 4096 feb 24 12:10 doc
|
||||
drwxr-xr-x 8 fawkh 10000 4096 mar 2 16:26 .git
|
||||
-rw-r--r-- 1 fawkh 10000 31 feb 24 12:10 .gitattributes
|
||||
-rw-r--r-- 1 fawkh 10000 392 feb 24 12:10 .gitignore
|
||||
drwxr-xr-x 13 fawkh 10000 4096 feb 24 12:11 lib
|
||||
-rw-r--r-- 1 fawkh 10000 1224 feb 24 12:10 LICENSE
|
||||
-rw-r--r-- 1 fawkh 10000 15394 feb 24 12:10 LICENSE.GPL-2.0-only
|
||||
-rw-r--r-- 1 fawkh 10000 1286 feb 24 12:10 LICENSE.MIT
|
||||
-rw-r--r-- 1 fawkh 10000 229 feb 24 12:10 MANIFEST.in
|
||||
-rw-r--r-- 1 fawkh 10000 2413 feb 24 12:10 README
|
||||
-rw-r--r-- 1 fawkh 10000 43 feb 24 12:10 toaster-requirements.txt
|
||||
-rw-r--r-- 1 fawkh 10000 2887 feb 24 12:10 TODO
|
||||
total 100
|
||||
drwxrwxr-x. 9 wmat wmat 4096 Jan 31 13:44 .
|
||||
drwxrwxr-x. 3 wmat wmat 4096 Feb 4 10:45 ..
|
||||
-rw-rw-r--. 1 wmat wmat 365 Nov 26 04:55 AUTHORS
|
||||
drwxrwxr-x. 2 wmat wmat 4096 Nov 26 04:55 bin
|
||||
drwxrwxr-x. 4 wmat wmat 4096 Jan 31 13:44 build
|
||||
-rw-rw-r--. 1 wmat wmat 16501 Nov 26 04:55 ChangeLog
|
||||
drwxrwxr-x. 2 wmat wmat 4096 Nov 26 04:55 classes
|
||||
drwxrwxr-x. 2 wmat wmat 4096 Nov 26 04:55 conf
|
||||
drwxrwxr-x. 3 wmat wmat 4096 Nov 26 04:55 contrib
|
||||
-rw-rw-r--. 1 wmat wmat 17987 Nov 26 04:55 COPYING
|
||||
drwxrwxr-x. 3 wmat wmat 4096 Nov 26 04:55 doc
|
||||
-rw-rw-r--. 1 wmat wmat 69 Nov 26 04:55 .gitignore
|
||||
-rw-rw-r--. 1 wmat wmat 849 Nov 26 04:55 HEADER
|
||||
drwxrwxr-x. 5 wmat wmat 4096 Jan 31 13:44 lib
|
||||
-rw-rw-r--. 1 wmat wmat 195 Nov 26 04:55 MANIFEST.in
|
||||
-rw-rw-r--. 1 wmat wmat 2887 Nov 26 04:55 TODO
|
||||
|
||||
At this point, you should have BitBake cloned to a directory that
|
||||
matches the previous listing except for dates and user names.
|
||||
@@ -56,7 +52,7 @@ directory to where your local BitBake files are and run the following
|
||||
command::
|
||||
|
||||
$ ./bin/bitbake --version
|
||||
BitBake Build Tool Core version 2.3.1
|
||||
BitBake Build Tool Core version 1.23.0, bitbake version 1.23.0
|
||||
|
||||
The console output tells you what version
|
||||
you are running.
|
||||
@@ -134,8 +130,23 @@ Following is the complete "Hello World" example.
|
||||
directory. Run the ``bitbake`` command and see what it does::
|
||||
|
||||
$ bitbake
|
||||
ERROR: The BBPATH variable is not set and bitbake did not find a conf/bblayers.conf file in the expected location.
|
||||
The BBPATH variable is not set and bitbake did not
|
||||
find a conf/bblayers.conf file in the expected location.
|
||||
Maybe you accidentally invoked bitbake from the wrong directory?
|
||||
DEBUG: Removed the following variables from the environment:
|
||||
GNOME_DESKTOP_SESSION_ID, XDG_CURRENT_DESKTOP,
|
||||
GNOME_KEYRING_CONTROL, DISPLAY, SSH_AGENT_PID, LANG, no_proxy,
|
||||
XDG_SESSION_PATH, XAUTHORITY, SESSION_MANAGER, SHLVL,
|
||||
MANDATORY_PATH, COMPIZ_CONFIG_PROFILE, WINDOWID, EDITOR,
|
||||
GPG_AGENT_INFO, SSH_AUTH_SOCK, GDMSESSION, GNOME_KEYRING_PID,
|
||||
XDG_SEAT_PATH, XDG_CONFIG_DIRS, LESSOPEN, DBUS_SESSION_BUS_ADDRESS,
|
||||
_, XDG_SESSION_COOKIE, DESKTOP_SESSION, LESSCLOSE, DEFAULTS_PATH,
|
||||
UBUNTU_MENUPROXY, OLDPWD, XDG_DATA_DIRS, COLORTERM, LS_COLORS
|
||||
|
||||
The majority of this output is specific to environment variables that
|
||||
are not directly relevant to BitBake. However, the very first
|
||||
message regarding the :term:`BBPATH` variable and the
|
||||
``conf/bblayers.conf`` file is relevant.
|
||||
|
||||
When you run BitBake, it begins looking for metadata files. The
|
||||
:term:`BBPATH` variable is what tells BitBake where
|
||||
@@ -168,14 +179,20 @@ Following is the complete "Hello World" example.
|
||||
``bitbake`` command again::
|
||||
|
||||
$ bitbake
|
||||
ERROR: Unable to parse /home/scott-lenovo/bitbake/lib/bb/parse/__init__.py
|
||||
Traceback (most recent call last):
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/__init__.py", line 127, in resolve_file(fn='conf/bitbake.conf', d=<bb.data_smart.DataSmart object at 0x7f22919a3df0>):
|
||||
if not newfn:
|
||||
> raise IOError(errno.ENOENT, "file %s not found in %s" % (fn, bbpath))
|
||||
fn = newfn
|
||||
FileNotFoundError: [Errno 2] file conf/bitbake.conf not found in <projectdirectory>
|
||||
ERROR: Traceback (most recent call last):
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 163, in wrapped
|
||||
return func(fn, *args)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 173, in parse_config_file
|
||||
return bb.parse.handle(fn, data, include)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/__init__.py", line 99, in handle
|
||||
return h['handle'](fn, data, include)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/ConfHandler.py", line 120, in handle
|
||||
abs_fn = resolve_file(fn, data)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/__init__.py", line 117, in resolve_file
|
||||
raise IOError("file %s not found in %s" % (fn, bbpath))
|
||||
IOError: file conf/bitbake.conf not found in /home/scott-lenovo/hello
|
||||
|
||||
ERROR: Unable to parse conf/bitbake.conf: file conf/bitbake.conf not found in /home/scott-lenovo/hello
|
||||
|
||||
This sample output shows that BitBake could not find the
|
||||
``conf/bitbake.conf`` file in the project directory. This file is
|
||||
@@ -209,12 +226,12 @@ Following is the complete "Hello World" example.
|
||||
|
||||
.. note::
|
||||
|
||||
Without a value for :term:`PN`, the variables :term:`STAMP`, :term:`T`, and :term:`B`, prevent more
|
||||
than one recipe from working. You can fix this by either setting :term:`PN` to
|
||||
Without a value for PN , the variables STAMP , T , and B , prevent more
|
||||
than one recipe from working. You can fix this by either setting PN to
|
||||
have a value similar to what OpenEmbedded and BitBake use in the default
|
||||
``bitbake.conf`` file (see previous example). Or, by manually updating each
|
||||
recipe to set :term:`PN`. You will also need to include :term:`PN` as part of the :term:`STAMP`,
|
||||
:term:`T`, and :term:`B` variable definitions in the ``local.conf`` file.
|
||||
bitbake.conf file (see previous example). Or, by manually updating each
|
||||
recipe to set PN . You will also need to include PN as part of the STAMP
|
||||
, T , and B variable definitions in the local.conf file.
|
||||
|
||||
The ``TMPDIR`` variable establishes a directory that BitBake uses
|
||||
for build output and intermediate files other than the cached
|
||||
@@ -237,14 +254,18 @@ Following is the complete "Hello World" example.
|
||||
exists, you can run the ``bitbake`` command again::
|
||||
|
||||
$ bitbake
|
||||
ERROR: Unable to parse /home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py
|
||||
Traceback (most recent call last):
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py", line 67, in inherit(files=['base'], fn='configuration INHERITs', lineno=0, d=<bb.data_smart.DataSmart object at 0x7fab6815edf0>):
|
||||
if not os.path.exists(file):
|
||||
> raise ParseError("Could not inherit file %s" % (file), fn, lineno)
|
||||
|
||||
bb.parse.ParseError: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
|
||||
ERROR: Traceback (most recent call last):
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 163, in wrapped
|
||||
return func(fn, *args)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 177, in _inherit
|
||||
bb.parse.BBHandler.inherit(bbclass, "configuration INHERITs", 0, data)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py", line 92, in inherit
|
||||
include(fn, file, lineno, d, "inherit")
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/ConfHandler.py", line 100, in include
|
||||
raise ParseError("Could not %(error_out)s file %(fn)s" % vars(), oldfn, lineno)
|
||||
ParseError: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
|
||||
|
||||
ERROR: Unable to parse base: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
|
||||
|
||||
In the sample output,
|
||||
BitBake could not find the ``classes/base.bbclass`` file. You need
|
||||
@@ -263,10 +284,7 @@ Following is the complete "Hello World" example.
|
||||
$ mkdir classes
|
||||
|
||||
Move to the ``classes`` directory and then create the
|
||||
``base.bbclass`` file by inserting this single line::
|
||||
|
||||
addtask build
|
||||
|
||||
``base.bbclass`` file by inserting this single line: addtask build
|
||||
The minimal task that BitBake runs is the ``do_build`` task. This is
|
||||
all the example needs in order to build the project. Of course, the
|
||||
``base.bbclass`` can have much more depending on which build
|
||||
@@ -310,19 +328,10 @@ Following is the complete "Hello World" example.
|
||||
BBFILES += "${LAYERDIR}/*.bb"
|
||||
BBFILE_COLLECTIONS += "mylayer"
|
||||
BBFILE_PATTERN_mylayer := "^${LAYERDIR_RE}/"
|
||||
LAYERSERIES_CORENAMES = "hello_world_example"
|
||||
LAYERSERIES_COMPAT_mylayer = "hello_world_example"
|
||||
|
||||
For information on these variables, click on :term:`BBFILES`,
|
||||
:term:`LAYERDIR`, :term:`BBFILE_COLLECTIONS`, :term:`BBFILE_PATTERN_mylayer <BBFILE_PATTERN>`
|
||||
or :term:`LAYERSERIES_COMPAT` to go to the definitions in the glossary.
|
||||
|
||||
.. note::
|
||||
|
||||
We are setting both ``LAYERSERIES_CORENAMES`` and :term:`LAYERSERIES_COMPAT` in this particular case, because we
|
||||
are using bitbake without OpenEmbedded.
|
||||
You should usually just use :term:`LAYERSERIES_COMPAT` to specify the OE-Core versions for which your layer
|
||||
is compatible, and add the meta-openembedded layer to your project.
|
||||
:term:`LAYERDIR`, :term:`BBFILE_COLLECTIONS` or :term:`BBFILE_PATTERN_mylayer <BBFILE_PATTERN>`
|
||||
to go to the definitions in the glossary.
|
||||
|
||||
You need to create the recipe file next. Inside your layer at the
|
||||
top-level, use an editor and create a recipe file named
|
||||
@@ -380,14 +389,12 @@ Following is the complete "Hello World" example.
|
||||
target::
|
||||
|
||||
$ bitbake printhello
|
||||
Loading cache: 100% |
|
||||
Loaded 0 entries from dependency cache.
|
||||
Parsing recipes: 100% |##################################################################################|
|
||||
Time: 00:00:00
|
||||
Parsing of 1 .bb files complete (0 cached, 1 parsed). 1 targets, 0 skipped, 0 masked, 0 errors.
|
||||
NOTE: Resolving any missing task queue dependencies
|
||||
Initialising tasks: 100% |###############################################################################|
|
||||
NOTE: No setscene tasks
|
||||
NOTE: Executing Tasks
|
||||
NOTE: Preparing RunQueue
|
||||
NOTE: Executing RunQueue Tasks
|
||||
********************
|
||||
* *
|
||||
* Hello, World! *
|
||||
|
||||
@@ -319,10 +319,6 @@ The variable ``D`` becomes "dvaladditional data".
|
||||
|
||||
You must control all spacing when you use the override syntax.
|
||||
|
||||
.. note::
|
||||
|
||||
The overrides are applied in this order, ":append", ":prepend", ":remove".
|
||||
|
||||
It is also possible to append and prepend to shell functions and
|
||||
BitBake-style Python functions. See the ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:shell functions`" and ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:bitbake-style python functions`"
|
||||
sections for examples.
|
||||
@@ -356,28 +352,6 @@ The variable ``FOO`` becomes
|
||||
Like ":append" and ":prepend", ":remove" is applied at variable
|
||||
expansion time.
|
||||
|
||||
.. note::
|
||||
|
||||
The overrides are applied in this order, ":append", ":prepend", ":remove".
|
||||
This implies it is not possible to re-append previously removed strings.
|
||||
However, one can undo a ":remove" by using an intermediate variable whose
|
||||
content is passed to the ":remove" so that modifying the intermediate
|
||||
variable equals to keeping the string in::
|
||||
|
||||
FOOREMOVE = "123 456 789"
|
||||
FOO:remove = "${FOOREMOVE}"
|
||||
...
|
||||
FOOREMOVE = "123 789"
|
||||
|
||||
This expands to ``FOO:remove = "123 789"``.
|
||||
|
||||
.. note::
|
||||
|
||||
Override application order may not match variable parse history, i.e.
|
||||
the output of ``bitbake -e`` may contain ":remove" before ":append",
|
||||
but the result will be removed string, because ":remove" is handled
|
||||
last.
|
||||
|
||||
Override Style Operation Advantages
|
||||
-----------------------------------
|
||||
|
||||
@@ -1496,35 +1470,12 @@ functionality of the task:
|
||||
directory listed is used as the current working directory for the
|
||||
task.
|
||||
|
||||
- ``[file-checksums]``: Controls the file dependencies for a task. The
|
||||
baseline file list is the set of files associated with
|
||||
:term:`SRC_URI`. May be used to set additional dependencies on
|
||||
files not associated with :term:`SRC_URI`.
|
||||
|
||||
The value set to the list is a file-boolean pair where the first
|
||||
value is the file name and the second is whether or not it
|
||||
physically exists on the filesystem. ::
|
||||
|
||||
do_configure[file-checksums] += "${MY_DIRPATH}/my-file.txt:True"
|
||||
|
||||
It is important to record any paths which the task looked at and
|
||||
which didn't exist. This means that if these do exist at a later
|
||||
time, the task can be rerun with the new additional files. The
|
||||
"exists" True or False value after the path allows this to be
|
||||
handled.
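For example, a path the task probes but which may not exist yet can be recorded with a "False" flag (the file name here is purely illustrative)::

   do_configure[file-checksums] += "${MY_DIRPATH}/optional-overrides.txt:False"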
|
||||
- ``[lockfiles]``: Specifies one or more lockfiles to lock while the
|
||||
task executes. Only one task may hold a lockfile, and any task that
|
||||
attempts to lock an already locked file will block until the lock is
|
||||
released. You can use this variable flag to accomplish mutual
|
||||
exclusion.
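A minimal sketch (the task name and lock file path are placeholders) of serialising tasks that touch a shared directory could be::

   do_deploy_extra[lockfiles] += "${TMPDIR}/shared-deploy.lock"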
|
||||
- ``[network]``: When set to "1", allows a task to access the network. By
|
||||
default, only the ``do_fetch`` task is granted network access. Recipes
|
||||
shouldn't access the network outside of ``do_fetch`` as it usually
|
||||
undermines fetcher source mirroring, image and licence manifests, software
|
||||
auditing and supply chain security.
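A minimal sketch of granting access to a hypothetical task that genuinely needs it::

   do_run_online_tests[network] = "1"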
|
||||
- ``[noexec]``: When set to "1", marks the task as being empty, with
|
||||
no execution required. You can use the ``[noexec]`` flag to set up
|
||||
tasks as dependency placeholders, or to disable tasks defined
|
||||
@@ -1978,31 +1929,13 @@ looking at the source code of the ``bb`` module, which is in
|
||||
the commonly used functions ``bb.utils.contains()`` and
|
||||
``bb.utils.mkdirhier()``, which come with docstrings.
|
||||
|
||||
Extending Python Library Code
|
||||
-----------------------------
|
||||
|
||||
If you wish to add your own Python library code (e.g. to provide
|
||||
functions/classes you can use from Python functions in the metadata)
|
||||
you can do so from any layer using the ``addpylib`` directive.
|
||||
This directive is typically added to your layer configuration (
|
||||
``conf/layer.conf``) although it will be handled in any ``.conf`` file.
|
||||
|
||||
Usage is of the form::
|
||||
|
||||
addpylib <directory> <namespace>
|
||||
|
||||
Where <directory> specifies the directory to add to the library path.
|
||||
The specified <namespace> is imported automatically, and if the imported
|
||||
module specifies an attribute named ``BBIMPORTS``, that list of
|
||||
sub-modules is iterated and imported too.
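As a sketch (the directory and namespace are invented for illustration), a layer's ``conf/layer.conf`` could contain::

   addpylib ${LAYERDIR}/lib mylayerlib

BitBake would then add the layer's ``lib/`` directory to the library path and import ``mylayerlib``, following its ``BBIMPORTS`` list if the module defines one.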
|
||||
Testing and Debugging BitBake Python code
|
||||
-----------------------------------------
|
||||
|
||||
The OpenEmbedded build system implements a convenient ``pydevshell`` target which
|
||||
you can use to access the BitBake datastore and experiment with your own Python
|
||||
code. See :yocto_docs:`Using a Python Development Shell
|
||||
</dev-manual/python-development-shell.html#using-a-python-development-shell>` in the Yocto
|
||||
</dev-manual/common-tasks.html#using-a-python-development-shell>` in the Yocto
|
||||
Project manual for details.
|
||||
|
||||
Task Checksums and Setscene
|
||||
|
||||
@@ -1,91 +0,0 @@
|
||||
.. SPDX-License-Identifier: CC-BY-2.5
|
||||
|
||||
================
|
||||
Variable Context
|
||||
================
|
||||
|
||||
|
|
||||
|
||||
Variables might only have an impact or can be used in certain contexts. Some
|
||||
should only be used in global files like ``.conf``, while others are intended only
|
||||
for local files like ``.bb``. This chapter aims to describe some important variable
|
||||
contexts.
|
||||
|
||||
.. _ref-varcontext-configuration:
|
||||
|
||||
BitBake's own configuration
|
||||
===========================
|
||||
|
||||
Variables starting with ``BB_`` usually configure the behaviour of BitBake itself.
|
||||
For example, one could configure:
|
||||
|
||||
- System resources, like disk space to be used (:term:`BB_DISKMON_DIRS`),
|
||||
or the number of tasks to be run in parallel by BitBake (:term:`BB_NUMBER_THREADS`).
|
||||
|
||||
- How the fetchers shall behave, e.g., :term:`BB_FETCH_PREMIRRORONLY` is used
|
||||
by BitBake to determine if BitBake's fetcher shall search only
|
||||
:term:`PREMIRRORS` for files.
|
||||
|
||||
Those variables are usually configured globally.
|
||||
|
||||
BitBake configuration
|
||||
=====================
|
||||
|
||||
There are variables:
|
||||
|
||||
- Like :term:`B` or :term:`T`, that are used to specify directories used by
|
||||
BitBake during the build of a particular recipe. Those variables are
|
||||
specified in ``bitbake.conf``. Some, like :term:`B`, are quite often
|
||||
overwritten in recipes.
|
||||
|
||||
- Starting with ``FAKEROOT``, to configure how the ``fakeroot`` command is
|
||||
handled. Those are usually set by ``bitbake.conf`` and might get adapted in a
|
||||
``bbclass``.
|
||||
|
||||
- Detailing where BitBake will store and fetch information from, for
|
||||
data reuse between build runs like :term:`CACHE`, :term:`DL_DIR` or
|
||||
:term:`PERSISTENT_DIR`. Those are usually global.
|
||||
|
||||
|
||||
Layers and files
|
||||
================
|
||||
|
||||
Variables starting with ``LAYER`` configure how BitBake handles layers.
|
||||
Additionally, variables starting with ``BB`` configure how layers and files are
|
||||
handled. For example:
|
||||
|
||||
- :term:`LAYERDEPENDS` is used to configure on which layers a given layer
|
||||
depends.
|
||||
|
||||
- The configured layers are contained in :term:`BBLAYERS` and files in
|
||||
:term:`BBFILES`.
|
||||
|
||||
Those variables are often used in the files ``layer.conf`` and ``bblayers.conf``.
|
||||
|
||||
Recipes and packages
|
||||
====================
|
||||
|
||||
Variables handling recipes and packages can be split into:
|
||||
|
||||
- :term:`PN`, :term:`PV` or :term:`PF` for example, contain information about
|
||||
the name or revision of a recipe or package. Usually, the default set in
|
||||
``bitbake.conf`` is used, but those are from time to time overwritten in
|
||||
recipes.
|
||||
|
||||
- :term:`SUMMARY`, :term:`DESCRIPTION`, :term:`LICENSE` or :term:`HOMEPAGE`
|
||||
contain the expected information and should be set specifically for every
|
||||
recipe.
|
||||
|
||||
- In recipes, variables are also used to control build and runtime
|
||||
dependencies between recipes/packages with other recipes/packages. The
|
||||
most common should be: :term:`PROVIDES`, :term:`RPROVIDES`, :term:`DEPENDS`,
|
||||
and :term:`RDEPENDS`.
|
||||
|
||||
- There are further variables starting with ``SRC`` that specify the sources in
|
||||
a recipe like :term:`SRC_URI` or :term:`SRCDATE`. Those are also usually set
|
||||
in recipes.
|
||||
|
||||
- Which version or provider of a recipe should be given preference when
|
||||
multiple recipes would provide the same item, is controlled by variables
|
||||
starting with ``PREFERRED_``. Those are normally set in the configuration
|
||||
files of a ``MACHINE`` or ``DISTRO``.
|
||||
@@ -40,7 +40,8 @@ overview of their function and contents.
|
||||
Azure Storage Shared Access Signature, when using the
|
||||
:ref:`Azure Storage fetcher <bitbake-user-manual/bitbake-user-manual-fetching:fetchers>`
|
||||
This variable can be defined to be used by the fetcher to authenticate
|
||||
and gain access to non-public artifacts::
|
||||
and gain access to non-public artifacts.
|
||||
::
|
||||
|
||||
AZ_SAS = ""se=2021-01-01&sp=r&sv=2018-11-09&sr=c&skoid=<skoid>&sig=<signature>""
|
||||
|
||||
@@ -99,26 +100,10 @@ overview of their function and contents.
|
||||
the path of the build. BitBake's output should not (and usually does
|
||||
not) depend on the directory in which it was built.
|
||||
|
||||
:term:`BB_CACHEDIR`
|
||||
Specifies the code parser cache directory (distinct from :term:`CACHE`
|
||||
and :term:`PERSISTENT_DIR` although they can be set to the same value
|
||||
if desired). The default value is "${TOPDIR}/cache".
|
||||
|
||||
:term:`BB_CHECK_SSL_CERTS`
|
||||
Specifies if SSL certificates should be checked when fetching. The default
|
||||
value is ``1`` and certificates are not checked if the value is set to ``0``.
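For example, checking can be disabled (generally only advisable for trusted internal mirrors)::

   BB_CHECK_SSL_CERTS = "0"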
|
||||
:term:`BB_HASH_CODEPARSER_VALS`
|
||||
Specifies values for variables to use when populating the codeparser cache.
|
||||
This can be used selectively to set dummy values for variables to avoid
|
||||
the codeparser cache growing on every parse. Variables that would typically
|
||||
be included are those where the value is not significant for where the
|
||||
codeparser cache is used (i.e. when calculating variable dependencies for
|
||||
code fragments.) The value is space-separated without quoting values, for
|
||||
example::
|
||||
|
||||
BB_HASH_CODEPARSER_VALS = "T=/ WORKDIR=/ DATE=1234 TIME=1234"
|
||||
|
||||
:term:`BB_CONSOLELOG`
|
||||
Specifies the path to a log file into which BitBake's user interface
|
||||
writes output during the build.
|
||||
@@ -359,14 +344,6 @@ overview of their function and contents.
|
||||
|
||||
For example usage, see :term:`BB_GIT_SHALLOW`.
|
||||
|
||||
:term:`BB_GLOBAL_PYMODULES`
|
||||
Specifies the list of Python modules to place in the global namespace.
|
||||
It is intended that only the core layer should set this and it is meant
|
||||
to be a very small list, typically just ``os`` and ``sys``.
|
||||
:term:`BB_GLOBAL_PYMODULES` is expected to be set before the first
|
||||
``addpylib`` directive.
|
||||
See also ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:extending python library code`".
|
||||
|
||||
:term:`BB_HASHCHECK_FUNCTION`
|
||||
Specifies the name of the function to call during the "setscene" part
|
||||
of the task's execution in order to validate the list of task hashes.
|
||||
@@ -432,15 +409,6 @@ overview of their function and contents.
|
||||
``ConfigParsed`` event can set the variable to trigger the re-parse.
|
||||
You must be careful to avoid recursive loops with this functionality.
|
||||
|
||||
:term:`BB_LOADFACTOR_MAX`
|
||||
Setting this to a value will cause BitBake to check the system load
|
||||
average before executing new tasks. If the load average is above the
|
||||
number of CPUs multiplied by this factor, no new task will be started
|
||||
unless there is no task executing. A value of "1.5" has been found to
|
||||
work reasonably. This is helpful for systems which don't have pressure
|
||||
regulation enabled, which is more granular. Pressure values take
|
||||
precedence over loadfactor.
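For example, using the value reported above to work reasonably::

   BB_LOADFACTOR_MAX = "1.5"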
|
||||
:term:`BB_LOGCONFIG`
|
||||
Specifies the name of a config file that contains the user logging
|
||||
configuration. See
|
||||
@@ -572,7 +540,7 @@ overview of their function and contents.
|
||||
:term:`BB_RUNFMT` variable is undefined and the run filenames get
|
||||
created using the following form::
|
||||
|
||||
run.{func}.{pid}
|
||||
run.{task}.{pid}
|
||||
|
||||
If you want to force run files to take a specific name, you can set this
|
||||
variable in a configuration file.
|
||||
@@ -929,9 +897,9 @@ overview of their function and contents.
|
||||
section.
|
||||
|
||||
:term:`BBPATH`
|
||||
A colon-separated list used by BitBake to locate class (``.bbclass``)
|
||||
and configuration (``.conf``) files. This variable is analogous to the
|
||||
``PATH`` variable.
|
||||
Used by BitBake to locate class (``.bbclass``) and configuration
|
||||
(``.conf``) files. This variable is analogous to the ``PATH``
|
||||
variable.
|
||||
|
||||
If you run BitBake from a directory outside of the build directory,
|
||||
you must be sure to set :term:`BBPATH` to point to the build directory.
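A minimal sketch of doing that from the shell before invoking BitBake (the directory is a placeholder)::

   $ export BBPATH="/path/to/your/build_directory"
   $ bitbake <target>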
|
||||
@@ -1023,7 +991,7 @@ overview of their function and contents.
|
||||
``bblayers.conf`` configuration file.
|
||||
|
||||
To exclude a recipe from a world build using this variable, set the
|
||||
variable to "1" in the recipe. Set it to "0" to add it back to world build.
|
||||
variable to "1" in the recipe.
|
||||
|
||||
.. note::
|
||||
|
||||
@@ -1081,11 +1049,6 @@ overview of their function and contents.
|
||||
environment variable. The value is a colon-separated list of
|
||||
directories that are searched left-to-right in order.
|
||||
|
||||
:term:`FILE_LAYERNAME`
|
||||
During parsing and task execution, this is set to the name of the
|
||||
layer containing the recipe file. Code can use this to identify which
|
||||
layer a recipe is from.
|
||||
|
||||
:term:`GITDIR`
|
||||
The directory in which a local copy of a Git repository is stored
|
||||
when it is cloned.
|
||||
@@ -1134,29 +1097,6 @@ overview of their function and contents.
|
||||
variable is not available outside of ``layer.conf`` and references
|
||||
are expanded immediately when parsing of the file completes.
|
||||
|
||||
:term:`LAYERSERIES_COMPAT`
|
||||
Lists the versions of the OpenEmbedded-Core (OE-Core) for which
|
||||
a layer is compatible. Using the :term:`LAYERSERIES_COMPAT` variable
|
||||
allows the layer maintainer to indicate which combinations of the
|
||||
layer and OE-Core can be expected to work. The variable gives the
|
||||
system a way to detect when a layer has not been tested with new
|
||||
releases of OE-Core (e.g. the layer is not maintained).
|
||||
|
||||
To specify the OE-Core versions for which a layer is compatible, use
|
||||
this variable in your layer's ``conf/layer.conf`` configuration file.
|
||||
For the list, use the Yocto Project release name (e.g. "kirkstone",
|
||||
"mickledore"). To specify multiple OE-Core versions for the layer, use
|
||||
a space-separated list::
|
||||
|
||||
LAYERSERIES_COMPAT_layer_root_name = "kirkstone mickledore"
|
||||
|
||||
.. note::
|
||||
|
||||
Setting :term:`LAYERSERIES_COMPAT` is required by the Yocto Project
|
||||
Compatible version 2 standard.
|
||||
The OpenEmbedded build system produces a warning if the variable
|
||||
is not set for any given layer.
|
||||
|
||||
:term:`LAYERVERSION`
|
||||
Optionally specifies the version of a layer as a single number. You
|
||||
can use this variable within
|
||||
@@ -1179,8 +1119,8 @@ overview of their function and contents.
|
||||
order.
|
||||
|
||||
:term:`OVERRIDES`
|
||||
A colon-separated list that BitBake uses to control what variables are
|
||||
overridden after BitBake parses recipes and configuration files.
|
||||
BitBake uses :term:`OVERRIDES` to control what variables are overridden
|
||||
after BitBake parses recipes and configuration files.
|
||||
|
||||
Following is a simple example that uses an overrides list based on
|
||||
machine architectures: OVERRIDES = "arm:x86:mips:powerpc" You can
|
||||
|
||||
@@ -13,7 +13,6 @@ BitBake User Manual
|
||||
bitbake-user-manual/bitbake-user-manual-intro
|
||||
bitbake-user-manual/bitbake-user-manual-execution
|
||||
bitbake-user-manual/bitbake-user-manual-metadata
|
||||
bitbake-user-manual/bitbake-user-manual-ref-variables-context
|
||||
bitbake-user-manual/bitbake-user-manual-fetching
|
||||
bitbake-user-manual/bitbake-user-manual-ref-variables
|
||||
bitbake-user-manual/bitbake-user-manual-hello
|
||||
|
||||
@@ -1,63 +1,61 @@
|
||||
.. SPDX-License-Identifier: CC-BY-2.5
|
||||
|
||||
=================================
|
||||
BitBake Supported Release Manuals
|
||||
=================================
|
||||
|
||||
*******************************
|
||||
Release Series 4.2 (mickledore)
|
||||
*******************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.4 User Manual </bitbake/2.4/>`
|
||||
|
||||
******************************
|
||||
Release Series 4.0 (kirkstone)
|
||||
******************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.0 User Manual </bitbake/2.0/>`
|
||||
|
||||
****************************
|
||||
Release Series 3.1 (dunfell)
|
||||
****************************
|
||||
|
||||
- :yocto_docs:`BitBake 1.46 User Manual </bitbake/1.46/>`
|
||||
|
||||
================================
|
||||
BitBake Outdated Release Manuals
|
||||
================================
|
||||
|
||||
*****************************
|
||||
Release Series 4.1 (langdale)
|
||||
*****************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.2 User Manual </bitbake/2.2/>`
|
||||
===========================
|
||||
Supported Release Manuals
|
||||
===========================
|
||||
|
||||
******************************
|
||||
Release Series 3.4 (honister)
|
||||
******************************
|
||||
|
||||
- :yocto_docs:`BitBake 1.52 User Manual </bitbake/1.52/>`
|
||||
- :yocto_docs:`3.4 BitBake User Manual </3.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.4.1 BitBake User Manual </3.4.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.4.2 BitBake User Manual </3.4.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
******************************
|
||||
Release Series 3.3 (hardknott)
|
||||
******************************
|
||||
|
||||
- :yocto_docs:`BitBake 1.50 User Manual </bitbake/1.50/>`
|
||||
- :yocto_docs:`3.3 BitBake User Manual </3.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.3.1 BitBake User Manual </3.3.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.3.2 BitBake User Manual </3.3.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.3.3 BitBake User Manual </3.3.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.3.4 BitBake User Manual </3.3.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.3.5 BitBake User Manual </3.3.5/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
*******************************
|
||||
Release Series 3.2 (gatesgarth)
|
||||
*******************************
|
||||
|
||||
- :yocto_docs:`BitBake 1.48 User Manual </bitbake/1.48/>`
|
||||
|
||||
*******************************************
|
||||
Release Series 3.1 (dunfell first versions)
|
||||
*******************************************
|
||||
****************************
|
||||
Release Series 3.1 (dunfell)
|
||||
****************************
|
||||
|
||||
- :yocto_docs:`3.1 BitBake User Manual </3.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.1 BitBake User Manual </3.1.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.2 BitBake User Manual </3.1.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.3 BitBake User Manual </3.1.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.4 BitBake User Manual </3.1.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.5 BitBake User Manual </3.1.5/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.6 BitBake User Manual </3.1.6/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.7 BitBake User Manual </3.1.7/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.8 BitBake User Manual </3.1.8/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.9 BitBake User Manual </3.1.9/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.10 BitBake User Manual </3.1.10/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.11 BitBake User Manual </3.1.11/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.12 BitBake User Manual </3.1.12/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.13 BitBake User Manual </3.1.13/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.14 BitBake User Manual </3.1.14/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
==========================
|
||||
Outdated Release Manuals
|
||||
==========================
|
||||
|
||||
*******************************
|
||||
Release Series 3.2 (gatesgarth)
|
||||
*******************************
|
||||
|
||||
- :yocto_docs:`3.2 BitBake User Manual </3.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.2.1 BitBake User Manual </3.2.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.2.2 BitBake User Manual </3.2.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.2.3 BitBake User Manual </3.2.3/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.2.4 BitBake User Manual </3.2.4/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
*************************
|
||||
Release Series 3.0 (zeus)
|
||||
|
||||
@@ -9,19 +9,12 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
__version__ = "2.8.0"
|
||||
__version__ = "2.2.0"
|
||||
|
||||
import sys
|
||||
if sys.version_info < (3, 8, 0):
|
||||
raise RuntimeError("Sorry, python 3.8.0 or later is required for this version of bitbake")
|
||||
if sys.version_info < (3, 6, 0):
|
||||
raise RuntimeError("Sorry, python 3.6.0 or later is required for this version of bitbake")
|
||||
|
||||
if sys.version_info < (3, 10, 0):
|
||||
# With python 3.8 and 3.9, we see errors of "libgcc_s.so.1 must be installed for pthread_cancel to work"
|
||||
# https://stackoverflow.com/questions/64797838/libgcc-s-so-1-must-be-installed-for-pthread-cancel-to-work
|
||||
# https://bugs.ams1.psf.io/issue42888
|
||||
# so ensure libgcc_s is loaded early on
|
||||
import ctypes
|
||||
libgcc_s = ctypes.CDLL('libgcc_s.so.1')
|
||||
|
||||
class BBHandledException(Exception):
|
||||
"""
|
||||
@@ -36,7 +29,6 @@ class BBHandledException(Exception):
|
||||
|
||||
import os
|
||||
import logging
|
||||
from collections import namedtuple
|
||||
|
||||
|
||||
class NullHandler(logging.Handler):
|
||||
@@ -228,14 +220,3 @@ def deprecate_import(current, modulename, fromlist, renames = None):
|
||||
|
||||
setattr(sys.modules[current], newname, newobj)
|
||||
|
||||
TaskData = namedtuple("TaskData", [
|
||||
"pn",
|
||||
"taskname",
|
||||
"fn",
|
||||
"deps",
|
||||
"provides",
|
||||
"taskhash",
|
||||
"unihash",
|
||||
"hashfn",
|
||||
"taskhash_deps",
|
||||
])
|
||||
|
||||
@@ -1,215 +0,0 @@
|
||||
#! /usr/bin/env python3
|
||||
#
|
||||
# Copyright 2023 by Garmin Ltd. or its subsidiaries
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
|
||||
import sys
|
||||
import ctypes
|
||||
import os
|
||||
import errno
|
||||
import pwd
|
||||
import grp
|
||||
|
||||
libacl = ctypes.CDLL("libacl.so.1", use_errno=True)
|
||||
|
||||
|
||||
ACL_TYPE_ACCESS = 0x8000
|
||||
ACL_TYPE_DEFAULT = 0x4000
|
||||
|
||||
ACL_FIRST_ENTRY = 0
|
||||
ACL_NEXT_ENTRY = 1
|
||||
|
||||
ACL_UNDEFINED_TAG = 0x00
|
||||
ACL_USER_OBJ = 0x01
|
||||
ACL_USER = 0x02
|
||||
ACL_GROUP_OBJ = 0x04
|
||||
ACL_GROUP = 0x08
|
||||
ACL_MASK = 0x10
|
||||
ACL_OTHER = 0x20
|
||||
|
||||
ACL_READ = 0x04
|
||||
ACL_WRITE = 0x02
|
||||
ACL_EXECUTE = 0x01
|
||||
|
||||
acl_t = ctypes.c_void_p
|
||||
acl_entry_t = ctypes.c_void_p
|
||||
acl_permset_t = ctypes.c_void_p
|
||||
acl_perm_t = ctypes.c_uint
|
||||
|
||||
acl_tag_t = ctypes.c_int
|
||||
|
||||
libacl.acl_free.argtypes = [acl_t]
|
||||
|
||||
|
||||
def acl_free(acl):
|
||||
    libacl.acl_free(acl)


libacl.acl_get_file.restype = acl_t
libacl.acl_get_file.argtypes = [ctypes.c_char_p, ctypes.c_uint]


def acl_get_file(path, typ):
    acl = libacl.acl_get_file(os.fsencode(path), typ)
    if acl is None:
        err = ctypes.get_errno()
        raise OSError(err, os.strerror(err), str(path))

    return acl


libacl.acl_get_entry.argtypes = [acl_t, ctypes.c_int, ctypes.c_void_p]


def acl_get_entry(acl, entry_id):
    entry = acl_entry_t()
    ret = libacl.acl_get_entry(acl, entry_id, ctypes.byref(entry))
    if ret < 0:
        err = ctypes.get_errno()
        raise OSError(err, os.strerror(err))

    if ret == 0:
        return None

    return entry


libacl.acl_get_tag_type.argtypes = [acl_entry_t, ctypes.c_void_p]


def acl_get_tag_type(entry_d):
    tag = acl_tag_t()
    ret = libacl.acl_get_tag_type(entry_d, ctypes.byref(tag))
    if ret < 0:
        err = ctypes.get_errno()
        raise OSError(err, os.strerror(err))
    return tag.value


libacl.acl_get_qualifier.restype = ctypes.c_void_p
libacl.acl_get_qualifier.argtypes = [acl_entry_t]


def acl_get_qualifier(entry_d):
    ret = libacl.acl_get_qualifier(entry_d)
    if ret is None:
        err = ctypes.get_errno()
        raise OSError(err, os.strerror(err))
    return ctypes.c_void_p(ret)


libacl.acl_get_permset.argtypes = [acl_entry_t, ctypes.c_void_p]


def acl_get_permset(entry_d):
    permset = acl_permset_t()
    ret = libacl.acl_get_permset(entry_d, ctypes.byref(permset))
    if ret < 0:
        err = ctypes.get_errno()
        raise OSError(err, os.strerror(err))

    return permset


libacl.acl_get_perm.argtypes = [acl_permset_t, acl_perm_t]


def acl_get_perm(permset_d, perm):
    ret = libacl.acl_get_perm(permset_d, perm)
    if ret < 0:
        err = ctypes.get_errno()
        raise OSError(err, os.strerror(err))
    return bool(ret)


class Entry(object):
    def __init__(self, tag, qualifier, mode):
        self.tag = tag
        self.qualifier = qualifier
        self.mode = mode

    def __str__(self):
        typ = ""
        qual = ""
        if self.tag == ACL_USER:
            typ = "user"
            qual = pwd.getpwuid(self.qualifier).pw_name
        elif self.tag == ACL_GROUP:
            typ = "group"
            qual = grp.getgrgid(self.qualifier).gr_name
        elif self.tag == ACL_USER_OBJ:
            typ = "user"
        elif self.tag == ACL_GROUP_OBJ:
            typ = "group"
        elif self.tag == ACL_MASK:
            typ = "mask"
        elif self.tag == ACL_OTHER:
            typ = "other"

        r = "r" if self.mode & ACL_READ else "-"
        w = "w" if self.mode & ACL_WRITE else "-"
        x = "x" if self.mode & ACL_EXECUTE else "-"

        return f"{typ}:{qual}:{r}{w}{x}"


class ACL(object):
    def __init__(self, acl):
        self.acl = acl

    def __del__(self):
        acl_free(self.acl)

    def entries(self):
        entry_id = ACL_FIRST_ENTRY
        while True:
            entry = acl_get_entry(self.acl, entry_id)
            if entry is None:
                break

            permset = acl_get_permset(entry)

            mode = 0
            for m in (ACL_READ, ACL_WRITE, ACL_EXECUTE):
                if acl_get_perm(permset, m):
                    mode |= m

            qualifier = None
            tag = acl_get_tag_type(entry)

            if tag == ACL_USER or tag == ACL_GROUP:
                qual = acl_get_qualifier(entry)
                qualifier = ctypes.cast(qual, ctypes.POINTER(ctypes.c_int))[0]

            yield Entry(tag, qualifier, mode)

            entry_id = ACL_NEXT_ENTRY

    @classmethod
    def from_path(cls, path, typ):
        acl = acl_get_file(path, typ)
        return cls(acl)


def main():
    import argparse
    import pwd
    import grp
    from pathlib import Path

    parser = argparse.ArgumentParser()
    parser.add_argument("path", help="File Path", type=Path)

    args = parser.parse_args()

    acl = ACL.from_path(args.path, ACL_TYPE_ACCESS)
    for entry in acl.entries():
        print(str(entry))

    return 0


if __name__ == "__main__":
    sys.exit(main())
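# A minimal usage sketch of the ACL wrapper above (illustrative only, assuming
# the file is importable as bb.acl and that ACL_TYPE_ACCESS is defined near the
# top of the module). It prints the access entries of a path in the same
# "tag:qualifier:rwx" form that getfacl uses:
#
#     from bb.acl import ACL, ACL_TYPE_ACCESS
#
#     acl = ACL.from_path("/tmp", ACL_TYPE_ACCESS)
#     for entry in acl.entries():
#         print(entry)    # e.g. "user::rwx", "group::r-x", "other::rwx"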
@@ -4,13 +4,30 @@
# SPDX-License-Identifier: GPL-2.0-only
#

import itertools
import json

from .client import AsyncClient, Client, ClientPool
from .serv import AsyncServer, AsyncServerConnection
from .connection import DEFAULT_MAX_CHUNK
from .exceptions import (
    ClientError,
    ServerError,
    ConnectionClosedError,
    InvokeError,
)
# The Python async server defaults to a 64K receive buffer, so we hardcode our
# maximum chunk size. It would be better if the client and server reported to
# each other what the maximum chunk sizes were, but that will slow down the
# connection setup with a round trip delay so I'd rather not do that unless it
# is necessary
DEFAULT_MAX_CHUNK = 32 * 1024


def chunkify(msg, max_chunk):
    if len(msg) < max_chunk - 1:
        yield ''.join((msg, "\n"))
    else:
        yield ''.join((json.dumps({
            'chunk-stream': None
        }), "\n"))

        args = [iter(msg)] * (max_chunk - 1)
        for m in map(''.join, itertools.zip_longest(*args, fillvalue='')):
            yield ''.join(itertools.chain(m, "\n"))
        yield "\n"


from .client import AsyncClient, Client
from .serv import AsyncServer, AsyncServerConnection, ClientError, ServerError

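# A small illustration of the framing chunkify() above produces (values are
# illustrative). Short messages are sent as one newline-terminated line; long
# messages are announced with a {"chunk-stream": null} line, then fixed-size
# slices of the JSON text, then an empty line to end the stream:
#
#     list(chunkify('{"ping": {}}', 32 * 1024))
#     # -> ['{"ping": {}}\n']
#
#     big = json.dumps({"data": "x" * 100000})
#     frames = list(chunkify(big, 32 * 1024))
#     # frames[0]   == '{"chunk-stream": null}\n'
#     # frames[1:-1] are slices of at most 32767 characters, each "\n"-terminated
#     # frames[-1]  == '\n'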
@@ -10,59 +10,22 @@ import json
import os
import socket
import sys
import re
import contextlib
from threading import Thread
from .connection import StreamConnection, WebsocketConnection, DEFAULT_MAX_CHUNK
from .exceptions import ConnectionClosedError, InvokeError
from . import chunkify, DEFAULT_MAX_CHUNK

UNIX_PREFIX = "unix://"
WS_PREFIX = "ws://"
WSS_PREFIX = "wss://"

ADDR_TYPE_UNIX = 0
ADDR_TYPE_TCP = 1
ADDR_TYPE_WS = 2

def parse_address(addr):
    if addr.startswith(UNIX_PREFIX):
        return (ADDR_TYPE_UNIX, (addr[len(UNIX_PREFIX) :],))
    elif addr.startswith(WS_PREFIX) or addr.startswith(WSS_PREFIX):
        return (ADDR_TYPE_WS, (addr,))
    else:
        m = re.match(r"\[(?P<host>[^\]]*)\]:(?P<port>\d+)$", addr)
        if m is not None:
            host = m.group("host")
            port = m.group("port")
        else:
            host, port = addr.split(":")

        return (ADDR_TYPE_TCP, (host, int(port)))
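# The address forms parse_address() above accepts, with the tuples it returns
# (hosts, ports and socket paths are illustrative):
#
#     parse_address("unix:///run/bitbake.sock")  # (ADDR_TYPE_UNIX, ("/run/bitbake.sock",))
#     parse_address("ws://server:8686")          # (ADDR_TYPE_WS, ("ws://server:8686",))
#     parse_address("wss://server:8686")         # (ADDR_TYPE_WS, ("wss://server:8686",))
#     parse_address("[::1]:8686")                # (ADDR_TYPE_TCP, ("::1", 8686))
#     parse_address("127.0.0.1:8686")            # (ADDR_TYPE_TCP, ("127.0.0.1", 8686))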
|
||||
|
||||
class AsyncClient(object):
|
||||
def __init__(
|
||||
self,
|
||||
proto_name,
|
||||
proto_version,
|
||||
logger,
|
||||
timeout=30,
|
||||
server_headers=False,
|
||||
headers={},
|
||||
):
|
||||
self.socket = None
|
||||
def __init__(self, proto_name, proto_version, logger, timeout=30):
|
||||
self.reader = None
|
||||
self.writer = None
|
||||
self.max_chunk = DEFAULT_MAX_CHUNK
|
||||
self.proto_name = proto_name
|
||||
self.proto_version = proto_version
|
||||
self.logger = logger
|
||||
self.timeout = timeout
|
||||
self.needs_server_headers = server_headers
|
||||
self.server_headers = {}
|
||||
self.headers = headers
|
||||
|
||||
async def connect_tcp(self, address, port):
|
||||
async def connect_sock():
|
||||
reader, writer = await asyncio.open_connection(address, port)
|
||||
return StreamConnection(reader, writer, self.timeout, self.max_chunk)
|
||||
return await asyncio.open_connection(address, port)
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
@@ -77,59 +40,27 @@ class AsyncClient(object):
|
||||
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
|
||||
sock.connect(os.path.basename(path))
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
reader, writer = await asyncio.open_unix_connection(sock=sock)
|
||||
return StreamConnection(reader, writer, self.timeout, self.max_chunk)
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
async def connect_websocket(self, uri):
|
||||
import websockets
|
||||
|
||||
async def connect_sock():
|
||||
websocket = await websockets.connect(uri, ping_interval=None)
|
||||
return WebsocketConnection(websocket, self.timeout)
|
||||
os.chdir(cwd)
|
||||
return await asyncio.open_unix_connection(sock=sock)
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
async def setup_connection(self):
|
||||
# Send headers
|
||||
await self.socket.send("%s %s" % (self.proto_name, self.proto_version))
|
||||
await self.socket.send(
|
||||
"needs-headers: %s" % ("true" if self.needs_server_headers else "false")
|
||||
)
|
||||
for k, v in self.headers.items():
|
||||
await self.socket.send("%s: %s" % (k, v))
|
||||
|
||||
# End of headers
|
||||
await self.socket.send("")
|
||||
|
||||
self.server_headers = {}
|
||||
if self.needs_server_headers:
|
||||
while True:
|
||||
line = await self.socket.recv()
|
||||
if not line:
|
||||
# End headers
|
||||
break
|
||||
tag, value = line.split(":", 1)
|
||||
self.server_headers[tag.lower()] = value.strip()
|
||||
|
||||
async def get_header(self, tag, default):
|
||||
await self.connect()
|
||||
return self.server_headers.get(tag, default)
|
||||
s = '%s %s\n\n' % (self.proto_name, self.proto_version)
|
||||
self.writer.write(s.encode("utf-8"))
|
||||
await self.writer.drain()
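# What the header exchange in setup_connection() above looks like on the wire,
# using a made-up protocol name/version and client header; every line is
# newline-terminated and an empty line closes each header block:
#
#     client -> server:
#         example-proto 1.0
#         needs-headers: true
#         some-header: some-value
#         (empty line)
#
#     server -> client (only sent when needs-headers was true):
#         some-header: some-value
#         (empty line)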
|
||||
|
||||
async def connect(self):
|
||||
if self.socket is None:
|
||||
self.socket = await self._connect_sock()
|
||||
if self.reader is None or self.writer is None:
|
||||
(self.reader, self.writer) = await self._connect_sock()
|
||||
await self.setup_connection()
|
||||
|
||||
async def disconnect(self):
|
||||
if self.socket is not None:
|
||||
await self.socket.close()
|
||||
self.socket = None
|
||||
|
||||
async def close(self):
|
||||
await self.disconnect()
|
||||
self.reader = None
|
||||
|
||||
if self.writer is not None:
|
||||
self.writer.close()
|
||||
self.writer = None
|
||||
|
||||
async def _send_wrapper(self, proc):
|
||||
count = 0
|
||||
@@ -140,7 +71,6 @@ class AsyncClient(object):
|
||||
except (
|
||||
OSError,
|
||||
ConnectionError,
|
||||
ConnectionClosedError,
|
||||
json.JSONDecodeError,
|
||||
UnicodeDecodeError,
|
||||
) as e:
|
||||
@@ -152,27 +82,49 @@ class AsyncClient(object):
|
||||
await self.close()
|
||||
count += 1
|
||||
|
||||
def check_invoke_error(self, msg):
|
||||
if isinstance(msg, dict) and "invoke-error" in msg:
|
||||
raise InvokeError(msg["invoke-error"]["message"])
|
||||
async def send_message(self, msg):
|
||||
async def get_line():
|
||||
try:
|
||||
line = await asyncio.wait_for(self.reader.readline(), self.timeout)
|
||||
except asyncio.TimeoutError:
|
||||
raise ConnectionError("Timed out waiting for server")
|
||||
|
||||
if not line:
|
||||
raise ConnectionError("Connection closed")
|
||||
|
||||
line = line.decode("utf-8")
|
||||
|
||||
if not line.endswith("\n"):
|
||||
raise ConnectionError("Bad message %r" % (line))
|
||||
|
||||
return line
|
||||
|
||||
async def invoke(self, msg):
|
||||
async def proc():
|
||||
await self.socket.send_message(msg)
|
||||
return await self.socket.recv_message()
|
||||
for c in chunkify(json.dumps(msg), self.max_chunk):
|
||||
self.writer.write(c.encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
|
||||
result = await self._send_wrapper(proc)
|
||||
self.check_invoke_error(result)
|
||||
return result
|
||||
l = await get_line()
|
||||
|
||||
m = json.loads(l)
|
||||
if m and "chunk-stream" in m:
|
||||
lines = []
|
||||
while True:
|
||||
l = (await get_line()).rstrip("\n")
|
||||
if not l:
|
||||
break
|
||||
lines.append(l)
|
||||
|
||||
m = json.loads("".join(lines))
|
||||
|
||||
return m
|
||||
|
||||
return await self._send_wrapper(proc)
|
||||
|
||||
async def ping(self):
|
||||
return await self.invoke({"ping": {}})
|
||||
|
||||
async def __aenter__(self):
|
||||
return self
|
||||
|
||||
async def __aexit__(self, exc_type, exc_value, traceback):
|
||||
await self.close()
|
||||
return await self.send_message(
|
||||
{'ping': {}}
|
||||
)
|
||||
|
||||
|
||||
class Client(object):
|
||||
@@ -190,7 +142,7 @@ class Client(object):
|
||||
# required (but harmless) with it.
|
||||
asyncio.set_event_loop(self.loop)
|
||||
|
||||
self._add_methods("connect_tcp", "ping")
|
||||
self._add_methods('connect_tcp', 'ping')
|
||||
|
||||
@abc.abstractmethod
|
||||
def _get_async_client(self):
|
||||
@@ -219,95 +171,8 @@ class Client(object):
|
||||
def max_chunk(self, value):
|
||||
self.client.max_chunk = value
|
||||
|
||||
def disconnect(self):
|
||||
def close(self):
|
||||
self.loop.run_until_complete(self.client.close())
|
||||
|
||||
def close(self):
|
||||
if self.loop:
|
||||
self.loop.run_until_complete(self.client.close())
|
||||
if sys.version_info >= (3, 6):
|
||||
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
|
||||
self.loop.close()
|
||||
self.loop = None
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
self.close()
|
||||
return False
|
||||
|
||||
|
||||
class ClientPool(object):
|
||||
def __init__(self, max_clients):
|
||||
self.avail_clients = []
|
||||
self.num_clients = 0
|
||||
self.max_clients = max_clients
|
||||
self.loop = None
|
||||
self.client_condition = None
|
||||
|
||||
@abc.abstractmethod
|
||||
async def _new_client(self):
|
||||
raise NotImplementedError("Must be implemented in derived class")
|
||||
|
||||
def close(self):
|
||||
if self.client_condition:
|
||||
self.client_condition = None
|
||||
|
||||
if self.loop:
|
||||
self.loop.run_until_complete(self.__close_clients())
|
||||
if sys.version_info >= (3, 6):
|
||||
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
|
||||
self.loop.close()
|
||||
self.loop = None
|
||||
|
||||
def run_tasks(self, tasks):
|
||||
if not self.loop:
|
||||
self.loop = asyncio.new_event_loop()
|
||||
|
||||
thread = Thread(target=self.__thread_main, args=(tasks,))
|
||||
thread.start()
|
||||
thread.join()
|
||||
|
||||
@contextlib.asynccontextmanager
|
||||
async def get_client(self):
|
||||
async with self.client_condition:
|
||||
if self.avail_clients:
|
||||
client = self.avail_clients.pop()
|
||||
elif self.num_clients < self.max_clients:
|
||||
self.num_clients += 1
|
||||
client = await self._new_client()
|
||||
else:
|
||||
while not self.avail_clients:
|
||||
await self.client_condition.wait()
|
||||
client = self.avail_clients.pop()
|
||||
|
||||
try:
|
||||
yield client
|
||||
finally:
|
||||
async with self.client_condition:
|
||||
self.avail_clients.append(client)
|
||||
self.client_condition.notify()
|
||||
|
||||
def __thread_main(self, tasks):
|
||||
async def process_task(task):
|
||||
async with self.get_client() as client:
|
||||
await task(client)
|
||||
|
||||
asyncio.set_event_loop(self.loop)
|
||||
if not self.client_condition:
|
||||
self.client_condition = asyncio.Condition()
|
||||
tasks = [process_task(t) for t in tasks]
|
||||
self.loop.run_until_complete(asyncio.gather(*tasks))
|
||||
|
||||
async def __close_clients(self):
|
||||
for c in self.avail_clients:
|
||||
await c.close()
|
||||
self.avail_clients = []
|
||||
self.num_clients = 0
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
self.close()
|
||||
return False
|
||||
self.loop.close()
|
||||
|
||||
@@ -1,146 +0,0 @@
#
# Copyright BitBake Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#

import asyncio
import itertools
import json
from datetime import datetime
from .exceptions import ClientError, ConnectionClosedError


# The Python async server defaults to a 64K receive buffer, so we hardcode our
# maximum chunk size. It would be better if the client and server reported to
# each other what the maximum chunk sizes were, but that will slow down the
# connection setup with a round trip delay so I'd rather not do that unless it
# is necessary
DEFAULT_MAX_CHUNK = 32 * 1024


def chunkify(msg, max_chunk):
    if len(msg) < max_chunk - 1:
        yield "".join((msg, "\n"))
    else:
        yield "".join((json.dumps({"chunk-stream": None}), "\n"))

        args = [iter(msg)] * (max_chunk - 1)
        for m in map("".join, itertools.zip_longest(*args, fillvalue="")):
            yield "".join(itertools.chain(m, "\n"))
        yield "\n"


def json_serialize(obj):
    if isinstance(obj, datetime):
        return obj.isoformat()
    raise TypeError("Type %s not serializeable" % type(obj))


class StreamConnection(object):
    def __init__(self, reader, writer, timeout, max_chunk=DEFAULT_MAX_CHUNK):
        self.reader = reader
        self.writer = writer
        self.timeout = timeout
        self.max_chunk = max_chunk

    @property
    def address(self):
        return self.writer.get_extra_info("peername")

    async def send_message(self, msg):
        for c in chunkify(json.dumps(msg, default=json_serialize), self.max_chunk):
            self.writer.write(c.encode("utf-8"))
        await self.writer.drain()

    async def recv_message(self):
        l = await self.recv()

        m = json.loads(l)
        if not m:
            return m

        if "chunk-stream" in m:
            lines = []
            while True:
                l = await self.recv()
                if not l:
                    break
                lines.append(l)

            m = json.loads("".join(lines))

        return m

    async def send(self, msg):
        self.writer.write(("%s\n" % msg).encode("utf-8"))
        await self.writer.drain()

    async def recv(self):
        if self.timeout < 0:
            line = await self.reader.readline()
        else:
            try:
                line = await asyncio.wait_for(self.reader.readline(), self.timeout)
            except asyncio.TimeoutError:
                raise ConnectionError("Timed out waiting for data")

        if not line:
            raise ConnectionClosedError("Connection closed")

        line = line.decode("utf-8")

        if not line.endswith("\n"):
            raise ConnectionError("Bad message %r" % (line))

        return line.rstrip()

    async def close(self):
        self.reader = None
        if self.writer is not None:
            self.writer.close()
            self.writer = None


class WebsocketConnection(object):
    def __init__(self, socket, timeout):
        self.socket = socket
        self.timeout = timeout

    @property
    def address(self):
        return ":".join(str(s) for s in self.socket.remote_address)

    async def send_message(self, msg):
        await self.send(json.dumps(msg, default=json_serialize))

    async def recv_message(self):
        m = await self.recv()
        return json.loads(m)

    async def send(self, msg):
        import websockets.exceptions

        try:
            await self.socket.send(msg)
        except websockets.exceptions.ConnectionClosed:
            raise ConnectionClosedError("Connection closed")

    async def recv(self):
        import websockets.exceptions

        try:
            if self.timeout < 0:
                return await self.socket.recv()

            try:
                return await asyncio.wait_for(self.socket.recv(), self.timeout)
            except asyncio.TimeoutError:
                raise ConnectionError("Timed out waiting for data")
        except websockets.exceptions.ConnectionClosed:
            raise ConnectionClosedError("Connection closed")

    async def close(self):
        if self.socket is not None:
            await self.socket.close()
            self.socket = None
@@ -1,21 +0,0 @@
#
# Copyright BitBake Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#


class ClientError(Exception):
    pass


class InvokeError(Exception):
    pass


class ServerError(Exception):
    pass


class ConnectionClosedError(Exception):
    pass
@@ -12,333 +12,241 @@ import signal
|
||||
import socket
|
||||
import sys
|
||||
import multiprocessing
|
||||
import logging
|
||||
from .connection import StreamConnection, WebsocketConnection
|
||||
from .exceptions import ClientError, ServerError, ConnectionClosedError, InvokeError
|
||||
from . import chunkify, DEFAULT_MAX_CHUNK
|
||||
|
||||
|
||||
class ClientLoggerAdapter(logging.LoggerAdapter):
|
||||
def process(self, msg, kwargs):
|
||||
return f"[Client {self.extra['address']}] {msg}", kwargs
|
||||
class ClientError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ServerError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class AsyncServerConnection(object):
|
||||
# If a handler returns this object (e.g. `return self.NO_RESPONSE`), no
# return message will automatically be sent back to the client
NO_RESPONSE = object()
|
||||
|
||||
def __init__(self, socket, proto_name, logger):
|
||||
self.socket = socket
|
||||
def __init__(self, reader, writer, proto_name, logger):
|
||||
self.reader = reader
|
||||
self.writer = writer
|
||||
self.proto_name = proto_name
|
||||
self.max_chunk = DEFAULT_MAX_CHUNK
|
||||
self.handlers = {
|
||||
"ping": self.handle_ping,
|
||||
'chunk-stream': self.handle_chunk,
|
||||
'ping': self.handle_ping,
|
||||
}
|
||||
self.logger = ClientLoggerAdapter(
|
||||
logger,
|
||||
{
|
||||
"address": socket.address,
|
||||
},
|
||||
)
|
||||
self.client_headers = {}
|
||||
|
||||
async def close(self):
|
||||
await self.socket.close()
|
||||
|
||||
async def handle_headers(self, headers):
|
||||
return {}
|
||||
self.logger = logger
|
||||
|
||||
async def process_requests(self):
|
||||
try:
|
||||
self.logger.info("Client %r connected" % (self.socket.address,))
|
||||
self.addr = self.writer.get_extra_info('peername')
|
||||
self.logger.debug('Client %r connected' % (self.addr,))
|
||||
|
||||
# Read protocol and version
|
||||
client_protocol = await self.socket.recv()
|
||||
client_protocol = await self.reader.readline()
|
||||
if not client_protocol:
|
||||
return
|
||||
|
||||
(client_proto_name, client_proto_version) = client_protocol.split()
|
||||
(client_proto_name, client_proto_version) = client_protocol.decode('utf-8').rstrip().split()
|
||||
if client_proto_name != self.proto_name:
|
||||
self.logger.debug("Rejecting invalid protocol %s" % (self.proto_name))
|
||||
self.logger.debug('Rejecting invalid protocol %s' % (self.proto_name))
|
||||
return
|
||||
|
||||
self.proto_version = tuple(int(v) for v in client_proto_version.split("."))
|
||||
self.proto_version = tuple(int(v) for v in client_proto_version.split('.'))
|
||||
if not self.validate_proto_version():
|
||||
self.logger.debug(
|
||||
"Rejecting invalid protocol version %s" % (client_proto_version)
|
||||
)
|
||||
self.logger.debug('Rejecting invalid protocol version %s' % (client_proto_version))
|
||||
return
|
||||
|
||||
# Read headers
|
||||
self.client_headers = {}
|
||||
# Read headers. Currently, no headers are implemented, so look for
|
||||
# an empty line to signal the end of the headers
|
||||
while True:
|
||||
header = await self.socket.recv()
|
||||
if not header:
|
||||
# Empty line. End of headers
|
||||
break
|
||||
tag, value = header.split(":", 1)
|
||||
self.client_headers[tag.lower()] = value.strip()
|
||||
line = await self.reader.readline()
|
||||
if not line:
|
||||
return
|
||||
|
||||
if self.client_headers.get("needs-headers", "false") == "true":
|
||||
for k, v in (await self.handle_headers(self.client_headers)).items():
|
||||
await self.socket.send("%s: %s" % (k, v))
|
||||
await self.socket.send("")
|
||||
line = line.decode('utf-8').rstrip()
|
||||
if not line:
|
||||
break
|
||||
|
||||
# Handle messages
|
||||
while True:
|
||||
d = await self.socket.recv_message()
|
||||
d = await self.read_message()
|
||||
if d is None:
|
||||
break
|
||||
try:
|
||||
response = await self.dispatch_message(d)
|
||||
except InvokeError as e:
|
||||
await self.socket.send_message(
|
||||
{"invoke-error": {"message": str(e)}}
|
||||
)
|
||||
break
|
||||
|
||||
if response is not self.NO_RESPONSE:
|
||||
await self.socket.send_message(response)
|
||||
|
||||
except ConnectionClosedError as e:
|
||||
self.logger.info(str(e))
|
||||
except (ClientError, ConnectionError) as e:
|
||||
await self.dispatch_message(d)
|
||||
await self.writer.drain()
|
||||
except ClientError as e:
|
||||
self.logger.error(str(e))
|
||||
finally:
|
||||
await self.close()
|
||||
self.writer.close()
|
||||
|
||||
async def dispatch_message(self, msg):
|
||||
for k in self.handlers.keys():
|
||||
if k in msg:
|
||||
self.logger.debug("Handling %s" % k)
|
||||
return await self.handlers[k](msg[k])
|
||||
self.logger.debug('Handling %s' % k)
|
||||
await self.handlers[k](msg[k])
|
||||
return
|
||||
|
||||
raise ClientError("Unrecognized command %r" % msg)
|
||||
|
||||
async def handle_ping(self, request):
|
||||
return {"alive": True}
|
||||
def write_message(self, msg):
|
||||
for c in chunkify(json.dumps(msg), self.max_chunk):
|
||||
self.writer.write(c.encode('utf-8'))
|
||||
|
||||
async def read_message(self):
|
||||
l = await self.reader.readline()
|
||||
if not l:
|
||||
return None
|
||||
|
||||
class StreamServer(object):
|
||||
def __init__(self, handler, logger):
|
||||
self.handler = handler
|
||||
self.logger = logger
|
||||
self.closed = False
|
||||
|
||||
async def handle_stream_client(self, reader, writer):
|
||||
# writer.transport.set_write_buffer_limits(0)
|
||||
socket = StreamConnection(reader, writer, -1)
|
||||
if self.closed:
|
||||
await socket.close()
|
||||
return
|
||||
|
||||
await self.handler(socket)
|
||||
|
||||
async def stop(self):
|
||||
self.closed = True
|
||||
|
||||
|
||||
class TCPStreamServer(StreamServer):
|
||||
def __init__(self, host, port, handler, logger):
|
||||
super().__init__(handler, logger)
|
||||
self.host = host
|
||||
self.port = port
|
||||
|
||||
def start(self, loop):
|
||||
self.server = loop.run_until_complete(
|
||||
asyncio.start_server(self.handle_stream_client, self.host, self.port)
|
||||
)
|
||||
|
||||
for s in self.server.sockets:
|
||||
self.logger.debug("Listening on %r" % (s.getsockname(),))
|
||||
# Newer python does this automatically. Do it manually here for
|
||||
# maximum compatibility
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_QUICKACK, 1)
|
||||
|
||||
# Enable keep alives. This prevents broken client connections
|
||||
# from persisting on the server for long periods of time.
|
||||
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 30)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 4)
|
||||
|
||||
name = self.server.sockets[0].getsockname()
|
||||
if self.server.sockets[0].family == socket.AF_INET6:
|
||||
self.address = "[%s]:%d" % (name[0], name[1])
|
||||
else:
|
||||
self.address = "%s:%d" % (name[0], name[1])
|
||||
|
||||
return [self.server.wait_closed()]
|
||||
|
||||
async def stop(self):
|
||||
await super().stop()
|
||||
self.server.close()
|
||||
|
||||
def cleanup(self):
|
||||
pass
|
||||
|
||||
|
||||
class UnixStreamServer(StreamServer):
|
||||
def __init__(self, path, handler, logger):
|
||||
super().__init__(handler, logger)
|
||||
self.path = path
|
||||
|
||||
def start(self, loop):
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
# Work around path length limits in AF_UNIX
|
||||
os.chdir(os.path.dirname(self.path))
|
||||
self.server = loop.run_until_complete(
|
||||
asyncio.start_unix_server(
|
||||
self.handle_stream_client, os.path.basename(self.path)
|
||||
)
|
||||
)
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
message = l.decode('utf-8')
|
||||
|
||||
self.logger.debug("Listening on %r" % self.path)
|
||||
self.address = "unix://%s" % os.path.abspath(self.path)
|
||||
return [self.server.wait_closed()]
|
||||
if not message.endswith('\n'):
|
||||
return None
|
||||
|
||||
async def stop(self):
|
||||
await super().stop()
|
||||
self.server.close()
|
||||
return json.loads(message)
|
||||
except (json.JSONDecodeError, UnicodeDecodeError) as e:
|
||||
self.logger.error('Bad message from client: %r' % message)
|
||||
raise e
|
||||
|
||||
def cleanup(self):
|
||||
os.unlink(self.path)
|
||||
async def handle_chunk(self, request):
|
||||
lines = []
|
||||
try:
|
||||
while True:
|
||||
l = await self.reader.readline()
|
||||
l = l.rstrip(b"\n").decode("utf-8")
|
||||
if not l:
|
||||
break
|
||||
lines.append(l)
|
||||
|
||||
msg = json.loads(''.join(lines))
|
||||
except (json.JSONDecodeError, UnicodeDecodeError) as e:
|
||||
self.logger.error('Bad message from client: %r' % lines)
|
||||
raise e
|
||||
|
||||
class WebsocketsServer(object):
|
||||
def __init__(self, host, port, handler, logger):
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.handler = handler
|
||||
self.logger = logger
|
||||
if 'chunk-stream' in msg:
|
||||
raise ClientError("Nested chunks are not allowed")
|
||||
|
||||
def start(self, loop):
|
||||
import websockets.server
|
||||
await self.dispatch_message(msg)
|
||||
|
||||
self.server = loop.run_until_complete(
|
||||
websockets.server.serve(
|
||||
self.client_handler,
|
||||
self.host,
|
||||
self.port,
|
||||
ping_interval=None,
|
||||
)
|
||||
)
|
||||
|
||||
for s in self.server.sockets:
|
||||
self.logger.debug("Listening on %r" % (s.getsockname(),))
|
||||
|
||||
# Enable keep alives. This prevents broken client connections
|
||||
# from persisting on the server for long periods of time.
|
||||
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 30)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 4)
|
||||
|
||||
name = self.server.sockets[0].getsockname()
|
||||
if self.server.sockets[0].family == socket.AF_INET6:
|
||||
self.address = "ws://[%s]:%d" % (name[0], name[1])
|
||||
else:
|
||||
self.address = "ws://%s:%d" % (name[0], name[1])
|
||||
|
||||
return [self.server.wait_closed()]
|
||||
|
||||
async def stop(self):
|
||||
self.server.close()
|
||||
|
||||
def cleanup(self):
|
||||
pass
|
||||
|
||||
async def client_handler(self, websocket):
|
||||
socket = WebsocketConnection(websocket, -1)
|
||||
await self.handler(socket)
|
||||
async def handle_ping(self, request):
|
||||
response = {'alive': True}
|
||||
self.write_message(response)
|
||||
|
||||
|
||||
class AsyncServer(object):
|
||||
def __init__(self, logger):
|
||||
self._cleanup_socket = None
|
||||
self.logger = logger
|
||||
self.start = None
|
||||
self.address = None
|
||||
self.loop = None
|
||||
self.run_tasks = []
|
||||
|
||||
def start_tcp_server(self, host, port):
|
||||
self.server = TCPStreamServer(host, port, self._client_handler, self.logger)
|
||||
def start_tcp():
|
||||
self.server = self.loop.run_until_complete(
|
||||
asyncio.start_server(self.handle_client, host, port)
|
||||
)
|
||||
|
||||
for s in self.server.sockets:
|
||||
self.logger.debug('Listening on %r' % (s.getsockname(),))
|
||||
# Newer python does this automatically. Do it manually here for
|
||||
# maximum compatibility
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_QUICKACK, 1)
|
||||
|
||||
# Enable keep alives. This prevents broken client connections
|
||||
# from persisting on the server for long periods of time.
|
||||
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 30)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 4)
|
||||
|
||||
name = self.server.sockets[0].getsockname()
|
||||
if self.server.sockets[0].family == socket.AF_INET6:
|
||||
self.address = "[%s]:%d" % (name[0], name[1])
|
||||
else:
|
||||
self.address = "%s:%d" % (name[0], name[1])
|
||||
|
||||
self.start = start_tcp
|
||||
|
||||
def start_unix_server(self, path):
|
||||
self.server = UnixStreamServer(path, self._client_handler, self.logger)
|
||||
def cleanup():
|
||||
os.unlink(path)
|
||||
|
||||
def start_websocket_server(self, host, port):
|
||||
self.server = WebsocketsServer(host, port, self._client_handler, self.logger)
|
||||
def start_unix():
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
# Work around path length limits in AF_UNIX
|
||||
os.chdir(os.path.dirname(path))
|
||||
self.server = self.loop.run_until_complete(
|
||||
asyncio.start_unix_server(self.handle_client, os.path.basename(path))
|
||||
)
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
|
||||
async def _client_handler(self, socket):
|
||||
address = socket.address
|
||||
self.logger.debug('Listening on %r' % path)
|
||||
|
||||
self._cleanup_socket = cleanup
|
||||
self.address = "unix://%s" % os.path.abspath(path)
|
||||
|
||||
self.start = start_unix
|
||||
|
||||
@abc.abstractmethod
|
||||
def accept_client(self, reader, writer):
|
||||
pass
|
||||
|
||||
async def handle_client(self, reader, writer):
|
||||
# writer.transport.set_write_buffer_limits(0)
|
||||
try:
|
||||
client = self.accept_client(socket)
|
||||
client = self.accept_client(reader, writer)
|
||||
await client.process_requests()
|
||||
except Exception as e:
|
||||
import traceback
|
||||
|
||||
self.logger.error(
|
||||
"Error from client %s: %s" % (address, str(e)), exc_info=True
|
||||
)
|
||||
self.logger.error('Error from client: %s' % str(e), exc_info=True)
|
||||
traceback.print_exc()
|
||||
finally:
|
||||
self.logger.debug("Client %s disconnected", address)
|
||||
await socket.close()
|
||||
writer.close()
|
||||
self.logger.debug('Client disconnected')
|
||||
|
||||
@abc.abstractmethod
|
||||
def accept_client(self, socket):
|
||||
pass
|
||||
|
||||
async def stop(self):
|
||||
self.logger.debug("Stopping server")
|
||||
await self.server.stop()
|
||||
|
||||
def start(self):
|
||||
tasks = self.server.start(self.loop)
|
||||
self.address = self.server.address
|
||||
return tasks
|
||||
def run_loop_forever(self):
|
||||
try:
|
||||
self.loop.run_forever()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
def signal_handler(self):
|
||||
self.logger.debug("Got exit signal")
|
||||
self.loop.create_task(self.stop())
|
||||
self.loop.stop()
|
||||
|
||||
def _serve_forever(self, tasks):
|
||||
def _serve_forever(self):
|
||||
try:
|
||||
self.loop.add_signal_handler(signal.SIGTERM, self.signal_handler)
|
||||
self.loop.add_signal_handler(signal.SIGINT, self.signal_handler)
|
||||
self.loop.add_signal_handler(signal.SIGQUIT, self.signal_handler)
|
||||
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGTERM])
|
||||
|
||||
self.loop.run_until_complete(asyncio.gather(*tasks))
|
||||
self.run_loop_forever()
|
||||
self.server.close()
|
||||
|
||||
self.logger.debug("Server shutting down")
|
||||
self.loop.run_until_complete(self.server.wait_closed())
|
||||
self.logger.debug('Server shutting down')
|
||||
finally:
|
||||
self.server.cleanup()
|
||||
if self._cleanup_socket is not None:
|
||||
self._cleanup_socket()
|
||||
|
||||
def serve_forever(self):
|
||||
"""
|
||||
Serve requests in the current process
|
||||
"""
|
||||
self._create_loop()
|
||||
tasks = self.start()
|
||||
self._serve_forever(tasks)
|
||||
self.loop.close()
|
||||
|
||||
def _create_loop(self):
|
||||
# Create loop and override any loop that may have existed in
|
||||
# a parent process. It is possible that the usecases of
|
||||
# serve_forever might be constrained enough to allow using
|
||||
# get_event_loop here, but better safe than sorry for now.
|
||||
self.loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(self.loop)
|
||||
self.start()
|
||||
self._serve_forever()
|
||||
|
||||
def serve_as_process(self, *, prefunc=None, args=(), log_level=None):
|
||||
def serve_as_process(self, *, prefunc=None, args=()):
|
||||
"""
|
||||
Serve requests in a child process
|
||||
"""
|
||||
|
||||
def run(queue):
|
||||
# Create loop and override any loop that may have existed
|
||||
# in a parent process. Without doing this and instead
|
||||
@@ -351,22 +259,18 @@ class AsyncServer(object):
|
||||
# more general, though, as any potential use of asyncio in
|
||||
# Cooker could create a loop that needs to be replaced in this
# new process.
|
||||
self._create_loop()
|
||||
self.loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(self.loop)
|
||||
try:
|
||||
self.address = None
|
||||
tasks = self.start()
|
||||
self.start()
|
||||
finally:
|
||||
# Always put the server address to wake up the parent task
|
||||
queue.put(self.address)
|
||||
queue.close()
|
||||
|
||||
if prefunc is not None:
|
||||
prefunc(self, *args)
|
||||
|
||||
if log_level is not None:
|
||||
self.logger.setLevel(log_level)
|
||||
|
||||
self._serve_forever(tasks)
|
||||
self._serve_forever()
|
||||
|
||||
if sys.version_info >= (3, 6):
|
||||
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
|
||||
|
||||
@@ -25,7 +25,6 @@ import bb
|
||||
import bb.msg
|
||||
import bb.process
|
||||
import bb.progress
|
||||
from io import StringIO
|
||||
from bb import data, event, utils
|
||||
|
||||
bblogger = logging.getLogger('BitBake')
|
||||
@@ -178,9 +177,7 @@ class StdoutNoopContextManager:
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
if "name" in dir(sys.stdout):
|
||||
return sys.stdout.name
|
||||
return "<mem>"
|
||||
return sys.stdout.name
|
||||
|
||||
|
||||
def exec_func(func, d, dirs = None):
|
||||
@@ -299,21 +296,9 @@ def exec_func_python(func, d, runfile, cwd=None):
|
||||
lineno = int(d.getVarFlag(func, "lineno", False))
|
||||
bb.methodpool.insert_method(func, text, fn, lineno - 1)
|
||||
|
||||
if verboseStdoutLogging:
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
currout = sys.stdout
|
||||
currerr = sys.stderr
|
||||
sys.stderr = sys.stdout = execio = StringIO()
|
||||
comp = utils.better_compile(code, func, "exec_func_python() autogenerated")
|
||||
utils.better_exec(comp, {"d": d}, code, "exec_func_python() autogenerated")
|
||||
finally:
|
||||
if verboseStdoutLogging:
|
||||
execio.flush()
|
||||
logger.plain("%s" % execio.getvalue())
|
||||
sys.stdout = currout
|
||||
sys.stderr = currerr
|
||||
execio.close()
|
||||
# We want any stdout/stderr to be printed before any other log messages to make debugging
|
||||
# more accurate. In some cases we seem to lose stdout/stderr entirely in logging tests without this.
|
||||
sys.stdout.flush()
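# The capture-and-replay pattern used above, reduced to its core (a sketch,
# with run_task_function standing in for the better_exec() call): swap
# sys.stdout/sys.stderr for a StringIO while the function body runs, then
# replay anything it printed through the logger after restoring the streams.
#
#     currout, currerr = sys.stdout, sys.stderr
#     sys.stderr = sys.stdout = execio = StringIO()
#     try:
#         run_task_function()
#     finally:
#         sys.stdout, sys.stderr = currout, currerr
#         output = execio.getvalue()
#         execio.close()
#         if output:
#             logger.plain("%s" % output)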
|
||||
@@ -456,11 +441,7 @@ exit $ret
|
||||
if fakerootcmd:
|
||||
cmd = [fakerootcmd, runfile]
|
||||
|
||||
# We only want to output to logger via LogTee if stdout is sys.__stdout__ (which will either
|
||||
# be real stdout or subprocess PIPE or similar). In other cases we are being run "recursively",
|
||||
# ie. inside another function, in which case stdout is already being captured so we don't
|
||||
# want to Tee here as output would be printed twice, and out of order.
|
||||
if verboseStdoutLogging and sys.stdout == sys.__stdout__:
|
||||
if verboseStdoutLogging:
|
||||
logfile = LogTee(logger, StdoutNoopContextManager())
|
||||
else:
|
||||
logfile = StdoutNoopContextManager()
|
||||
@@ -591,6 +572,7 @@ def _task_data(fn, task, d):
|
||||
localdata.setVar('BB_FILENAME', fn)
|
||||
localdata.setVar('OVERRIDES', 'task-%s:%s' %
|
||||
(task[3:].replace('_', '-'), d.getVar('OVERRIDES', False)))
|
||||
localdata.finalize()
|
||||
bb.data.expandKeys(localdata)
|
||||
return localdata
|
||||
|
||||
@@ -791,7 +773,44 @@ def exec_task(fn, task, d, profile = False):
|
||||
event.fire(failedevent, d)
|
||||
return 1
|
||||
|
||||
def _get_cleanmask(taskname, mcfn):
|
||||
def stamp_internal(taskname, d, file_name, baseonly=False, noextra=False):
|
||||
"""
|
||||
Internal stamp helper function
|
||||
Makes sure the stamp directory exists
|
||||
Returns the stamp path+filename
|
||||
|
||||
In the bitbake core, d can be a CacheData and file_name will be set.
|
||||
When called in task context, d will be a data store, file_name will not be set
|
||||
"""
|
||||
taskflagname = taskname
|
||||
if taskname.endswith("_setscene") and taskname != "do_setscene":
|
||||
taskflagname = taskname.replace("_setscene", "")
|
||||
|
||||
if file_name:
|
||||
stamp = d.stamp[file_name]
|
||||
extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
|
||||
else:
|
||||
stamp = d.getVar('STAMP')
|
||||
file_name = d.getVar('BB_FILENAME')
|
||||
extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info') or ""
|
||||
|
||||
if baseonly:
|
||||
return stamp
|
||||
if noextra:
|
||||
extrainfo = ""
|
||||
|
||||
if not stamp:
|
||||
return
|
||||
|
||||
stamp = bb.parse.siggen.stampfile(stamp, file_name, taskname, extrainfo)
|
||||
|
||||
stampdir = os.path.dirname(stamp)
|
||||
if cached_mtime_noerror(stampdir) == 0:
|
||||
bb.utils.mkdirhier(stampdir)
|
||||
|
||||
return stamp
|
||||
|
||||
def stamp_cleanmask_internal(taskname, d, file_name):
|
||||
"""
|
||||
Internal stamp helper function to generate stamp cleaning mask
|
||||
Returns the stamp path+filename
|
||||
@@ -799,14 +818,27 @@ def _get_cleanmask(taskname, mcfn):
|
||||
In the bitbake core, d can be a CacheData and file_name will be set.
|
||||
When called in task context, d will be a data store, file_name will not be set
|
||||
"""
|
||||
cleanmask = bb.parse.siggen.stampcleanmask_mcfn(taskname, mcfn)
|
||||
taskflagname = taskname.replace("_setscene", "")
|
||||
if cleanmask:
|
||||
return [cleanmask, cleanmask.replace(taskflagname, taskflagname + "_setscene")]
|
||||
return []
|
||||
taskflagname = taskname
|
||||
if taskname.endswith("_setscene") and taskname != "do_setscene":
|
||||
taskflagname = taskname.replace("_setscene", "")
|
||||
|
||||
def clean_stamp_mcfn(task, mcfn):
|
||||
cleanmask = _get_cleanmask(task, mcfn)
|
||||
if file_name:
|
||||
stamp = d.stampclean[file_name]
|
||||
extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
|
||||
else:
|
||||
stamp = d.getVar('STAMPCLEAN')
|
||||
file_name = d.getVar('BB_FILENAME')
|
||||
extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info') or ""
|
||||
|
||||
if not stamp:
|
||||
return []
|
||||
|
||||
cleanmask = bb.parse.siggen.stampcleanmask(stamp, file_name, taskname, extrainfo)
|
||||
|
||||
return [cleanmask, cleanmask.replace(taskflagname, taskflagname + "_setscene")]
|
||||
|
||||
def clean_stamp(task, d, file_name = None):
|
||||
cleanmask = stamp_cleanmask_internal(task, d, file_name)
|
||||
for mask in cleanmask:
|
||||
for name in glob.glob(mask):
|
||||
# Preserve sigdata files in the stamps directory
|
||||
@@ -816,46 +848,33 @@ def clean_stamp_mcfn(task, mcfn):
|
||||
if name.endswith('.taint'):
|
||||
continue
|
||||
os.unlink(name)
|
||||
return
|
||||
|
||||
def clean_stamp(task, d):
|
||||
mcfn = d.getVar('BB_FILENAME')
|
||||
clean_stamp_mcfn(task, mcfn)
|
||||
|
||||
def make_stamp_mcfn(task, mcfn):
|
||||
|
||||
basestamp = bb.parse.siggen.stampfile_mcfn(task, mcfn)
|
||||
|
||||
stampdir = os.path.dirname(basestamp)
|
||||
if cached_mtime_noerror(stampdir) == 0:
|
||||
bb.utils.mkdirhier(stampdir)
|
||||
|
||||
clean_stamp_mcfn(task, mcfn)
|
||||
|
||||
# Remove the file and recreate to force timestamp
|
||||
# change on broken NFS filesystems
|
||||
if basestamp:
|
||||
bb.utils.remove(basestamp)
|
||||
open(basestamp, "w").close()
|
||||
|
||||
def make_stamp(task, d):
|
||||
def make_stamp(task, d, file_name = None):
|
||||
"""
|
||||
Creates/updates a stamp for a given task
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
mcfn = d.getVar('BB_FILENAME')
|
||||
clean_stamp(task, d, file_name)
|
||||
|
||||
make_stamp_mcfn(task, mcfn)
|
||||
stamp = stamp_internal(task, d, file_name)
|
||||
# Remove the file and recreate to force timestamp
|
||||
# change on broken NFS filesystems
|
||||
if stamp:
|
||||
bb.utils.remove(stamp)
|
||||
open(stamp, "w").close()
|
||||
|
||||
# If we're in task context, write out a signature file for each task
|
||||
# as it completes
|
||||
if not task.endswith("_setscene"):
|
||||
stampbase = bb.parse.siggen.stampfile_base(mcfn)
|
||||
bb.parse.siggen.dump_sigtask(mcfn, task, stampbase, True)
|
||||
if not task.endswith("_setscene") and task != "do_setscene" and not file_name:
|
||||
stampbase = stamp_internal(task, d, None, True)
|
||||
file_name = d.getVar('BB_FILENAME')
|
||||
bb.parse.siggen.dump_sigtask(file_name, task, stampbase, True)
|
||||
|
||||
|
||||
def find_stale_stamps(task, mcfn):
|
||||
current = bb.parse.siggen.stampfile_mcfn(task, mcfn)
|
||||
current2 = bb.parse.siggen.stampfile_mcfn(task + "_setscene", mcfn)
|
||||
cleanmask = _get_cleanmask(task, mcfn)
|
||||
def find_stale_stamps(task, d, file_name=None):
|
||||
current = stamp_internal(task, d, file_name)
|
||||
current2 = stamp_internal(task + "_setscene", d, file_name)
|
||||
cleanmask = stamp_cleanmask_internal(task, d, file_name)
|
||||
found = []
|
||||
for mask in cleanmask:
|
||||
for name in glob.glob(mask):
|
||||
@@ -869,14 +888,38 @@ def find_stale_stamps(task, mcfn):
|
||||
found.append(name)
|
||||
return found
|
||||
|
||||
def write_taint(task, d):
|
||||
def del_stamp(task, d, file_name = None):
|
||||
"""
|
||||
Removes a stamp for a given task
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
stamp = stamp_internal(task, d, file_name)
|
||||
bb.utils.remove(stamp)
|
||||
|
||||
def write_taint(task, d, file_name = None):
|
||||
"""
|
||||
Creates a "taint" file which will force the specified task and its
|
||||
dependents to be re-run the next time by influencing the value of its
|
||||
taskhash.
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
mcfn = d.getVar('BB_FILENAME')
|
||||
bb.parse.siggen.invalidate_task(task, mcfn)
|
||||
import uuid
|
||||
if file_name:
|
||||
taintfn = d.stamp[file_name] + '.' + task + '.taint'
|
||||
else:
|
||||
taintfn = d.getVar('STAMP') + '.' + task + '.taint'
|
||||
bb.utils.mkdirhier(os.path.dirname(taintfn))
|
||||
# The specific content of the taint file is not really important,
|
||||
# we just need it to be random, so a random UUID is used
|
||||
with open(taintfn, 'w') as taintf:
|
||||
taintf.write(str(uuid.uuid4()))
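# What write_taint() above leaves behind (paths and UUID are illustrative): a
# file next to the task stamp whose random contents feed into the task hash,
# which is what forces the task and its dependents to rerun:
#
#     write_taint("do_compile", d)
#     # -> creates ${STAMP}.do_compile.taint containing something like
#     #    "2f1d6af2-6df8-4f67-b7a8-9a0c2f4f2f11"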
|
||||
|
||||
def stampfile(taskname, d, file_name = None, noextra=False):
|
||||
"""
|
||||
Return the stamp for a given task
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
return stamp_internal(taskname, d, file_name, noextra=noextra)
|
||||
|
||||
def add_tasks(tasklist, d):
|
||||
task_deps = d.getVar('_task_deps', False)
|
||||
|
||||
@@ -28,7 +28,7 @@ import shutil
|
||||
|
||||
logger = logging.getLogger("BitBake.Cache")
|
||||
|
||||
__cache_version__ = "155"
|
||||
__cache_version__ = "154"
|
||||
|
||||
def getCacheFile(path, filename, mc, data_hash):
|
||||
mcspec = ''
|
||||
@@ -105,7 +105,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
|
||||
self.tasks = metadata.getVar('__BBTASKS', False)
|
||||
|
||||
self.basetaskhashes = metadata.getVar('__siggen_basehashes', False) or {}
|
||||
self.basetaskhashes = self.taskvar('BB_BASEHASH', self.tasks, metadata)
|
||||
self.hashfilename = self.getvar('BB_HASHFILENAME', metadata)
|
||||
|
||||
self.task_deps = metadata.getVar('_task_deps', False) or {'tasks': [], 'parents': {}}
|
||||
@@ -216,7 +216,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
|
||||
# Collect files we may need for possible world-dep
|
||||
# calculations
|
||||
if not bb.utils.to_boolean(self.not_world):
|
||||
if not self.not_world:
|
||||
cachedata.possible_world.append(fn)
|
||||
#else:
|
||||
# logger.debug2("EXCLUDE FROM WORLD: %s", fn)
|
||||
@@ -238,113 +238,15 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
cachedata.fakerootlogs[fn] = self.fakerootlogs
|
||||
cachedata.extradepsfunc[fn] = self.extradepsfunc
|
||||
|
||||
|
||||
class SiggenRecipeInfo(RecipeInfoCommon):
|
||||
__slots__ = ()
|
||||
|
||||
classname = "SiggenRecipeInfo"
|
||||
cachefile = "bb_cache_" + classname +".dat"
|
||||
# we don't want to show this information in graph files so don't set cachefields
|
||||
#cachefields = []
|
||||
|
||||
def __init__(self, filename, metadata):
|
||||
self.siggen_gendeps = metadata.getVar("__siggen_gendeps", False)
|
||||
self.siggen_varvals = metadata.getVar("__siggen_varvals", False)
|
||||
self.siggen_taskdeps = metadata.getVar("__siggen_taskdeps", False)
|
||||
|
||||
@classmethod
|
||||
def init_cacheData(cls, cachedata):
|
||||
cachedata.siggen_taskdeps = {}
|
||||
cachedata.siggen_gendeps = {}
|
||||
cachedata.siggen_varvals = {}
|
||||
|
||||
def add_cacheData(self, cachedata, fn):
|
||||
cachedata.siggen_gendeps[fn] = self.siggen_gendeps
|
||||
cachedata.siggen_varvals[fn] = self.siggen_varvals
|
||||
cachedata.siggen_taskdeps[fn] = self.siggen_taskdeps
|
||||
|
||||
# The siggen variable data is large and impacts:
|
||||
# - bitbake's overall memory usage
|
||||
# - the amount of data sent over IPC between parsing processes and the server
|
||||
# - the size of the cache files on disk
|
||||
# - the size of "sigdata" hash information files on disk
|
||||
# The data consists of strings (some large) or frozenset lists of variables
|
||||
# As such, we a) deduplicate the data here and b) pass references to the object at second
# access (e.g. over IPC or saving into pickle).
|
||||
|
||||
store = {}
|
||||
save_map = {}
|
||||
save_count = 1
|
||||
restore_map = {}
|
||||
restore_count = {}
|
||||
|
||||
@classmethod
|
||||
def reset(cls):
|
||||
# Needs to be called before starting new streamed data in a given process
|
||||
# (e.g. writing out the cache again)
|
||||
cls.save_map = {}
|
||||
cls.save_count = 1
|
||||
cls.restore_map = {}
|
||||
|
||||
@classmethod
|
||||
def _save(cls, deps):
|
||||
ret = []
|
||||
if not deps:
|
||||
return deps
|
||||
for dep in deps:
|
||||
fs = deps[dep]
|
||||
if fs is None:
|
||||
ret.append((dep, None, None))
|
||||
elif fs in cls.save_map:
|
||||
ret.append((dep, None, cls.save_map[fs]))
|
||||
else:
|
||||
cls.save_map[fs] = cls.save_count
|
||||
ret.append((dep, fs, cls.save_count))
|
||||
cls.save_count = cls.save_count + 1
|
||||
return ret
|
||||
|
||||
@classmethod
|
||||
def _restore(cls, deps, pid):
|
||||
ret = {}
|
||||
if not deps:
|
||||
return deps
|
||||
if pid not in cls.restore_map:
|
||||
cls.restore_map[pid] = {}
|
||||
map = cls.restore_map[pid]
|
||||
for dep, fs, mapnum in deps:
|
||||
if fs is None and mapnum is None:
|
||||
ret[dep] = None
|
||||
elif fs is None:
|
||||
ret[dep] = map[mapnum]
|
||||
else:
|
||||
try:
|
||||
fs = cls.store[fs]
|
||||
except KeyError:
|
||||
cls.store[fs] = fs
|
||||
map[mapnum] = fs
|
||||
ret[dep] = fs
|
||||
return ret
|
||||
|
||||
def __getstate__(self):
|
||||
ret = {}
|
||||
for key in ["siggen_gendeps", "siggen_taskdeps", "siggen_varvals"]:
|
||||
ret[key] = self._save(self.__dict__[key])
|
||||
ret['pid'] = os.getpid()
|
||||
return ret
|
||||
|
||||
def __setstate__(self, state):
|
||||
pid = state['pid']
|
||||
for key in ["siggen_gendeps", "siggen_taskdeps", "siggen_varvals"]:
|
||||
setattr(self, key, self._restore(state[key], pid))
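# The save/restore maps above are a small streaming deduplication scheme: the
# first time a value is pickled it travels together with a fresh integer id,
# and later occurrences carry only the id, which the receiving side resolves
# through a per-sender map. A stripped-down sketch of the same idea:
#
#     save_map, counter = {}, 1
#
#     def save(value):
#         global counter
#         if value in save_map:
#             return (None, save_map[value])    # seen before: send the id only
#         save_map[value] = counter
#         counter += 1
#         return (value, save_map[value])       # first time: send value and id
#
#     restore_map = {}
#
#     def restore(value, ident):
#         if value is None:
#             return restore_map[ident]         # resolve the id to the shared object
#         restore_map[ident] = value
#         return value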
|
||||
|
||||
|
||||
def virtualfn2realfn(virtualfn):
|
||||
"""
|
||||
Convert a virtual file name to a real one + the associated subclass keyword
|
||||
"""
|
||||
mc = ""
|
||||
if virtualfn.startswith('mc:') and virtualfn.count(':') >= 2:
|
||||
(_, mc, virtualfn) = virtualfn.split(':', 2)
|
||||
elems = virtualfn.split(':')
|
||||
mc = elems[1]
|
||||
virtualfn = ":".join(elems[2:])
|
||||
|
||||
fn = virtualfn
|
||||
cls = ""
|
||||
@@ -367,7 +269,7 @@ def realfn2virtual(realfn, cls, mc):
|
||||
|
||||
def variant2virtual(realfn, variant):
|
||||
"""
|
||||
Convert a real filename + a variant to a virtual filename
|
||||
Convert a real filename + the associated subclass keyword to a virtual filename
|
||||
"""
|
||||
if variant == "":
|
||||
return realfn
|
||||
@@ -378,18 +280,75 @@ def variant2virtual(realfn, variant):
|
||||
return "mc:" + elems[1] + ":" + realfn
|
||||
return "virtual:" + variant + ":" + realfn
|
||||
|
||||
#
|
||||
# Cooker calls cacheValid on its recipe list, then either calls loadCached
|
||||
# from it's main thread or parse from separate processes to generate an up to
|
||||
# date cache
|
||||
#
|
||||
class Cache(object):
|
||||
def parse_recipe(bb_data, bbfile, appends, mc=''):
|
||||
"""
|
||||
Parse a recipe
|
||||
"""
|
||||
|
||||
bb_data.setVar("__BBMULTICONFIG", mc)
|
||||
|
||||
bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
|
||||
bb.parse.cached_mtime_noerror(bbfile_loc)
|
||||
|
||||
if appends:
|
||||
bb_data.setVar('__BBAPPEND', " ".join(appends))
|
||||
bb_data = bb.parse.handle(bbfile, bb_data)
|
||||
return bb_data
|
||||
|
||||
|
||||
class NoCache(object):
|
||||
|
||||
def __init__(self, databuilder):
|
||||
self.databuilder = databuilder
|
||||
self.data = databuilder.data
|
||||
|
||||
def loadDataFull(self, virtualfn, appends):
|
||||
"""
|
||||
Return a complete set of data for fn.
|
||||
To do this, we need to parse the file.
|
||||
"""
|
||||
logger.debug("Parsing %s (full)" % virtualfn)
|
||||
(fn, virtual, mc) = virtualfn2realfn(virtualfn)
|
||||
bb_data = self.load_bbfile(virtualfn, appends, virtonly=True)
|
||||
return bb_data[virtual]
|
||||
|
||||
def load_bbfile(self, bbfile, appends, virtonly = False, mc=None):
|
||||
"""
|
||||
Load and parse one .bb build file
|
||||
Return the data and whether parsing resulted in the file being skipped
|
||||
"""
|
||||
|
||||
if virtonly:
|
||||
(bbfile, virtual, mc) = virtualfn2realfn(bbfile)
|
||||
bb_data = self.databuilder.mcdata[mc].createCopy()
|
||||
bb_data.setVar("__ONLYFINALISE", virtual or "default")
|
||||
datastores = parse_recipe(bb_data, bbfile, appends, mc)
|
||||
return datastores
|
||||
|
||||
if mc is not None:
|
||||
bb_data = self.databuilder.mcdata[mc].createCopy()
|
||||
return parse_recipe(bb_data, bbfile, appends, mc)
|
||||
|
||||
bb_data = self.data.createCopy()
|
||||
datastores = parse_recipe(bb_data, bbfile, appends)
|
||||
|
||||
for mc in self.databuilder.mcdata:
|
||||
if not mc:
|
||||
continue
|
||||
bb_data = self.databuilder.mcdata[mc].createCopy()
|
||||
newstores = parse_recipe(bb_data, bbfile, appends, mc)
|
||||
for ns in newstores:
|
||||
datastores["mc:%s:%s" % (mc, ns)] = newstores[ns]
|
||||
|
||||
return datastores
|
||||
|
||||
class Cache(NoCache):
|
||||
"""
|
||||
BitBake Cache implementation
|
||||
"""
|
||||
def __init__(self, databuilder, mc, data_hash, caches_array):
|
||||
self.databuilder = databuilder
|
||||
self.data = databuilder.data
|
||||
super().__init__(databuilder)
|
||||
data = databuilder.data
|
||||
|
||||
# Pass caches_array information into Cache Constructor
|
||||
# It will be used later for deciding whether we
|
||||
@@ -397,7 +356,7 @@ class Cache(object):
|
||||
self.mc = mc
|
||||
self.logger = PrefixLoggerAdapter("Cache: %s: " % (mc if mc else "default"), logger)
|
||||
self.caches_array = caches_array
|
||||
self.cachedir = self.data.getVar("CACHE")
|
||||
self.cachedir = data.getVar("CACHE")
|
||||
self.clean = set()
|
||||
self.checked = set()
|
||||
self.depends_cache = {}
|
||||
@@ -407,12 +366,20 @@ class Cache(object):
|
||||
self.filelist_regex = re.compile(r'(?:(?<=:True)|(?<=:False))\s+')
|
||||
|
||||
if self.cachedir in [None, '']:
|
||||
bb.fatal("Please ensure CACHE is set to the cache directory for BitBake to use")
|
||||
self.has_cache = False
|
||||
self.logger.info("Not using a cache. "
|
||||
"Set CACHE = <directory> to enable.")
|
||||
return
|
||||
|
||||
self.has_cache = True
|
||||
|
||||
def getCacheFile(self, cachefile):
|
||||
return getCacheFile(self.cachedir, cachefile, self.mc, self.data_hash)
|
||||
|
||||
def prepare_cache(self, progress):
|
||||
if not self.has_cache:
|
||||
return 0
|
||||
|
||||
loaded = 0
|
||||
|
||||
self.cachefile = self.getCacheFile("bb_cache.dat")
|
||||
@@ -451,6 +418,9 @@ class Cache(object):
|
||||
return loaded
|
||||
|
||||
def cachesize(self):
|
||||
if not self.has_cache:
|
||||
return 0
|
||||
|
||||
cachesize = 0
|
||||
for cache_class in self.caches_array:
|
||||
cachefile = self.getCacheFile(cache_class.cachefile)
|
||||
@@ -512,11 +482,11 @@ class Cache(object):
|
||||
|
||||
return len(self.depends_cache)
|
||||
|
||||
def parse(self, filename, appends, layername):
|
||||
def parse(self, filename, appends):
|
||||
"""Parse the specified filename, returning the recipe information"""
|
||||
self.logger.debug("Parsing %s", filename)
|
||||
infos = []
|
||||
datastores = self.databuilder.parseRecipeVariants(filename, appends, mc=self.mc, layername=layername)
|
||||
datastores = self.load_bbfile(filename, appends, mc=self.mc)
|
||||
depends = []
|
||||
variants = []
|
||||
# Process the "real" fn last so we can store variants list
|
||||
@@ -538,19 +508,43 @@ class Cache(object):
|
||||
|
||||
return infos
|
||||
|
||||
def loadCached(self, filename, appends):
|
||||
def load(self, filename, appends):
|
||||
"""Obtain the recipe information for the specified filename,
|
||||
using cached values.
|
||||
"""
|
||||
using cached values if available, otherwise parsing.
|
||||
|
||||
infos = []
|
||||
# info_array item is a list of [CoreRecipeInfo, XXXRecipeInfo]
|
||||
info_array = self.depends_cache[filename]
|
||||
for variant in info_array[0].variants:
|
||||
virtualfn = variant2virtual(filename, variant)
|
||||
infos.append((virtualfn, self.depends_cache[virtualfn]))
|
||||
Note that if it does parse to obtain the info, it will not
|
||||
automatically add the information to the cache or to your
|
||||
CacheData. Use the add or add_info method to do so after
|
||||
running this, or use loadData instead."""
|
||||
cached = self.cacheValid(filename, appends)
|
||||
if cached:
|
||||
infos = []
|
||||
# info_array item is a list of [CoreRecipeInfo, XXXRecipeInfo]
|
||||
info_array = self.depends_cache[filename]
|
||||
for variant in info_array[0].variants:
|
||||
virtualfn = variant2virtual(filename, variant)
|
||||
infos.append((virtualfn, self.depends_cache[virtualfn]))
|
||||
else:
|
||||
return self.parse(filename, appends, configdata, self.caches_array)
|
||||
|
||||
return infos
|
||||
return cached, infos
|
||||
|
||||
def loadData(self, fn, appends, cacheData):
|
||||
"""Load the recipe info for the specified filename,
|
||||
parsing and adding to the cache if necessary, and adding
|
||||
the recipe information to the supplied CacheData instance."""
|
||||
skipped, virtuals = 0, 0
|
||||
|
||||
cached, infos = self.load(fn, appends)
|
||||
for virtualfn, info_array in infos:
|
||||
if info_array[0].skipped:
|
||||
self.logger.debug("Skipping %s: %s", virtualfn, info_array[0].skipreason)
|
||||
skipped += 1
|
||||
else:
|
||||
self.add_info(virtualfn, info_array, cacheData, not cached)
|
||||
virtuals += 1
|
||||
|
||||
return cached, skipped, virtuals
|
||||
|
||||
def cacheValid(self, fn, appends):
|
||||
"""
|
||||
@@ -559,6 +553,10 @@ class Cache(object):
|
||||
"""
|
||||
if fn not in self.checked:
|
||||
self.cacheValidUpdate(fn, appends)
|
||||
|
||||
# Is cache enabled?
|
||||
if not self.has_cache:
|
||||
return False
|
||||
if fn in self.clean:
|
||||
return True
|
||||
return False
|
||||
@@ -568,6 +566,10 @@ class Cache(object):
|
||||
Is the cache valid for fn?
|
||||
Make thorough (slower) checks including timestamps.
|
||||
"""
|
||||
# Is cache enabled?
|
||||
if not self.has_cache:
|
||||
return False
|
||||
|
||||
self.checked.add(fn)
|
||||
|
||||
# File isn't in depends_cache
|
||||
@@ -674,6 +676,10 @@ class Cache(object):
|
||||
Save the cache
|
||||
Called from the parser when complete (or exiting)
|
||||
"""
|
||||
|
||||
if not self.has_cache:
|
||||
return
|
||||
|
||||
if self.cacheclean:
|
||||
self.logger.debug2("Cache is clean, not saving.")
|
||||
return
|
||||
@@ -694,7 +700,6 @@ class Cache(object):
|
||||
p.dump(info)
|
||||
|
||||
del self.depends_cache
|
||||
SiggenRecipeInfo.reset()
|
||||
|
||||
@staticmethod
|
||||
def mtime(cachefile):
|
||||
@@ -717,11 +722,26 @@ class Cache(object):
|
||||
if watcher:
|
||||
watcher(info_array[0].file_depends)
|
||||
|
||||
if not self.has_cache:
|
||||
return
|
||||
|
||||
if (info_array[0].skipped or 'SRCREVINACTION' not in info_array[0].pv) and not info_array[0].nocache:
|
||||
if parsed:
|
||||
self.cacheclean = False
|
||||
self.depends_cache[filename] = info_array
|
||||
|
||||
def add(self, file_name, data, cacheData, parsed=None):
|
||||
"""
|
||||
Save data we need into the cache
|
||||
"""
|
||||
|
||||
realfn = virtualfn2realfn(file_name)[0]
|
||||
|
||||
info_array = []
|
||||
for cache_class in self.caches_array:
|
||||
info_array.append(cache_class(realfn, data))
|
||||
self.add_info(file_name, info_array, cacheData, parsed)
|
||||
|
||||
class MulticonfigCache(Mapping):
|
||||
def __init__(self, databuilder, data_hash, caches_array):
|
||||
def progress(p):
|
||||
@@ -758,7 +778,6 @@ class MulticonfigCache(Mapping):
|
||||
loaded = 0
|
||||
|
||||
for c in self.__caches.values():
|
||||
SiggenRecipeInfo.reset()
|
||||
loaded += c.prepare_cache(progress)
|
||||
previous_progress = current_progress
|
||||
|
||||
@@ -836,10 +855,11 @@ class MultiProcessCache(object):
self.cachedata = self.create_cachedata()
self.cachedata_extras = self.create_cachedata()

def init_cache(self, cachedir, cache_file_name=None):
if not cachedir:
def init_cache(self, d, cache_file_name=None):
cachedir = (d.getVar("PERSISTENT_DIR") or
d.getVar("CACHE"))
if cachedir in [None, '']:
return

bb.utils.mkdirhier(cachedir)
self.cachefile = os.path.join(cachedir,
cache_file_name or self.__class__.cache_file_name)
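The init_cache() hunk above differs in who resolves the cache directory: the newer side receives an explicit cachedir, the older side reads PERSISTENT_DIR or CACHE from the datastore itself. A small standalone sketch of the older lookup pattern follows; the helper name is ours and os.makedirs stands in for bb.utils.mkdirhier().

import os

def resolve_cachefile(d, filename="bb_codeparser.dat"):
    # Prefer PERSISTENT_DIR, fall back to CACHE, and bail out quietly when
    # neither is configured (cache is simply disabled in that case).
    cachedir = d.getVar("PERSISTENT_DIR") or d.getVar("CACHE")
    if not cachedir:
        return None
    os.makedirs(cachedir, exist_ok=True)
    return os.path.join(cachedir, filename)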
|
||||
@@ -870,10 +890,6 @@ class MultiProcessCache(object):
|
||||
if not self.cachefile:
|
||||
return
|
||||
|
||||
have_data = any(self.cachedata_extras)
|
||||
if not have_data:
|
||||
return
|
||||
|
||||
glf = bb.utils.lockfile(self.cachefile + ".lock", shared=True)
|
||||
|
||||
i = os.getpid()
|
||||
@@ -908,8 +924,6 @@ class MultiProcessCache(object):
|
||||
|
||||
data = self.cachedata
|
||||
|
||||
have_data = False
|
||||
|
||||
for f in [y for y in os.listdir(os.path.dirname(self.cachefile)) if y.startswith(os.path.basename(self.cachefile) + '-')]:
|
||||
f = os.path.join(os.path.dirname(self.cachefile), f)
|
||||
try:
|
||||
@@ -924,14 +938,12 @@ class MultiProcessCache(object):
|
||||
os.unlink(f)
|
||||
continue
|
||||
|
||||
have_data = True
|
||||
self.merge_data(extradata, data)
|
||||
os.unlink(f)
|
||||
|
||||
if have_data:
|
||||
with open(self.cachefile, "wb") as f:
|
||||
p = pickle.Pickler(f, -1)
|
||||
p.dump([data, self.__class__.CACHE_VERSION])
|
||||
with open(self.cachefile, "wb") as f:
|
||||
p = pickle.Pickler(f, -1)
|
||||
p.dump([data, self.__class__.CACHE_VERSION])
|
||||
|
||||
bb.utils.unlockfile(glf)
|
||||
|
||||
|
||||
@@ -27,7 +27,6 @@ import ast
|
||||
import sys
|
||||
import codegen
|
||||
import logging
|
||||
import inspect
|
||||
import bb.pysh as pysh
|
||||
import bb.utils, bb.data
|
||||
import hashlib
|
||||
@@ -59,45 +58,10 @@ def check_indent(codestr):
|
||||
|
||||
return codestr
|
||||
|
||||
modulecode_deps = {}

def add_module_functions(fn, functions, namespace):
import os
fstat = os.stat(fn)
fixedhash = fn + ":" + str(fstat.st_size) + ":" + str(fstat.st_mtime)
for f in functions:
name = "%s.%s" % (namespace, f)
parser = PythonParser(name, logger)
try:
parser.parse_python(None, filename=fn, lineno=1, fixedhash=fixedhash+f)
#bb.warn("Cached %s" % f)
except KeyError:
targetfn = inspect.getsourcefile(functions[f])
if fn != targetfn:
# Skip references to other modules outside this file
#bb.warn("Skipping %s" % name)
continue
lines, lineno = inspect.getsourcelines(functions[f])
src = "".join(lines)
parser.parse_python(src, filename=fn, lineno=lineno, fixedhash=fixedhash+f)
#bb.warn("Not cached %s" % f)
execs = parser.execs.copy()
# Expand internal module exec references
for e in parser.execs:
if e in functions:
execs.remove(e)
execs.add(namespace + "." + e)
modulecode_deps[name] = [parser.references.copy(), execs, parser.var_execs.copy(), parser.contains.copy(), parser.extra]
#bb.warn("%s: %s\nRefs:%s Execs: %s %s %s" % (name, fn, parser.references, parser.execs, parser.var_execs, parser.contains))

def update_module_dependencies(d):
for mod in modulecode_deps:
excludes = set((d.getVarFlag(mod, "vardepsexclude") or "").split())
if excludes:
modulecode_deps[mod] = [modulecode_deps[mod][0] - excludes, modulecode_deps[mod][1] - excludes, modulecode_deps[mod][2] - excludes, modulecode_deps[mod][3], modulecode_deps[mod][4]]
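add_module_functions() above avoids reading module source where it can: it builds a cache key from the file's path, size and mtime and only falls back to inspect.getsourcelines() on a cache miss. A self-contained sketch of that key construction (helper name is ours, format mirrors the fixedhash string in the hunk):

import os

def stat_fixedhash(path, funcname):
    # Cheap cache key: changes whenever the file is rewritten, without
    # having to read or hash its contents.
    st = os.stat(path)
    return "%s:%d:%s%s" % (path, st.st_size, st.st_mtime, funcname)

# Example: key a per-function parse cache without loading source text.
# key = stat_fixedhash("/path/to/module.py", "do_something")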
|
||||
|
||||
# A custom getstate/setstate using tuples is actually worth 15% cachesize by
|
||||
# avoiding duplication of the attribute names!
|
||||
|
||||
|
||||
class SetCache(object):
|
||||
def __init__(self):
|
||||
self.setcache = {}
|
||||
@@ -117,22 +81,21 @@ class SetCache(object):
|
||||
codecache = SetCache()
|
||||
|
||||
class pythonCacheLine(object):
|
||||
def __init__(self, refs, execs, contains, extra):
|
||||
def __init__(self, refs, execs, contains):
|
||||
self.refs = codecache.internSet(refs)
|
||||
self.execs = codecache.internSet(execs)
|
||||
self.contains = {}
|
||||
for c in contains:
|
||||
self.contains[c] = codecache.internSet(contains[c])
|
||||
self.extra = extra
|
||||
|
||||
def __getstate__(self):
|
||||
return (self.refs, self.execs, self.contains, self.extra)
|
||||
return (self.refs, self.execs, self.contains)
|
||||
|
||||
def __setstate__(self, state):
|
||||
(refs, execs, contains, extra) = state
|
||||
self.__init__(refs, execs, contains, extra)
|
||||
(refs, execs, contains) = state
|
||||
self.__init__(refs, execs, contains)
|
||||
def __hash__(self):
|
||||
l = (hash(self.refs), hash(self.execs), hash(self.extra))
|
||||
l = (hash(self.refs), hash(self.execs))
|
||||
for c in sorted(self.contains.keys()):
|
||||
l = l + (c, hash(self.contains[c]))
|
||||
return hash(l)
|
||||
@@ -161,7 +124,7 @@ class CodeParserCache(MultiProcessCache):
|
||||
# so that an existing cache gets invalidated. Additionally you'll need
|
||||
# to increment __cache_version__ in cache.py in order to ensure that old
|
||||
# recipe caches don't trigger "Taskhash mismatch" errors.
|
||||
CACHE_VERSION = 12
|
||||
CACHE_VERSION = 11
|
||||
|
||||
def __init__(self):
|
||||
MultiProcessCache.__init__(self)
|
||||
@@ -175,8 +138,8 @@ class CodeParserCache(MultiProcessCache):
|
||||
self.pythoncachelines = {}
|
||||
self.shellcachelines = {}
|
||||
|
||||
def newPythonCacheLine(self, refs, execs, contains, extra):
|
||||
cacheline = pythonCacheLine(refs, execs, contains, extra)
|
||||
def newPythonCacheLine(self, refs, execs, contains):
|
||||
cacheline = pythonCacheLine(refs, execs, contains)
|
||||
h = hash(cacheline)
|
||||
if h in self.pythoncachelines:
|
||||
return self.pythoncachelines[h]
|
||||
@@ -191,12 +154,12 @@ class CodeParserCache(MultiProcessCache):
|
||||
self.shellcachelines[h] = cacheline
|
||||
return cacheline
|
||||
|
||||
def init_cache(self, cachedir):
|
||||
def init_cache(self, d):
|
||||
# Check if we already have the caches
|
||||
if self.pythoncache:
|
||||
return
|
||||
|
||||
MultiProcessCache.init_cache(self, cachedir)
|
||||
MultiProcessCache.init_cache(self, d)
|
||||
|
||||
# cachedata gets re-assigned in the parent
|
||||
self.pythoncache = self.cachedata[0]
|
||||
@@ -208,8 +171,8 @@ class CodeParserCache(MultiProcessCache):
|
||||
|
||||
codeparsercache = CodeParserCache()
|
||||
|
||||
def parser_cache_init(cachedir):
|
||||
codeparsercache.init_cache(cachedir)
|
||||
def parser_cache_init(d):
|
||||
codeparsercache.init_cache(d)
|
||||
|
||||
def parser_cache_save():
|
||||
codeparsercache.save_extras()
|
||||
@@ -262,19 +225,19 @@ class PythonParser():
|
||||
def visit_Call(self, node):
|
||||
name = self.called_node_name(node.func)
|
||||
if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
|
||||
if isinstance(node.args[0], ast.Constant) and isinstance(node.args[0].value, str):
|
||||
varname = node.args[0].value
|
||||
if name in self.containsfuncs and isinstance(node.args[1], ast.Constant):
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
varname = node.args[0].s
|
||||
if name in self.containsfuncs and isinstance(node.args[1], ast.Str):
|
||||
if varname not in self.contains:
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname].add(node.args[1].value)
|
||||
elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Constant):
|
||||
self.contains[varname].add(node.args[1].s)
|
||||
elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Str):
|
||||
if varname not in self.contains:
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname].update(node.args[1].value.split())
|
||||
self.contains[varname].update(node.args[1].s.split())
|
||||
elif name.endswith(self.getvarflags):
|
||||
if isinstance(node.args[1], ast.Constant):
|
||||
self.references.add('%s[%s]' % (varname, node.args[1].value))
|
||||
if isinstance(node.args[1], ast.Str):
|
||||
self.references.add('%s[%s]' % (varname, node.args[1].s))
|
||||
else:
|
||||
self.warn(node.func, node.args[1])
|
||||
else:
|
||||
@@ -282,8 +245,8 @@ class PythonParser():
|
||||
else:
|
||||
self.warn(node.func, node.args[0])
|
||||
elif name and name.endswith(".expand"):
|
||||
if isinstance(node.args[0], ast.Constant):
|
||||
value = node.args[0].value
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
value = node.args[0].s
|
||||
d = bb.data.init()
|
||||
parser = d.expandWithRefs(value, self.name)
|
||||
self.references |= parser.references
|
||||
@@ -293,8 +256,8 @@ class PythonParser():
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname] |= parser.contains[varname]
|
||||
elif name in self.execfuncs:
|
||||
if isinstance(node.args[0], ast.Constant):
|
||||
self.var_execs.add(node.args[0].value)
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
self.var_execs.add(node.args[0].s)
|
||||
else:
|
||||
self.warn(node.func, node.args[0])
|
||||
elif name and isinstance(node.func, (ast.Name, ast.Attribute)):
|
||||
@@ -326,17 +289,11 @@ class PythonParser():
|
||||
self.unhandled_message = "in call of %s, argument '%s' is not a string literal"
|
||||
self.unhandled_message = "while parsing %s, %s" % (name, self.unhandled_message)
|
||||
|
||||
# For the python module code it is expensive to have the function text so it is
# uses a different fixedhash to cache against. We can take the hit on obtaining the
# text if it isn't in the cache.
def parse_python(self, node, lineno=0, filename="<string>", fixedhash=None):
if not fixedhash and (not node or not node.strip()):
def parse_python(self, node, lineno=0, filename="<string>"):
if not node or not node.strip():
return

if fixedhash:
h = fixedhash
else:
h = bbhash(str(node))
h = bbhash(str(node))

if h in codeparsercache.pythoncache:
self.references = set(codeparsercache.pythoncache[h].refs)
@@ -344,7 +301,6 @@ class PythonParser():
self.contains = {}
for i in codeparsercache.pythoncache[h].contains:
self.contains[i] = set(codeparsercache.pythoncache[h].contains[i])
self.extra = codeparsercache.pythoncache[h].extra
return

if h in codeparsercache.pythoncacheextras:
@@ -353,12 +309,8 @@ class PythonParser():
self.contains = {}
for i in codeparsercache.pythoncacheextras[h].contains:
self.contains[i] = set(codeparsercache.pythoncacheextras[h].contains[i])
self.extra = codeparsercache.pythoncacheextras[h].extra
return

if fixedhash and not node:
raise KeyError

# Need to parse so take the hit on the real log buffer
self.log = BufferedLogger('BitBake.Data.PythonParser', logging.DEBUG, self._log)

@@ -372,11 +324,8 @@ class PythonParser():
self.visit_Call(n)

self.execs.update(self.var_execs)
self.extra = None
if fixedhash:
self.extra = bbhash(str(node))

codeparsercache.pythoncacheextras[h] = codeparsercache.newPythonCacheLine(self.references, self.execs, self.contains, self.extra)
codeparsercache.pythoncacheextras[h] = codeparsercache.newPythonCacheLine(self.references, self.execs, self.contains)
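The parse_python() change above is the consumer of that stat-based key: when a fixedhash is supplied it is used directly, otherwise the hash of the code text is used, and a miss with no text raises KeyError so the caller can fetch the source and retry. A stripped-down sketch of that lookup order follows; the cache object, hash choice and parse callable are stand-ins, not the BitBake internals.

import hashlib

def lookup_or_parse(cache, node, fixedhash=None, parse=lambda s: s):
    # Prefer the precomputed key; otherwise hash the code text itself.
    h = fixedhash if fixedhash else hashlib.sha256(str(node).encode()).hexdigest()
    if h in cache:
        return cache[h]
    if fixedhash and not node:
        # Caller passed only a key: signal a miss so it can load the source.
        raise KeyError(h)
    cache[h] = parse(node)
    return cache[h]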
|
||||
|
||||
class ShellParser():
|
||||
def __init__(self, name, log):
|
||||
|
||||
@@ -51,21 +51,20 @@ class Command:
|
||||
"""
|
||||
A queue of asynchronous commands for bitbake
|
||||
"""
|
||||
def __init__(self, cooker, process_server):
|
||||
def __init__(self, cooker):
|
||||
self.cooker = cooker
|
||||
self.cmds_sync = CommandsSync()
|
||||
self.cmds_async = CommandsAsync()
|
||||
self.remotedatastores = None
|
||||
|
||||
self.process_server = process_server
|
||||
# Access with locking using process_server.{get/set/clear}_async_cmd()
|
||||
# FIXME Add lock for this
|
||||
self.currentAsyncCommand = None
|
||||
|
||||
def runCommand(self, commandline, process_server, ro_only=False):
|
||||
def runCommand(self, commandline, ro_only = False):
|
||||
command = commandline.pop(0)
|
||||
|
||||
# Ensure cooker is ready for commands
|
||||
if command not in ["updateConfig", "setFeatures", "ping"]:
|
||||
if command != "updateConfig" and command != "setFeatures":
|
||||
try:
|
||||
self.cooker.init_configdata()
|
||||
if not self.remotedatastores:
|
||||
@@ -85,6 +84,7 @@ class Command:
|
||||
if not hasattr(command_method, 'readonly') or not getattr(command_method, 'readonly'):
|
||||
return None, "Not able to execute not readonly commands in readonly mode"
|
||||
try:
|
||||
self.cooker.process_inotify_updates()
|
||||
if getattr(command_method, 'needconfig', True):
|
||||
self.cooker.updateCacheSync()
|
||||
result = command_method(self, commandline)
|
||||
@@ -99,23 +99,24 @@ class Command:
|
||||
return None, traceback.format_exc()
|
||||
else:
|
||||
return result, None
|
||||
if self.currentAsyncCommand is not None:
|
||||
return None, "Busy (%s in progress)" % self.currentAsyncCommand[0]
|
||||
if command not in CommandsAsync.__dict__:
|
||||
return None, "No such command"
|
||||
if not process_server.set_async_cmd((command, commandline)):
|
||||
return None, "Busy (%s in progress)" % self.process_server.get_async_cmd()[0]
|
||||
self.cooker.idleCallBackRegister(self.runAsyncCommand, process_server)
|
||||
self.currentAsyncCommand = (command, commandline)
|
||||
self.cooker.idleCallBackRegister(self.cooker.runCommands, self.cooker)
|
||||
return True, None
|
||||
|
||||
def runAsyncCommand(self, _, process_server, halt):
|
||||
def runAsyncCommand(self):
|
||||
try:
|
||||
self.cooker.process_inotify_updates()
|
||||
if self.cooker.state in (bb.cooker.state.error, bb.cooker.state.shutdown, bb.cooker.state.forceshutdown):
|
||||
# updateCache will trigger a shutdown of the parser
|
||||
# and then raise BBHandledException triggering an exit
|
||||
self.cooker.updateCache()
|
||||
return bb.server.process.idleFinish("Cooker in error state")
|
||||
cmd = process_server.get_async_cmd()
|
||||
if cmd is not None:
|
||||
(command, options) = cmd
|
||||
return False
|
||||
if self.currentAsyncCommand is not None:
|
||||
(command, options) = self.currentAsyncCommand
|
||||
commandmethod = getattr(CommandsAsync, command)
|
||||
needcache = getattr( commandmethod, "needcache" )
|
||||
if needcache and self.cooker.state != bb.cooker.state.running:
|
||||
@@ -125,21 +126,24 @@ class Command:
|
||||
commandmethod(self.cmds_async, self, options)
|
||||
return False
|
||||
else:
|
||||
return bb.server.process.idleFinish("Nothing to do, no async command?")
|
||||
return False
|
||||
except KeyboardInterrupt as exc:
|
||||
return bb.server.process.idleFinish("Interrupted")
|
||||
self.finishAsyncCommand("Interrupted")
|
||||
return False
|
||||
except SystemExit as exc:
|
||||
arg = exc.args[0]
|
||||
if isinstance(arg, str):
|
||||
return bb.server.process.idleFinish(arg)
|
||||
self.finishAsyncCommand(arg)
|
||||
else:
|
||||
return bb.server.process.idleFinish("Exited with %s" % arg)
|
||||
self.finishAsyncCommand("Exited with %s" % arg)
|
||||
return False
|
||||
except Exception as exc:
|
||||
import traceback
|
||||
if isinstance(exc, bb.BBHandledException):
|
||||
return bb.server.process.idleFinish("")
|
||||
self.finishAsyncCommand("")
|
||||
else:
|
||||
return bb.server.process.idleFinish(traceback.format_exc())
|
||||
self.finishAsyncCommand(traceback.format_exc())
|
||||
return False
|
||||
|
||||
def finishAsyncCommand(self, msg=None, code=None):
|
||||
if msg or msg == "":
|
||||
@@ -148,8 +152,8 @@ class Command:
|
||||
bb.event.fire(CommandExit(code), self.cooker.data)
|
||||
else:
|
||||
bb.event.fire(CommandCompleted(), self.cooker.data)
|
||||
self.currentAsyncCommand = None
|
||||
self.cooker.finishcommand()
|
||||
self.process_server.clear_async_cmd()
|
||||
|
||||
def reset(self):
|
||||
if self.remotedatastores:
|
||||
@@ -162,14 +166,6 @@ class CommandsSync:
|
||||
These must not influence any running synchronous command.
|
||||
"""
|
||||
|
||||
def ping(self, command, params):
|
||||
"""
|
||||
Allow a UI to check the server is still alive
|
||||
"""
|
||||
return "Still alive!"
|
||||
ping.needconfig = False
|
||||
ping.readonly = True
|
||||
|
||||
def stateShutdown(self, command, params):
|
||||
"""
|
||||
Trigger cooker 'shutdown' mode
|
||||
@@ -307,11 +303,6 @@ class CommandsSync:
|
||||
return ret
|
||||
getLayerPriorities.readonly = True
|
||||
|
||||
def revalidateCaches(self, command, params):
|
||||
"""Called by UI clients when metadata may have changed"""
|
||||
command.cooker.revalidateCaches()
|
||||
parseConfiguration.needconfig = False
|
||||
|
||||
def getRecipes(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
@@ -550,8 +541,8 @@ class CommandsSync:
|
||||
and return a datastore object representing the environment
|
||||
for the recipe.
|
||||
"""
|
||||
virtualfn = params[0]
|
||||
(fn, cls, mc) = bb.cache.virtualfn2realfn(virtualfn)
|
||||
fn = params[0]
|
||||
mc = bb.runqueue.mc_from_tid(fn)
|
||||
appends = params[1]
|
||||
appendlist = params[2]
|
||||
if len(params) > 3:
|
||||
@@ -566,7 +557,6 @@ class CommandsSync:
|
||||
appendfiles = command.cooker.collections[mc].get_file_appends(fn)
|
||||
else:
|
||||
appendfiles = []
|
||||
layername = command.cooker.collections[mc].calc_bbfile_priority(fn)[2]
|
||||
# We are calling bb.cache locally here rather than on the server,
|
||||
# but that's OK because it doesn't actually need anything from
|
||||
# the server barring the global datastore (which we have a remote
|
||||
@@ -574,10 +564,11 @@ class CommandsSync:
|
||||
if config_data:
|
||||
# We have to use a different function here if we're passing in a datastore
|
||||
# NOTE: we took a copy above, so we don't do it here again
|
||||
envdata = command.cooker.databuilder._parse_recipe(config_data, fn, appendfiles, mc, layername)[cls]
|
||||
envdata = bb.cache.parse_recipe(config_data, fn, appendfiles, mc)['']
|
||||
else:
|
||||
# Use the standard path
|
||||
envdata = command.cooker.databuilder.parseRecipe(virtualfn, appendfiles, layername)
|
||||
parser = bb.cache.NoCache(command.cooker.databuilder)
|
||||
envdata = parser.loadDataFull(fn, appendfiles)
|
||||
idx = command.remotedatastores.store(envdata)
|
||||
return DataStoreConnectionHandle(idx)
|
||||
parseRecipeFile.readonly = True
|
||||
@@ -750,7 +741,7 @@ class CommandsAsync:
|
||||
"""
|
||||
event = params[0]
|
||||
bb.event.fire(eval(event), command.cooker.data)
|
||||
process_server.clear_async_cmd()
|
||||
command.currentAsyncCommand = None
|
||||
triggerEvent.needcache = False
|
||||
|
||||
def resetCooker(self, command, params):
|
||||
@@ -777,14 +768,7 @@ class CommandsAsync:
|
||||
(mc, pn) = bb.runqueue.split_mc(params[0])
|
||||
taskname = params[1]
|
||||
sigs = params[2]
|
||||
bb.siggen.check_siggen_version(bb.siggen)
|
||||
res = bb.siggen.find_siginfo(pn, taskname, sigs, command.cooker.databuilder.mcdata[mc])
|
||||
bb.event.fire(bb.event.FindSigInfoResult(res), command.cooker.databuilder.mcdata[mc])
|
||||
command.finishAsyncCommand()
|
||||
findSigInfo.needcache = False
|
||||
|
||||
def getTaskSignatures(self, command, params):
|
||||
res = command.cooker.getTaskSignatures(params[0], params[1])
|
||||
bb.event.fire(bb.event.GetTaskSignatureResult(res), command.cooker.data)
|
||||
command.finishAsyncCommand()
|
||||
getTaskSignatures.needcache = True
|
||||
|
||||
@@ -22,6 +22,7 @@ from bb import utils, data, parse, event, cache, providers, taskdata, runqueue,
|
||||
import queue
|
||||
import signal
|
||||
import prserv.serv
|
||||
import pyinotify
|
||||
import json
|
||||
import pickle
|
||||
import codecs
|
||||
@@ -79,7 +80,7 @@ class SkippedPackage:
|
||||
|
||||
|
||||
class CookerFeatures(object):
|
||||
_feature_list = [HOB_EXTRA_CACHES, BASEDATASTORE_TRACKING, SEND_SANITYEVENTS, RECIPE_SIGGEN_INFO] = list(range(4))
|
||||
_feature_list = [HOB_EXTRA_CACHES, BASEDATASTORE_TRACKING, SEND_SANITYEVENTS] = list(range(3))
|
||||
|
||||
def __init__(self):
|
||||
self._features=set()
|
||||
@@ -102,15 +103,12 @@ class CookerFeatures(object):
|
||||
|
||||
class EventWriter:
|
||||
def __init__(self, cooker, eventfile):
|
||||
self.file_inited = None
|
||||
self.cooker = cooker
|
||||
self.eventfile = eventfile
|
||||
self.event_queue = []
|
||||
|
||||
def write_variables(self):
|
||||
with open(self.eventfile, "a") as f:
|
||||
f.write("%s\n" % json.dumps({ "allvariables" : self.cooker.getAllKeysWithFlags(["doc", "func"])}))
|
||||
|
||||
def send(self, event):
|
||||
def write_event(self, event):
|
||||
with open(self.eventfile, "a") as f:
|
||||
try:
|
||||
str_event = codecs.encode(pickle.dumps(event), 'base64').decode('utf-8')
|
||||
@@ -120,6 +118,28 @@ class EventWriter:
|
||||
import traceback
|
||||
print(err, traceback.format_exc())
|
||||
|
||||
def send(self, event):
if self.file_inited:
# we have the file, just write the event
self.write_event(event)
else:
# init on bb.event.BuildStarted
name = "%s.%s" % (event.__module__, event.__class__.__name__)
if name in ("bb.event.BuildStarted", "bb.cooker.CookerExit"):
with open(self.eventfile, "w") as f:
f.write("%s\n" % json.dumps({ "allvariables" : self.cooker.getAllKeysWithFlags(["doc", "func"])}))

self.file_inited = True

# write pending events
for evt in self.event_queue:
self.write_event(evt)

# also write the current event
self.write_event(event)
else:
# queue all events until the file is inited
self.event_queue.append(event)
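The reworked EventWriter above buffers events until the first BuildStarted (or CookerExit) arrives, writes the variable dump, then flushes the queue and switches to direct writes. A hedged, standalone sketch of that buffer-then-flush pattern, independent of the BitBake event types:

class DeferredWriter:
    """Queue items until a start marker arrives, then flush and write directly."""
    def __init__(self, path):
        self.path = path
        self.started = False
        self.queue = []

    def _write(self, line):
        with open(self.path, "a") as f:
            f.write(line + "\n")

    def send(self, line, is_start=False):
        if self.started:
            self._write(line)
        elif is_start:
            open(self.path, "w").close()   # truncate/initialise the log file
            self.started = True
            for pending in self.queue:     # flush everything queued so far
                self._write(pending)
            self._write(line)
        else:
            self.queue.append(line)        # not started yet, hold the event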
|
||||
|
||||
#============================================================================#
|
||||
# BBCooker
|
||||
@@ -129,10 +149,8 @@ class BBCooker:
|
||||
Manages one bitbake build run
|
||||
"""
|
||||
|
||||
def __init__(self, featureSet=None, server=None):
|
||||
def __init__(self, featureSet=None, idleCallBackRegister=None):
|
||||
self.recipecaches = None
|
||||
self.baseconfig_valid = False
|
||||
self.parsecache_valid = False
|
||||
self.eventlog = None
|
||||
self.skiplist = {}
|
||||
self.featureset = CookerFeatures()
|
||||
@@ -145,17 +163,20 @@ class BBCooker:
|
||||
|
||||
self.configuration = bb.cookerdata.CookerConfiguration()
|
||||
|
||||
self.process_server = server
|
||||
self.idleCallBackRegister = None
|
||||
self.waitIdle = None
|
||||
if server:
|
||||
self.idleCallBackRegister = server.register_idle_function
|
||||
self.waitIdle = server.wait_for_idle
|
||||
self.idleCallBackRegister = idleCallBackRegister
|
||||
|
||||
bb.debug(1, "BBCooker starting %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
|
||||
self.configwatched = {}
|
||||
self.parsewatched = {}
|
||||
self.configwatcher = None
|
||||
self.confignotifier = None
|
||||
|
||||
self.watchmask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_CREATE | pyinotify.IN_DELETE | \
|
||||
pyinotify.IN_DELETE_SELF | pyinotify.IN_MODIFY | pyinotify.IN_MOVE_SELF | \
|
||||
pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO
|
||||
|
||||
self.watcher = None
|
||||
self.notifier = None
|
||||
|
||||
# If being called by something like tinfoil, we need to clean cached data
|
||||
# which may now be invalid
|
||||
@@ -166,6 +187,14 @@ class BBCooker:
|
||||
self.hashserv = None
|
||||
self.hashservaddr = None
|
||||
|
||||
self.inotify_modified_files = []
|
||||
|
||||
def _process_inotify_updates(server, cooker, halt):
|
||||
cooker.process_inotify_updates()
|
||||
return 1.0
|
||||
|
||||
self.idleCallBackRegister(_process_inotify_updates, self)
|
||||
|
||||
# TOSTOP must not be set or our children will hang when they output
|
||||
try:
|
||||
fd = sys.stdout.fileno()
|
||||
@@ -179,7 +208,7 @@ class BBCooker:
|
||||
except UnsupportedOperation:
|
||||
pass
|
||||
|
||||
self.command = bb.command.Command(self, self.process_server)
|
||||
self.command = bb.command.Command(self)
|
||||
self.state = state.initial
|
||||
|
||||
self.parser = None
|
||||
@@ -189,37 +218,116 @@ class BBCooker:
|
||||
signal.signal(signal.SIGHUP, self.sigterm_exception)
|
||||
|
||||
bb.debug(1, "BBCooker startup complete %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
|
||||
def init_configdata(self):
|
||||
if not hasattr(self, "data"):
|
||||
self.initConfigurationData()
|
||||
bb.debug(1, "BBCooker parsed base configuration %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
self.handlePRServ()
|
||||
|
||||
def _baseconfig_set(self, value):
|
||||
if value and not self.baseconfig_valid:
|
||||
bb.server.process.serverlog("Base config valid")
|
||||
elif not value and self.baseconfig_valid:
|
||||
bb.server.process.serverlog("Base config invalidated")
|
||||
self.baseconfig_valid = value
|
||||
def setupConfigWatcher(self):
|
||||
if self.configwatcher:
|
||||
self.configwatcher.close()
|
||||
self.confignotifier = None
|
||||
self.configwatcher = None
|
||||
self.configwatcher = pyinotify.WatchManager()
|
||||
self.configwatcher.bbseen = set()
|
||||
self.configwatcher.bbwatchedfiles = set()
|
||||
self.confignotifier = pyinotify.Notifier(self.configwatcher, self.config_notifications)
|
||||
|
||||
def _parsecache_set(self, value):
|
||||
if value and not self.parsecache_valid:
|
||||
bb.server.process.serverlog("Parse cache valid")
|
||||
elif not value and self.parsecache_valid:
|
||||
bb.server.process.serverlog("Parse cache invalidated")
|
||||
self.parsecache_valid = value
|
||||
def setupParserWatcher(self):
|
||||
if self.watcher:
|
||||
self.watcher.close()
|
||||
self.notifier = None
|
||||
self.watcher = None
|
||||
self.watcher = pyinotify.WatchManager()
|
||||
self.watcher.bbseen = set()
|
||||
self.watcher.bbwatchedfiles = set()
|
||||
self.notifier = pyinotify.Notifier(self.watcher, self.notifications)
|
||||
|
||||
def add_filewatch(self, deps, configwatcher=False):
|
||||
if configwatcher:
|
||||
watcher = self.configwatched
|
||||
else:
|
||||
watcher = self.parsewatched
|
||||
def process_inotify_updates(self):
|
||||
for n in [self.confignotifier, self.notifier]:
|
||||
if n and n.check_events(timeout=0):
|
||||
# read notified events and enqeue them
|
||||
n.read_events()
|
||||
n.process_events()
|
||||
|
||||
def config_notifications(self, event):
|
||||
if event.maskname == "IN_Q_OVERFLOW":
|
||||
bb.warn("inotify event queue overflowed, invalidating caches.")
|
||||
self.parsecache_valid = False
|
||||
self.baseconfig_valid = False
|
||||
bb.parse.clear_cache()
|
||||
return
|
||||
if not event.pathname in self.configwatcher.bbwatchedfiles:
|
||||
return
|
||||
if "IN_ISDIR" in event.maskname:
|
||||
if "IN_CREATE" in event.maskname or "IN_DELETE" in event.maskname:
|
||||
if event.pathname in self.configwatcher.bbseen:
|
||||
self.configwatcher.bbseen.remove(event.pathname)
|
||||
# Could remove all entries starting with the directory but for now...
|
||||
bb.parse.clear_cache()
|
||||
if not event.pathname in self.inotify_modified_files:
|
||||
self.inotify_modified_files.append(event.pathname)
|
||||
self.baseconfig_valid = False
|
||||
|
||||
def notifications(self, event):
|
||||
if event.maskname == "IN_Q_OVERFLOW":
|
||||
bb.warn("inotify event queue overflowed, invalidating caches.")
|
||||
self.parsecache_valid = False
|
||||
bb.parse.clear_cache()
|
||||
return
|
||||
if event.pathname.endswith("bitbake-cookerdaemon.log") \
|
||||
or event.pathname.endswith("bitbake.lock"):
|
||||
return
|
||||
if "IN_ISDIR" in event.maskname:
|
||||
if "IN_CREATE" in event.maskname or "IN_DELETE" in event.maskname:
|
||||
if event.pathname in self.watcher.bbseen:
|
||||
self.watcher.bbseen.remove(event.pathname)
|
||||
# Could remove all entries starting with the directory but for now...
|
||||
bb.parse.clear_cache()
|
||||
if not event.pathname in self.inotify_modified_files:
|
||||
self.inotify_modified_files.append(event.pathname)
|
||||
self.parsecache_valid = False
|
||||
|
||||
def add_filewatch(self, deps, watcher=None, dirs=False):
|
||||
if not watcher:
|
||||
watcher = self.watcher
|
||||
for i in deps:
|
||||
f = i[0]
|
||||
mtime = i[1]
|
||||
watcher[f] = mtime
|
||||
watcher.bbwatchedfiles.add(i[0])
|
||||
if dirs:
|
||||
f = i[0]
|
||||
else:
|
||||
f = os.path.dirname(i[0])
|
||||
if f in watcher.bbseen:
|
||||
continue
|
||||
watcher.bbseen.add(f)
|
||||
watchtarget = None
|
||||
while True:
|
||||
# We try and add watches for files that don't exist but if they did, would influence
|
||||
# the parser. The parent directory of these files may not exist, in which case we need
|
||||
# to watch any parent that does exist for changes.
|
||||
try:
|
||||
watcher.add_watch(f, self.watchmask, quiet=False)
|
||||
if watchtarget:
|
||||
watcher.bbwatchedfiles.add(watchtarget)
|
||||
break
|
||||
except pyinotify.WatchManagerError as e:
|
||||
if 'ENOENT' in str(e):
|
||||
watchtarget = f
|
||||
f = os.path.dirname(f)
|
||||
if f in watcher.bbseen:
|
||||
break
|
||||
watcher.bbseen.add(f)
|
||||
continue
|
||||
if 'ENOSPC' in str(e):
|
||||
providerlog.error("No space left on device or exceeds fs.inotify.max_user_watches?")
|
||||
providerlog.error("To check max_user_watches: sysctl -n fs.inotify.max_user_watches.")
|
||||
providerlog.error("To modify max_user_watches: sysctl -n -w fs.inotify.max_user_watches=<value>.")
|
||||
providerlog.error("Root privilege is required to modify max_user_watches.")
|
||||
raise
|
||||
|
||||
def sigterm_exception(self, signum, stackframe):
|
||||
if signum == signal.SIGTERM:
|
||||
@@ -227,7 +335,6 @@ class BBCooker:
|
||||
elif signum == signal.SIGHUP:
|
||||
bb.warn("Cooker received SIGHUP, shutting down...")
|
||||
self.state = state.forceshutdown
|
||||
bb.event._should_exit.set()
|
||||
|
||||
def setFeatures(self, features):
|
||||
# we only accept a new feature set if we're in state initial, so we can reset without problems
|
||||
@@ -250,7 +357,7 @@ class BBCooker:
|
||||
if mod not in self.orig_sysmodules:
|
||||
del sys.modules[mod]
|
||||
|
||||
self.configwatched = {}
|
||||
self.setupConfigWatcher()
|
||||
|
||||
# Need to preserve BB_CONSOLELOG over resets
|
||||
consolelog = None
|
||||
@@ -260,12 +367,12 @@ class BBCooker:
|
||||
if CookerFeatures.BASEDATASTORE_TRACKING in self.featureset:
|
||||
self.enableDataTracking()
|
||||
|
||||
caches_name_array = ['bb.cache:CoreRecipeInfo']
|
||||
all_extra_cache_names = []
|
||||
# We hardcode all known cache types in a single place, here.
|
||||
if CookerFeatures.HOB_EXTRA_CACHES in self.featureset:
|
||||
caches_name_array.append("bb.cache_extra:HobRecipeInfo")
|
||||
if CookerFeatures.RECIPE_SIGGEN_INFO in self.featureset:
|
||||
caches_name_array.append("bb.cache:SiggenRecipeInfo")
|
||||
all_extra_cache_names.append("bb.cache_extra:HobRecipeInfo")
|
||||
|
||||
caches_name_array = ['bb.cache:CoreRecipeInfo'] + all_extra_cache_names
|
||||
|
||||
# At least CoreRecipeInfo will be loaded, so caches_array will never be empty!
|
||||
# This is the entry point, no further check needed!
|
||||
@@ -284,10 +391,6 @@ class BBCooker:
|
||||
self.data_hash = self.databuilder.data_hash
|
||||
self.extraconfigdata = {}
|
||||
|
||||
eventlog = self.data.getVar("BB_DEFAULT_EVENTLOG")
|
||||
if not self.configuration.writeeventlog and eventlog:
|
||||
self.setupEventLog(eventlog)
|
||||
|
||||
if consolelog:
|
||||
self.data.setVar("BB_CONSOLELOG", consolelog)
|
||||
|
||||
@@ -297,10 +400,12 @@ class BBCooker:
|
||||
self.disableDataTracking()
|
||||
|
||||
for mc in self.databuilder.mcdata.values():
|
||||
self.add_filewatch(mc.getVar("__base_depends", False), configwatcher=True)
|
||||
mc.renameVar("__depends", "__base_depends")
|
||||
self.add_filewatch(mc.getVar("__base_depends", False), self.configwatcher)
|
||||
mc.setVar("__bbclasstype", "recipe")
|
||||
|
||||
self._baseconfig_set(True)
|
||||
self._parsecache_set(False)
|
||||
self.baseconfig_valid = True
|
||||
self.parsecache_valid = False
|
||||
|
||||
def handlePRServ(self):
|
||||
# Setup a PR Server based on the new configuration
|
||||
@@ -315,13 +420,13 @@ class BBCooker:
|
||||
dbfile = (self.data.getVar("PERSISTENT_DIR") or self.data.getVar("CACHE")) + "/hashserv.db"
|
||||
upstream = self.data.getVar("BB_HASHSERVE_UPSTREAM") or None
|
||||
if upstream:
|
||||
import socket
|
||||
try:
|
||||
with hashserv.create_client(upstream) as client:
|
||||
client.ping()
|
||||
except (ConnectionError, ImportError) as e:
|
||||
sock = socket.create_connection(upstream.split(":"), 5)
|
||||
sock.close()
|
||||
except socket.error as e:
|
||||
bb.warn("BB_HASHSERVE_UPSTREAM is not valid, unable to connect hash equivalence server at '%s': %s"
|
||||
% (upstream, repr(e)))
|
||||
upstream = None
|
||||
|
||||
self.hashservaddr = "unix://%s/hashserve.sock" % self.data.getVar("TOPDIR")
|
||||
self.hashserv = hashserv.create_server(
|
||||
@@ -330,9 +435,11 @@ class BBCooker:
|
||||
sync=False,
|
||||
upstream=upstream,
|
||||
)
|
||||
self.hashserv.serve_as_process(log_level=logging.WARNING)
|
||||
self.hashserv.serve_as_process()
|
||||
self.data.setVar("BB_HASHSERVE", self.hashservaddr)
|
||||
self.databuilder.origdata.setVar("BB_HASHSERVE", self.hashservaddr)
|
||||
self.databuilder.data.setVar("BB_HASHSERVE", self.hashservaddr)
|
||||
for mc in self.databuilder.mcdata:
|
||||
self.databuilder.mcorigdata[mc].setVar("BB_HASHSERVE", self.hashservaddr)
|
||||
self.databuilder.mcdata[mc].setVar("BB_HASHSERVE", self.hashservaddr)
|
||||
|
||||
bb.parse.init_parser(self.data)
|
||||
@@ -347,29 +454,6 @@ class BBCooker:
|
||||
if hasattr(self, "data"):
|
||||
self.data.disableTracking()
|
||||
|
||||
def revalidateCaches(self):
|
||||
bb.parse.clear_cache()
|
||||
|
||||
clean = True
|
||||
for f in self.configwatched:
|
||||
if not bb.parse.check_mtime(f, self.configwatched[f]):
|
||||
bb.server.process.serverlog("Found %s changed, invalid cache" % f)
|
||||
self._baseconfig_set(False)
|
||||
self._parsecache_set(False)
|
||||
clean = False
|
||||
break
|
||||
|
||||
if clean:
|
||||
for f in self.parsewatched:
|
||||
if not bb.parse.check_mtime(f, self.parsewatched[f]):
|
||||
bb.server.process.serverlog("Found %s changed, invalid cache" % f)
|
||||
self._parsecache_set(False)
|
||||
clean = False
|
||||
break
|
||||
|
||||
if not clean:
|
||||
bb.parse.BBHandler.cached_statements = {}
|
||||
|
||||
def parseConfiguration(self):
|
||||
self.updateCacheSync()
|
||||
|
||||
@@ -388,24 +472,8 @@ class BBCooker:
|
||||
self.recipecaches[mc] = bb.cache.CacheData(self.caches_array)
|
||||
|
||||
self.handleCollections(self.data.getVar("BBFILE_COLLECTIONS"))
|
||||
self.collections = {}
|
||||
for mc in self.multiconfigs:
|
||||
self.collections[mc] = CookerCollectFiles(self.bbfile_config_priorities, mc)
|
||||
|
||||
self._parsecache_set(False)
|
||||
|
||||
def setupEventLog(self, eventlog):
|
||||
if self.eventlog and self.eventlog[0] != eventlog:
|
||||
bb.event.unregister_UIHhandler(self.eventlog[1])
|
||||
self.eventlog = None
|
||||
if not self.eventlog or self.eventlog[0] != eventlog:
|
||||
# we log all events to a file if so directed
|
||||
# register the log file writer as UI Handler
|
||||
if not os.path.exists(os.path.dirname(eventlog)):
|
||||
bb.utils.mkdirhier(os.path.dirname(eventlog))
|
||||
writer = EventWriter(self, eventlog)
|
||||
EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event'])
|
||||
self.eventlog = (eventlog, bb.event.register_UIHhandler(EventLogWriteHandler(writer)), writer)
|
||||
self.parsecache_valid = False
|
||||
|
||||
def updateConfigOpts(self, options, environment, cmdline):
|
||||
self.ui_cmdline = cmdline
|
||||
@@ -426,7 +494,14 @@ class BBCooker:
|
||||
setattr(self.configuration, o, options[o])
|
||||
|
||||
if self.configuration.writeeventlog:
|
||||
self.setupEventLog(self.configuration.writeeventlog)
|
||||
if self.eventlog and self.eventlog[0] != self.configuration.writeeventlog:
|
||||
bb.event.unregister_UIHhandler(self.eventlog[1])
|
||||
if not self.eventlog or self.eventlog[0] != self.configuration.writeeventlog:
|
||||
# we log all events to a file if so directed
|
||||
# register the log file writer as UI Handler
|
||||
writer = EventWriter(self, self.configuration.writeeventlog)
|
||||
EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event'])
|
||||
self.eventlog = (self.configuration.writeeventlog, bb.event.register_UIHhandler(EventLogWriteHandler(writer)))
|
||||
|
||||
bb.msg.loggerDefaultLogLevel = self.configuration.default_loglevel
|
||||
bb.msg.loggerDefaultDomains = self.configuration.debug_domains
|
||||
@@ -456,11 +531,19 @@ class BBCooker:
|
||||
# Now update all the variables not in the datastore to match
|
||||
self.configuration.env = environment
|
||||
|
||||
self.revalidateCaches()
|
||||
if not clean:
|
||||
logger.debug("Base environment change, triggering reparse")
|
||||
self.reset()
|
||||
|
||||
def runCommands(self, server, data, halt):
|
||||
"""
|
||||
Run any queued asynchronous command
|
||||
This is done by the idle handler so it runs in true context rather than
|
||||
tied to any UI.
|
||||
"""
|
||||
|
||||
return self.command.runAsyncCommand()
|
||||
|
||||
def showVersions(self):
|
||||
|
||||
(latest_versions, preferred_versions, required) = self.findProviders()
|
||||
@@ -534,14 +617,14 @@ class BBCooker:
|
||||
|
||||
if fn:
|
||||
try:
|
||||
layername = self.collections[mc].calc_bbfile_priority(fn)[2]
|
||||
envdata = self.databuilder.parseRecipe(fn, self.collections[mc].get_file_appends(fn), layername)
|
||||
bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.data_hash, self.caches_array)
|
||||
envdata = bb_caches[mc].loadDataFull(fn, self.collections[mc].get_file_appends(fn))
|
||||
except Exception as e:
|
||||
parselog.exception("Unable to read %s", fn)
|
||||
raise
|
||||
else:
|
||||
if not mc in self.databuilder.mcdata:
|
||||
bb.fatal('No multiconfig named "%s" found' % mc)
|
||||
bb.fatal('Not multiconfig named "%s" found' % mc)
|
||||
envdata = self.databuilder.mcdata[mc]
|
||||
data.expandKeys(envdata)
|
||||
parse.ast.runAnonFuncs(envdata)
|
||||
@@ -1273,8 +1356,8 @@ class BBCooker:
|
||||
if bf.startswith("/") or bf.startswith("../"):
|
||||
bf = os.path.abspath(bf)
|
||||
|
||||
collections = {mc: CookerCollectFiles(self.bbfile_config_priorities, mc)}
|
||||
filelist, masked, searchdirs = collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc])
|
||||
self.collections = {mc: CookerCollectFiles(self.bbfile_config_priorities, mc)}
|
||||
filelist, masked, searchdirs = self.collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc])
|
||||
try:
|
||||
os.stat(bf)
|
||||
bf = os.path.abspath(bf)
|
||||
@@ -1340,8 +1423,7 @@ class BBCooker:
|
||||
|
||||
bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.data_hash, self.caches_array)
|
||||
|
||||
layername = self.collections[mc].calc_bbfile_priority(fn)[2]
|
||||
infos = bb_caches[mc].parse(fn, self.collections[mc].get_file_appends(fn), layername)
|
||||
infos = bb_caches[mc].parse(fn, self.collections[mc].get_file_appends(fn))
|
||||
infos = dict(infos)
|
||||
|
||||
fn = bb.cache.realfn2virtual(fn, cls, mc)
|
||||
@@ -1367,12 +1449,10 @@ class BBCooker:
|
||||
self.recipecaches[mc].rundeps[fn] = defaultdict(list)
|
||||
self.recipecaches[mc].runrecs[fn] = defaultdict(list)
|
||||
|
||||
bb.parse.siggen.setup_datacache(self.recipecaches)
|
||||
|
||||
# Invalidate task for target if force mode active
|
||||
if self.configuration.force:
|
||||
logger.verbose("Invalidate task %s, %s", task, fn)
|
||||
bb.parse.siggen.invalidate_task(task, fn)
|
||||
bb.parse.siggen.invalidate_task(task, self.recipecaches[mc], fn)
|
||||
|
||||
# Setup taskdata structure
|
||||
taskdata = {}
|
||||
@@ -1386,9 +1466,6 @@ class BBCooker:
|
||||
buildname = self.databuilder.mcdata[mc].getVar("BUILDNAME")
|
||||
if fireevents:
|
||||
bb.event.fire(bb.event.BuildStarted(buildname, [item]), self.databuilder.mcdata[mc])
|
||||
if self.eventlog:
|
||||
self.eventlog[2].write_variables()
|
||||
bb.event.enable_heartbeat()
|
||||
|
||||
# Execute the runqueue
|
||||
runlist = [[mc, item, task, fn]]
|
||||
@@ -1414,57 +1491,28 @@ class BBCooker:
|
||||
failures += len(exc.args)
|
||||
retval = False
|
||||
except SystemExit as exc:
|
||||
self.command.finishAsyncCommand(str(exc))
|
||||
if quietlog:
|
||||
bb.runqueue.logger.setLevel(rqloglevel)
|
||||
return bb.server.process.idleFinish(str(exc))
|
||||
return False
|
||||
|
||||
if not retval:
|
||||
if fireevents:
|
||||
bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, item, failures, interrupted), self.databuilder.mcdata[mc])
|
||||
bb.event.disable_heartbeat()
|
||||
self.command.finishAsyncCommand(msg)
|
||||
# We trashed self.recipecaches above
|
||||
self._parsecache_set(False)
|
||||
self.parsecache_valid = False
|
||||
self.configuration.limited_deps = False
|
||||
bb.parse.siggen.reset(self.data)
|
||||
if quietlog:
|
||||
bb.runqueue.logger.setLevel(rqloglevel)
|
||||
return bb.server.process.idleFinish(msg)
|
||||
return False
|
||||
if retval is True:
|
||||
return True
|
||||
return retval
|
||||
|
||||
self.idleCallBackRegister(buildFileIdle, rq)
|
||||
|
||||
def getTaskSignatures(self, target, tasks):
|
||||
sig = []
|
||||
getAllTaskSignatures = False
|
||||
|
||||
if not tasks:
|
||||
tasks = ["do_build"]
|
||||
getAllTaskSignatures = True
|
||||
|
||||
for task in tasks:
|
||||
taskdata, runlist = self.buildTaskData(target, task, self.configuration.halt)
|
||||
rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
|
||||
rq.rqdata.prepare()
|
||||
|
||||
for l in runlist:
|
||||
mc, pn, taskname, fn = l
|
||||
|
||||
taskdep = rq.rqdata.dataCaches[mc].task_deps[fn]
|
||||
for t in taskdep['tasks']:
|
||||
if t in taskdep['nostamp'] or "setscene" in t:
|
||||
continue
|
||||
tid = bb.runqueue.build_tid(mc, fn, t)
|
||||
|
||||
if t in task or getAllTaskSignatures:
|
||||
try:
|
||||
sig.append([pn, t, rq.rqdata.get_task_unihash(tid)])
|
||||
except KeyError:
|
||||
sig.append(self.getTaskSignatures(target, [t])[0])
|
||||
|
||||
return sig
|
||||
|
||||
def buildTargets(self, targets, task):
|
||||
"""
|
||||
Attempt to build the targets specified
|
||||
@@ -1474,7 +1522,6 @@ class BBCooker:
|
||||
msg = None
|
||||
interrupted = 0
|
||||
if halt or self.state == state.forceshutdown:
|
||||
bb.event._should_exit.set()
|
||||
rq.finish_runqueue(True)
|
||||
msg = "Forced shutdown"
|
||||
interrupted = 2
|
||||
@@ -1489,16 +1536,16 @@ class BBCooker:
|
||||
failures += len(exc.args)
|
||||
retval = False
|
||||
except SystemExit as exc:
|
||||
return bb.server.process.idleFinish(str(exc))
|
||||
self.command.finishAsyncCommand(str(exc))
|
||||
return False
|
||||
|
||||
if not retval:
|
||||
try:
|
||||
for mc in self.multiconfigs:
|
||||
bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, targets, failures, interrupted), self.databuilder.mcdata[mc])
|
||||
finally:
|
||||
bb.event.disable_heartbeat()
|
||||
return bb.server.process.idleFinish(msg)
|
||||
|
||||
self.command.finishAsyncCommand(msg)
|
||||
return False
|
||||
if retval is True:
|
||||
return True
|
||||
return retval
|
||||
@@ -1530,9 +1577,6 @@ class BBCooker:
|
||||
|
||||
for mc in self.multiconfigs:
|
||||
bb.event.fire(bb.event.BuildStarted(buildname, ntargets), self.databuilder.mcdata[mc])
|
||||
if self.eventlog:
|
||||
self.eventlog[2].write_variables()
|
||||
bb.event.enable_heartbeat()
|
||||
|
||||
rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
|
||||
if 'universe' in targets:
|
||||
@@ -1542,13 +1586,7 @@ class BBCooker:
|
||||
|
||||
|
||||
def getAllKeysWithFlags(self, flaglist):
|
||||
def dummy_autorev(d):
|
||||
return
|
||||
|
||||
dump = {}
|
||||
# Horrible but for now we need to avoid any sideeffects of autorev being called
|
||||
saved = bb.fetch2.get_autorev
|
||||
bb.fetch2.get_autorev = dummy_autorev
|
||||
for k in self.data.keys():
|
||||
try:
|
||||
expand = True
|
||||
@@ -1568,7 +1606,6 @@ class BBCooker:
|
||||
dump[k][d] = None
|
||||
except Exception as e:
|
||||
print(e)
|
||||
bb.fetch2.get_autorev = saved
|
||||
return dump
|
||||
|
||||
|
||||
@@ -1576,6 +1613,13 @@ class BBCooker:
|
||||
if self.state == state.running:
|
||||
return
|
||||
|
||||
# reload files for which we got notifications
|
||||
for p in self.inotify_modified_files:
|
||||
bb.parse.update_cache(p)
|
||||
if p in bb.parse.BBHandler.cached_statements:
|
||||
del bb.parse.BBHandler.cached_statements[p]
|
||||
self.inotify_modified_files = []
|
||||
|
||||
if not self.baseconfig_valid:
|
||||
logger.debug("Reloading base configuration data")
|
||||
self.initConfigurationData()
|
||||
@@ -1596,8 +1640,7 @@ class BBCooker:
|
||||
self.updateCacheSync()
|
||||
|
||||
if self.state != state.parsing and not self.parsecache_valid:
|
||||
bb.server.process.serverlog("Parsing started")
|
||||
self.parsewatched = {}
|
||||
self.setupParserWatcher()
|
||||
|
||||
bb.parse.siggen.reset(self.data)
|
||||
self.parseConfiguration ()
|
||||
@@ -1612,22 +1655,25 @@ class BBCooker:
|
||||
for dep in self.configuration.extra_assume_provided:
|
||||
self.recipecaches[mc].ignored_dependencies.add(dep)
|
||||
|
||||
self.collections = {}
|
||||
|
||||
mcfilelist = {}
|
||||
total_masked = 0
|
||||
searchdirs = set()
|
||||
for mc in self.multiconfigs:
|
||||
self.collections[mc] = CookerCollectFiles(self.bbfile_config_priorities, mc)
|
||||
(filelist, masked, search) = self.collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc])
|
||||
|
||||
mcfilelist[mc] = filelist
|
||||
total_masked += masked
|
||||
searchdirs |= set(search)
|
||||
|
||||
# Add mtimes for directories searched for bb/bbappend files
|
||||
# Add inotify watches for directories searched for bb/bbappend files
|
||||
for dirent in searchdirs:
|
||||
self.add_filewatch([(dirent, bb.parse.cached_mtime_noerror(dirent))])
|
||||
self.add_filewatch([[dirent]], dirs=True)
|
||||
|
||||
self.parser = CookerParser(self, mcfilelist, total_masked)
|
||||
self._parsecache_set(True)
|
||||
self.parsecache_valid = True
|
||||
|
||||
self.state = state.parsing
|
||||
|
||||
@@ -1710,28 +1756,22 @@ class BBCooker:
|
||||
if hasattr(self, "data"):
|
||||
bb.event.fire(CookerExit(), self.data)
|
||||
|
||||
def shutdown(self, force=False):
|
||||
def shutdown(self, force = False):
|
||||
if force:
|
||||
self.state = state.forceshutdown
|
||||
bb.event._should_exit.set()
|
||||
else:
|
||||
self.state = state.shutdown
|
||||
|
||||
if self.parser:
|
||||
self.parser.shutdown(clean=False)
|
||||
self.parser.shutdown(clean=not force)
|
||||
self.parser.final_cleanup()
|
||||
|
||||
def finishcommand(self):
|
||||
if hasattr(self.parser, 'shutdown'):
|
||||
self.parser.shutdown(clean=False)
|
||||
self.parser.final_cleanup()
|
||||
self.state = state.initial
|
||||
bb.event._should_exit.clear()
|
||||
|
||||
def reset(self):
|
||||
if hasattr(bb.parse, "siggen"):
|
||||
bb.parse.siggen.exit()
|
||||
self.finishcommand()
|
||||
self.initConfigurationData()
|
||||
self.handlePRServ()
|
||||
|
||||
@@ -1743,9 +1783,9 @@ class BBCooker:
|
||||
if hasattr(self, "data"):
|
||||
self.databuilder.reset()
|
||||
self.data = self.databuilder.data
|
||||
# In theory tinfoil could have modified the base data before parsing,
|
||||
# ideally need to track if anything did modify the datastore
|
||||
self._parsecache_set(False)
|
||||
self.parsecache_valid = False
|
||||
self.baseconfig_valid = False
|
||||
|
||||
|
||||
class CookerExit(bb.event.Event):
|
||||
"""
|
||||
@@ -1766,10 +1806,10 @@ class CookerCollectFiles(object):
|
||||
self.bbfile_config_priorities = sorted(priorities, key=lambda tup: tup[1], reverse=True)
|
||||
|
||||
def calc_bbfile_priority(self, filename):
|
||||
for layername, _, regex, pri in self.bbfile_config_priorities:
|
||||
for _, _, regex, pri in self.bbfile_config_priorities:
|
||||
if regex.match(filename):
|
||||
return pri, regex, layername
|
||||
return 0, None, None
|
||||
return pri, regex
|
||||
return 0, None
|
||||
|
||||
def get_bbfiles(self):
|
||||
"""Get list of default .bb files by reading out the current directory"""
|
||||
@@ -1788,7 +1828,7 @@ class CookerCollectFiles(object):
|
||||
for ignored in ('SCCS', 'CVS', '.svn'):
|
||||
if ignored in dirs:
|
||||
dirs.remove(ignored)
|
||||
found += [os.path.join(dir, f) for f in files if (f.endswith(('.bb', '.bbappend')))]
|
||||
found += [os.path.join(dir, f) for f in files if (f.endswith(['.bb', '.bbappend']))]
|
||||
|
||||
return found
|
||||
|
||||
@@ -1811,7 +1851,7 @@ class CookerCollectFiles(object):
|
||||
collectlog.error("no recipe files to build, check your BBPATH and BBFILES?")
|
||||
bb.event.fire(CookerExit(), eventdata)
|
||||
|
||||
# We need to track where we look so that we can know when the cache is invalid. There
|
||||
# We need to track where we look so that we can add inotify watches. There
|
||||
# is no nice way to do this, this is horrid. We intercept the os.listdir()
|
||||
# (or os.scandir() for python 3.6+) calls while we run glob().
|
||||
origlistdir = os.listdir
|
||||
@@ -1942,7 +1982,7 @@ class CookerCollectFiles(object):
|
||||
# Calculate priorities for each file
|
||||
for p in pkgfns:
|
||||
realfn, cls, mc = bb.cache.virtualfn2realfn(p)
|
||||
priorities[p], regex, _ = self.calc_bbfile_priority(realfn)
|
||||
priorities[p], regex = self.calc_bbfile_priority(realfn)
|
||||
if regex in unmatched_regex:
|
||||
matched_regex.add(regex)
|
||||
unmatched_regex.remove(regex)
|
||||
@@ -2052,34 +2092,34 @@ class Parser(multiprocessing.Process):
|
||||
multiprocessing.util.Finalize(None, bb.fetch.fetcher_parse_save, exitpriority=1)

pending = []
havejobs = True
try:
while havejobs or pending:
if self.quit.is_set():
while True:
try:
self.quit.get_nowait()
except queue.Empty:
pass
else:
break

job = None
try:
job = self.jobs.pop()
except IndexError:
havejobs = False
if job:
if pending:
result = pending.pop()
else:
try:
job = self.jobs.pop()
except IndexError:
break
result = self.parse(*job)
# Clear the siggen cache after parsing to control memory usage, its huge
bb.parse.siggen.postparsing_clean_cache()
try:
self.results.put(result, timeout=0.25)
except queue.Full:
pending.append(result)

if pending:
try:
result = pending.pop()
self.results.put(result, timeout=0.05)
except queue.Full:
pending.append(result)
finally:
self.results.close()
self.results.join_thread()
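The newer Parser.run() loop above drains the shared job list while tolerating a full results queue: results that cannot be posted within the timeout are parked in a local pending list and retried on later iterations. A minimal single-process sketch of the same back-pressure pattern, with stand-in names:

import queue

def drain(jobs, results, work, timeout=0.25):
    # jobs: list of inputs; results: a bounded queue.Queue; work: callable.
    pending = []
    havejobs = True
    while havejobs or pending:
        if pending:
            result = pending.pop()          # retry a result we failed to post
        else:
            try:
                result = work(jobs.pop())
            except IndexError:
                havejobs = False            # job list exhausted
                continue
        try:
            results.put(result, timeout=timeout)
        except queue.Full:
            pending.append(result)          # park it and try again later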
|
||||
|
||||
def parse(self, mc, cache, filename, appends, layername):
|
||||
def parse(self, mc, cache, filename, appends):
|
||||
try:
|
||||
origfilter = bb.event.LogHandler.filter
|
||||
# Record the filename we're parsing into any events generated
|
||||
@@ -2093,7 +2133,7 @@ class Parser(multiprocessing.Process):
|
||||
bb.event.set_class_handlers(self.handlers.copy())
|
||||
bb.event.LogHandler.filter = parse_filter
|
||||
|
||||
return True, mc, cache.parse(filename, appends, layername)
|
||||
return True, mc, cache.parse(filename, appends)
|
||||
except Exception as exc:
|
||||
tb = sys.exc_info()[2]
|
||||
exc.recipe = filename
|
||||
@@ -2133,11 +2173,10 @@ class CookerParser(object):
|
||||
for mc in self.cooker.multiconfigs:
|
||||
for filename in self.mcfilelist[mc]:
|
||||
appends = self.cooker.collections[mc].get_file_appends(filename)
|
||||
layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2]
|
||||
if not self.bb_caches[mc].cacheValid(filename, appends):
|
||||
self.willparse.add((mc, self.bb_caches[mc], filename, appends, layername))
|
||||
self.willparse.add((mc, self.bb_caches[mc], filename, appends))
|
||||
else:
|
||||
self.fromcache.add((mc, self.bb_caches[mc], filename, appends, layername))
|
||||
self.fromcache.add((mc, self.bb_caches[mc], filename, appends))
|
||||
|
||||
self.total = len(self.fromcache) + len(self.willparse)
|
||||
self.toparse = len(self.willparse)
|
||||
@@ -2146,7 +2185,6 @@ class CookerParser(object):
|
||||
self.num_processes = min(int(self.cfgdata.getVar("BB_NUMBER_PARSE_THREADS") or
|
||||
multiprocessing.cpu_count()), self.toparse)
|
||||
|
||||
bb.cache.SiggenRecipeInfo.reset()
|
||||
self.start()
|
||||
self.haveshutdown = False
|
||||
self.syncthread = None
|
||||
@@ -2157,7 +2195,7 @@ class CookerParser(object):
|
||||
if self.toparse:
|
||||
bb.event.fire(bb.event.ParseStarted(self.toparse), self.cfgdata)
|
||||
|
||||
self.parser_quit = multiprocessing.Event()
|
||||
self.parser_quit = multiprocessing.Queue(maxsize=self.num_processes)
|
||||
self.result_queue = multiprocessing.Queue()
|
||||
|
||||
def chunkify(lst,n):
|
||||
@@ -2172,7 +2210,7 @@ class CookerParser(object):
|
||||
|
||||
self.results = itertools.chain(self.results, self.parse_generator())
|
||||
|
||||
def shutdown(self, clean=True, eventmsg="Parsing halted due to errors"):
|
||||
def shutdown(self, clean=True):
|
||||
if not self.toparse:
|
||||
return
|
||||
if self.haveshutdown:
|
||||
@@ -2187,9 +2225,11 @@ class CookerParser(object):
|
||||
|
||||
bb.event.fire(event, self.cfgdata)
|
||||
else:
|
||||
bb.event.fire(bb.event.ParseError(eventmsg), self.cfgdata)
|
||||
bb.error("Parsing halted due to errors, see error messages above")
|
||||
|
||||
for process in self.processes:
|
||||
self.parser_quit.put(None)
|
||||
|
||||
# Cleanup the queue before call process.join(), otherwise there might be
|
||||
# deadlocks.
|
||||
while True:
|
||||
@@ -2198,16 +2238,6 @@ class CookerParser(object):
|
||||
except queue.Empty:
|
||||
break
|
||||
|
||||
def sync_caches():
|
||||
for c in self.bb_caches.values():
|
||||
bb.cache.SiggenRecipeInfo.reset()
|
||||
c.sync()
|
||||
|
||||
self.syncthread = threading.Thread(target=sync_caches, name="SyncThread")
|
||||
self.syncthread.start()
|
||||
|
||||
self.parser_quit.set()
|
||||
|
||||
for process in self.processes:
|
||||
process.join(0.5)
|
||||
|
||||
@@ -2228,9 +2258,18 @@ class CookerParser(object):
if hasattr(process, "close"):
process.close()

bb.codeparser.parser_cache_save()
self.parser_quit.close()
# Allow data left in the cancel queue to be discarded
self.parser_quit.cancel_join_thread()

def sync_caches():
for c in self.bb_caches.values():
c.sync()

sync = threading.Thread(target=sync_caches, name="SyncThread")
self.syncthread = sync
sync.start()
bb.codeparser.parser_cache_savemerge()
bb.cache.SiggenRecipeInfo.reset()
bb.fetch.fetcher_parse_done()
if self.cooker.configuration.profile:
profiles = []
@@ -2248,9 +2287,9 @@ class CookerParser(object):
self.syncthread.join()

def load_cached(self):
for mc, cache, filename, appends, layername in self.fromcache:
infos = cache.loadCached(filename, appends)
yield False, mc, infos
for mc, cache, filename, appends in self.fromcache:
cached, infos = cache.load(filename, appends)
yield not cached, mc, infos

def parse_generator(self):
empty = False
@@ -2305,7 +2344,7 @@ class CookerParser(object):
except bb.parse.ParseError as exc:
self.error += 1
logger.error(str(exc))
self.shutdown(clean=False, eventmsg=str(exc))
self.shutdown(clean=False)
return False
except bb.data_smart.ExpansionError as exc:
self.error += 1
@@ -2348,13 +2387,11 @@ class CookerParser(object):
return True

def reparse(self, filename):
bb.cache.SiggenRecipeInfo.reset()
to_reparse = set()
for mc in self.cooker.multiconfigs:
layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2]
to_reparse.add((mc, filename, self.cooker.collections[mc].get_file_appends(filename), layername))
to_reparse.add((mc, filename, self.cooker.collections[mc].get_file_appends(filename)))

for mc, filename, appends, layername in to_reparse:
infos = self.bb_caches[mc].parse(filename, appends, layername)
for mc, filename, appends in to_reparse:
infos = self.bb_caches[mc].parse(filename, appends)
for vfn, info_array in infos:
self.cooker.recipecaches[mc].add_from_recipeinfo(vfn, info_array)

@@ -160,7 +160,12 @@ def catch_parse_error(func):
def wrapped(fn, *args):
try:
return func(fn, *args)
except Exception as exc:
except IOError as exc:
import traceback
parselog.critical(traceback.format_exc())
parselog.critical("Unable to parse %s: %s" % (fn, exc))
raise bb.BBHandledException()
except bb.data_smart.ExpansionError as exc:
import traceback

bbdir = os.path.dirname(__file__) + os.sep
@@ -172,11 +177,14 @@ def catch_parse_error(func):
break
parselog.critical("Unable to parse %s" % fn, exc_info=(exc_class, exc, tb))
raise bb.BBHandledException()
except bb.parse.ParseError as exc:
parselog.critical(str(exc))
raise bb.BBHandledException()
return wrapped

@catch_parse_error
def parse_config_file(fn, data, include=True):
return bb.parse.handle(fn, data, include, baseconfig=True)
return bb.parse.handle(fn, data, include)

@catch_parse_error
def _inherit(bbclass, data):
@@ -255,7 +263,6 @@ class CookerDataBuilder(object):
self.mcdata = {}

def parseBaseConfiguration(self, worker=False):
mcdata = {}
data_hash = hashlib.sha256()
try:
self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles)
@@ -263,6 +270,7 @@ class CookerDataBuilder(object):
if self.data.getVar("BB_WORKERCONTEXT", False) is None and not worker:
bb.fetch.fetcher_init(self.data)
bb.parse.init_parser(self.data)
bb.codeparser.parser_cache_init(self.data)

bb.event.fire(bb.event.ConfigParsed(), self.data)

@@ -280,25 +288,29 @@ class CookerDataBuilder(object):

bb.parse.init_parser(self.data)
data_hash.update(self.data.get_hash().encode('utf-8'))
mcdata[''] = self.data
self.mcdata[''] = self.data

multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
for config in multiconfig:
if config[0].isdigit():
bb.fatal("Multiconfig name '%s' is invalid as multiconfigs cannot start with a digit" % config)
parsed_mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
bb.event.fire(bb.event.ConfigParsed(), parsed_mcdata)
mcdata[config] = parsed_mcdata
data_hash.update(parsed_mcdata.get_hash().encode('utf-8'))
mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
bb.event.fire(bb.event.ConfigParsed(), mcdata)
self.mcdata[config] = mcdata
data_hash.update(mcdata.get_hash().encode('utf-8'))
if multiconfig:
bb.event.fire(bb.event.MultiConfigParsed(mcdata), self.data)
bb.event.fire(bb.event.MultiConfigParsed(self.mcdata), self.data)

self.data_hash = data_hash.hexdigest()
except (SyntaxError, bb.BBHandledException):
raise bb.BBHandledException()
except bb.data_smart.ExpansionError as e:
logger.error(str(e))
raise bb.BBHandledException()
except Exception:
logger.exception("Error parsing configuration files")
raise bb.BBHandledException()

bb.codeparser.update_module_dependencies(self.data)

# Handle obsolete variable names
d = self.data
@@ -319,23 +331,17 @@ class CookerDataBuilder(object):
if issues:
raise bb.BBHandledException()

for mc in mcdata:
mcdata[mc].renameVar("__depends", "__base_depends")
mcdata[mc].setVar("__bbclasstype", "recipe")

# Create a copy so we can reset at a later date when UIs disconnect
self.mcorigdata = mcdata
for mc in mcdata:
self.mcdata[mc] = bb.data.createCopy(mcdata[mc])
self.data = self.mcdata['']
self.origdata = self.data
self.data = bb.data.createCopy(self.origdata)
self.mcdata[''] = self.data

def reset(self):
# We may not have run parseBaseConfiguration() yet
if not hasattr(self, 'mcorigdata'):
if not hasattr(self, 'origdata'):
return
for mc in self.mcorigdata:
self.mcdata[mc] = bb.data.createCopy(self.mcorigdata[mc])
self.data = self.mcdata['']
self.data = bb.data.createCopy(self.origdata)
self.mcdata[''] = self.data

def _findLayerConf(self, data):
return findConfigFile("bblayers.conf", data)
@@ -356,11 +362,6 @@ class CookerDataBuilder(object):
data.setVar("TOPDIR", os.path.dirname(os.path.dirname(layerconf)))
data = parse_config_file(layerconf, data)

if not data.getVar("BB_CACHEDIR"):
data.setVar("BB_CACHEDIR", "${TOPDIR}/cache")

bb.codeparser.parser_cache_init(data.getVar("BB_CACHEDIR"))

layers = (data.getVar('BBLAYERS') or "").split()
broken_layers = []

@@ -382,8 +383,6 @@ class CookerDataBuilder(object):
parselog.critical("Please check BBLAYERS in %s" % (layerconf))
raise bb.BBHandledException()

layerseries = None
compat_entries = {}
for layer in layers:
parselog.debug2("Adding layer %s", layer)
if 'HOME' in approved and '~' in layer:
@@ -396,27 +395,8 @@ class CookerDataBuilder(object):
data.expandVarref('LAYERDIR')
data.expandVarref('LAYERDIR_RE')

# Sadly we can't have nice things.
# Some layers think they're going to be 'clever' and copy the values from
# another layer, e.g. using ${LAYERSERIES_COMPAT_core}. The whole point of
# this mechanism is to make it clear which releases a layer supports and
# show when a layer master branch is bitrotting and is unmaintained.
# We therefore avoid people doing this here.
collections = (data.getVar('BBFILE_COLLECTIONS') or "").split()
for c in collections:
compat_entry = data.getVar("LAYERSERIES_COMPAT_%s" % c)
if compat_entry:
compat_entries[c] = set(compat_entry.split())
data.delVar("LAYERSERIES_COMPAT_%s" % c)
if not layerseries:
layerseries = set((data.getVar("LAYERSERIES_CORENAMES") or "").split())
if layerseries:
data.delVar("LAYERSERIES_CORENAMES")

data.delVar('LAYERDIR_RE')
data.delVar('LAYERDIR')
for c in compat_entries:
data.setVar("LAYERSERIES_COMPAT_%s" % c, " ".join(sorted(compat_entries[c])))

bbfiles_dynamic = (data.getVar('BBFILES_DYNAMIC') or "").split()
collections = (data.getVar('BBFILE_COLLECTIONS') or "").split()
@@ -435,15 +415,13 @@ class CookerDataBuilder(object):
if invalid:
bb.fatal("BBFILES_DYNAMIC entries must be of the form {!}<collection name>:<filename pattern>, not:\n %s" % "\n ".join(invalid))

layerseries = set((data.getVar("LAYERSERIES_CORENAMES") or "").split())
collections_tmp = collections[:]
for c in collections:
collections_tmp.remove(c)
if c in collections_tmp:
bb.fatal("Found duplicated BBFILE_COLLECTIONS '%s', check bblayers.conf or layer.conf to fix it." % c)

compat = set()
if c in compat_entries:
compat = compat_entries[c]
compat = set((data.getVar("LAYERSERIES_COMPAT_%s" % c) or "").split())
if compat and not layerseries:
bb.fatal("No core layer found to work with layer '%s'. Missing entry in bblayers.conf?" % c)
if compat and not (compat & layerseries):
@@ -452,21 +430,16 @@ class CookerDataBuilder(object):
elif not compat and not data.getVar("BB_WORKERCONTEXT"):
bb.warn("Layer %s should set LAYERSERIES_COMPAT_%s in its conf/layer.conf file to list the core layer names it is compatible with." % (c, c))

data.setVar("LAYERSERIES_CORENAMES", " ".join(sorted(layerseries)))

if not data.getVar("BBPATH"):
msg = "The BBPATH variable is not set"
if not layerconf:
msg += (" and bitbake did not find a conf/bblayers.conf file in"
" the expected location.\nMaybe you accidentally"
" invoked bitbake from the wrong directory?")
bb.fatal(msg)
raise SystemExit(msg)

if not data.getVar("TOPDIR"):
data.setVar("TOPDIR", os.path.abspath(os.getcwd()))
if not data.getVar("BB_CACHEDIR"):
data.setVar("BB_CACHEDIR", "${TOPDIR}/cache")
bb.codeparser.parser_cache_init(data.getVar("BB_CACHEDIR"))

data = parse_config_file(os.path.join("conf", "bitbake.conf"), data)

@@ -493,54 +466,3 @@ class CookerDataBuilder(object):

return data

@staticmethod
def _parse_recipe(bb_data, bbfile, appends, mc, layername):
bb_data.setVar("__BBMULTICONFIG", mc)
bb_data.setVar("FILE_LAYERNAME", layername)

bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
bb.parse.cached_mtime_noerror(bbfile_loc)

if appends:
bb_data.setVar('__BBAPPEND', " ".join(appends))

return bb.parse.handle(bbfile, bb_data)

def parseRecipeVariants(self, bbfile, appends, virtonly=False, mc=None, layername=None):
"""
Load and parse one .bb build file
Return the data and whether parsing resulted in the file being skipped
"""

if virtonly:
(bbfile, virtual, mc) = bb.cache.virtualfn2realfn(bbfile)
bb_data = self.mcdata[mc].createCopy()
bb_data.setVar("__ONLYFINALISE", virtual or "default")
return self._parse_recipe(bb_data, bbfile, appends, mc, layername)

if mc is not None:
bb_data = self.mcdata[mc].createCopy()
return self._parse_recipe(bb_data, bbfile, appends, mc, layername)

bb_data = self.data.createCopy()
datastores = self._parse_recipe(bb_data, bbfile, appends, '', layername)

for mc in self.mcdata:
if not mc:
continue
bb_data = self.mcdata[mc].createCopy()
newstores = self._parse_recipe(bb_data, bbfile, appends, mc, layername)
for ns in newstores:
datastores["mc:%s:%s" % (mc, ns)] = newstores[ns]

return datastores

def parseRecipe(self, virtualfn, appends, layername):
"""
Return a complete set of data for fn.
To do this, we need to parse the file.
"""
logger.debug("Parsing %s (full)" % virtualfn)
(fn, virtual, mc) = bb.cache.virtualfn2realfn(virtualfn)
datastores = self.parseRecipeVariants(virtualfn, appends, virtonly=True, layername=layername)
return datastores[virtual]

@@ -4,16 +4,14 @@ BitBake 'Data' implementations
|
||||
Functions for interacting with the data structure used by the
|
||||
BitBake build tools.
|
||||
|
||||
expandKeys and datastore iteration are the most expensive
|
||||
operations. Updating overrides is now "on the fly" but still based
|
||||
on the idea of the cookie monster introduced by zecke:
|
||||
"At night the cookie monster came by and
|
||||
The expandKeys and update_data are the most expensive
|
||||
operations. At night the cookie monster came by and
|
||||
suggested 'give me cookies on setting the variables and
|
||||
things will work out'. Taking this suggestion into account
|
||||
applying the skills from the not yet passed 'Entwurf und
|
||||
Analyse von Algorithmen' lecture and the cookie
|
||||
monster seems to be right. We will track setVar more carefully
|
||||
to have faster datastore operations."
|
||||
to have faster update_data and expandKeys operations.
|
||||
|
||||
This is a trade-off between speed and memory again but
|
||||
the speed is more critical here.
|
||||
@@ -28,6 +26,11 @@ the speed is more critical here.
|
||||
|
||||
import sys, os, re
|
||||
import hashlib
|
||||
if sys.argv[0][-5:] == "pydoc":
|
||||
path = os.path.dirname(os.path.dirname(sys.argv[1]))
|
||||
else:
|
||||
path = os.path.dirname(os.path.dirname(sys.argv[0]))
|
||||
sys.path.insert(0, path)
|
||||
from itertools import groupby
|
||||
|
||||
from bb import data_smart
|
||||
@@ -67,6 +70,10 @@ def keys(d):
|
||||
"""Return a list of keys in d"""
|
||||
return d.keys()
|
||||
|
||||
|
||||
__expand_var_regexp__ = re.compile(r"\${[^{}]+}")
|
||||
__expand_python_regexp__ = re.compile(r"\${@.+?}")
|
||||
|
||||
def expand(s, d, varname = None):
|
||||
"""Variable expansion using the data store"""
|
||||
return d.expand(s, varname)
|
||||
@@ -114,8 +121,8 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False):
|
||||
if d.getVarFlag(var, 'python', False) and func:
|
||||
return False
|
||||
|
||||
export = bb.utils.to_boolean(d.getVarFlag(var, "export"))
|
||||
unexport = bb.utils.to_boolean(d.getVarFlag(var, "unexport"))
|
||||
export = d.getVarFlag(var, "export", False)
|
||||
unexport = d.getVarFlag(var, "unexport", False)
|
||||
if not all and not export and not unexport and not func:
|
||||
return False
|
||||
|
||||
@@ -188,8 +195,8 @@ def emit_env(o=sys.__stdout__, d = init(), all=False):
|
||||
|
||||
def exported_keys(d):
|
||||
return (key for key in d.keys() if not key.startswith('__') and
|
||||
bb.utils.to_boolean(d.getVarFlag(key, 'export')) and
|
||||
not bb.utils.to_boolean(d.getVarFlag(key, 'unexport')))
|
||||
d.getVarFlag(key, 'export', False) and
|
||||
not d.getVarFlag(key, 'unexport', False))
|
||||
|
||||
def exported_vars(d):
|
||||
k = list(exported_keys(d))
|
||||
@@ -261,41 +268,13 @@ def emit_func_python(func, o=sys.__stdout__, d = init()):
|
||||
newdeps |= set((d.getVarFlag(dep, "vardeps") or "").split())
|
||||
newdeps -= seen
|
||||
|
||||
def build_dependencies(key, keys, mod_funcs, shelldeps, varflagsexcl, ignored_vars, d, codeparsedata):
|
||||
def handle_contains(value, contains, exclusions, d):
|
||||
newvalue = []
|
||||
if value:
|
||||
newvalue.append(str(value))
|
||||
for k in sorted(contains):
|
||||
if k in exclusions or k in ignored_vars:
|
||||
continue
|
||||
l = (d.getVar(k) or "").split()
|
||||
for item in sorted(contains[k]):
|
||||
for word in item.split():
|
||||
if not word in l:
|
||||
newvalue.append("\n%s{%s} = Unset" % (k, item))
|
||||
break
|
||||
else:
|
||||
newvalue.append("\n%s{%s} = Set" % (k, item))
|
||||
return "".join(newvalue)
|
||||
|
||||
def handle_remove(value, deps, removes, d):
|
||||
for r in sorted(removes):
|
||||
r2 = d.expandWithRefs(r, None)
|
||||
value += "\n_remove of %s" % r
|
||||
deps |= r2.references
|
||||
deps = deps | (keys & r2.execs)
|
||||
value = handle_contains(value, r2.contains, exclusions, d)
|
||||
return value
|
||||
def update_data(d):
|
||||
"""Performs final steps upon the datastore, including application of overrides"""
|
||||
d.finalize(parent = True)
|
||||
|
||||
def build_dependencies(key, keys, shelldeps, varflagsexcl, ignored_vars, d):
|
||||
deps = set()
|
||||
try:
|
||||
if key in mod_funcs:
|
||||
exclusions = set()
|
||||
moddep = bb.codeparser.modulecode_deps[key]
|
||||
value = handle_contains(moddep[4], moddep[3], exclusions, d)
|
||||
return frozenset((moddep[0] | keys & moddep[1]) - ignored_vars), value
|
||||
|
||||
if key[-1] == ']':
|
||||
vf = key[:-1].split('[')
|
||||
if vf[1] == "vardepvalueexclude":
|
||||
@@ -303,24 +282,48 @@ def build_dependencies(key, keys, mod_funcs, shelldeps, varflagsexcl, ignored_va
|
||||
value, parser = d.getVarFlag(vf[0], vf[1], False, retparser=True)
|
||||
deps |= parser.references
|
||||
deps = deps | (keys & parser.execs)
|
||||
deps -= ignored_vars
|
||||
return frozenset(deps), value
|
||||
return deps, value
|
||||
varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", "vardepsexclude", "exports", "postfuncs", "prefuncs", "lineno", "filename"]) or {}
|
||||
vardeps = varflags.get("vardeps")
|
||||
exclusions = varflags.get("vardepsexclude", "").split()
|
||||
|
||||
def handle_contains(value, contains, exclusions, d):
|
||||
newvalue = []
|
||||
if value:
|
||||
newvalue.append(str(value))
|
||||
for k in sorted(contains):
|
||||
if k in exclusions or k in ignored_vars:
|
||||
continue
|
||||
l = (d.getVar(k) or "").split()
|
||||
for item in sorted(contains[k]):
|
||||
for word in item.split():
|
||||
if not word in l:
|
||||
newvalue.append("\n%s{%s} = Unset" % (k, item))
|
||||
break
|
||||
else:
|
||||
newvalue.append("\n%s{%s} = Set" % (k, item))
|
||||
return "".join(newvalue)
|
||||
|
||||
def handle_remove(value, deps, removes, d):
|
||||
for r in sorted(removes):
|
||||
r2 = d.expandWithRefs(r, None)
|
||||
value += "\n_remove of %s" % r
|
||||
deps |= r2.references
|
||||
deps = deps | (keys & r2.execs)
|
||||
return value
|
||||
|
||||
if "vardepvalue" in varflags:
|
||||
value = varflags.get("vardepvalue")
|
||||
elif varflags.get("func"):
|
||||
if varflags.get("python"):
|
||||
value = codeparsedata.getVarFlag(key, "_content", False)
|
||||
value = d.getVarFlag(key, "_content", False)
|
||||
parser = bb.codeparser.PythonParser(key, logger)
|
||||
parser.parse_python(value, filename=varflags.get("filename"), lineno=varflags.get("lineno"))
|
||||
deps = deps | parser.references
|
||||
deps = deps | (keys & parser.execs)
|
||||
value = handle_contains(value, parser.contains, exclusions, d)
|
||||
else:
|
||||
value, parsedvar = codeparsedata.getVarFlag(key, "_content", False, retparser=True)
|
||||
value, parsedvar = d.getVarFlag(key, "_content", False, retparser=True)
|
||||
parser = bb.codeparser.ShellParser(key, logger)
|
||||
parser.parse_shell(parsedvar.value)
|
||||
deps = deps | shelldeps
|
||||
@@ -362,43 +365,36 @@ def build_dependencies(key, keys, mod_funcs, shelldeps, varflagsexcl, ignored_va
|
||||
|
||||
deps |= set((vardeps or "").split())
|
||||
deps -= set(exclusions)
|
||||
deps -= ignored_vars
|
||||
except bb.parse.SkipRecipe:
|
||||
raise
|
||||
except Exception as e:
|
||||
bb.warn("Exception during build_dependencies for %s" % key)
|
||||
raise
|
||||
return frozenset(deps), value
|
||||
return deps, value
|
||||
#bb.note("Variable %s references %s and calls %s" % (key, str(deps), str(execs)))
|
||||
#d.setVarFlag(key, "vardeps", deps)
|
||||
|
||||
def generate_dependencies(d, ignored_vars):
|
||||
|
||||
mod_funcs = set(bb.codeparser.modulecode_deps.keys())
|
||||
keys = set(key for key in d if not key.startswith("__")) | mod_funcs
|
||||
shelldeps = set(key for key in d.getVar("__exportlist", False) if bb.utils.to_boolean(d.getVarFlag(key, "export")) and not bb.utils.to_boolean(d.getVarFlag(key, "unexport")))
|
||||
keys = set(key for key in d if not key.startswith("__"))
|
||||
shelldeps = set(key for key in d.getVar("__exportlist", False) if d.getVarFlag(key, "export", False) and not d.getVarFlag(key, "unexport", False))
|
||||
varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS')
|
||||
|
||||
codeparserd = d.createCopy()
|
||||
for forced in (d.getVar('BB_HASH_CODEPARSER_VALS') or "").split():
|
||||
key, value = forced.split("=", 1)
|
||||
codeparserd.setVar(key, value)
|
||||
|
||||
deps = {}
|
||||
values = {}
|
||||
|
||||
tasklist = d.getVar('__BBTASKS', False) or []
|
||||
for task in tasklist:
|
||||
deps[task], values[task] = build_dependencies(task, keys, mod_funcs, shelldeps, varflagsexcl, ignored_vars, d, codeparserd)
|
||||
deps[task], values[task] = build_dependencies(task, keys, shelldeps, varflagsexcl, ignored_vars, d)
|
||||
newdeps = deps[task]
|
||||
seen = set()
|
||||
while newdeps:
|
||||
nextdeps = newdeps
|
||||
nextdeps = newdeps - ignored_vars
|
||||
seen |= nextdeps
|
||||
newdeps = set()
|
||||
for dep in nextdeps:
|
||||
if dep not in deps:
|
||||
deps[dep], values[dep] = build_dependencies(dep, keys, mod_funcs, shelldeps, varflagsexcl, ignored_vars, d, codeparserd)
|
||||
deps[dep], values[dep] = build_dependencies(dep, keys, shelldeps, varflagsexcl, ignored_vars, d)
|
||||
newdeps |= deps[dep]
|
||||
newdeps -= seen
|
||||
#print "For %s: %s" % (task, str(deps[task]))
|
||||
@@ -417,6 +413,7 @@ def generate_dependency_hash(tasklist, gendeps, lookupcache, ignored_vars, fn):
|
||||
else:
|
||||
data = [data]
|
||||
|
||||
gendeps[task] -= ignored_vars
|
||||
newdeps = gendeps[task]
|
||||
seen = set()
|
||||
while newdeps:
|
||||
@@ -424,6 +421,9 @@ def generate_dependency_hash(tasklist, gendeps, lookupcache, ignored_vars, fn):
|
||||
seen |= nextdeps
|
||||
newdeps = set()
|
||||
for dep in nextdeps:
|
||||
if dep in ignored_vars:
|
||||
continue
|
||||
gendeps[dep] -= ignored_vars
|
||||
newdeps |= gendeps[dep]
|
||||
newdeps -= seen
|
||||
|
||||
@@ -435,7 +435,7 @@ def generate_dependency_hash(tasklist, gendeps, lookupcache, ignored_vars, fn):
|
||||
data.append(str(var))
|
||||
k = fn + ":" + task
|
||||
basehash[k] = hashlib.sha256("".join(data).encode("utf-8")).hexdigest()
|
||||
taskdeps[task] = frozenset(seen)
|
||||
taskdeps[task] = alldeps
|
||||
|
||||
return taskdeps, basehash
|
||||
|
||||
|
||||
@@ -16,10 +16,7 @@ BitBake build tools.
|
||||
#
|
||||
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
|
||||
|
||||
import builtins
|
||||
import copy
|
||||
import re
|
||||
import sys
|
||||
import copy, re, sys, traceback
|
||||
from collections.abc import MutableMapping
|
||||
import logging
|
||||
import hashlib
|
||||
@@ -32,7 +29,7 @@ logger = logging.getLogger("BitBake.Data")
|
||||
__setvar_keyword__ = [":append", ":prepend", ":remove"]
|
||||
__setvar_regexp__ = re.compile(r'(?P<base>.*?)(?P<keyword>:append|:prepend|:remove)(:(?P<add>[^A-Z]*))?$')
|
||||
__expand_var_regexp__ = re.compile(r"\${[a-zA-Z0-9\-_+./~:]+?}")
|
||||
__expand_python_regexp__ = re.compile(r"\${@(?:{.*?}|.)+?}")
|
||||
__expand_python_regexp__ = re.compile(r"\${@.+?}")
|
||||
__whitespace_split__ = re.compile(r'(\s)')
|
||||
__override_regexp__ = re.compile(r'[a-z0-9]+')
|
||||
|
||||
@@ -95,11 +92,10 @@ def infer_caller_details(loginfo, parent = False, varval = True):
|
||||
loginfo['func'] = func
|
||||
|
||||
class VariableParse:
|
||||
def __init__(self, varname, d, unexpanded_value = None, val = None):
|
||||
def __init__(self, varname, d, val = None):
|
||||
self.varname = varname
|
||||
self.d = d
|
||||
self.value = val
|
||||
self.unexpanded_value = unexpanded_value
|
||||
|
||||
self.references = set()
|
||||
self.execs = set()
|
||||
@@ -123,11 +119,6 @@ class VariableParse:
|
||||
else:
|
||||
code = match.group()[3:-1]
|
||||
|
||||
# Do not run code that contains one or more unexpanded variables
|
||||
# instead return the code with the characters we removed put back
|
||||
if __expand_var_regexp__.findall(code):
|
||||
return "${@" + code + "}"
|
||||
|
||||
if self.varname:
|
||||
varname = 'Var <%s>' % self.varname
|
||||
else:
|
||||
@@ -153,21 +144,19 @@ class VariableParse:
|
||||
value = utils.better_eval(codeobj, DataContext(self.d), {'d' : self.d})
|
||||
return str(value)
|
||||
|
||||
class DataContext(dict):
|
||||
excluded = set([i for i in dir(builtins) if not i.startswith('_')] + ['oe'])
|
||||
|
||||
class DataContext(dict):
|
||||
def __init__(self, metadata, **kwargs):
|
||||
self.metadata = metadata
|
||||
dict.__init__(self, **kwargs)
|
||||
self['d'] = metadata
|
||||
self.context = set(bb.utils.get_context())
|
||||
|
||||
def __missing__(self, key):
|
||||
if key in self.excluded or key in self.context:
|
||||
# Skip commonly accessed invalid variables
|
||||
if key in ['bb', 'oe', 'int', 'bool', 'time', 'str', 'os']:
|
||||
raise KeyError(key)
|
||||
|
||||
value = self.metadata.getVar(key)
|
||||
if value is None:
|
||||
if value is None or self.metadata.getVarFlag(key, 'func', False):
|
||||
raise KeyError(key)
|
||||
else:
|
||||
return value
|
||||
@@ -272,9 +261,12 @@ class VariableHistory(object):
|
||||
return
|
||||
if 'op' not in loginfo or not loginfo['op']:
|
||||
loginfo['op'] = 'set'
|
||||
if 'detail' in loginfo:
|
||||
loginfo['detail'] = str(loginfo['detail'])
|
||||
if 'variable' not in loginfo or 'file' not in loginfo:
|
||||
raise ValueError("record() missing variable or file.")
|
||||
var = loginfo['variable']
|
||||
|
||||
if var not in self.variables:
|
||||
self.variables[var] = []
|
||||
if not isinstance(self.variables[var], list):
|
||||
@@ -333,8 +325,7 @@ class VariableHistory(object):
|
||||
flag = '[%s] ' % (event['flag'])
|
||||
else:
|
||||
flag = ''
|
||||
o.write("# %s %s:%s%s\n# %s\"%s\"\n" % \
|
||||
(event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', str(event['detail']))))
|
||||
o.write("# %s %s:%s%s\n# %s\"%s\"\n" % (event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', event['detail'])))
|
||||
if len(history) > 1:
|
||||
o.write("# pre-expansion value:\n")
|
||||
o.write('# "%s"\n' % (commentVal))
|
||||
@@ -388,7 +379,7 @@ class VariableHistory(object):
|
||||
if isset and event['op'] == 'set?':
|
||||
continue
|
||||
isset = True
|
||||
items = d.expand(str(event['detail'])).split()
|
||||
items = d.expand(event['detail']).split()
|
||||
for item in items:
|
||||
# This is a little crude but is belt-and-braces to avoid us
|
||||
# having to handle every possible operation type specifically
|
||||
@@ -451,9 +442,9 @@ class DataSmart(MutableMapping):
|
||||
def expandWithRefs(self, s, varname):
|
||||
|
||||
if not isinstance(s, str): # sanity check
|
||||
return VariableParse(varname, self, s, s)
|
||||
return VariableParse(varname, self, s)
|
||||
|
||||
varparse = VariableParse(varname, self, s)
|
||||
varparse = VariableParse(varname, self)
|
||||
|
||||
while s.find('${') != -1:
|
||||
olds = s
|
||||
@@ -485,19 +476,24 @@ class DataSmart(MutableMapping):
|
||||
def expand(self, s, varname = None):
|
||||
return self.expandWithRefs(s, varname).value
|
||||
|
||||
def finalize(self, parent = False):
|
||||
return
|
||||
|
||||
def internal_finalize(self, parent = False):
|
||||
"""Performs final steps upon the datastore, including application of overrides"""
|
||||
self.overrides = None
|
||||
|
||||
def need_overrides(self):
|
||||
if self.overrides is not None:
|
||||
return
|
||||
if self.inoverride:
|
||||
return
|
||||
overrride_stack = []
|
||||
for count in range(5):
|
||||
self.inoverride = True
|
||||
# Can end up here recursively so setup dummy values
|
||||
self.overrides = []
|
||||
self.overridesset = set()
|
||||
self.overrides = (self.getVar("OVERRIDES") or "").split(":") or []
|
||||
overrride_stack.append(self.overrides)
|
||||
self.overridesset = set(self.overrides)
|
||||
self.inoverride = False
|
||||
self.expand_cache = {}
|
||||
@@ -507,7 +503,7 @@ class DataSmart(MutableMapping):
|
||||
self.overrides = newoverrides
|
||||
self.overridesset = set(self.overrides)
|
||||
else:
|
||||
bb.fatal("Overrides could not be expanded into a stable state after 5 iterations, overrides must be being referenced by other overridden variables in some recursive fashion. Please provide your configuration to bitbake-devel so we can laugh, er, I mean try and understand how to make it work. The list of failing override expansions: %s" % "\n".join(str(s) for s in overrride_stack))
|
||||
bb.fatal("Overrides could not be expanded into a stable state after 5 iterations, overrides must be being referenced by other overridden variables in some recursive fashion. Please provide your configuration to bitbake-devel so we can laugh, er, I mean try and understand how to make it work.")
|
||||
|
||||
def initVar(self, var):
|
||||
self.expand_cache = {}
|
||||
@@ -518,18 +514,18 @@ class DataSmart(MutableMapping):
|
||||
dest = self.dict
|
||||
while dest:
|
||||
if var in dest:
|
||||
return dest[var]
|
||||
return dest[var], self.overridedata.get(var, None)
|
||||
|
||||
if "_data" not in dest:
|
||||
break
|
||||
dest = dest["_data"]
|
||||
return None
|
||||
return None, self.overridedata.get(var, None)
|
||||
|
||||
def _makeShadowCopy(self, var):
|
||||
if var in self.dict:
|
||||
return
|
||||
|
||||
local_var = self._findVar(var)
|
||||
local_var, _ = self._findVar(var)
|
||||
|
||||
if local_var:
|
||||
self.dict[var] = copy.copy(local_var)
|
||||
@@ -637,7 +633,7 @@ class DataSmart(MutableMapping):
|
||||
nextnew.update(vardata.references)
|
||||
nextnew.update(vardata.contains.keys())
|
||||
new = nextnew
|
||||
self.overrides = None
|
||||
self.internal_finalize(True)
|
||||
|
||||
def _setvar_update_overrides(self, var, **loginfo):
|
||||
# aka pay the cookie monster
|
||||
@@ -724,7 +720,7 @@ class DataSmart(MutableMapping):
|
||||
if ':' in var:
|
||||
override = var[var.rfind(':')+1:]
|
||||
shortvar = var[:var.rfind(':')]
|
||||
while override and __override_regexp__.match(override):
|
||||
while override and override.islower():
|
||||
try:
|
||||
if shortvar in self.overridedata:
|
||||
# Force CoW by recreating the list first
|
||||
@@ -779,18 +775,13 @@ class DataSmart(MutableMapping):
|
||||
return None
|
||||
cachename = var + "[" + flag + "]"
|
||||
|
||||
if not expand and retparser and cachename in self.expand_cache:
|
||||
return self.expand_cache[cachename].unexpanded_value, self.expand_cache[cachename]
|
||||
|
||||
if expand and cachename in self.expand_cache:
|
||||
return self.expand_cache[cachename].value
|
||||
|
||||
local_var = self._findVar(var)
|
||||
local_var, overridedata = self._findVar(var)
|
||||
value = None
|
||||
removes = set()
|
||||
if flag == "_content" and not parsing:
|
||||
overridedata = self.overridedata.get(var, None)
|
||||
if flag == "_content" and not parsing and overridedata is not None:
|
||||
if flag == "_content" and overridedata is not None and not parsing:
|
||||
match = False
|
||||
active = {}
|
||||
self.need_overrides()
|
||||
@@ -905,7 +896,7 @@ class DataSmart(MutableMapping):
|
||||
def delVarFlag(self, var, flag, **loginfo):
|
||||
self.expand_cache = {}
|
||||
|
||||
local_var = self._findVar(var)
|
||||
local_var, _ = self._findVar(var)
|
||||
if not local_var:
|
||||
return
|
||||
if not var in self.dict:
|
||||
@@ -948,7 +939,7 @@ class DataSmart(MutableMapping):
|
||||
self.dict[var][i] = flags[i]
|
||||
|
||||
def getVarFlags(self, var, expand = False, internalflags=False):
|
||||
local_var = self._findVar(var)
|
||||
local_var, _ = self._findVar(var)
|
||||
flags = {}
|
||||
|
||||
if local_var:
|
||||
|
||||
@@ -68,39 +68,29 @@ _catchall_handlers = {}
|
||||
_eventfilter = None
|
||||
_uiready = False
|
||||
_thread_lock = threading.Lock()
|
||||
_heartbeat_enabled = False
|
||||
_should_exit = threading.Event()
|
||||
_thread_lock_enabled = False
|
||||
|
||||
if hasattr(__builtins__, '__setitem__'):
|
||||
builtins = __builtins__
|
||||
else:
|
||||
builtins = __builtins__.__dict__
|
||||
|
||||
def enable_threadlock():
|
||||
# Always needed now
|
||||
return
|
||||
global _thread_lock_enabled
|
||||
_thread_lock_enabled = True
|
||||
|
||||
def disable_threadlock():
|
||||
# Always needed now
|
||||
return
|
||||
|
||||
def enable_heartbeat():
|
||||
global _heartbeat_enabled
|
||||
_heartbeat_enabled = True
|
||||
|
||||
def disable_heartbeat():
|
||||
global _heartbeat_enabled
|
||||
_heartbeat_enabled = False
|
||||
|
||||
#
|
||||
# In long running code, this function should be called periodically
|
||||
# to check if we should exit due to an interuption (.e.g Ctrl+C from the UI)
|
||||
#
|
||||
def check_for_interrupts(d):
|
||||
global _should_exit
|
||||
if _should_exit.is_set():
|
||||
bb.warn("Exiting due to interrupt.")
|
||||
raise bb.BBHandledException()
|
||||
global _thread_lock_enabled
|
||||
_thread_lock_enabled = False
|
||||
|
||||
def execute_handler(name, handler, event, d):
|
||||
event.data = d
|
||||
addedd = False
|
||||
if 'd' not in builtins:
|
||||
builtins['d'] = d
|
||||
addedd = True
|
||||
try:
|
||||
ret = handler(event, d)
|
||||
ret = handler(event)
|
||||
except (bb.parse.SkipRecipe, bb.BBHandledException):
|
||||
raise
|
||||
except Exception:
|
||||
@@ -114,7 +104,8 @@ def execute_handler(name, handler, event, d):
|
||||
raise
|
||||
finally:
|
||||
del event.data
|
||||
|
||||
if addedd:
|
||||
del builtins['d']
|
||||
|
||||
def fire_class_handlers(event, d):
|
||||
if isinstance(event, logging.LogRecord):
|
||||
@@ -189,30 +180,36 @@ def print_ui_queue():
|
||||
|
||||
def fire_ui_handlers(event, d):
|
||||
global _thread_lock
|
||||
global _thread_lock_enabled
|
||||
|
||||
if not _uiready:
|
||||
# No UI handlers registered yet, queue up the messages
|
||||
ui_queue.append(event)
|
||||
return
|
||||
|
||||
with bb.utils.lock_timeout(_thread_lock):
|
||||
errors = []
|
||||
for h in _ui_handlers:
|
||||
#print "Sending event %s" % event
|
||||
try:
|
||||
if not _ui_logfilters[h].filter(event):
|
||||
continue
|
||||
# We use pickle here since it better handles object instances
|
||||
# which xmlrpc's marshaller does not. Events *must* be serializable
|
||||
# by pickle.
|
||||
if hasattr(_ui_handlers[h].event, "sendpickle"):
|
||||
_ui_handlers[h].event.sendpickle((pickle.dumps(event)))
|
||||
else:
|
||||
_ui_handlers[h].event.send(event)
|
||||
except:
|
||||
errors.append(h)
|
||||
for h in errors:
|
||||
del _ui_handlers[h]
|
||||
if _thread_lock_enabled:
|
||||
_thread_lock.acquire()
|
||||
|
||||
errors = []
|
||||
for h in _ui_handlers:
|
||||
#print "Sending event %s" % event
|
||||
try:
|
||||
if not _ui_logfilters[h].filter(event):
|
||||
continue
|
||||
# We use pickle here since it better handles object instances
|
||||
# which xmlrpc's marshaller does not. Events *must* be serializable
|
||||
# by pickle.
|
||||
if hasattr(_ui_handlers[h].event, "sendpickle"):
|
||||
_ui_handlers[h].event.sendpickle((pickle.dumps(event)))
|
||||
else:
|
||||
_ui_handlers[h].event.send(event)
|
||||
except:
|
||||
errors.append(h)
|
||||
for h in errors:
|
||||
del _ui_handlers[h]
|
||||
|
||||
if _thread_lock_enabled:
|
||||
_thread_lock.release()
|
||||
|
||||
def fire(event, d):
|
||||
"""Fire off an Event"""
|
||||
@@ -256,16 +253,15 @@ def register(name, handler, mask=None, filename=None, lineno=None, data=None):
|
||||
if handler is not None:
|
||||
# handle string containing python code
|
||||
if isinstance(handler, str):
|
||||
tmp = "def %s(e, d):\n%s" % (name, handler)
|
||||
# Inject empty lines to make code match lineno in filename
|
||||
if lineno is not None:
|
||||
tmp = "\n" * (lineno-1) + tmp
|
||||
tmp = "def %s(e):\n%s" % (name, handler)
|
||||
try:
|
||||
code = bb.methodpool.compile_cache(tmp)
|
||||
if not code:
|
||||
if filename is None:
|
||||
filename = "%s(e, d)" % name
|
||||
filename = "%s(e)" % name
|
||||
code = compile(tmp, filename, "exec", ast.PyCF_ONLY_AST)
|
||||
if lineno is not None:
|
||||
ast.increment_lineno(code, lineno-1)
|
||||
code = compile(code, filename, "exec")
|
||||
bb.methodpool.compile_cache_add(tmp, code)
|
||||
except SyntaxError:
|
||||
@@ -327,23 +323,21 @@ def set_eventfilter(func):
|
||||
_eventfilter = func
|
||||
|
||||
def register_UIHhandler(handler, mainui=False):
|
||||
with bb.utils.lock_timeout(_thread_lock):
|
||||
bb.event._ui_handler_seq = bb.event._ui_handler_seq + 1
|
||||
_ui_handlers[_ui_handler_seq] = handler
|
||||
level, debug_domains = bb.msg.constructLogOptions()
|
||||
_ui_logfilters[_ui_handler_seq] = UIEventFilter(level, debug_domains)
|
||||
if mainui:
|
||||
global _uiready
|
||||
_uiready = _ui_handler_seq
|
||||
return _ui_handler_seq
|
||||
bb.event._ui_handler_seq = bb.event._ui_handler_seq + 1
|
||||
_ui_handlers[_ui_handler_seq] = handler
|
||||
level, debug_domains = bb.msg.constructLogOptions()
|
||||
_ui_logfilters[_ui_handler_seq] = UIEventFilter(level, debug_domains)
|
||||
if mainui:
|
||||
global _uiready
|
||||
_uiready = _ui_handler_seq
|
||||
return _ui_handler_seq
|
||||
|
||||
def unregister_UIHhandler(handlerNum, mainui=False):
|
||||
if mainui:
|
||||
global _uiready
|
||||
_uiready = False
|
||||
with bb.utils.lock_timeout(_thread_lock):
|
||||
if handlerNum in _ui_handlers:
|
||||
del _ui_handlers[handlerNum]
|
||||
if handlerNum in _ui_handlers:
|
||||
del _ui_handlers[handlerNum]
|
||||
return
|
||||
|
||||
def get_uihandler():
|
||||
@@ -857,19 +851,3 @@ class FindSigInfoResult(Event):
|
||||
def __init__(self, result):
|
||||
Event.__init__(self)
|
||||
self.result = result
|
||||
|
||||
class GetTaskSignatureResult(Event):
|
||||
"""
|
||||
Event to return results from GetTaskSignatures command
|
||||
"""
|
||||
def __init__(self, sig):
|
||||
Event.__init__(self)
|
||||
self.sig = sig
|
||||
|
||||
class ParseError(Event):
|
||||
"""
|
||||
Event to indicate parse failed
|
||||
"""
|
||||
def __init__(self, msg):
|
||||
super().__init__()
|
||||
self._msg = msg
|
||||
|
||||
@@ -290,12 +290,12 @@ class URI(object):
|
||||
|
||||
def _param_str_split(self, string, elmdelim, kvdelim="="):
|
||||
ret = collections.OrderedDict()
|
||||
for k, v in [x.split(kvdelim, 1) if kvdelim in x else (x, None) for x in string.split(elmdelim) if x]:
|
||||
for k, v in [x.split(kvdelim, 1) for x in string.split(elmdelim) if x]:
|
||||
ret[k] = v
|
||||
return ret
|
||||
|
||||
def _param_str_join(self, dict_, elmdelim, kvdelim="="):
|
||||
return elmdelim.join([kvdelim.join([k, v]) if v else k for k, v in dict_.items()])
|
||||
return elmdelim.join([kvdelim.join([k, v]) for k, v in dict_.items()])
|
||||
|
||||
@property
|
||||
def hostport(self):
|
||||
@@ -388,7 +388,7 @@ def decodeurl(url):
|
||||
if s:
|
||||
if not '=' in s:
|
||||
raise MalformedUrl(url, "The URL: '%s' is invalid: parameter %s does not specify a value (missing '=')" % (url, s))
|
||||
s1, s2 = s.split('=', 1)
|
||||
s1, s2 = s.split('=')
|
||||
p[s1] = s2
|
||||
|
||||
return type, host, urllib.parse.unquote(path), user, pswd, p
|
||||
@@ -469,7 +469,6 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
|
||||
basename = os.path.basename(mirrortarball)
|
||||
# Kill parameters, they make no sense for mirror tarballs
|
||||
uri_decoded[5] = {}
|
||||
uri_find_decoded[5] = {}
|
||||
elif ud.localpath and ud.method.supports_checksum(ud):
|
||||
basename = os.path.basename(ud.localpath)
|
||||
if basename:
|
||||
@@ -518,7 +517,7 @@ def fetcher_init(d):
|
||||
else:
|
||||
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
|
||||
|
||||
_checksum_cache.init_cache(d.getVar("BB_CACHEDIR"))
|
||||
_checksum_cache.init_cache(d)
|
||||
|
||||
for m in methods:
|
||||
if hasattr(m, "init"):
|
||||
@@ -560,6 +559,7 @@ def verify_checksum(ud, d, precomputed={}, localpath=None, fatal_nochecksum=True
|
||||
file against those in the recipe each time, rather than only after
|
||||
downloading. See https://bugzilla.yoctoproject.org/show_bug.cgi?id=5571.
|
||||
"""
|
||||
|
||||
if ud.ignore_checksums or not ud.method.supports_checksum(ud):
|
||||
return {}
|
||||
|
||||
@@ -604,7 +604,11 @@ def verify_checksum(ud, d, precomputed={}, localpath=None, fatal_nochecksum=True
|
||||
|
||||
# If strict checking enabled and neither sum defined, raise error
|
||||
if strict == "1":
|
||||
raise NoChecksumError("\n".join(checksum_lines))
|
||||
messages.append("No checksum specified for '%s', please add at " \
|
||||
"least one to the recipe:" % ud.localpath)
|
||||
messages.extend(checksum_lines)
|
||||
logger.error("\n".join(messages))
|
||||
raise NoChecksumError("Missing SRC_URI checksum", ud.url)
|
||||
|
||||
bb.event.fire(MissingChecksumEvent(ud.url, **checksum_event), d)
|
||||
|
||||
@@ -744,16 +748,13 @@ def subprocess_setup():
|
||||
# SIGPIPE errors are known issues with gzip/bash
|
||||
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
|
||||
|
||||
def mark_recipe_nocache(d):
|
||||
def get_autorev(d):
|
||||
# only not cache src rev in autorev case
|
||||
if d.getVar('BB_SRCREV_POLICY') != "cache":
|
||||
d.setVar('BB_DONT_CACHE', '1')
|
||||
|
||||
def get_autorev(d):
|
||||
mark_recipe_nocache(d)
|
||||
d.setVar("__BBAUTOREV_SEEN", True)
|
||||
return "AUTOINC"
|
||||
|
||||
def _get_srcrev(d, method_name='sortable_revision'):
|
||||
def get_srcrev(d, method_name='sortable_revision'):
|
||||
"""
|
||||
Return the revision string, usually for use in the version string (PV) of the current package
|
||||
Most packages usually only have one SCM so we just pass on the call.
|
||||
@@ -767,14 +768,13 @@ def _get_srcrev(d, method_name='sortable_revision'):
|
||||
that fetcher provides a method with the given name and the same signature as sortable_revision.
|
||||
"""
|
||||
|
||||
d.setVar("__BBSRCREV_SEEN", "1")
|
||||
d.setVar("__BBSEENSRCREV", "1")
|
||||
recursion = d.getVar("__BBINSRCREV")
|
||||
if recursion:
|
||||
raise FetchError("There are recursive references in fetcher variables, likely through SRC_URI")
|
||||
d.setVar("__BBINSRCREV", True)
|
||||
|
||||
scms = []
|
||||
revs = []
|
||||
fetcher = Fetch(d.getVar('SRC_URI').split(), d)
|
||||
urldata = fetcher.ud
|
||||
for u in urldata:
|
||||
@@ -782,19 +782,16 @@ def _get_srcrev(d, method_name='sortable_revision'):
|
||||
scms.append(u)
|
||||
|
||||
if not scms:
|
||||
d.delVar("__BBINSRCREV")
|
||||
return "", revs
|
||||
|
||||
raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI")
|
||||
|
||||
if len(scms) == 1 and len(urldata[scms[0]].names) == 1:
|
||||
autoinc, rev = getattr(urldata[scms[0]].method, method_name)(urldata[scms[0]], d, urldata[scms[0]].names[0])
|
||||
revs.append(rev)
|
||||
if len(rev) > 10:
|
||||
rev = rev[:10]
|
||||
d.delVar("__BBINSRCREV")
|
||||
if autoinc:
|
||||
return "AUTOINC+" + rev, revs
|
||||
return rev, revs
|
||||
return "AUTOINC+" + rev
|
||||
return rev
|
||||
|
||||
#
|
||||
# Mutiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT
|
||||
@@ -810,7 +807,6 @@ def _get_srcrev(d, method_name='sortable_revision'):
|
||||
ud = urldata[scm]
|
||||
for name in ud.names:
|
||||
autoinc, rev = getattr(ud.method, method_name)(ud, d, name)
|
||||
revs.append(rev)
|
||||
seenautoinc = seenautoinc or autoinc
|
||||
if len(rev) > 10:
|
||||
rev = rev[:10]
|
||||
@@ -828,21 +824,7 @@ def _get_srcrev(d, method_name='sortable_revision'):
|
||||
format = "AUTOINC+" + format
|
||||
|
||||
d.delVar("__BBINSRCREV")
|
||||
return format, revs
|
||||
|
||||
def get_hashvalue(d, method_name='sortable_revision'):
|
||||
pkgv, revs = _get_srcrev(d, method_name=method_name)
|
||||
return " ".join(revs)
|
||||
|
||||
def get_pkgv_string(d, method_name='sortable_revision'):
|
||||
pkgv, revs = _get_srcrev(d, method_name=method_name)
|
||||
return pkgv
|
||||
|
||||
def get_srcrev(d, method_name='sortable_revision'):
|
||||
pkgv, revs = _get_srcrev(d, method_name=method_name)
|
||||
if not pkgv:
|
||||
raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI")
|
||||
return pkgv
|
||||
return format
|
||||
|
||||
def localpath(url, d):
|
||||
fetcher = bb.fetch2.Fetch([url], d)
|
||||
@@ -868,17 +850,10 @@ FETCH_EXPORT_VARS = ['HOME', 'PATH',
|
||||
'DBUS_SESSION_BUS_ADDRESS',
|
||||
'P4CONFIG',
|
||||
'SSL_CERT_FILE',
|
||||
'NODE_EXTRA_CA_CERTS',
|
||||
'AWS_PROFILE',
|
||||
'AWS_ACCESS_KEY_ID',
|
||||
'AWS_SECRET_ACCESS_KEY',
|
||||
'AWS_ROLE_ARN',
|
||||
'AWS_WEB_IDENTITY_TOKEN_FILE',
|
||||
'AWS_DEFAULT_REGION',
|
||||
'AWS_SESSION_TOKEN',
|
||||
'GIT_CACHE_PATH',
|
||||
'REMOTE_CONTAINERS_IPC',
|
||||
'SSL_CERT_DIR']
|
||||
'AWS_DEFAULT_REGION']
|
||||
|
||||
def get_fetcher_environment(d):
|
||||
newenv = {}
|
||||
@@ -943,10 +918,7 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
|
||||
elif e.stderr:
|
||||
output = "output:\n%s" % e.stderr
|
||||
else:
|
||||
if log:
|
||||
output = "see logfile for output"
|
||||
else:
|
||||
output = "no output"
|
||||
output = "no output"
|
||||
error_message = "Fetch command %s failed with exit code %s, %s" % (e.command, e.exitcode, output)
|
||||
except bb.process.CmdError as e:
|
||||
error_message = "Fetch command %s could not be run:\n%s" % (e.command, e.msg)
|
||||
@@ -1118,8 +1090,7 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):
|
||||
logger.debug("Mirror fetch failure for url %s (original url: %s)" % (ud.url, origud.url))
|
||||
logger.debug(str(e))
|
||||
try:
|
||||
if ud.method.cleanup_upon_failure():
|
||||
ud.method.clean(ud, ld)
|
||||
ud.method.clean(ud, ld)
|
||||
except UnboundLocalError:
|
||||
pass
|
||||
return False
|
||||
@@ -1244,7 +1215,6 @@ def srcrev_internal_helper(ud, d, name):
|
||||
if srcrev == "INVALID" or not srcrev:
|
||||
raise FetchError("Please set a valid SRCREV for url %s (possible key names are %s, or use a ;rev=X URL parameter)" % (str(attempts), ud.url), ud.url)
|
||||
if srcrev == "AUTOINC":
|
||||
d.setVar("__BBAUTOREV_ACTED_UPON", True)
|
||||
srcrev = ud.method.latest_revision(ud, d, name)
|
||||
|
||||
return srcrev
|
||||
@@ -1261,7 +1231,7 @@ def get_checksum_file_list(d):
|
||||
ud = fetch.ud[u]
|
||||
if ud and isinstance(ud.method, local.Local):
|
||||
found = False
|
||||
paths = ud.method.localfile_searchpaths(ud, d)
|
||||
paths = ud.method.localpaths(ud, d)
|
||||
for f in paths:
|
||||
pth = ud.decodedurl
|
||||
if os.path.exists(f):
|
||||
@@ -1317,13 +1287,18 @@ class FetchData(object):
|
||||
|
||||
if checksum_name in self.parm:
|
||||
checksum_expected = self.parm[checksum_name]
|
||||
elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3", "az", "crate", "gs"]:
|
||||
elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3", "az"]:
|
||||
checksum_expected = None
|
||||
else:
|
||||
checksum_expected = d.getVarFlag("SRC_URI", checksum_name)
|
||||
|
||||
setattr(self, "%s_expected" % checksum_id, checksum_expected)
|
||||
|
||||
for checksum_id in CHECKSUM_LIST:
|
||||
configure_checksum(checksum_id)
|
||||
|
||||
self.ignore_checksums = False
|
||||
|
||||
self.names = self.parm.get("name",'default').split(',')
|
||||
|
||||
self.method = None
|
||||
@@ -1345,11 +1320,6 @@ class FetchData(object):
|
||||
if hasattr(self.method, "urldata_init"):
|
||||
self.method.urldata_init(self, d)
|
||||
|
||||
for checksum_id in CHECKSUM_LIST:
|
||||
configure_checksum(checksum_id)
|
||||
|
||||
self.ignore_checksums = False
|
||||
|
||||
if "localpath" in self.parm:
|
||||
# if user sets localpath for file, use it instead.
|
||||
self.localpath = self.parm["localpath"]
|
||||
@@ -1429,9 +1399,6 @@ class FetchMethod(object):
|
||||
Is localpath something that can be represented by a checksum?
|
||||
"""
|
||||
|
||||
# We cannot compute checksums for None
|
||||
if urldata.localpath is None:
|
||||
return False
|
||||
# We cannot compute checksums for directories
|
||||
if os.path.isdir(urldata.localpath):
|
||||
return False
|
||||
@@ -1444,12 +1411,6 @@ class FetchMethod(object):
|
||||
"""
|
||||
return False
|
||||
|
||||
def cleanup_upon_failure(self):
|
||||
"""
|
||||
When a fetch fails, should clean() be called?
|
||||
"""
|
||||
return True
|
||||
|
||||
def verify_donestamp(self, ud, d):
|
||||
"""
|
||||
Verify the donestamp file
|
||||
@@ -1592,7 +1553,6 @@ class FetchMethod(object):
|
||||
unpackdir = rootdir
|
||||
|
||||
if not unpack or not cmd:
|
||||
urldata.unpack_tracer.unpack("file-copy", unpackdir)
|
||||
# If file == dest, then avoid any copies, as we already put the file into dest!
|
||||
dest = os.path.join(unpackdir, os.path.basename(file))
|
||||
if file != dest and not (os.path.exists(dest) and os.path.samefile(file, dest)):
|
||||
@@ -1607,8 +1567,6 @@ class FetchMethod(object):
|
||||
destdir = urlpath.rsplit("/", 1)[0] + '/'
|
||||
bb.utils.mkdirhier("%s/%s" % (unpackdir, destdir))
|
||||
cmd = 'cp -fpPRH "%s" "%s"' % (file, destdir)
|
||||
else:
|
||||
urldata.unpack_tracer.unpack("archive-extract", unpackdir)
|
||||
|
||||
if not cmd:
|
||||
return
|
||||
@@ -1700,55 +1658,6 @@ class FetchMethod(object):
|
||||
"""
|
||||
return []
|
||||
|
||||
|
||||
class DummyUnpackTracer(object):
|
||||
"""
|
||||
Abstract API definition for a class that traces unpacked source files back
|
||||
to their respective upstream SRC_URI entries, for software composition
|
||||
analysis, license compliance and detailed SBOM generation purposes.
|
||||
User may load their own unpack tracer class (instead of the dummy
|
||||
one) by setting the BB_UNPACK_TRACER_CLASS config parameter.
|
||||
"""
|
||||
def start(self, unpackdir, urldata_dict, d):
|
||||
"""
|
||||
Start tracing the core Fetch.unpack process, using an index to map
|
||||
unpacked files to each SRC_URI entry.
|
||||
This method is called by Fetch.unpack and it may receive nested calls by
|
||||
gitsm and npmsw fetchers, that expand SRC_URI entries by adding implicit
|
||||
URLs and by recursively calling Fetch.unpack from new (nested) Fetch
|
||||
instances.
|
||||
"""
|
||||
return
|
||||
def start_url(self, url):
|
||||
"""Start tracing url unpack process.
|
||||
This method is called by Fetch.unpack before the fetcher-specific unpack
|
||||
method starts, and it may receive nested calls by gitsm and npmsw
|
||||
fetchers.
|
||||
"""
|
||||
return
|
||||
def unpack(self, unpack_type, destdir):
|
||||
"""
|
||||
Set unpack_type and destdir for current url.
|
||||
This method is called by the fetcher-specific unpack method after url
|
||||
tracing started.
|
||||
"""
|
||||
return
|
||||
def finish_url(self, url):
|
||||
"""Finish tracing url unpack process and update the file index.
|
||||
This method is called by Fetch.unpack after the fetcher-specific unpack
|
||||
method finished its job, and it may receive nested calls by gitsm
|
||||
and npmsw fetchers.
|
||||
"""
|
||||
return
|
||||
def complete(self):
|
||||
"""
|
||||
Finish tracing the Fetch.unpack process, and check if all nested
|
||||
Fecth.unpack calls (if any) have been completed; if so, save collected
|
||||
metadata.
|
||||
"""
|
||||
return
|
||||
|
||||
|
||||
class Fetch(object):
|
||||
def __init__(self, urls, d, cache = True, localonly = False, connection_cache = None):
|
||||
if localonly and cache:
|
||||
@@ -1769,30 +1678,10 @@ class Fetch(object):
|
||||
if key in urldata_cache:
|
||||
self.ud = urldata_cache[key]
|
||||
|
||||
# the unpack_tracer object needs to be made available to possible nested
|
||||
# Fetch instances (when those are created by gitsm and npmsw fetchers)
|
||||
# so we set it as a global variable
|
||||
global unpack_tracer
|
||||
try:
|
||||
unpack_tracer
|
||||
except NameError:
|
||||
class_path = d.getVar("BB_UNPACK_TRACER_CLASS")
|
||||
if class_path:
|
||||
# use user-defined unpack tracer class
|
||||
import importlib
|
||||
module_name, _, class_name = class_path.rpartition(".")
|
||||
module = importlib.import_module(module_name)
|
||||
class_ = getattr(module, class_name)
|
||||
unpack_tracer = class_()
|
||||
else:
|
||||
# fall back to the dummy/abstract class
|
||||
unpack_tracer = DummyUnpackTracer()
|
||||
|
||||
for url in urls:
|
||||
if url not in self.ud:
|
||||
try:
|
||||
self.ud[url] = FetchData(url, d, localonly)
|
||||
self.ud[url].unpack_tracer = unpack_tracer
|
||||
except NonLocalMethod:
|
||||
if localonly:
|
||||
self.ud[url] = None
|
||||
@@ -1831,7 +1720,6 @@ class Fetch(object):
|
||||
network = self.d.getVar("BB_NO_NETWORK")
|
||||
premirroronly = bb.utils.to_boolean(self.d.getVar("BB_FETCH_PREMIRRORONLY"))
|
||||
|
||||
checksum_missing_messages = []
|
||||
for u in urls:
|
||||
ud = self.ud[u]
|
||||
ud.setup_localpath(self.d)
|
||||
@@ -1843,6 +1731,7 @@ class Fetch(object):
|
||||
|
||||
try:
|
||||
self.d.setVar("BB_NO_NETWORK", network)
|
||||
|
||||
if m.verify_donestamp(ud, self.d) and not m.need_update(ud, self.d):
|
||||
done = True
|
||||
elif m.try_premirror(ud, self.d):
|
||||
@@ -1895,7 +1784,7 @@ class Fetch(object):
|
||||
logger.debug(str(e))
|
||||
firsterr = e
|
||||
# Remove any incomplete fetch
|
||||
if not verified_stamp and m.cleanup_upon_failure():
|
||||
if not verified_stamp:
|
||||
m.clean(ud, self.d)
|
||||
logger.debug("Trying MIRRORS")
|
||||
mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
|
||||
@@ -1914,20 +1803,13 @@ class Fetch(object):
|
||||
raise ChecksumError("Stale Error Detected")
|
||||
|
||||
except BBFetchException as e:
|
||||
if isinstance(e, NoChecksumError):
|
||||
(message, _) = e.args
|
||||
checksum_missing_messages.append(message)
|
||||
continue
|
||||
elif isinstance(e, ChecksumError):
|
||||
if isinstance(e, ChecksumError):
|
||||
logger.error("Checksum failure fetching %s" % u)
|
||||
raise
|
||||
|
||||
finally:
|
||||
if ud.lockfile:
|
||||
bb.utils.unlockfile(lf)
|
||||
if checksum_missing_messages:
|
||||
logger.error("Missing SRC_URI checksum, please add those to the recipe: \n%s", "\n".join(checksum_missing_messages))
|
||||
raise BBFetchException("There was some missing checksums in the recipe")
|
||||
|
||||
def checkstatus(self, urls=None):
|
||||
"""
|
||||
@@ -1958,7 +1840,7 @@ class Fetch(object):
|
||||
ret = m.try_mirrors(self, ud, self.d, mirrors, True)
|
||||
|
||||
if not ret:
|
||||
raise FetchError("URL doesn't work", u)
|
||||
raise FetchError("URL %s doesn't work" % u, u)
|
||||
|
||||
def unpack(self, root, urls=None):
|
||||
"""
|
||||
@@ -1968,8 +1850,6 @@ class Fetch(object):
|
||||
if not urls:
|
||||
urls = self.urls
|
||||
|
||||
unpack_tracer.start(root, self.ud, self.d)
|
||||
|
||||
for u in urls:
|
||||
ud = self.ud[u]
|
||||
ud.setup_localpath(self.d)
|
||||
@@ -1977,15 +1857,11 @@ class Fetch(object):
|
||||
if ud.lockfile:
|
||||
lf = bb.utils.lockfile(ud.lockfile)
|
||||
|
||||
unpack_tracer.start_url(u)
|
||||
ud.method.unpack(ud, root, self.d)
|
||||
unpack_tracer.finish_url(u)
|
||||
|
||||
if ud.lockfile:
|
||||
bb.utils.unlockfile(lf)
|
||||
|
||||
unpack_tracer.complete()
|
||||
|
||||
def clean(self, urls=None):
|
||||
"""
|
||||
Clean files that the fetcher gets or places
|
||||
@@ -2087,7 +1963,6 @@ from . import npm
|
||||
from . import npmsw
|
||||
from . import az
|
||||
from . import crate
|
||||
from . import gcp
|
||||
|
||||
methods.append(local.Local())
|
||||
methods.append(wget.Wget())
|
||||
@@ -2109,4 +1984,3 @@ methods.append(npm.Npm())
|
||||
methods.append(npmsw.NpmShrinkWrap())
|
||||
methods.append(az.Az())
|
||||
methods.append(crate.Crate())
|
||||
methods.append(gcp.GCP())
|
||||
|
||||
@@ -33,7 +33,7 @@ class Crate(Wget):
return ud.type in ['crate']

def recommends_checksum(self, urldata):
return True
return False

def urldata_init(self, ud, d):
"""
@@ -56,14 +56,12 @@ class Crate(Wget):
if len(parts) < 5:
raise bb.fetch2.ParameterError("Invalid URL: Must be crate://HOST/NAME/VERSION", ud.url)

# version is expected to be the last token
# but ignore possible url parameters which will be used
# by the top fetcher class
version = parts[-1].split(";")[0]
# last field is version
version = parts[len(parts) - 1]
# second to last field is name
name = parts[-2]
name = parts[len(parts) - 2]
# host (this is to allow custom crate registries to be specified
host = '/'.join(parts[2:-2])
host = '/'.join(parts[2:len(parts) - 2])

# if using upstream just fix it up nicely
if host == 'crates.io':
@@ -71,8 +69,7 @@ class Crate(Wget):

ud.url = "https://%s/%s/%s/download" % (host, name, version)
ud.parm['downloadfilename'] = "%s-%s.crate" % (name, version)
if 'name' not in ud.parm:
ud.parm['name'] = '%s-%s' % (name, version)
ud.parm['name'] = name

logger.debug2("Fetching %s to %s" % (ud.url, ud.parm['downloadfilename']))

@@ -98,13 +95,11 @@ class Crate(Wget):
save_cwd = os.getcwd()
os.chdir(rootdir)

bp = d.getVar('BP')
if bp == ud.parm.get('name'):
pn = d.getVar('BPN')
if pn == ud.parm.get('name'):
cmd = "tar -xz --no-same-owner -f %s" % thefile
ud.unpack_tracer.unpack("crate-extract", rootdir)
else:
cargo_bitbake = self._cargo_bitbake_path(rootdir)
ud.unpack_tracer.unpack("cargo-extract", cargo_bitbake)

cmd = "tar -xz --no-same-owner -f %s -C %s" % (thefile, cargo_bitbake)

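A rough sketch of what the urldata_init logic above yields for a made-up crate URL (the crates.io host rewrite is only hinted at in the hunk, so the final host is an assumption):

# crate://crates.io/glob/0.3.1 is split into host, name and version, then:
name = "glob"
version = "0.3.1"
ud.parm['downloadfilename'] = "glob-0.3.1.crate"
ud.url = "https://%s/%s/%s/download" % (host, name, version)  # host may be rewritten for crates.io
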
@@ -1,102 +0,0 @@
"""
BitBake 'Fetch' implementation for Google Cloud Platform Storage.

Class for fetching files from Google Cloud Storage using the
Google Cloud Storage Python Client. The GCS Python Client must
be correctly installed, configured and authenticated prior to use.
Additionally, gsutil must also be installed.

"""

# Copyright (C) 2023, Snap Inc.
#
# Based in part on bb.fetch2.s3:
# Copyright (C) 2017 Andre McCurdy
#
# SPDX-License-Identifier: GPL-2.0-only
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import bb
import urllib.parse, urllib.error
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import logger
from bb.fetch2 import runfetchcmd

class GCP(FetchMethod):
    """
    Class to fetch urls via GCP's Python API.
    """
    def __init__(self):
        self.gcp_client = None

    def supports(self, ud, d):
        """
        Check to see if a given url can be fetched with GCP.
        """
        return ud.type in ['gs']

    def recommends_checksum(self, urldata):
        return True

    def urldata_init(self, ud, d):
        if 'downloadfilename' in ud.parm:
            ud.basename = ud.parm['downloadfilename']
        else:
            ud.basename = os.path.basename(ud.path)

        ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
        ud.basecmd = "gsutil stat"

    def get_gcp_client(self):
        from google.cloud import storage
        self.gcp_client = storage.Client(project=None)

    def download(self, ud, d):
        """
        Fetch urls using the GCP API.
        Assumes localpath was called first.
        """
        logger.debug2(f"Trying to download gs://{ud.host}{ud.path} to {ud.localpath}")
        if self.gcp_client is None:
            self.get_gcp_client()

        bb.fetch2.check_network_access(d, ud.basecmd, f"gs://{ud.host}{ud.path}")
        runfetchcmd("%s %s" % (ud.basecmd, f"gs://{ud.host}{ud.path}"), d)

        # Path sometimes has leading slash, so strip it
        path = ud.path.lstrip("/")
        blob = self.gcp_client.bucket(ud.host).blob(path)
        blob.download_to_filename(ud.localpath)

        # Additional sanity checks copied from the wget class (although there
        # are no known issues which mean these are required, treat the GCP API
        # tool with a little healthy suspicion).
        if not os.path.exists(ud.localpath):
            raise FetchError(f"The GCP API returned success for gs://{ud.host}{ud.path} but {ud.localpath} doesn't exist?!")

        if os.path.getsize(ud.localpath) == 0:
            os.remove(ud.localpath)
            raise FetchError(f"The downloaded file for gs://{ud.host}{ud.path} resulted in a zero size file?! Deleting and failing since this isn't right.")

        return True

    def checkstatus(self, fetch, ud, d):
        """
        Check the status of a URL.
        """
        logger.debug2(f"Checking status of gs://{ud.host}{ud.path}")
        if self.gcp_client is None:
            self.get_gcp_client()

        bb.fetch2.check_network_access(d, ud.basecmd, f"gs://{ud.host}{ud.path}")
        runfetchcmd("%s %s" % (ud.basecmd, f"gs://{ud.host}{ud.path}"), d)

        # Path sometimes has leading slash, so strip it
        path = ud.path.lstrip("/")
        if self.gcp_client.bucket(ud.host).blob(path).exists() == False:
            raise FetchError(f"The GCP API reported that gs://{ud.host}{ud.path} does not exist")
        else:
            return True

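A worked example of the path handling in download() and checkstatus() above (bucket and object names are made up):

# gs://my-bucket/downloads/foo-1.0.tar.gz
#   ud.host -> "my-bucket"
#   ud.path -> "/downloads/foo-1.0.tar.gz"  (leading slash stripped before use)
#   blob    -> self.gcp_client.bucket("my-bucket").blob("downloads/foo-1.0.tar.gz")
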
@@ -44,27 +44,13 @@ Supported SRC_URI options are:

- nobranch
Don't check the SHA validation for branch. set this option for the recipe
referring to commit which is valid in any namespace (branch, tag, ...)
instead of branch.
referring to commit which is valid in tag instead of branch.
The default is "0", set nobranch=1 if needed.

- subpath
Limit the checkout to a specific subpath of the tree.
By default, checkout the whole tree, set subpath=<path> if needed

- destsuffix
The name of the path in which to place the checkout.
By default, the path is git/, set destsuffix=<suffix> if needed

- usehead
For local git:// urls to use the current branch HEAD as the revision for use with
AUTOREV. Implies nobranch.

- lfs
Enable the checkout to use LFS for large files. This will download all LFS files
in the download step, as the unpack step does not have network access.
The default is "1", set lfs=0 to skip.

"""

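A hedged example combining several of the options documented above, written against the datastore API (the repository URL is made up):

d.setVar("SRC_URI", "git://git.example.com/foo.git;protocol=https;branch=main;destsuffix=foo-src;lfs=0")
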
# Copyright (C) 2005 Richard Purdie
|
||||
@@ -78,7 +64,6 @@ import fnmatch
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
import bb
|
||||
@@ -87,7 +72,6 @@ from contextlib import contextmanager
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import logger
|
||||
from bb.fetch2 import trusted_network
|
||||
|
||||
|
||||
sha1_re = re.compile(r'^[0-9a-f]{40}$')
|
||||
@@ -150,9 +134,6 @@ class Git(FetchMethod):
|
||||
def supports_checksum(self, urldata):
|
||||
return False
|
||||
|
||||
def cleanup_upon_failure(self):
|
||||
return False
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
"""
|
||||
init git specific variable within url data
|
||||
@@ -262,7 +243,7 @@ class Git(FetchMethod):
|
||||
for name in ud.names:
|
||||
ud.unresolvedrev[name] = 'HEAD'
|
||||
|
||||
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c gc.autoDetach=false -c core.pager=cat -c safe.bareRepository=all"
|
||||
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c gc.autoDetach=false -c core.pager=cat"
|
||||
|
||||
write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0"
|
||||
ud.write_tarballs = write_tarballs != "0" or ud.rebaseable
|
||||
@@ -277,7 +258,7 @@ class Git(FetchMethod):
|
||||
ud.unresolvedrev[name] = ud.revisions[name]
|
||||
ud.revisions[name] = self.latest_revision(ud, d, name)
|
||||
|
||||
gitsrcname = '%s%s' % (ud.host.replace(':', '.'), ud.path.replace('/', '.').replace('*', '.').replace(' ','_').replace('(', '_').replace(')', '_'))
|
||||
gitsrcname = '%s%s' % (ud.host.replace(':', '.'), ud.path.replace('/', '.').replace('*', '.').replace(' ','_'))
|
||||
if gitsrcname.startswith('.'):
|
||||
gitsrcname = gitsrcname[1:]
|
||||
|
||||
@@ -328,10 +309,7 @@ class Git(FetchMethod):
|
||||
return ud.clonedir
|
||||
|
||||
def need_update(self, ud, d):
|
||||
return self.clonedir_need_update(ud, d) \
|
||||
or self.shallow_tarball_need_update(ud) \
|
||||
or self.tarball_need_update(ud) \
|
||||
or self.lfs_need_update(ud, d)
|
||||
return self.clonedir_need_update(ud, d) or self.shallow_tarball_need_update(ud) or self.tarball_need_update(ud)
|
||||
|
||||
def clonedir_need_update(self, ud, d):
|
||||
if not os.path.exists(ud.clonedir):
|
||||
@@ -343,15 +321,6 @@ class Git(FetchMethod):
|
||||
return True
|
||||
return False
|
||||
|
||||
def lfs_need_update(self, ud, d):
|
||||
if self.clonedir_need_update(ud, d):
|
||||
return True
|
||||
|
||||
for name in ud.names:
|
||||
if not self._lfs_objects_downloaded(ud, d, name, ud.clonedir):
|
||||
return True
|
||||
return False
|
||||
|
||||
def clonedir_need_shallow_revs(self, ud, d):
|
||||
for rev in ud.shallow_revs:
|
||||
try:
|
||||
@@ -371,16 +340,6 @@ class Git(FetchMethod):
|
||||
# is not possible
|
||||
if bb.utils.to_boolean(d.getVar("BB_FETCH_PREMIRRORONLY")):
|
||||
return True
|
||||
# If the url is not in trusted network, that is, BB_NO_NETWORK is set to 0
|
||||
# and BB_ALLOWED_NETWORKS does not contain the host that ud.url uses, then
|
||||
# we need to try premirrors first as using upstream is destined to fail.
|
||||
if not trusted_network(d, ud.url):
|
||||
return True
|
||||
# the following check is to ensure incremental fetch in downloads, this is
|
||||
# because the premirror might be old and does not contain the new rev required,
|
||||
# and this will cause a total removal and new clone. So if we can reach to
|
||||
# network, we prefer upstream over premirror, though the premirror might contain
|
||||
# the new rev.
|
||||
if os.path.exists(ud.clonedir):
|
||||
return False
|
||||
return True
|
||||
@@ -401,47 +360,15 @@ class Git(FetchMethod):
|
||||
else:
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
|
||||
runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=tmpdir)
|
||||
output = runfetchcmd("%s remote" % ud.basecmd, d, quiet=True, workdir=ud.clonedir)
|
||||
if 'mirror' in output:
|
||||
runfetchcmd("%s remote rm mirror" % ud.basecmd, d, workdir=ud.clonedir)
|
||||
runfetchcmd("%s remote add --mirror=fetch mirror %s" % (ud.basecmd, tmpdir), d, workdir=ud.clonedir)
|
||||
fetch_cmd = "LANG=C %s fetch -f --update-head-ok --progress mirror " % (ud.basecmd)
|
||||
fetch_cmd = "LANG=C %s fetch -f --progress %s " % (ud.basecmd, shlex.quote(tmpdir))
|
||||
runfetchcmd(fetch_cmd, d, workdir=ud.clonedir)
|
||||
repourl = self._get_repo_url(ud)
|
||||
|
||||
needs_clone = False
|
||||
if os.path.exists(ud.clonedir):
|
||||
# The directory may exist, but not be the top level of a bare git
|
||||
# repository in which case it needs to be deleted and re-cloned.
|
||||
try:
|
||||
# Since clones can be bare, use --absolute-git-dir instead of --show-toplevel
|
||||
output = runfetchcmd("LANG=C %s rev-parse --absolute-git-dir" % ud.basecmd, d, workdir=ud.clonedir)
|
||||
toplevel = output.rstrip()
|
||||
|
||||
if not bb.utils.path_is_descendant(toplevel, ud.clonedir):
|
||||
logger.warning("Top level directory '%s' is not a descendant of '%s'. Re-cloning", toplevel, ud.clonedir)
|
||||
needs_clone = True
|
||||
except bb.fetch2.FetchError as e:
|
||||
logger.warning("Unable to get top level for %s (not a git directory?): %s", ud.clonedir, e)
|
||||
needs_clone = True
|
||||
except FileNotFoundError as e:
|
||||
logger.warning("%s", e)
|
||||
needs_clone = True
|
||||
|
||||
if needs_clone:
|
||||
shutil.rmtree(ud.clonedir)
|
||||
else:
|
||||
needs_clone = True
|
||||
|
||||
# If the repo still doesn't exist, fallback to cloning it
|
||||
if needs_clone:
|
||||
# We do this since git will use a "-l" option automatically for local urls where possible,
|
||||
# but it doesn't work when git/objects is a symlink, only works when it is a directory.
|
||||
if not os.path.exists(ud.clonedir):
|
||||
# We do this since git will use a "-l" option automatically for local urls where possible
|
||||
if repourl.startswith("file://"):
|
||||
repourl_path = repourl[7:]
|
||||
objects = os.path.join(repourl_path, 'objects')
|
||||
if os.path.isdir(objects) and not os.path.islink(objects):
|
||||
repourl = repourl_path
|
||||
repourl = repourl[7:]
|
||||
clone_cmd = "LANG=C %s clone --bare --mirror %s %s --progress" % (ud.basecmd, shlex.quote(repourl), ud.clonedir)
|
||||
if ud.proto.lower() != 'file':
|
||||
bb.fetch2.check_network_access(d, clone_cmd, ud.url)
|
||||
@@ -455,11 +382,7 @@ class Git(FetchMethod):
|
||||
runfetchcmd("%s remote rm origin" % ud.basecmd, d, workdir=ud.clonedir)
|
||||
|
||||
runfetchcmd("%s remote add --mirror=fetch origin %s" % (ud.basecmd, shlex.quote(repourl)), d, workdir=ud.clonedir)
|
||||
|
||||
if ud.nobranch:
|
||||
fetch_cmd = "LANG=C %s fetch -f --progress %s refs/*:refs/*" % (ud.basecmd, shlex.quote(repourl))
|
||||
else:
|
||||
fetch_cmd = "LANG=C %s fetch -f --progress %s refs/heads/*:refs/heads/* refs/tags/*:refs/tags/*" % (ud.basecmd, shlex.quote(repourl))
|
||||
fetch_cmd = "LANG=C %s fetch -f --progress %s refs/*:refs/*" % (ud.basecmd, shlex.quote(repourl))
|
||||
if ud.proto.lower() != 'file':
|
||||
bb.fetch2.check_network_access(d, fetch_cmd, ud.url)
|
||||
progresshandler = GitProgressHandler(d)
|
||||
@@ -482,14 +405,15 @@ class Git(FetchMethod):
|
||||
if missing_rev:
|
||||
raise bb.fetch2.FetchError("Unable to find revision %s even from upstream" % missing_rev)
|
||||
|
||||
if self.lfs_need_update(ud, d):
|
||||
if self._contains_lfs(ud, d, ud.clonedir) and self._need_lfs(ud):
|
||||
# Unpack temporary working copy, use it to run 'git checkout' to force pre-fetching
|
||||
# of all LFS blobs needed at the srcrev.
|
||||
#
|
||||
# It would be nice to just do this inline here by running 'git-lfs fetch'
|
||||
# on the bare clonedir, but that operation requires a working copy on some
|
||||
# releases of Git LFS.
|
||||
with tempfile.TemporaryDirectory(dir=d.getVar('DL_DIR')) as tmpdir:
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
|
||||
try:
|
||||
# Do the checkout. This implicitly involves a Git LFS fetch.
|
||||
Git.unpack(self, ud, tmpdir, d)
|
||||
|
||||
@@ -505,8 +429,10 @@ class Git(FetchMethod):
|
||||
# Only do this if the unpack resulted in a .git/lfs directory being
|
||||
# created; this only happens if at least one blob needed to be
|
||||
# downloaded.
|
||||
if os.path.exists(os.path.join(ud.destdir, ".git", "lfs")):
|
||||
runfetchcmd("tar -cf - lfs | tar -xf - -C %s" % ud.clonedir, d, workdir="%s/.git" % ud.destdir)
|
||||
if os.path.exists(os.path.join(tmpdir, "git", ".git", "lfs")):
|
||||
runfetchcmd("tar -cf - lfs | tar -xf - -C %s" % ud.clonedir, d, workdir="%s/git/.git" % tmpdir)
|
||||
finally:
|
||||
bb.utils.remove(tmpdir, recurse=True)
|
||||
|
||||
def build_mirror_data(self, ud, d):
|
||||
|
||||
@@ -544,7 +470,7 @@ class Git(FetchMethod):
|
||||
|
||||
logger.info("Creating tarball of git repository")
|
||||
with create_atomic(ud.fullmirror) as tfile:
|
||||
mtime = runfetchcmd("{} log --all -1 --format=%cD".format(ud.basecmd), d,
|
||||
mtime = runfetchcmd("git log --all -1 --format=%cD", d,
|
||||
quiet=True, workdir=ud.clonedir)
|
||||
runfetchcmd("tar -czf %s --owner oe:0 --group oe:0 --mtime \"%s\" ."
|
||||
% (tfile, mtime), d, workdir=ud.clonedir)
|
||||
@@ -632,8 +558,6 @@ class Git(FetchMethod):
|
||||
destdir = ud.destdir = os.path.join(destdir, destsuffix)
|
||||
if os.path.exists(destdir):
|
||||
bb.utils.prunedir(destdir)
|
||||
if not ud.bareclone:
|
||||
ud.unpack_tracer.unpack("git", destdir)
|
||||
|
||||
need_lfs = self._need_lfs(ud)
|
||||
|
||||
@@ -672,8 +596,6 @@ class Git(FetchMethod):
|
||||
raise bb.fetch2.FetchError("Repository %s has LFS content, install git-lfs on host to download (or set lfs=0 to ignore it)" % (repourl))
|
||||
elif not need_lfs:
|
||||
bb.note("Repository %s has LFS content but it is not being fetched" % (repourl))
|
||||
else:
|
||||
runfetchcmd("%s lfs install --local" % ud.basecmd, d, workdir=destdir)
|
||||
|
||||
if not ud.nocheckout:
|
||||
if subpath:
|
||||
@@ -725,35 +647,6 @@ class Git(FetchMethod):
|
||||
raise bb.fetch2.FetchError("The command '%s' gave output with more then 1 line unexpectedly, output: '%s'" % (cmd, output))
|
||||
return output.split()[0] != "0"
|
||||
|
||||
def _lfs_objects_downloaded(self, ud, d, name, wd):
|
||||
"""
|
||||
Verifies whether the LFS objects for requested revisions have already been downloaded
|
||||
"""
|
||||
# Bail out early if this repository doesn't use LFS
|
||||
if not self._need_lfs(ud) or not self._contains_lfs(ud, d, wd):
|
||||
return True
|
||||
|
||||
# The Git LFS specification specifies ([1]) the LFS folder layout so it should be safe to check for file
|
||||
# existence.
|
||||
# [1] https://github.com/git-lfs/git-lfs/blob/main/docs/spec.md#intercepting-git
|
||||
cmd = "%s lfs ls-files -l %s" \
|
||||
% (ud.basecmd, ud.revisions[name])
|
||||
output = runfetchcmd(cmd, d, quiet=True, workdir=wd).rstrip()
|
||||
# Do not do any further matching if no objects are managed by LFS
|
||||
if not output:
|
||||
return True
|
||||
|
||||
# Match all lines beginning with the hexadecimal OID
|
||||
oid_regex = re.compile("^(([a-fA-F0-9]{2})([a-fA-F0-9]{2})[A-Fa-f0-9]+)")
|
||||
for line in output.split("\n"):
|
||||
oid = re.search(oid_regex, line)
|
||||
if not oid:
|
||||
bb.warn("git lfs ls-files output '%s' did not match expected format." % line)
|
||||
if not os.path.exists(os.path.join(wd, "lfs", "objects", oid.group(2), oid.group(3), oid.group(1))):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def _need_lfs(self, ud):
|
||||
return ud.parm.get("lfs", "1") == "1"
|
||||
|
||||
@@ -762,11 +655,13 @@ class Git(FetchMethod):
|
||||
Check if the repository has 'lfs' (large file) content
|
||||
"""
|
||||
|
||||
if ud.nobranch:
|
||||
# If no branch is specified, use the current git commit
|
||||
refname = self._build_revision(ud, d, ud.names[0])
|
||||
elif wd == ud.clonedir:
|
||||
# The bare clonedir doesn't use the remote names; it has the branch immediately.
|
||||
if not ud.nobranch:
|
||||
branchname = ud.branches[ud.names[0]]
|
||||
else:
|
||||
branchname = "master"
|
||||
|
||||
# The bare clonedir doesn't use the remote names; it has the branch immediately.
|
||||
if wd == ud.clonedir:
|
||||
refname = ud.branches[ud.names[0]]
|
||||
else:
|
||||
refname = "origin/%s" % ud.branches[ud.names[0]]
|
||||
@@ -841,11 +736,11 @@ class Git(FetchMethod):
|
||||
"""
|
||||
Compute the HEAD revision for the url
|
||||
"""
|
||||
if not d.getVar("__BBSRCREV_SEEN"):
|
||||
if not d.getVar("__BBSEENSRCREV"):
|
||||
raise bb.fetch2.FetchError("Recipe uses a floating tag/branch '%s' for repo '%s' without a fixed SRCREV yet doesn't call bb.fetch2.get_srcrev() (use SRCPV in PV for OE)." % (ud.unresolvedrev[name], ud.host+ud.path))
|
||||
|
||||
# Ensure we mark as not cached
|
||||
bb.fetch2.mark_recipe_nocache(d)
|
||||
bb.fetch2.get_autorev(d)
|
||||
|
||||
output = self._lsremote(ud, d, "")
|
||||
# Tags of the form ^{} may not work, need to fallback to other form
|
||||
@@ -871,42 +766,38 @@ class Git(FetchMethod):
|
||||
"""
|
||||
pupver = ('', '')
|
||||
|
||||
tagregex = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or r"(?P<pver>([0-9][\.|_]?)+)")
|
||||
try:
|
||||
output = self._lsremote(ud, d, "refs/tags/*")
|
||||
except (bb.fetch2.FetchError, bb.fetch2.NetworkAccess) as e:
|
||||
bb.note("Could not list remote: %s" % str(e))
|
||||
return pupver
|
||||
|
||||
rev_tag_re = re.compile(r"([0-9a-f]{40})\s+refs/tags/(.*)")
|
||||
pver_re = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or r"(?P<pver>([0-9][\.|_]?)+)")
|
||||
nonrel_re = re.compile(r"(alpha|beta|rc|final)+")
|
||||
|
||||
verstring = ""
|
||||
revision = ""
|
||||
for line in output.split("\n"):
|
||||
if not line:
|
||||
break
|
||||
|
||||
m = rev_tag_re.match(line)
|
||||
if not m:
|
||||
continue
|
||||
|
||||
(revision, tag) = m.groups()
|
||||
|
||||
tag_head = line.split("/")[-1]
|
||||
# Ignore non-released branches
|
||||
if nonrel_re.search(tag):
|
||||
m = re.search(r"(alpha|beta|rc|final)+", tag_head)
|
||||
if m:
|
||||
continue
|
||||
|
||||
# search for version in the line
|
||||
m = pver_re.search(tag)
|
||||
if not m:
|
||||
tag = tagregex.search(tag_head)
|
||||
if tag is None:
|
||||
continue
|
||||
|
||||
pver = m.group('pver').replace("_", ".")
|
||||
tag = tag.group('pver')
|
||||
tag = tag.replace("_", ".")
|
||||
|
||||
if verstring and bb.utils.vercmp(("0", pver, ""), ("0", verstring, "")) < 0:
|
||||
if verstring and bb.utils.vercmp(("0", tag, ""), ("0", verstring, "")) < 0:
|
||||
continue
|
||||
|
||||
verstring = pver
|
||||
verstring = tag
|
||||
revision = line.split()[0]
|
||||
pupver = (verstring, revision)
|
||||
|
||||
return pupver
|
||||
|
||||
@@ -90,7 +90,7 @@ class GitSM(Git):
|
||||
# Convert relative to absolute uri based on parent uri
|
||||
if uris[m].startswith('..') or uris[m].startswith('./'):
|
||||
newud = copy.copy(ud)
|
||||
newud.path = os.path.normpath(os.path.join(newud.path, uris[m]))
|
||||
newud.path = os.path.realpath(os.path.join(newud.path, uris[m]))
|
||||
uris[m] = Git._get_repo_url(self, newud)
|
||||
|
||||
for module in submodules:
|
||||
@@ -115,21 +115,13 @@ class GitSM(Git):
|
||||
# This has to be a file reference
|
||||
proto = "file"
|
||||
url = "gitsm://" + uris[module]
|
||||
if url.endswith("{}{}".format(ud.host, ud.path)):
|
||||
if "{}{}".format(ud.host, ud.path) in url:
|
||||
raise bb.fetch2.FetchError("Submodule refers to the parent repository. This will cause deadlock situation in current version of Bitbake." \
|
||||
"Consider using git fetcher instead.")
|
||||
|
||||
url += ';protocol=%s' % proto
|
||||
url += ";name=%s" % module
|
||||
url += ";subpath=%s" % module
|
||||
url += ";nobranch=1"
|
||||
url += ";lfs=%s" % self._need_lfs(ud)
|
||||
# Note that adding "user=" here to give credentials to the
|
||||
# submodule is not supported. Since using SRC_URI to give git://
|
||||
# URL a password is not supported, one have to use one of the
|
||||
# recommended way (eg. ~/.netrc or SSH config) which does specify
|
||||
# the user (See comment in git.py).
|
||||
# So, we will not take patches adding "user=" support here.
|
||||
|
||||
ld = d.createCopy()
|
||||
# Not necessary to set SRC_URI, since we're passing the URI to
|
||||
@@ -218,10 +210,6 @@ class GitSM(Git):
|
||||
|
||||
try:
|
||||
newfetch = Fetch([url], d, cache=False)
|
||||
# modpath is needed by unpack tracer to calculate submodule
|
||||
# checkout dir
|
||||
new_ud = newfetch.ud[url]
|
||||
new_ud.modpath = modpath
|
||||
newfetch.unpack(root=os.path.dirname(os.path.join(repo_conf, 'modules', module)))
|
||||
except Exception as e:
|
||||
logger.error('gitsm: submodule unpack failed: %s %s' % (type(e).__name__, str(e)))
|
||||
@@ -247,12 +235,10 @@ class GitSM(Git):
|
||||
ret = self.process_submodules(ud, ud.destdir, unpack_submodules, d)
|
||||
|
||||
if not ud.bareclone and ret:
|
||||
# All submodules should already be downloaded and configured in the tree. This simply
|
||||
# sets up the configuration and checks out the files. The main project config should
|
||||
# remain unmodified, and no download from the internet should occur. As such, lfs smudge
|
||||
# should also be skipped as these files were already smudged in the fetch stage if lfs
|
||||
# was enabled.
|
||||
runfetchcmd("GIT_LFS_SKIP_SMUDGE=1 %s submodule update --recursive --no-fetch" % (ud.basecmd), d, quiet=True, workdir=ud.destdir)
|
||||
# All submodules should already be downloaded and configured in the tree. This simply sets
|
||||
# up the configuration and checks out the files. The main project config should remain
|
||||
# unmodified, and no download from the internet should occur.
|
||||
runfetchcmd("%s submodule update --recursive --no-fetch" % (ud.basecmd), d, quiet=True, workdir=ud.destdir)
|
||||
|
||||
def implicit_urldata(self, ud, d):
|
||||
import shutil, subprocess, tempfile
|
||||
|
||||
@@ -242,7 +242,6 @@ class Hg(FetchMethod):
|
||||
revflag = "-r %s" % ud.revision
|
||||
subdir = ud.parm.get("destsuffix", ud.module)
|
||||
codir = "%s/%s" % (destdir, subdir)
|
||||
ud.unpack_tracer.unpack("hg", codir)
|
||||
|
||||
scmdata = ud.parm.get("scmdata", "")
|
||||
if scmdata != "nokeep":
|
||||
|
||||
@@ -41,9 +41,9 @@ class Local(FetchMethod):
|
||||
"""
|
||||
Return the local filename of a given url assuming a successful fetch.
|
||||
"""
|
||||
return self.localfile_searchpaths(urldata, d)[-1]
|
||||
return self.localpaths(urldata, d)[-1]
|
||||
|
||||
def localfile_searchpaths(self, urldata, d):
|
||||
def localpaths(self, urldata, d):
|
||||
"""
|
||||
Return the local filename of a given url assuming a successful fetch.
|
||||
"""
|
||||
@@ -51,13 +51,11 @@ class Local(FetchMethod):
|
||||
path = urldata.decodedurl
|
||||
newpath = path
|
||||
if path[0] == "/":
|
||||
logger.debug2("Using absolute %s" % (path))
|
||||
return [path]
|
||||
filespath = d.getVar('FILESPATH')
|
||||
if filespath:
|
||||
logger.debug2("Searching for %s in paths:\n %s" % (path, "\n ".join(filespath.split(":"))))
|
||||
newpath, hist = bb.utils.which(filespath, path, history=True)
|
||||
logger.debug2("Using %s for %s" % (newpath, path))
|
||||
searched.extend(hist)
|
||||
return searched
|
||||
|
||||
@@ -74,7 +72,7 @@ class Local(FetchMethod):
|
||||
filespath = d.getVar('FILESPATH')
|
||||
if filespath:
|
||||
locations = filespath.split(":")
|
||||
msg = "Unable to find file " + urldata.url + " anywhere to download to " + urldata.localpath + ". The paths that were searched were:\n " + "\n ".join(locations)
|
||||
msg = "Unable to find file " + urldata.url + " anywhere. The paths that were searched were:\n " + "\n ".join(locations)
|
||||
raise FetchError(msg)
|
||||
|
||||
return True
|
||||
|
||||
@@ -44,12 +44,9 @@ def npm_package(package):
"""Convert the npm package name to remove unsupported character"""
# Scoped package names (with the @) use the same naming convention
# as the 'npm pack' command.
name = re.sub("/", "-", package)
name = name.lower()
name = re.sub(r"[^\-a-z0-9]", "", name)
name = name.strip("-")
return name

if package.startswith("@"):
return re.sub("/", "-", package[1:])
return package

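A quick illustration of how the two npm_package() variants above differ (package names are only examples):

# first variant (re.sub/lower/strip):  npm_package("@angular/core") -> "angular-core"
# second variant (strip leading "@"):  npm_package("@angular/core") -> "angular-core"
#                                      npm_package("Lodash")        -> "Lodash" (unchanged; the first variant returns "lodash")
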
def npm_filename(package, version):
|
||||
"""Get the filename of a npm package"""
|
||||
@@ -106,7 +103,6 @@ class NpmEnvironment(object):
|
||||
"""Run npm command in a controlled environment"""
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
d = bb.data.createCopy(self.d)
|
||||
d.setVar("PATH", d.getVar("PATH")) # PATH might contain $HOME - evaluate it before patching
|
||||
d.setVar("HOME", tmpdir)
|
||||
|
||||
if not workdir:
|
||||
@@ -298,7 +294,6 @@ class Npm(FetchMethod):
|
||||
destsuffix = ud.parm.get("destsuffix", "npm")
|
||||
destdir = os.path.join(rootdir, destsuffix)
|
||||
npm_unpack(ud.localpath, destdir, d)
|
||||
ud.unpack_tracer.unpack("npm", destdir)
|
||||
|
||||
def clean(self, ud, d):
|
||||
"""Clean any existing full or partial download"""
|
||||
|
||||
@@ -41,9 +41,8 @@ def foreach_dependencies(shrinkwrap, callback=None, dev=False):
|
||||
with:
|
||||
name = the package name (string)
|
||||
params = the package parameters (dictionary)
|
||||
destdir = the destination of the package (string)
|
||||
deptree = the package dependency tree (array of strings)
|
||||
"""
|
||||
# For handling old style dependencies entries in shinkwrap files
|
||||
def _walk_deps(deps, deptree):
|
||||
for name in deps:
|
||||
subtree = [*deptree, name]
|
||||
@@ -53,22 +52,9 @@ def foreach_dependencies(shrinkwrap, callback=None, dev=False):
|
||||
continue
|
||||
elif deps[name].get("bundled", False):
|
||||
continue
|
||||
destsubdirs = [os.path.join("node_modules", dep) for dep in subtree]
|
||||
destsuffix = os.path.join(*destsubdirs)
|
||||
callback(name, deps[name], destsuffix)
|
||||
callback(name, deps[name], subtree)
|
||||
|
||||
# packages entry means new style shrinkwrap file, else use dependencies
|
||||
packages = shrinkwrap.get("packages", None)
|
||||
if packages is not None:
|
||||
for package in packages:
|
||||
if package != "":
|
||||
name = package.split('node_modules/')[-1]
|
||||
package_infos = packages.get(package, {})
|
||||
if dev == False and package_infos.get("dev", False):
|
||||
continue
|
||||
callback(name, package_infos, package)
|
||||
else:
|
||||
_walk_deps(shrinkwrap.get("dependencies", {}), [])
|
||||
_walk_deps(shrinkwrap.get("dependencies", {}), [])
|
||||
|
||||
class NpmShrinkWrap(FetchMethod):
|
||||
"""Class to fetch all package from a shrinkwrap file"""
|
||||
@@ -89,10 +75,12 @@ class NpmShrinkWrap(FetchMethod):
|
||||
# Resolve the dependencies
|
||||
ud.deps = []
|
||||
|
||||
def _resolve_dependency(name, params, destsuffix):
|
||||
def _resolve_dependency(name, params, deptree):
|
||||
url = None
|
||||
localpath = None
|
||||
extrapaths = []
|
||||
destsubdirs = [os.path.join("node_modules", dep) for dep in deptree]
|
||||
destsuffix = os.path.join(*destsubdirs)
|
||||
unpack = True
|
||||
|
||||
integrity = params.get("integrity", None)
|
||||
@@ -141,28 +129,10 @@ class NpmShrinkWrap(FetchMethod):
|
||||
|
||||
localpath = os.path.join(d.getVar("DL_DIR"), localfile)
|
||||
|
||||
# Handle local tarball and link sources
|
||||
elif version.startswith("file"):
|
||||
localpath = version[5:]
|
||||
if not version.endswith(".tgz"):
|
||||
unpack = False
|
||||
|
||||
# Handle git sources
|
||||
elif version.startswith(("git", "bitbucket","gist")) or (
|
||||
not version.endswith((".tgz", ".tar", ".tar.gz"))
|
||||
and not version.startswith((".", "@", "/"))
|
||||
and "/" in version
|
||||
):
|
||||
elif version.startswith("git"):
|
||||
if version.startswith("github:"):
|
||||
version = "git+https://github.com/" + version[len("github:"):]
|
||||
elif version.startswith("gist:"):
|
||||
version = "git+https://gist.github.com/" + version[len("gist:"):]
|
||||
elif version.startswith("bitbucket:"):
|
||||
version = "git+https://bitbucket.org/" + version[len("bitbucket:"):]
|
||||
elif version.startswith("gitlab:"):
|
||||
version = "git+https://gitlab.com/" + version[len("gitlab:"):]
|
||||
elif not version.startswith(("git+","git:")):
|
||||
version = "git+https://github.com/" + version
|
||||
regex = re.compile(r"""
|
||||
^
|
||||
git\+
|
||||
@@ -188,12 +158,16 @@ class NpmShrinkWrap(FetchMethod):
|
||||
|
||||
url = str(uri)
|
||||
|
||||
# Handle local tarball and link sources
|
||||
elif version.startswith("file"):
|
||||
localpath = version[5:]
|
||||
if not version.endswith(".tgz"):
|
||||
unpack = False
|
||||
|
||||
else:
|
||||
raise ParameterError("Unsupported dependency: %s" % name, ud.url)
|
||||
|
||||
# name is needed by unpack tracer for module mapping
|
||||
ud.deps.append({
|
||||
"name": name,
|
||||
"url": url,
|
||||
"localpath": localpath,
|
||||
"extrapaths": extrapaths,
|
||||
@@ -219,23 +193,19 @@ class NpmShrinkWrap(FetchMethod):
|
||||
# This fetcher resolves multiple URIs from a shrinkwrap file and then
|
||||
# forwards it to a proxy fetcher. The management of the donestamp file,
|
||||
# the lockfile and the checksums are forwarded to the proxy fetcher.
|
||||
shrinkwrap_urls = [dep["url"] for dep in ud.deps if dep["url"]]
|
||||
if shrinkwrap_urls:
|
||||
ud.proxy = Fetch(shrinkwrap_urls, data)
|
||||
ud.proxy = Fetch([dep["url"] for dep in ud.deps if dep["url"]], data)
|
||||
ud.needdonestamp = False
|
||||
|
||||
@staticmethod
|
||||
def _foreach_proxy_method(ud, handle):
|
||||
returns = []
|
||||
#Check if there are dependencies before try to fetch them
|
||||
if len(ud.deps) > 0:
|
||||
for proxy_url in ud.proxy.urls:
|
||||
proxy_ud = ud.proxy.ud[proxy_url]
|
||||
proxy_d = ud.proxy.d
|
||||
proxy_ud.setup_localpath(proxy_d)
|
||||
lf = lockfile(proxy_ud.lockfile)
|
||||
returns.append(handle(proxy_ud.method, proxy_ud, proxy_d))
|
||||
unlockfile(lf)
|
||||
for proxy_url in ud.proxy.urls:
|
||||
proxy_ud = ud.proxy.ud[proxy_url]
|
||||
proxy_d = ud.proxy.d
|
||||
proxy_ud.setup_localpath(proxy_d)
|
||||
lf = lockfile(proxy_ud.lockfile)
|
||||
returns.append(handle(proxy_ud.method, proxy_ud, proxy_d))
|
||||
unlockfile(lf)
|
||||
return returns
|
||||
|
||||
def verify_donestamp(self, ud, d):
|
||||
@@ -272,7 +242,6 @@ class NpmShrinkWrap(FetchMethod):
|
||||
destsuffix = ud.parm.get("destsuffix")
|
||||
if destsuffix:
|
||||
destdir = os.path.join(rootdir, destsuffix)
|
||||
ud.unpack_tracer.unpack("npm-shrinkwrap", destdir)
|
||||
|
||||
bb.utils.mkdirhier(destdir)
|
||||
bb.utils.copyfile(ud.shrinkwrap_file,
|
||||
|
||||
@@ -103,7 +103,7 @@ class SFTP(FetchMethod):
|
||||
if path[:3] == '/~/':
|
||||
path = path[3:]
|
||||
|
||||
remote = '"%s%s:%s"' % (user, urlo.hostname, path)
|
||||
remote = '%s%s:%s' % (user, urlo.hostname, path)
|
||||
|
||||
cmd = '%s %s %s %s' % (basecmd, port, remote, lpath)
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@ from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import logger
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.utils import export_proxies
|
||||
from bs4 import BeautifulSoup
|
||||
from bs4 import SoupStrainer
|
||||
|
||||
@@ -87,10 +88,7 @@ class Wget(FetchMethod):
|
||||
if not ud.localfile:
|
||||
ud.localfile = d.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", "."))
|
||||
|
||||
self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30"
|
||||
|
||||
if ud.type == 'ftp' or ud.type == 'ftps':
|
||||
self.basecmd += " --passive-ftp"
|
||||
self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30 --passive-ftp"
|
||||
|
||||
if not self.check_certs(d):
|
||||
self.basecmd += " --no-check-certificate"
|
||||
@@ -108,8 +106,7 @@ class Wget(FetchMethod):
|
||||
|
||||
fetchcmd = self.basecmd
|
||||
|
||||
dldir = os.path.realpath(d.getVar("DL_DIR"))
|
||||
localpath = os.path.join(dldir, ud.localfile) + ".tmp"
|
||||
localpath = os.path.join(d.getVar("DL_DIR"), ud.localfile) + ".tmp"
|
||||
bb.utils.mkdirhier(os.path.dirname(localpath))
|
||||
fetchcmd += " -O %s" % shlex.quote(localpath)
|
||||
|
||||
@@ -129,21 +126,12 @@ class Wget(FetchMethod):
|
||||
uri = ud.url.split(";")[0]
|
||||
if os.path.exists(ud.localpath):
|
||||
# file exists, but we didnt complete it.. trying again..
|
||||
fetchcmd += " -c -P " + dldir + " '" + uri + "'"
|
||||
fetchcmd += d.expand(" -c -P ${DL_DIR} '%s'" % uri)
|
||||
else:
|
||||
fetchcmd += " -P " + dldir + " '" + uri + "'"
|
||||
fetchcmd += d.expand(" -P ${DL_DIR} '%s'" % uri)
|
||||
|
||||
self._runwget(ud, d, fetchcmd, False)
|
||||
|
||||
# Sanity check since wget can pretend it succeed when it didn't
|
||||
# Also, this used to happen if sourceforge sent us to the mirror page
|
||||
if not os.path.exists(localpath):
|
||||
raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, localpath), uri)
|
||||
|
||||
if os.path.getsize(localpath) == 0:
|
||||
os.remove(localpath)
|
||||
raise FetchError("The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right." % (uri), uri)
|
||||
|
||||
# Try and verify any checksum now, meaning if it isn't correct, we don't remove the
|
||||
# original file, which might be a race (imagine two recipes referencing the same
|
||||
# source, one with an incorrect checksum)
|
||||
@@ -153,6 +141,15 @@ class Wget(FetchMethod):
|
||||
# Our lock prevents multiple writers but mirroring code may grab incomplete files
|
||||
os.rename(localpath, localpath[:-4])
|
||||
|
||||
# Sanity check since wget can pretend it succeed when it didn't
|
||||
# Also, this used to happen if sourceforge sent us to the mirror page
|
||||
if not os.path.exists(ud.localpath):
|
||||
raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, ud.localpath), uri)
|
||||
|
||||
if os.path.getsize(ud.localpath) == 0:
|
||||
os.remove(ud.localpath)
|
||||
raise FetchError("The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right." % (uri), uri)
|
||||
|
||||
return True
|
||||
|
||||
def checkstatus(self, fetch, ud, d, try_again=True):
|
||||
@@ -344,8 +341,7 @@ class Wget(FetchMethod):
|
||||
opener = urllib.request.build_opener(*handlers)
|
||||
|
||||
try:
|
||||
uri_base = ud.url.split(";")[0]
|
||||
uri = "{}://{}{}".format(urllib.parse.urlparse(uri_base).scheme, ud.host, ud.path)
|
||||
uri = ud.url.split(";")[0]
|
||||
r = urllib.request.Request(uri)
|
||||
r.get_method = lambda: "HEAD"
|
||||
# Some servers (FusionForge, as used on Alioth) require that the
|
||||
@@ -364,22 +360,29 @@ class Wget(FetchMethod):
|
||||
|
||||
try:
|
||||
import netrc
|
||||
auth_data = netrc.netrc().authenticators(urllib.parse.urlparse(uri).hostname)
|
||||
if auth_data:
|
||||
login, _, password = auth_data
|
||||
add_basic_auth("%s:%s" % (login, password), r)
|
||||
except (FileNotFoundError, netrc.NetrcParseError):
|
||||
n = netrc.netrc()
|
||||
login, unused, password = n.authenticators(urllib.parse.urlparse(uri).hostname)
|
||||
add_basic_auth("%s:%s" % (login, password), r)
|
||||
except (TypeError, ImportError, IOError, netrc.NetrcParseError):
|
||||
pass
|
||||
|
||||
with opener.open(r, timeout=30) as response:
|
||||
pass
|
||||
except (urllib.error.URLError, ConnectionResetError, TimeoutError) as e:
|
||||
except urllib.error.URLError as e:
|
||||
if try_again:
|
||||
logger.debug2("checkstatus: trying again")
|
||||
return self.checkstatus(fetch, ud, d, False)
|
||||
else:
|
||||
# debug for now to avoid spamming the logs in e.g. remote sstate searches
|
||||
logger.debug2("checkstatus() urlopen failed for %s: %s" % (uri,e))
|
||||
logger.debug2("checkstatus() urlopen failed: %s" % e)
|
||||
return False
|
||||
except ConnectionResetError as e:
|
||||
if try_again:
|
||||
logger.debug2("checkstatus: trying again")
|
||||
return self.checkstatus(fetch, ud, d, False)
|
||||
else:
|
||||
# debug for now to avoid spamming the logs in e.g. remote sstate searches
|
||||
logger.debug2("checkstatus() urlopen failed: %s" % e)
|
||||
return False
|
||||
|
||||
return True
|
||||
@@ -641,10 +644,10 @@ class Wget(FetchMethod):
|
||||
# search for version matches on folders inside the path, like:
|
||||
# "5.7" in http://download.gnome.org/sources/${PN}/5.7/${PN}-${PV}.tar.gz
|
||||
dirver_regex = re.compile(r"(?P<dirver>[^/]*(\d+\.)*\d+([-_]r\d+)*)/")
|
||||
m = dirver_regex.findall(path)
|
||||
m = dirver_regex.search(path)
|
||||
if m:
|
||||
pn = d.getVar('PN')
|
||||
dirver = m[-1][0]
|
||||
dirver = m.group('dirver')
|
||||
|
||||
dirver_pn_regex = re.compile(r"%s\d?" % (re.escape(pn)))
|
||||
if not dirver_pn_regex.search(dirver):
|
||||
|
||||
@@ -12,12 +12,11 @@
import os
import sys
import logging
import argparse
import optparse
import warnings
import fcntl
import time
import traceback
import datetime

import bb
from bb import event
@@ -44,18 +43,18 @@ def present_options(optionlist):
else:
return optionlist[0]

class BitbakeHelpFormatter(argparse.HelpFormatter):
def _get_help_string(self, action):
class BitbakeHelpFormatter(optparse.IndentedHelpFormatter):
def format_option(self, option):
# We need to do this here rather than in the text we supply to
# add_option() because we don't want to call list_extension_modules()
# on every execution (since it imports all of the modules)
# Note also that we modify option.help rather than the returned text
# - this is so that we don't have to re-format the text ourselves
if action.dest == 'ui':
if option.dest == 'ui':
valid_uis = list_extension_modules(bb.ui, 'main')
return action.help.replace('@CHOICES@', present_options(valid_uis))
option.help = option.help.replace('@CHOICES@', present_options(valid_uis))

return action.help
return optparse.IndentedHelpFormatter.format_option(self, option)

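For illustration, with the -u/--ui help text that appears later in this diff, the formatter substitutes the discovered UI modules for the @CHOICES@ placeholder, so the rendered help reads roughly as below (the exact UI list depends on the installation and is an assumption here):

# "The user interface to use (@CHOICES@ - default knotty)"
#   -> "The user interface to use (knotty, ncurses or taskexp - default knotty)"
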
def list_extension_modules(pkg, checkattr):
|
||||
"""
|
||||
@@ -115,207 +114,180 @@ def _showwarning(message, category, filename, lineno, file=None, line=None):
|
||||
warnings.showwarning = _showwarning
|
||||
|
||||
def create_bitbake_parser():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="""\
|
||||
It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH which
|
||||
will provide the layer, BBFILES and other configuration information.
|
||||
""",
|
||||
formatter_class=BitbakeHelpFormatter,
|
||||
allow_abbrev=False,
|
||||
add_help=False, # help is manually added below in a specific argument group
|
||||
)
|
||||
parser = optparse.OptionParser(
|
||||
formatter=BitbakeHelpFormatter(),
|
||||
version="BitBake Build Tool Core version %s" % bb.__version__,
|
||||
usage="""%prog [options] [recipename/target recipe:do_task ...]
|
||||
|
||||
general_group = parser.add_argument_group('General options')
|
||||
task_group = parser.add_argument_group('Task control options')
|
||||
exec_group = parser.add_argument_group('Execution control options')
|
||||
logging_group = parser.add_argument_group('Logging/output control options')
|
||||
server_group = parser.add_argument_group('Server options')
|
||||
config_group = parser.add_argument_group('Configuration options')
|
||||
Executes the specified task (default is 'build') for a given set of target recipes (.bb files).
|
||||
It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH which
|
||||
will provide the layer, BBFILES and other configuration information.""")
|
||||
|
||||
general_group.add_argument("targets", nargs="*", metavar="recipename/target",
|
||||
help="Execute the specified task (default is 'build') for these target "
|
||||
"recipes (.bb files).")
|
||||
parser.add_option("-b", "--buildfile", action="store", dest="buildfile", default=None,
|
||||
help="Execute tasks from a specific .bb recipe directly. WARNING: Does "
|
||||
"not handle any dependencies from other recipes.")
|
||||
|
||||
general_group.add_argument("-s", "--show-versions", action="store_true",
|
||||
help="Show current and preferred versions of all recipes.")
|
||||
parser.add_option("-k", "--continue", action="store_false", dest="halt", default=True,
|
||||
help="Continue as much as possible after an error. While the target that "
|
||||
"failed and anything depending on it cannot be built, as much as "
|
||||
"possible will be built before stopping.")
|
||||
|
||||
general_group.add_argument("-e", "--environment", action="store_true",
|
||||
dest="show_environment",
|
||||
help="Show the global or per-recipe environment complete with information"
|
||||
" about where variables were set/changed.")
|
||||
parser.add_option("-f", "--force", action="store_true", dest="force", default=False,
|
||||
help="Force the specified targets/task to run (invalidating any "
|
||||
"existing stamp file).")
|
||||
|
||||
general_group.add_argument("-g", "--graphviz", action="store_true", dest="dot_graph",
|
||||
help="Save dependency tree information for the specified "
|
||||
"targets in the dot syntax.")
|
||||
parser.add_option("-c", "--cmd", action="store", dest="cmd",
|
||||
help="Specify the task to execute. The exact options available "
|
||||
"depend on the metadata. Some examples might be 'compile'"
|
||||
" or 'populate_sysroot' or 'listtasks' may give a list of "
|
||||
"the tasks available.")
|
||||
|
||||
parser.add_option("-C", "--clear-stamp", action="store", dest="invalidate_stamp",
|
||||
help="Invalidate the stamp for the specified task such as 'compile' "
|
||||
"and then run the default task for the specified target(s).")
|
||||
|
||||
parser.add_option("-r", "--read", action="append", dest="prefile", default=[],
|
||||
help="Read the specified file before bitbake.conf.")
|
||||
|
||||
parser.add_option("-R", "--postread", action="append", dest="postfile", default=[],
|
||||
help="Read the specified file after bitbake.conf.")
|
||||
|
||||
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
|
||||
help="Enable tracing of shell tasks (with 'set -x'). "
|
||||
"Also print bb.note(...) messages to stdout (in "
|
||||
"addition to writing them to ${T}/log.do_<task>).")
|
||||
|
||||
parser.add_option("-D", "--debug", action="count", dest="debug", default=0,
|
||||
help="Increase the debug level. You can specify this "
|
||||
"more than once. -D sets the debug level to 1, "
|
||||
"where only bb.debug(1, ...) messages are printed "
|
||||
"to stdout; -DD sets the debug level to 2, where "
|
||||
"both bb.debug(1, ...) and bb.debug(2, ...) "
|
||||
"messages are printed; etc. Without -D, no debug "
|
||||
"messages are printed. Note that -D only affects "
|
||||
"output to stdout. All debug messages are written "
|
||||
"to ${T}/log.do_taskname, regardless of the debug "
|
||||
"level.")
|
||||
|
||||
parser.add_option("-q", "--quiet", action="count", dest="quiet", default=0,
|
||||
help="Output less log message data to the terminal. You can specify this more than once.")
|
||||
|
||||
parser.add_option("-n", "--dry-run", action="store_true", dest="dry_run", default=False,
|
||||
help="Don't execute, just go through the motions.")
|
||||
|
||||
parser.add_option("-S", "--dump-signatures", action="append", dest="dump_signatures",
|
||||
default=[], metavar="SIGNATURE_HANDLER",
|
||||
help="Dump out the signature construction information, with no task "
|
||||
"execution. The SIGNATURE_HANDLER parameter is passed to the "
|
||||
"handler. Two common values are none and printdiff but the handler "
|
||||
"may define more/less. none means only dump the signature, printdiff"
|
||||
" means compare the dumped signature with the cached one.")
|
||||
|
||||
parser.add_option("-p", "--parse-only", action="store_true",
|
||||
dest="parse_only", default=False,
|
||||
help="Quit after parsing the BB recipes.")
|
||||
|
||||
parser.add_option("-s", "--show-versions", action="store_true",
|
||||
dest="show_versions", default=False,
|
||||
help="Show current and preferred versions of all recipes.")
|
||||
|
||||
parser.add_option("-e", "--environment", action="store_true",
|
||||
dest="show_environment", default=False,
|
||||
help="Show the global or per-recipe environment complete with information"
|
||||
" about where variables were set/changed.")
|
||||
|
||||
parser.add_option("-g", "--graphviz", action="store_true", dest="dot_graph", default=False,
|
||||
help="Save dependency tree information for the specified "
|
||||
"targets in the dot syntax.")
|
||||
|
||||
parser.add_option("-I", "--ignore-deps", action="append",
|
||||
dest="extra_assume_provided", default=[],
|
||||
help="Assume these dependencies don't exist and are already provided "
|
||||
"(equivalent to ASSUME_PROVIDED). Useful to make dependency "
|
||||
"graphs more appealing")
|
||||
|
||||
parser.add_option("-l", "--log-domains", action="append", dest="debug_domains", default=[],
|
||||
help="Show debug logging for the specified logging domains")
|
||||
|
||||
parser.add_option("-P", "--profile", action="store_true", dest="profile", default=False,
|
||||
help="Profile the command and save reports.")
|
||||
|
||||
# @CHOICES@ is substituted out by BitbakeHelpFormatter above
|
||||
general_group.add_argument("-u", "--ui",
|
||||
default=os.environ.get('BITBAKE_UI', 'knotty'),
|
||||
help="The user interface to use (@CHOICES@ - default %(default)s).")
|
||||
parser.add_option("-u", "--ui", action="store", dest="ui",
|
||||
default=os.environ.get('BITBAKE_UI', 'knotty'),
|
||||
help="The user interface to use (@CHOICES@ - default %default).")
|
||||
|
||||
general_group.add_argument("--version", action="store_true",
|
||||
help="Show programs version and exit.")
|
||||
parser.add_option("", "--token", action="store", dest="xmlrpctoken",
|
||||
default=os.environ.get("BBTOKEN"),
|
||||
help="Specify the connection token to be used when connecting "
|
||||
"to a remote server.")
|
||||
|
||||
general_group.add_argument('-h', '--help', action='help',
|
||||
help='Show this help message and exit.')
|
||||
parser.add_option("", "--revisions-changed", action="store_true",
|
||||
dest="revisions_changed", default=False,
|
||||
help="Set the exit code depending on whether upstream floating "
|
||||
"revisions have changed or not.")
|
||||
|
||||
parser.add_option("", "--server-only", action="store_true",
|
||||
dest="server_only", default=False,
|
||||
help="Run bitbake without a UI, only starting a server "
|
||||
"(cooker) process.")
|
||||
|
||||
task_group.add_argument("-f", "--force", action="store_true",
|
||||
help="Force the specified targets/task to run (invalidating any "
|
||||
"existing stamp file).")
|
||||
parser.add_option("-B", "--bind", action="store", dest="bind", default=False,
|
||||
help="The name/address for the bitbake xmlrpc server to bind to.")
|
||||
|
||||
task_group.add_argument("-c", "--cmd",
|
||||
help="Specify the task to execute. The exact options available "
|
||||
"depend on the metadata. Some examples might be 'compile'"
|
||||
" or 'populate_sysroot' or 'listtasks' may give a list of "
|
||||
"the tasks available.")
|
||||
parser.add_option("-T", "--idle-timeout", type=float, dest="server_timeout",
|
||||
default=os.getenv("BB_SERVER_TIMEOUT"),
|
||||
help="Set timeout to unload bitbake server due to inactivity, "
|
||||
"set to -1 means no unload, "
|
||||
"default: Environment variable BB_SERVER_TIMEOUT.")
|
||||
|
||||
task_group.add_argument("-C", "--clear-stamp", dest="invalidate_stamp",
|
||||
help="Invalidate the stamp for the specified task such as 'compile' "
|
||||
"and then run the default task for the specified target(s).")
|
||||
parser.add_option("", "--no-setscene", action="store_true",
|
||||
dest="nosetscene", default=False,
|
||||
help="Do not run any setscene tasks. sstate will be ignored and "
|
||||
"everything needed, built.")
|
||||
|
||||
task_group.add_argument("--runall", action="append", default=[],
|
||||
help="Run the specified task for any recipe in the taskgraph of the "
|
||||
"specified target (even if it wouldn't otherwise have run).")
|
||||
parser.add_option("", "--skip-setscene", action="store_true",
|
||||
dest="skipsetscene", default=False,
|
||||
help="Skip setscene tasks if they would be executed. Tasks previously "
|
||||
"restored from sstate will be kept, unlike --no-setscene")
|
||||
|
||||
task_group.add_argument("--runonly", action="append",
|
||||
help="Run only the specified task within the taskgraph of the "
|
||||
"specified targets (and any task dependencies those tasks may have).")
|
||||
parser.add_option("", "--setscene-only", action="store_true",
|
||||
dest="setsceneonly", default=False,
|
||||
help="Only run setscene tasks, don't run any real tasks.")
|
||||
|
||||
task_group.add_argument("--no-setscene", action="store_true",
|
||||
dest="nosetscene",
|
||||
help="Do not run any setscene tasks. sstate will be ignored and "
|
||||
"everything needed, built.")
|
||||
parser.add_option("", "--remote-server", action="store", dest="remote_server",
|
||||
default=os.environ.get("BBSERVER"),
|
||||
help="Connect to the specified server.")
|
||||
|
||||
task_group.add_argument("--skip-setscene", action="store_true",
|
||||
dest="skipsetscene",
|
||||
help="Skip setscene tasks if they would be executed. Tasks previously "
|
||||
"restored from sstate will be kept, unlike --no-setscene.")
|
||||
parser.add_option("-m", "--kill-server", action="store_true",
|
||||
dest="kill_server", default=False,
|
||||
help="Terminate any running bitbake server.")
|
||||
|
||||
task_group.add_argument("--setscene-only", action="store_true",
|
||||
dest="setsceneonly",
|
||||
help="Only run setscene tasks, don't run any real tasks.")
|
||||
parser.add_option("", "--observe-only", action="store_true",
|
||||
dest="observe_only", default=False,
|
||||
help="Connect to a server as an observing-only client.")
|
||||
|
||||
parser.add_option("", "--status-only", action="store_true",
|
||||
dest="status_only", default=False,
|
||||
help="Check the status of the remote bitbake server.")
|
||||
|
||||
exec_group.add_argument("-n", "--dry-run", action="store_true",
|
||||
help="Don't execute, just go through the motions.")
|
||||
parser.add_option("-w", "--write-log", action="store", dest="writeeventlog",
|
||||
default=os.environ.get("BBEVENTLOG"),
|
||||
help="Writes the event log of the build to a bitbake event json file. "
|
||||
"Use '' (empty string) to assign the name automatically.")
|
||||
|
||||
exec_group.add_argument("-p", "--parse-only", action="store_true",
|
||||
help="Quit after parsing the BB recipes.")
|
||||
|
||||
exec_group.add_argument("-k", "--continue", action="store_false", dest="halt",
|
||||
help="Continue as much as possible after an error. While the target that "
|
||||
"failed and anything depending on it cannot be built, as much as "
|
||||
"possible will be built before stopping.")
|
||||
|
||||
exec_group.add_argument("-P", "--profile", action="store_true",
|
||||
help="Profile the command and save reports.")
|
||||
|
||||
exec_group.add_argument("-S", "--dump-signatures", action="append",
|
||||
default=[], metavar="SIGNATURE_HANDLER",
|
||||
help="Dump out the signature construction information, with no task "
|
||||
"execution. The SIGNATURE_HANDLER parameter is passed to the "
|
||||
"handler. Two common values are none and printdiff but the handler "
|
||||
"may define more/less. none means only dump the signature, printdiff"
|
||||
" means recursively compare the dumped signature with the most recent"
|
||||
" one in a local build or sstate cache (can be used to find out why tasks re-run"
|
||||
" when that is not expected)")
|
||||
|
||||
exec_group.add_argument("--revisions-changed", action="store_true",
help="Set the exit code depending on whether upstream floating "
"revisions have changed or not.")

exec_group.add_argument("-b", "--buildfile",
help="Execute tasks from a specific .bb recipe directly. WARNING: Does "
"not handle any dependencies from other recipes.")

logging_group.add_argument("-D", "--debug", action="count", default=0,
help="Increase the debug level. You can specify this "
"more than once. -D sets the debug level to 1, "
"where only bb.debug(1, ...) messages are printed "
"to stdout; -DD sets the debug level to 2, where "
"both bb.debug(1, ...) and bb.debug(2, ...) "
"messages are printed; etc. Without -D, no debug "
"messages are printed. Note that -D only affects "
"output to stdout. All debug messages are written "
"to ${T}/log.do_taskname, regardless of the debug "
"level.")

logging_group.add_argument("-l", "--log-domains", action="append", dest="debug_domains",
default=[],
help="Show debug logging for the specified logging domains.")

logging_group.add_argument("-v", "--verbose", action="store_true",
help="Enable tracing of shell tasks (with 'set -x'). "
"Also print bb.note(...) messages to stdout (in "
"addition to writing them to ${T}/log.do_<task>).")

logging_group.add_argument("-q", "--quiet", action="count", default=0,
help="Output less log message data to the terminal. You can specify this "
"more than once.")

logging_group.add_argument("-w", "--write-log", dest="writeeventlog",
default=os.environ.get("BBEVENTLOG"),
help="Writes the event log of the build to a bitbake event json file. "
"Use '' (empty string) to assign the name automatically.")

server_group.add_argument("-B", "--bind", default=False,
help="The name/address for the bitbake xmlrpc server to bind to.")

server_group.add_argument("-T", "--idle-timeout", type=float, dest="server_timeout",
default=os.getenv("BB_SERVER_TIMEOUT"),
help="Set timeout to unload bitbake server due to inactivity, "
"set to -1 means no unload, "
"default: Environment variable BB_SERVER_TIMEOUT.")

server_group.add_argument("--remote-server",
default=os.environ.get("BBSERVER"),
help="Connect to the specified server.")

server_group.add_argument("-m", "--kill-server", action="store_true",
help="Terminate any running bitbake server.")

server_group.add_argument("--token", dest="xmlrpctoken",
default=os.environ.get("BBTOKEN"),
help="Specify the connection token to be used when connecting "
"to a remote server.")

server_group.add_argument("--observe-only", action="store_true",
help="Connect to a server as an observing-only client.")

server_group.add_argument("--status-only", action="store_true",
help="Check the status of the remote bitbake server.")

server_group.add_argument("--server-only", action="store_true",
help="Run bitbake without a UI, only starting a server "
"(cooker) process.")

config_group.add_argument("-r", "--read", action="append", dest="prefile", default=[],
help="Read the specified file before bitbake.conf.")

config_group.add_argument("-R", "--postread", action="append", dest="postfile", default=[],
help="Read the specified file after bitbake.conf.")

config_group.add_argument("-I", "--ignore-deps", action="append",
dest="extra_assume_provided", default=[],
help="Assume these dependencies don't exist and are already provided "
"(equivalent to ASSUME_PROVIDED). Useful to make dependency "
"graphs more appealing.")
parser.add_option("", "--runall", action="append", dest="runall",
help="Run the specified task for any recipe in the taskgraph of the specified target (even if it wouldn't otherwise have run).")

parser.add_option("", "--runonly", action="append", dest="runonly",
help="Run only the specified task within the taskgraph of the specified targets (and any task dependencies those tasks may have).")
return parser
class BitBakeConfigParameters(cookerdata.ConfigParameters):
def parseCommandLine(self, argv=sys.argv):
parser = create_bitbake_parser()
options = parser.parse_intermixed_args(argv[1:])
if options.version:
print("BitBake Build Tool Core version %s" % bb.__version__)
sys.exit(0)
options, targets = parser.parse_args(argv)
if options.quiet and options.verbose:
parser.error("options --quiet and --verbose are mutually exclusive")
@@ -347,7 +319,7 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
|
||||
else:
|
||||
options.xmlrpcinterface = (None, 0)
|
||||
|
||||
return options, options.targets
|
||||
return options, targets[1:]
|
||||
|
||||
|
||||
def bitbake_main(configParams, configuration):
|
||||
@@ -412,9 +384,6 @@ def bitbake_main(configParams, configuration):
|
||||
|
||||
return 1
|
||||
|
||||
def timestamp():
|
||||
return datetime.datetime.now().strftime('%H:%M:%S.%f')
|
||||
|
||||
def setup_bitbake(configParams, extrafeatures=None):
|
||||
# Ensure logging messages get sent to the UI as events
|
||||
handler = bb.event.LogHandler()
|
||||
@@ -422,11 +391,6 @@ def setup_bitbake(configParams, extrafeatures=None):
|
||||
# In status only mode there are no logs and no UI
|
||||
logger.addHandler(handler)
|
||||
|
||||
if configParams.dump_signatures:
|
||||
if extrafeatures is None:
|
||||
extrafeatures = []
|
||||
extrafeatures.append(bb.cooker.CookerFeatures.RECIPE_SIGGEN_INFO)
|
||||
|
||||
if configParams.server_only:
|
||||
featureset = []
|
||||
ui_module = None
|
||||
@@ -454,7 +418,7 @@ def setup_bitbake(configParams, extrafeatures=None):
|
||||
retries = 8
|
||||
while retries:
|
||||
try:
|
||||
topdir, lock, lockfile = lockBitbake()
|
||||
topdir, lock = lockBitbake()
|
||||
sockname = topdir + "/bitbake.sock"
|
||||
if lock:
|
||||
if configParams.status_only or configParams.kill_server:
|
||||
@@ -465,22 +429,18 @@ def setup_bitbake(configParams, extrafeatures=None):
|
||||
logger.info("Starting bitbake server...")
|
||||
# Clear the event queue since we already displayed messages
|
||||
bb.event.ui_queue = []
|
||||
server = bb.server.process.BitBakeServer(lock, sockname, featureset, configParams.server_timeout, configParams.xmlrpcinterface, configParams.profile)
|
||||
server = bb.server.process.BitBakeServer(lock, sockname, featureset, configParams.server_timeout, configParams.xmlrpcinterface)
|
||||
|
||||
else:
|
||||
logger.info("Reconnecting to bitbake server...")
|
||||
if not os.path.exists(sockname):
|
||||
logger.info("Previous bitbake instance shutting down?, waiting to retry... (%s)" % timestamp())
|
||||
procs = bb.server.process.get_lockfile_process_msg(lockfile)
|
||||
if procs:
|
||||
logger.info("Processes holding bitbake.lock (missing socket %s):\n%s" % (sockname, procs))
|
||||
logger.info("Directory listing: %s" % (str(os.listdir(topdir))))
|
||||
logger.info("Previous bitbake instance shutting down?, waiting to retry...")
|
||||
i = 0
|
||||
lock = None
|
||||
# Wait for 5s or until we can get the lock
|
||||
while not lock and i < 50:
|
||||
time.sleep(0.1)
|
||||
_, lock, _ = lockBitbake()
|
||||
_, lock = lockBitbake()
|
||||
i += 1
|
||||
if lock:
|
||||
bb.utils.unlockfile(lock)
|
||||
@@ -499,9 +459,9 @@ def setup_bitbake(configParams, extrafeatures=None):
|
||||
retries -= 1
|
||||
tryno = 8 - retries
|
||||
if isinstance(e, (bb.server.process.ProcessTimeout, BrokenPipeError, EOFError, SystemExit)):
|
||||
logger.info("Retrying server connection (#%d)... (%s)" % (tryno, timestamp()))
|
||||
logger.info("Retrying server connection (#%d)..." % tryno)
|
||||
else:
|
||||
logger.info("Retrying server connection (#%d)... (%s, %s)" % (tryno, traceback.format_exc(), timestamp()))
|
||||
logger.info("Retrying server connection (#%d)... (%s)" % (tryno, traceback.format_exc()))
|
||||
|
||||
if not retries:
|
||||
bb.fatal("Unable to connect to bitbake server, or start one (server startup failures would be in bitbake-cookerdaemon.log).")
|
||||
@@ -530,5 +490,5 @@ def lockBitbake():
|
||||
bb.error("Unable to find conf/bblayers.conf or conf/bitbake.conf. BBPATH is unset and/or not in a build directory?")
|
||||
raise BBMainFatal
|
||||
lockfile = topdir + "/bitbake.lock"
|
||||
return topdir, bb.utils.lockfile(lockfile, False, False), lockfile
|
||||
return topdir, bb.utils.lockfile(lockfile, False, False)
|
||||
|
||||
|
||||
@@ -234,10 +234,9 @@ class diskMonitor:
|
||||
freeInode = st.f_favail
|
||||
|
||||
if minInode and freeInode < minInode:
|
||||
# Some filesystems use dynamic inodes so can't run out.
|
||||
# This is reported by the inode count being 0 (btrfs) or the free
|
||||
# inode count being -1 (cephfs).
|
||||
if st.f_files == 0 or st.f_favail == -1:
|
||||
# Some filesystems use dynamic inodes so can't run out
|
||||
# (e.g. btrfs). This is reported by the inode count being 0.
|
||||
if st.f_files == 0:
|
||||
self.devDict[k][2] = None
|
||||
continue
|
||||
# Always show warning, the self.checked would always be False if the action is WARN
|
||||
|
||||
@@ -230,7 +230,7 @@ def logger_create(name, output=sys.stderr, level=logging.INFO, preserve_handlers
|
||||
console = logging.StreamHandler(output)
|
||||
console.addFilter(bb.msg.LogFilterShowOnce())
|
||||
format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
|
||||
if color == 'always' or (color == 'auto' and output.isatty() and os.environ.get('NO_COLOR', '') == ''):
|
||||
if color == 'always' or (color == 'auto' and output.isatty()):
|
||||
format.enable_color()
|
||||
console.setFormatter(format)
|
||||
if preserve_handlers:
|
||||
|
||||
@@ -49,32 +49,20 @@ class SkipPackage(SkipRecipe):
|
||||
__mtime_cache = {}
|
||||
def cached_mtime(f):
|
||||
if f not in __mtime_cache:
|
||||
res = os.stat(f)
|
||||
__mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
|
||||
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
|
||||
return __mtime_cache[f]
|
||||
|
||||
def cached_mtime_noerror(f):
|
||||
if f not in __mtime_cache:
|
||||
try:
|
||||
res = os.stat(f)
|
||||
__mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
|
||||
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
|
||||
except OSError:
|
||||
return 0
|
||||
return __mtime_cache[f]
|
||||
|
||||
def check_mtime(f, mtime):
|
||||
try:
|
||||
res = os.stat(f)
|
||||
current_mtime = (res.st_mtime_ns, res.st_size, res.st_ino)
|
||||
__mtime_cache[f] = current_mtime
|
||||
except OSError:
|
||||
current_mtime = 0
|
||||
return current_mtime == mtime
|
||||
|
||||
def update_mtime(f):
|
||||
try:
|
||||
res = os.stat(f)
|
||||
__mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
|
||||
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
|
||||
except OSError:
|
||||
if f in __mtime_cache:
|
||||
del __mtime_cache[f]
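
A minimal standalone illustration (standard library only, temporary file created just for the example) of why the new side of this hunk keys the cache on an (mtime_ns, size, inode) tuple rather than a bare mtime: a rewrite that slips inside mtime granularity is still caught by the size or inode component.

import os
import tempfile

def stat_key(path):
    # Same shape as the new __mtime_cache entries: (mtime_ns, size, inode)
    st = os.stat(path)
    return (st.st_mtime_ns, st.st_size, st.st_ino)

fd, path = tempfile.mkstemp()
os.write(fd, b"a")
os.close(fd)
before = stat_key(path)

# Rewrite the file; even if the mtime happens not to advance, the size
# component of the key changes, so the cached entry no longer matches.
with open(path, "wb") as f:
    f.write(b"ab")
print(stat_key(path) == before)   # False
os.unlink(path)
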
@@ -111,12 +99,12 @@ def supports(fn, data):
|
||||
return 1
|
||||
return 0
|
||||
|
||||
def handle(fn, data, include=0, baseconfig=False):
|
||||
def handle(fn, data, include = 0):
|
||||
"""Call the handler that is appropriate for this file"""
|
||||
for h in handlers:
|
||||
if h['supports'](fn, data):
|
||||
with data.inchistory.include(fn):
|
||||
return h['handle'](fn, data, include, baseconfig)
|
||||
return h['handle'](fn, data, include)
|
||||
raise ParseError("not a BitBake file", fn)
|
||||
|
||||
def init(fn, data):
|
||||
|
||||
@@ -9,7 +9,6 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import sys
|
||||
import bb
|
||||
from bb import methodpool
|
||||
from bb.parse import logger
|
||||
@@ -211,12 +210,10 @@ class ExportFuncsNode(AstNode):
|
||||
|
||||
def eval(self, data):
|
||||
|
||||
sentinel = " # Export function set\n"
|
||||
for func in self.n:
|
||||
calledfunc = self.classname + "_" + func
|
||||
|
||||
basevar = data.getVar(func, False)
|
||||
if basevar and sentinel not in basevar:
|
||||
if data.getVar(func, False) and not data.getVarFlag(func, 'export_func', False):
|
||||
continue
|
||||
|
||||
if data.getVar(func, False):
|
||||
@@ -233,11 +230,12 @@ class ExportFuncsNode(AstNode):
|
||||
data.setVarFlag(func, "lineno", 1)
|
||||
|
||||
if data.getVarFlag(calledfunc, "python", False):
|
||||
data.setVar(func, sentinel + " bb.build.exec_func('" + calledfunc + "', d)\n", parsing=True)
|
||||
data.setVar(func, " bb.build.exec_func('" + calledfunc + "', d)\n", parsing=True)
|
||||
else:
|
||||
if "-" in self.classname:
|
||||
bb.fatal("The classname %s contains a dash character and is calling an sh function %s using EXPORT_FUNCTIONS. Since a dash is illegal in sh function names, this cannot work, please rename the class or don't use EXPORT_FUNCTIONS." % (self.classname, calledfunc))
|
||||
data.setVar(func, sentinel + " " + calledfunc + "\n", parsing=True)
|
||||
data.setVar(func, " " + calledfunc + "\n", parsing=True)
|
||||
data.setVarFlag(func, 'export_func', '1')
|
||||
|
||||
class AddTaskNode(AstNode):
|
||||
def __init__(self, filename, lineno, func, before, after):
|
||||
@@ -271,41 +269,6 @@ class BBHandlerNode(AstNode):
|
||||
data.setVarFlag(h, "handler", 1)
|
||||
data.setVar('__BBHANDLERS', bbhands)
|
||||
|
||||
class PyLibNode(AstNode):
|
||||
def __init__(self, filename, lineno, libdir, namespace):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
self.libdir = libdir
|
||||
self.namespace = namespace
|
||||
|
||||
def eval(self, data):
|
||||
global_mods = (data.getVar("BB_GLOBAL_PYMODULES") or "").split()
|
||||
for m in global_mods:
|
||||
if m not in bb.utils._context:
|
||||
bb.utils._context[m] = __import__(m)
|
||||
|
||||
libdir = data.expand(self.libdir)
|
||||
if libdir not in sys.path:
|
||||
sys.path.append(libdir)
|
||||
try:
|
||||
bb.utils._context[self.namespace] = __import__(self.namespace)
|
||||
toimport = getattr(bb.utils._context[self.namespace], "BBIMPORTS", [])
|
||||
for i in toimport:
|
||||
bb.utils._context[self.namespace] = __import__(self.namespace + "." + i)
|
||||
mod = getattr(bb.utils._context[self.namespace], i)
|
||||
fn = getattr(mod, "__file__")
|
||||
funcs = {}
|
||||
for f in dir(mod):
|
||||
if f.startswith("_"):
|
||||
continue
|
||||
fcall = getattr(mod, f)
|
||||
if not callable(fcall):
|
||||
continue
|
||||
funcs[f] = fcall
|
||||
bb.codeparser.add_module_functions(fn, funcs, "%s.%s" % (self.namespace, i))
|
||||
|
||||
except AttributeError as e:
|
||||
bb.error("Error importing OE modules: %s" % str(e))
|
||||
|
||||
class InheritNode(AstNode):
|
||||
def __init__(self, filename, lineno, classes):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
@@ -314,16 +277,6 @@ class InheritNode(AstNode):
|
||||
def eval(self, data):
|
||||
bb.parse.BBHandler.inherit(self.classes, self.filename, self.lineno, data)
|
||||
|
||||
class InheritDeferredNode(AstNode):
|
||||
def __init__(self, filename, lineno, classes):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
self.inherit = (classes, filename, lineno)
|
||||
|
||||
def eval(self, data):
|
||||
inherits = data.getVar('__BBDEFINHERITS', False) or []
|
||||
inherits.append(self.inherit)
|
||||
data.setVar('__BBDEFINHERITS', inherits)
|
||||
|
||||
def handleInclude(statements, filename, lineno, m, force):
|
||||
statements.append(IncludeNode(filename, lineno, m.group(1), force))
|
||||
|
||||
@@ -367,17 +320,10 @@ def handleDelTask(statements, filename, lineno, m):
|
||||
def handleBBHandlers(statements, filename, lineno, m):
|
||||
statements.append(BBHandlerNode(filename, lineno, m.group(1)))
|
||||
|
||||
def handlePyLib(statements, filename, lineno, m):
|
||||
statements.append(PyLibNode(filename, lineno, m.group(1), m.group(2)))
|
||||
|
||||
def handleInherit(statements, filename, lineno, m):
|
||||
classes = m.group(1)
|
||||
statements.append(InheritNode(filename, lineno, classes))
|
||||
|
||||
def handleInheritDeferred(statements, filename, lineno, m):
|
||||
classes = m.group(1)
|
||||
statements.append(InheritDeferredNode(filename, lineno, classes))
|
||||
|
||||
def runAnonFuncs(d):
|
||||
code = []
|
||||
for funcname in d.getVar("__BBANONFUNCS", False) or []:
|
||||
@@ -415,9 +361,6 @@ def finalize(fn, d, variant = None):
|
||||
|
||||
d.setVar('BBINCLUDED', bb.parse.get_file_depends(d))
|
||||
|
||||
if d.getVar('__BBAUTOREV_SEEN') and d.getVar('__BBSRCREV_SEEN') and not d.getVar("__BBAUTOREV_ACTED_UPON"):
|
||||
bb.fatal("AUTOREV/SRCPV set too late for the fetcher to work properly, please set the variables earlier in parsing. Erroring instead of later obtuse build failures.")
|
||||
|
||||
bb.event.fire(bb.event.RecipeParsed(fn), d)
|
||||
finally:
|
||||
bb.event.set_handlers(saved_handlers)
|
||||
@@ -444,14 +387,6 @@ def multi_finalize(fn, d):
|
||||
logger.debug("Appending .bbappend file %s to %s", append, fn)
|
||||
bb.parse.BBHandler.handle(append, d, True)
|
||||
|
||||
while True:
|
||||
inherits = d.getVar('__BBDEFINHERITS', False) or []
|
||||
if not inherits:
|
||||
break
|
||||
inherit, filename, lineno = inherits.pop(0)
|
||||
d.setVar('__BBDEFINHERITS', inherits)
|
||||
bb.parse.BBHandler.inherit(inherit, filename, lineno, d, deferred=True)
|
||||
|
||||
onlyfinalise = d.getVar("__ONLYFINALISE", False)
|
||||
|
||||
safe_d = d
|
||||
|
||||
@@ -21,7 +21,6 @@ from .ConfHandler import include, init
|
||||
|
||||
__func_start_regexp__ = re.compile(r"(((?P<py>python(?=(\s|\()))|(?P<fr>fakeroot(?=\s)))\s*)*(?P<func>[\w\.\-\+\{\}\$:]+)?\s*\(\s*\)\s*{$" )
|
||||
__inherit_regexp__ = re.compile(r"inherit\s+(.+)" )
|
||||
__inherit_def_regexp__ = re.compile(r"inherit_defer\s+(.+)" )
|
||||
__export_func_regexp__ = re.compile(r"EXPORT_FUNCTIONS\s+(.+)" )
|
||||
__addtask_regexp__ = re.compile(r"addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
|
||||
__deltask_regexp__ = re.compile(r"deltask\s+(.+)")
|
||||
@@ -34,7 +33,6 @@ __infunc__ = []
|
||||
__inpython__ = False
|
||||
__body__ = []
|
||||
__classname__ = ""
|
||||
__residue__ = []
|
||||
|
||||
cached_statements = {}
|
||||
|
||||
@@ -42,10 +40,8 @@ def supports(fn, d):
|
||||
"""Return True if fn has a supported extension"""
|
||||
return os.path.splitext(fn)[-1] in [".bb", ".bbclass", ".inc"]
|
||||
|
||||
def inherit(files, fn, lineno, d, deferred=False):
|
||||
def inherit(files, fn, lineno, d):
|
||||
__inherit_cache = d.getVar('__inherit_cache', False) or []
|
||||
#if "${" in files and not deferred:
|
||||
# bb.warn("%s:%s has non deferred conditional inherit" % (fn, lineno))
|
||||
files = d.expand(files).split()
|
||||
for file in files:
|
||||
classtype = d.getVar("__bbclasstype", False)
|
||||
@@ -81,7 +77,7 @@ def inherit(files, fn, lineno, d, deferred=False):
|
||||
__inherit_cache = d.getVar('__inherit_cache', False) or []
|
||||
|
||||
def get_statements(filename, absolute_filename, base_name):
|
||||
global cached_statements, __residue__, __body__
|
||||
global cached_statements
|
||||
|
||||
try:
|
||||
return cached_statements[absolute_filename]
|
||||
@@ -101,17 +97,12 @@ def get_statements(filename, absolute_filename, base_name):
|
||||
# add a blank line to close out any python definition
|
||||
feeder(lineno, "", filename, base_name, statements, eof=True)
|
||||
|
||||
if __residue__:
|
||||
raise ParseError("Unparsed lines %s: %s" % (filename, str(__residue__)), filename, lineno)
|
||||
if __body__:
|
||||
raise ParseError("Unparsed lines from unclosed function %s: %s" % (filename, str(__body__)), filename, lineno)
|
||||
|
||||
if filename.endswith(".bbclass") or filename.endswith(".inc"):
|
||||
cached_statements[absolute_filename] = statements
|
||||
return statements
|
||||
|
||||
def handle(fn, d, include, baseconfig=False):
|
||||
global __infunc__, __body__, __residue__, __classname__
|
||||
def handle(fn, d, include):
|
||||
global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __residue__, __classname__
|
||||
__body__ = []
|
||||
__infunc__ = []
|
||||
__classname__ = ""
|
||||
@@ -163,7 +154,7 @@ def handle(fn, d, include, baseconfig=False):
|
||||
return d
|
||||
|
||||
def feeder(lineno, s, fn, root, statements, eof=False):
|
||||
global __inpython__, __infunc__, __body__, __residue__, __classname__
|
||||
global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__, __infunc__, __body__, bb, __residue__, __classname__
|
||||
|
||||
# Check tabs in python functions:
|
||||
# - def py_funcname(): covered by __inpython__
|
||||
@@ -274,12 +265,7 @@ def feeder(lineno, s, fn, root, statements, eof=False):
|
||||
ast.handleInherit(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
m = __inherit_def_regexp__.match(s)
|
||||
if m:
|
||||
ast.handleInheritDeferred(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
return ConfHandler.feeder(lineno, s, fn, statements, conffile=False)
|
||||
return ConfHandler.feeder(lineno, s, fn, statements)
|
||||
|
||||
# Add us to the handlers list
|
||||
from .. import handlers
|
||||
|
||||
@@ -21,7 +21,7 @@ __config_regexp__ = re.compile( r"""
|
||||
^
|
||||
(?P<exp>export\s+)?
|
||||
(?P<var>[a-zA-Z0-9\-_+.${}/~:]+?)
|
||||
(\[(?P<flag>[a-zA-Z0-9\-_+.][a-zA-Z0-9\-_+.@]*)\])?
|
||||
(\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?
|
||||
|
||||
\s* (
|
||||
(?P<colon>:=) |
|
||||
@@ -45,8 +45,7 @@ __include_regexp__ = re.compile( r"include\s+(.+)" )
|
||||
__require_regexp__ = re.compile( r"require\s+(.+)" )
|
||||
__export_regexp__ = re.compile( r"export\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
|
||||
__unset_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
|
||||
__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)\[([a-zA-Z0-9\-_+.][a-zA-Z0-9\-_+.@]+)\]$" )
|
||||
__addpylib_regexp__ = re.compile(r"addpylib\s+(.+)\s+(.+)" )
|
||||
__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)\[([a-zA-Z0-9\-_+.]+)\]$" )
|
||||
|
||||
def init(data):
|
||||
return
|
||||
@@ -103,12 +102,12 @@ def include_single_file(parentfn, fn, lineno, data, error_out):
|
||||
# We have an issue where a UI might want to enforce particular settings such as
|
||||
# an empty DISTRO variable. If configuration files do something like assigning
|
||||
# a weak default, it turns out to be very difficult to filter out these changes,
|
||||
# particularly when the weak default might appear half way though parsing a chain
|
||||
# particularly when the weak default might appear half way though parsing a chain
|
||||
# of configuration files. We therefore let the UIs hook into configuration file
|
||||
# parsing. This turns out to be a hard problem to solve any other way.
|
||||
confFilters = []
|
||||
|
||||
def handle(fn, data, include, baseconfig=False):
|
||||
def handle(fn, data, include):
|
||||
init(data)
|
||||
|
||||
if include == 0:
|
||||
@@ -145,7 +144,7 @@ def handle(fn, data, include, baseconfig=False):
|
||||
# skip comments
|
||||
if s[0] == '#':
|
||||
continue
|
||||
feeder(lineno, s, abs_fn, statements, baseconfig=baseconfig)
|
||||
feeder(lineno, s, abs_fn, statements)
|
||||
|
||||
# DONE WITH PARSING... time to evaluate
|
||||
data.setVar('FILE', abs_fn)
|
||||
@@ -158,9 +157,7 @@ def handle(fn, data, include, baseconfig=False):
|
||||
|
||||
return data
|
||||
|
||||
# baseconfig is set for the bblayers/layer.conf cookerdata config parsing
|
||||
# The function is also used by BBHandler, conffile would be False
|
||||
def feeder(lineno, s, fn, statements, baseconfig=False, conffile=True):
|
||||
def feeder(lineno, s, fn, statements):
|
||||
m = __config_regexp__.match(s)
|
||||
if m:
|
||||
groupd = m.groupdict()
|
||||
@@ -192,11 +189,6 @@ def feeder(lineno, s, fn, statements, baseconfig=False, conffile=True):
|
||||
ast.handleUnsetFlag(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
m = __addpylib_regexp__.match(s)
|
||||
if baseconfig and conffile and m:
|
||||
ast.handlePyLib(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
raise ParseError("unparsed line: '%s'" % s, fn, lineno);
|
||||
|
||||
# Add us to the handlers list
|
||||
|
||||
@@ -249,23 +249,4 @@ def persist(domain, d):
|
||||
|
||||
bb.utils.mkdirhier(cachedir)
|
||||
cachefile = os.path.join(cachedir, "bb_persist_data.sqlite3")
|
||||
|
||||
try:
|
||||
return SQLTable(cachefile, domain)
|
||||
except sqlite3.OperationalError:
|
||||
# Sqlite fails to open database when its path is too long.
|
||||
# After testing, 504 is the biggest path length that can be opened by
|
||||
# sqlite.
|
||||
# Note: This code is called before sanity.bbclass and its path length
|
||||
# check
|
||||
max_len = 504
|
||||
if len(cachefile) > max_len:
|
||||
logger.critical("The path of the cache file is too long "
|
||||
"({0} chars > {1}) to be opened by sqlite! "
|
||||
"Your cache file is \"{2}\"".format(
|
||||
len(cachefile),
|
||||
max_len,
|
||||
cachefile))
|
||||
sys.exit(1)
|
||||
else:
|
||||
raise
|
||||
return SQLTable(cachefile, domain)
|
File diff suppressed because it is too large
@@ -28,7 +28,6 @@ import datetime
|
||||
import pickle
|
||||
import traceback
|
||||
import gc
|
||||
import stat
|
||||
import bb.server.xmlrpcserver
|
||||
from bb import daemonize
|
||||
from multiprocessing import queues
|
||||
@@ -38,46 +37,9 @@ logger = logging.getLogger('BitBake')
|
||||
class ProcessTimeout(SystemExit):
|
||||
pass
|
||||
|
||||
def currenttime():
|
||||
return datetime.datetime.now().strftime('%H:%M:%S.%f')
|
||||
|
||||
def serverlog(msg):
|
||||
print(str(os.getpid()) + " " + currenttime() + " " + msg)
#Seems a flush here triggers filesystem sync like behaviour and long hangs in the server
#sys.stdout.flush()
|
||||
|
||||
#
# When we have lockfile issues, try and find information about which process is
# using the lockfile
|
||||
#
|
||||
def get_lockfile_process_msg(lockfile):
|
||||
# Some systems may not have lsof available
|
||||
procs = None
|
||||
try:
|
||||
procs = subprocess.check_output(["lsof", '-w', lockfile], stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError:
|
||||
# File was deleted?
|
||||
pass
|
||||
except OSError as e:
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
if procs is None:
|
||||
# Fall back to fuser if lsof is unavailable
|
||||
try:
|
||||
procs = subprocess.check_output(["fuser", '-v', lockfile], stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError:
|
||||
# File was deleted?
|
||||
pass
|
||||
except OSError as e:
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
if procs:
|
||||
return procs.decode("utf-8")
|
||||
return None
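
A rough usage sketch for the helper added above; the lockfile path is made up, and it assumes lsof or fuser is installed and that this runs in a BitBake environment where bb.server.process is importable (bin/bitbake itself calls it this way elsewhere in this diff).

from bb.server.process import get_lockfile_process_msg

msg = get_lockfile_process_msg("/path/to/builddir/bitbake.lock")  # hypothetical path
if msg:
    print("Processes holding bitbake.lock:\n" + msg)
else:
    print("No holder reported (file gone, or lsof/fuser unavailable)")
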
class idleFinish():
|
||||
def __init__(self, msg):
|
||||
self.msg = msg
|
||||
print(str(os.getpid()) + " " + datetime.datetime.now().strftime('%H:%M:%S.%f') + " " + msg)
|
||||
sys.stdout.flush()
|
||||
|
||||
class ProcessServer():
|
||||
profile_filename = "profile.log"
|
||||
@@ -96,19 +58,12 @@ class ProcessServer():
|
||||
self.maxuiwait = 30
|
||||
self.xmlrpc = False
|
||||
|
||||
self.idle = None
|
||||
# Need a lock for _idlefuns changes
|
||||
self._idlefuns = {}
|
||||
self._idlefuncsLock = threading.Lock()
|
||||
self.idle_cond = threading.Condition(self._idlefuncsLock)
|
||||
|
||||
self.bitbake_lock = lock
|
||||
self.bitbake_lock_name = lockname
|
||||
self.sock = sock
|
||||
self.sockname = sockname
|
||||
# It is possible the directory may be renamed. Cache the inode of the socket file
|
||||
# so we can tell if things changed.
|
||||
self.sockinode = os.stat(self.sockname)[stat.ST_INO]
|
||||
|
||||
self.server_timeout = server_timeout
|
||||
self.timeout = self.server_timeout
|
||||
@@ -117,9 +72,7 @@ class ProcessServer():
|
||||
def register_idle_function(self, function, data):
|
||||
"""Register a function to be called while the server is idle"""
|
||||
assert hasattr(function, '__call__')
|
||||
with bb.utils.lock_timeout(self._idlefuncsLock):
|
||||
self._idlefuns[function] = data
|
||||
serverlog("Registering idle function %s" % str(function))
|
||||
self._idlefuns[function] = data
|
||||
|
||||
def run(self):
|
||||
|
||||
@@ -158,31 +111,6 @@ class ProcessServer():
|
||||
|
||||
return ret
|
||||
|
||||
def _idle_check(self):
|
||||
return len(self._idlefuns) == 0 and self.cooker.command.currentAsyncCommand is None
|
||||
|
||||
def wait_for_idle(self, timeout=30):
|
||||
# Wait for the idle loop to have cleared
|
||||
with bb.utils.lock_timeout(self._idlefuncsLock):
|
||||
return self.idle_cond.wait_for(self._idle_check, timeout) is not False
|
||||
|
||||
def set_async_cmd(self, cmd):
|
||||
with bb.utils.lock_timeout(self._idlefuncsLock):
|
||||
ret = self.idle_cond.wait_for(self._idle_check, 30)
|
||||
if ret is False:
|
||||
return False
|
||||
self.cooker.command.currentAsyncCommand = cmd
|
||||
return True
|
||||
|
||||
def clear_async_cmd(self):
|
||||
with bb.utils.lock_timeout(self._idlefuncsLock):
|
||||
self.cooker.command.currentAsyncCommand = None
|
||||
self.idle_cond.notify_all()
|
||||
|
||||
def get_async_cmd(self):
|
||||
with bb.utils.lock_timeout(self._idlefuncsLock):
|
||||
return self.cooker.command.currentAsyncCommand
|
||||
|
||||
def main(self):
|
||||
self.cooker.pre_serve()
|
||||
|
||||
@@ -197,19 +125,14 @@ class ProcessServer():
|
||||
fds.append(self.xmlrpc)
|
||||
seendata = False
|
||||
serverlog("Entering server connection loop")
|
||||
serverlog("Lockfile is: %s\nSocket is %s (%s)" % (self.bitbake_lock_name, self.sockname, os.path.exists(self.sockname)))
|
||||
|
||||
def disconnect_client(self, fds):
|
||||
serverlog("Disconnecting Client (socket: %s)" % os.path.exists(self.sockname))
|
||||
serverlog("Disconnecting Client")
|
||||
if self.controllersock:
|
||||
fds.remove(self.controllersock)
|
||||
self.controllersock.close()
|
||||
self.controllersock = False
|
||||
if self.haveui:
|
||||
# Wait for the idle loop to have cleared (30s max)
|
||||
if not self.wait_for_idle(30):
|
||||
serverlog("Idle loop didn't finish queued commands after 30s, exiting.")
|
||||
self.quit = True
|
||||
fds.remove(self.command_channel)
|
||||
bb.event.unregister_UIHhandler(self.event_handle, True)
|
||||
self.command_channel_reply.writer.close()
|
||||
@@ -221,7 +144,7 @@ class ProcessServer():
|
||||
self.cooker.clientComplete()
|
||||
self.haveui = False
|
||||
ready = select.select(fds,[],[],0)[0]
|
||||
if newconnections and not self.quit:
|
||||
if newconnections:
|
||||
serverlog("Starting new client")
|
||||
conn = newconnections.pop(-1)
|
||||
fds.append(conn)
|
||||
@@ -293,10 +216,8 @@ class ProcessServer():
|
||||
continue
|
||||
try:
|
||||
serverlog("Running command %s" % command)
|
||||
reply = self.cooker.command.runCommand(command, self)
|
||||
serverlog("Sending reply %s" % repr(reply))
|
||||
self.command_channel_reply.send(reply)
|
||||
serverlog("Command Completed (socket: %s)" % os.path.exists(self.sockname))
|
||||
self.command_channel_reply.send(self.cooker.command.runCommand(command))
|
||||
serverlog("Command Completed")
|
||||
except Exception as e:
|
||||
stack = traceback.format_exc()
|
||||
serverlog('Exception in server main event loop running command %s (%s)' % (command, stack))
|
||||
@@ -323,25 +244,16 @@ class ProcessServer():
|
||||
|
||||
ready = self.idle_commands(.1, fds)
|
||||
|
||||
if self.idle:
|
||||
self.idle.join()
|
||||
|
||||
serverlog("Exiting (socket: %s)" % os.path.exists(self.sockname))
|
||||
serverlog("Exiting")
|
||||
# Remove the socket file so we don't get any more connections to avoid races
|
||||
# The build directory could have been renamed so if the file isn't the one we created
|
||||
# we shouldn't delete it.
|
||||
try:
|
||||
sockinode = os.stat(self.sockname)[stat.ST_INO]
|
||||
if sockinode == self.sockinode:
|
||||
os.unlink(self.sockname)
|
||||
else:
|
||||
serverlog("bitbake.sock inode mismatch (%s vs %s), not deleting." % (sockinode, self.sockinode))
|
||||
except Exception as err:
|
||||
serverlog("Removing socket file '%s' failed (%s)" % (self.sockname, err))
|
||||
os.unlink(self.sockname)
|
||||
except:
|
||||
pass
|
||||
self.sock.close()
|
||||
|
||||
try:
|
||||
self.cooker.shutdown(True, idle=False)
|
||||
self.cooker.shutdown(True)
|
||||
self.cooker.notifier.stop()
|
||||
self.cooker.confignotifier.stop()
|
||||
except:
|
||||
@@ -367,21 +279,20 @@ class ProcessServer():
|
||||
except FileNotFoundError:
|
||||
return None
|
||||
|
||||
lockcontents = get_lock_contents(lockfile)
|
||||
serverlog("Original lockfile contents: " + str(lockcontents))
|
||||
|
||||
lock.close()
|
||||
lock = None
|
||||
|
||||
while not lock:
|
||||
i = 0
|
||||
lock = None
|
||||
if not os.path.exists(os.path.basename(lockfile)):
|
||||
serverlog("Lockfile directory gone, exiting.")
|
||||
return
|
||||
|
||||
while not lock and i < 30:
|
||||
lock = bb.utils.lockfile(lockfile, shared=False, retry=False, block=False)
|
||||
if not lock:
|
||||
newlockcontents = get_lock_contents(lockfile)
|
||||
if not newlockcontents[0].startswith([f"{os.getpid()}\n", f"{os.getpid()} "]):
|
||||
if newlockcontents != lockcontents:
|
||||
# A new server was started, the lockfile contents changed, we can exit
|
||||
serverlog("Lockfile now contains different contents, exiting: " + str(newlockcontents))
|
||||
return
|
||||
@@ -395,108 +306,80 @@ class ProcessServer():
|
||||
return
|
||||
|
||||
if not lock:
|
||||
procs = get_lockfile_process_msg(lockfile)
|
||||
# Some systems may not have lsof available
|
||||
procs = None
|
||||
try:
|
||||
procs = subprocess.check_output(["lsof", '-w', lockfile], stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError:
|
||||
# File was deleted?
|
||||
continue
|
||||
except OSError as e:
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
if procs is None:
|
||||
# Fall back to fuser if lsof is unavailable
|
||||
try:
|
||||
procs = subprocess.check_output(["fuser", '-v', lockfile], stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError:
|
||||
# File was deleted?
|
||||
continue
|
||||
except OSError as e:
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
|
||||
msg = ["Delaying shutdown due to active processes which appear to be holding bitbake.lock"]
|
||||
if procs:
|
||||
msg.append(":\n%s" % procs)
|
||||
msg.append(":\n%s" % str(procs.decode("utf-8")))
|
||||
serverlog("".join(msg))
|
||||
|
||||
def idle_thread(self):
|
||||
if self.cooker.configuration.profile:
|
||||
try:
|
||||
import cProfile as profile
|
||||
except:
|
||||
import profile
|
||||
prof = profile.Profile()
|
||||
|
||||
ret = profile.Profile.runcall(prof, self.idle_thread_internal)
|
||||
|
||||
prof.dump_stats("profile-mainloop.log")
|
||||
bb.utils.process_profilelog("profile-mainloop.log")
|
||||
serverlog("Raw profiling information saved to profile-mainloop.log and processed statistics to profile-mainloop.log.processed")
|
||||
else:
|
||||
self.idle_thread_internal()
|
||||
|
||||
def idle_thread_internal(self):
|
||||
def remove_idle_func(function):
|
||||
with bb.utils.lock_timeout(self._idlefuncsLock):
|
||||
del self._idlefuns[function]
|
||||
self.idle_cond.notify_all()
|
||||
|
||||
while not self.quit:
|
||||
nextsleep = 0.1
|
||||
fds = []
|
||||
|
||||
with bb.utils.lock_timeout(self._idlefuncsLock):
|
||||
items = list(self._idlefuns.items())
|
||||
|
||||
for function, data in items:
|
||||
try:
|
||||
retval = function(self, data, False)
|
||||
if isinstance(retval, idleFinish):
|
||||
serverlog("Removing idle function %s at idleFinish" % str(function))
|
||||
remove_idle_func(function)
|
||||
self.cooker.command.finishAsyncCommand(retval.msg)
|
||||
nextsleep = None
|
||||
elif retval is False:
|
||||
serverlog("Removing idle function %s" % str(function))
|
||||
remove_idle_func(function)
|
||||
nextsleep = None
|
||||
elif retval is True:
|
||||
nextsleep = None
|
||||
elif isinstance(retval, float) and nextsleep:
|
||||
if (retval < nextsleep):
|
||||
nextsleep = retval
|
||||
elif nextsleep is None:
|
||||
continue
|
||||
else:
|
||||
fds = fds + retval
|
||||
except SystemExit:
|
||||
raise
|
||||
except Exception as exc:
|
||||
if not isinstance(exc, bb.BBHandledException):
|
||||
logger.exception('Running idle function')
|
||||
remove_idle_func(function)
|
||||
serverlog("Exception %s broke the idle_thread, exiting" % traceback.format_exc())
|
||||
self.quit = True
|
||||
|
||||
# Create new heartbeat event?
|
||||
now = time.time()
|
||||
if bb.event._heartbeat_enabled and now >= self.next_heartbeat:
|
||||
# We might have missed heartbeats. Just trigger once in
|
||||
# that case and continue after the usual delay.
|
||||
self.next_heartbeat += self.heartbeat_seconds
|
||||
if self.next_heartbeat <= now:
|
||||
self.next_heartbeat = now + self.heartbeat_seconds
|
||||
if hasattr(self.cooker, "data"):
|
||||
heartbeat = bb.event.HeartbeatEvent(now)
|
||||
try:
|
||||
bb.event.fire(heartbeat, self.cooker.data)
|
||||
except Exception as exc:
|
||||
if not isinstance(exc, bb.BBHandledException):
|
||||
logger.exception('Running heartbeat function')
|
||||
serverlog("Exception %s broke in idle_thread, exiting" % traceback.format_exc())
|
||||
self.quit = True
|
||||
if nextsleep and bb.event._heartbeat_enabled and now + nextsleep > self.next_heartbeat:
# Shorten timeout so that we wake up in time for
# the heartbeat.
|
||||
nextsleep = self.next_heartbeat - now
|
||||
|
||||
if nextsleep is not None:
|
||||
select.select(fds,[],[],nextsleep)[0]
|
||||
|
||||
def idle_commands(self, delay, fds=None):
|
||||
nextsleep = delay
|
||||
if not fds:
|
||||
fds = []
|
||||
|
||||
if not self.idle:
|
||||
self.idle = threading.Thread(target=self.idle_thread)
|
||||
self.idle.start()
|
||||
elif self.idle and not self.idle.is_alive():
|
||||
serverlog("Idle thread terminated, main thread exiting too")
|
||||
bb.error("Idle thread terminated, main thread exiting too")
|
||||
self.quit = True
|
||||
for function, data in list(self._idlefuns.items()):
|
||||
try:
|
||||
retval = function(self, data, False)
|
||||
if retval is False:
|
||||
del self._idlefuns[function]
|
||||
nextsleep = None
|
||||
elif retval is True:
|
||||
nextsleep = None
|
||||
elif isinstance(retval, float) and nextsleep:
|
||||
if (retval < nextsleep):
|
||||
nextsleep = retval
|
||||
elif nextsleep is None:
|
||||
continue
|
||||
else:
|
||||
fds = fds + retval
|
||||
except SystemExit:
|
||||
raise
|
||||
except Exception as exc:
|
||||
if not isinstance(exc, bb.BBHandledException):
|
||||
logger.exception('Running idle function')
|
||||
del self._idlefuns[function]
|
||||
self.quit = True
|
||||
|
||||
# Create new heartbeat event?
|
||||
now = time.time()
|
||||
if now >= self.next_heartbeat:
|
||||
# We might have missed heartbeats. Just trigger once in
|
||||
# that case and continue after the usual delay.
|
||||
self.next_heartbeat += self.heartbeat_seconds
|
||||
if self.next_heartbeat <= now:
|
||||
self.next_heartbeat = now + self.heartbeat_seconds
|
||||
if hasattr(self.cooker, "data"):
|
||||
heartbeat = bb.event.HeartbeatEvent(now)
|
||||
try:
|
||||
bb.event.fire(heartbeat, self.cooker.data)
|
||||
except Exception as exc:
|
||||
if not isinstance(exc, bb.BBHandledException):
|
||||
logger.exception('Running heartbeat function')
|
||||
self.quit = True
|
||||
if nextsleep and now + nextsleep > self.next_heartbeat:
# Shorten timeout so that we wake up in time for
# the heartbeat.
|
||||
nextsleep = self.next_heartbeat - now
|
||||
|
||||
if nextsleep is not None:
|
||||
if self.xmlrpc:
|
||||
@@ -516,18 +399,12 @@ class ServerCommunicator():
|
||||
self.recv = recv
|
||||
|
||||
def runCommand(self, command):
|
||||
try:
|
||||
self.connection.send(command)
|
||||
except BrokenPipeError as e:
|
||||
raise BrokenPipeError("bitbake-server might have died or been forcibly stopped, ie. OOM killed") from e
|
||||
self.connection.send(command)
|
||||
if not self.recv.poll(30):
|
||||
logger.info("No reply from server in 30s (for command %s at %s)" % (command[0], currenttime()))
|
||||
logger.info("No reply from server in 30s")
|
||||
if not self.recv.poll(30):
|
||||
raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server (60s at %s)" % currenttime())
|
||||
try:
|
||||
ret, exc = self.recv.get()
|
||||
except EOFError as e:
|
||||
raise EOFError("bitbake-server might have died or been forcibly stopped, ie. OOM killed") from e
|
||||
raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server (60s)")
|
||||
ret, exc = self.recv.get()
|
||||
# Should probably turn all exceptions in exc back into exceptions?
|
||||
# For now, at least handle BBHandledException
|
||||
if exc and ("BBHandledException" in exc or "SystemExit" in exc):
|
||||
@@ -571,14 +448,13 @@ start_log_datetime_format = '%Y-%m-%d %H:%M:%S.%f'
|
||||
|
||||
class BitBakeServer(object):
|
||||
|
||||
def __init__(self, lock, sockname, featureset, server_timeout, xmlrpcinterface, profile):
|
||||
def __init__(self, lock, sockname, featureset, server_timeout, xmlrpcinterface):
|
||||
|
||||
self.server_timeout = server_timeout
|
||||
self.xmlrpcinterface = xmlrpcinterface
|
||||
self.featureset = featureset
|
||||
self.sockname = sockname
|
||||
self.bitbake_lock = lock
|
||||
self.profile = profile
|
||||
self.readypipe, self.readypipein = os.pipe()
|
||||
|
||||
# Place the log in the builddirectory alongside the lock file
|
||||
@@ -642,9 +518,9 @@ class BitBakeServer(object):
|
||||
os.set_inheritable(self.bitbake_lock.fileno(), True)
|
||||
os.set_inheritable(self.readypipein, True)
|
||||
serverscript = os.path.realpath(os.path.dirname(__file__) + "/../../../bin/bitbake-server")
|
||||
os.execl(sys.executable, sys.executable, serverscript, "decafbad", str(self.bitbake_lock.fileno()), str(self.readypipein), self.logfile, self.bitbake_lock.name, self.sockname, str(self.server_timeout or 0), str(int(self.profile)), str(self.xmlrpcinterface[0]), str(self.xmlrpcinterface[1]))
|
||||
os.execl(sys.executable, "bitbake-server", serverscript, "decafbad", str(self.bitbake_lock.fileno()), str(self.readypipein), self.logfile, self.bitbake_lock.name, self.sockname, str(self.server_timeout or 0), str(self.xmlrpcinterface[0]), str(self.xmlrpcinterface[1]))
|
||||
|
||||
def execServer(lockfd, readypipeinfd, lockname, sockname, server_timeout, xmlrpcinterface, profile):
|
||||
def execServer(lockfd, readypipeinfd, lockname, sockname, server_timeout, xmlrpcinterface):
|
||||
|
||||
import bb.cookerdata
|
||||
import bb.cooker
|
||||
@@ -656,7 +532,6 @@ def execServer(lockfd, readypipeinfd, lockname, sockname, server_timeout, xmlrpc
|
||||
|
||||
# Create server control socket
|
||||
if os.path.exists(sockname):
|
||||
serverlog("WARNING: removing existing socket file '%s'" % sockname)
|
||||
os.unlink(sockname)
|
||||
|
||||
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
||||
@@ -673,8 +548,7 @@ def execServer(lockfd, readypipeinfd, lockname, sockname, server_timeout, xmlrpc
|
||||
writer = ConnectionWriter(readypipeinfd)
|
||||
try:
|
||||
featureset = []
|
||||
cooker = bb.cooker.BBCooker(featureset, server)
|
||||
cooker.configuration.profile = profile
|
||||
cooker = bb.cooker.BBCooker(featureset, server.register_idle_function)
|
||||
except bb.BBHandledException:
|
||||
return None
|
||||
writer.send("r")
|
||||
@@ -793,7 +667,7 @@ class BBUIEventQueue:
|
||||
self.t.start()
|
||||
|
||||
def getEvent(self):
|
||||
with bb.utils.lock_timeout(self.eventQueueLock):
|
||||
with self.eventQueueLock:
|
||||
if len(self.eventQueue) == 0:
|
||||
return None
|
||||
|
||||
@@ -808,7 +682,7 @@ class BBUIEventQueue:
|
||||
return self.getEvent()
|
||||
|
||||
def queue_event(self, event):
|
||||
with bb.utils.lock_timeout(self.eventQueueLock):
|
||||
with self.eventQueueLock:
|
||||
self.eventQueue.append(event)
|
||||
self.eventQueueNotify.set()
|
||||
|
||||
@@ -844,7 +718,7 @@ class ConnectionReader(object):
|
||||
return self.reader.poll(timeout)
|
||||
|
||||
def get(self):
|
||||
with bb.utils.lock_timeout(self.rlock):
|
||||
with self.rlock:
|
||||
res = self.reader.recv_bytes()
|
||||
return multiprocessing.reduction.ForkingPickler.loads(res)
|
||||
|
||||
@@ -865,7 +739,7 @@ class ConnectionWriter(object):
|
||||
|
||||
def _send(self, obj):
|
||||
gc.disable()
|
||||
with bb.utils.lock_timeout(self.wlock):
|
||||
with self.wlock:
|
||||
self.writer.send_bytes(obj)
|
||||
gc.enable()
|
||||
|
||||
@@ -878,14 +752,15 @@ class ConnectionWriter(object):
|
||||
# pthread_sigmask block/unblock would be nice but doesn't work, https://bugs.python.org/issue47139
|
||||
process = multiprocessing.current_process()
|
||||
if process and hasattr(process, "queue_signals"):
|
||||
with bb.utils.lock_timeout(process.signal_threadlock):
|
||||
with process.signal_threadlock:
|
||||
process.queue_signals = True
|
||||
self._send(obj)
|
||||
process.queue_signals = False
|
||||
|
||||
while len(process.signal_received) > 0:
|
||||
sig = process.signal_received.pop()
|
||||
process.handle_sig(sig, None)
|
||||
try:
|
||||
for sig in process.signal_received.pop():
|
||||
process.handle_sig(sig, None)
|
||||
except IndexError:
|
||||
pass
|
||||
else:
|
||||
self._send(obj)
|
||||
|
||||
|
||||
@@ -118,7 +118,7 @@ class BitBakeXMLRPCServerCommands():
|
||||
"""
|
||||
Run a cooker command on the server
|
||||
"""
|
||||
return self.server.cooker.command.runCommand(command, self.server.parent, self.server.readonly)
|
||||
return self.server.cooker.command.runCommand(command, self.server.readonly)
|
||||
|
||||
def getEventHandle(self):
|
||||
return self.event_handle
|
||||
|
||||
@@ -14,8 +14,6 @@ import bb.data
|
||||
import difflib
|
||||
import simplediff
|
||||
import json
|
||||
import types
|
||||
from contextlib import contextmanager
|
||||
import bb.compress.zstd
|
||||
from bb.checksum import FileChecksumCache
|
||||
from bb import runqueue
|
||||
@@ -25,33 +23,15 @@ import hashserv.client
|
||||
logger = logging.getLogger('BitBake.SigGen')
|
||||
hashequiv_logger = logging.getLogger('BitBake.SigGen.HashEquiv')
|
||||
|
||||
#find_siginfo and find_siginfo_version are set by the metadata siggen
|
||||
# The minimum version of the find_siginfo function we need
|
||||
find_siginfo_minversion = 2
|
||||
|
||||
HASHSERV_ENVVARS = [
|
||||
"SSL_CERT_DIR",
|
||||
"SSL_CERT_FILE",
|
||||
"NO_PROXY",
|
||||
"HTTPS_PROXY",
|
||||
"HTTP_PROXY"
|
||||
]
|
||||
|
||||
def check_siggen_version(siggen):
|
||||
if not hasattr(siggen, "find_siginfo_version"):
|
||||
bb.fatal("Siggen from metadata (OE-Core?) is too old, please update it (no version found)")
|
||||
if siggen.find_siginfo_version < siggen.find_siginfo_minversion:
|
||||
bb.fatal("Siggen from metadata (OE-Core?) is too old, please update it (%s vs %s)" % (siggen.find_siginfo_version, siggen.find_siginfo_minversion))
|
||||
|
||||
class SetEncoder(json.JSONEncoder):
|
||||
def default(self, obj):
|
||||
if isinstance(obj, set) or isinstance(obj, frozenset):
|
||||
if isinstance(obj, set):
|
||||
return dict(_set_object=list(sorted(obj)))
|
||||
return json.JSONEncoder.default(self, obj)
|
||||
|
||||
def SetDecoder(dct):
|
||||
if '_set_object' in dct:
|
||||
return frozenset(dct['_set_object'])
|
||||
return set(dct['_set_object'])
|
||||
return dct
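
A short round-trip sketch of how these two hooks are typically wired into the standard json API; nothing here goes beyond what the hunk above defines.

import json

payload = {"deps": {"openssl", "zlib"}}
text = json.dumps(payload, cls=SetEncoder)            # sets serialise as {"_set_object": [...]}
restored = json.loads(text, object_hook=SetDecoder)   # and come back as (frozen)sets
print(restored["deps"])
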
def init(d):
|
||||
@@ -73,6 +53,11 @@ class SignatureGenerator(object):
|
||||
"""
|
||||
name = "noop"
|
||||
|
||||
# If the derived class supports multiconfig datacaches, set this to True
|
||||
# The default is False for backward compatibility with derived signature
|
||||
# generators that do not understand multiconfig caches
|
||||
supports_multiconfig_datacaches = False
|
||||
|
||||
def __init__(self, data):
|
||||
self.basehash = {}
|
||||
self.taskhash = {}
|
||||
@@ -90,39 +75,9 @@ class SignatureGenerator(object):
|
||||
def postparsing_clean_cache(self):
|
||||
return
|
||||
|
||||
def setup_datacache(self, datacaches):
|
||||
self.datacaches = datacaches
|
||||
|
||||
def setup_datacache_from_datastore(self, mcfn, d):
|
||||
# In task context we have no cache so setup internal data structures
|
||||
# from the fully parsed data store provided
|
||||
|
||||
mc = d.getVar("__BBMULTICONFIG", False) or ""
|
||||
tasks = d.getVar('__BBTASKS', False)
|
||||
|
||||
self.datacaches = {}
|
||||
self.datacaches[mc] = types.SimpleNamespace()
|
||||
setattr(self.datacaches[mc], "stamp", {})
|
||||
self.datacaches[mc].stamp[mcfn] = d.getVar('STAMP')
|
||||
setattr(self.datacaches[mc], "stamp_extrainfo", {})
|
||||
self.datacaches[mc].stamp_extrainfo[mcfn] = {}
|
||||
for t in tasks:
|
||||
flag = d.getVarFlag(t, "stamp-extra-info")
|
||||
if flag:
|
||||
self.datacaches[mc].stamp_extrainfo[mcfn][t] = flag
|
||||
|
||||
def get_cached_unihash(self, tid):
|
||||
return None
|
||||
|
||||
def get_unihash(self, tid):
|
||||
unihash = self.get_cached_unihash(tid)
|
||||
if unihash:
|
||||
return unihash
|
||||
return self.taskhash[tid]
|
||||
|
||||
def get_unihashes(self, tids):
|
||||
return {tid: self.get_unihash(tid) for tid in tids}
|
||||
|
||||
def prep_taskhash(self, tid, deps, dataCaches):
|
||||
return
|
||||
|
||||
@@ -134,51 +89,17 @@ class SignatureGenerator(object):
|
||||
"""Write/update the file checksum cache onto disk"""
|
||||
return
|
||||
|
||||
def stampfile_base(self, mcfn):
|
||||
mc = bb.runqueue.mc_from_tid(mcfn)
|
||||
return self.datacaches[mc].stamp[mcfn]
|
||||
|
||||
def stampfile_mcfn(self, taskname, mcfn, extrainfo=True):
|
||||
mc = bb.runqueue.mc_from_tid(mcfn)
|
||||
stamp = self.datacaches[mc].stamp[mcfn]
|
||||
if not stamp:
|
||||
return
|
||||
|
||||
stamp_extrainfo = ""
|
||||
if extrainfo:
|
||||
taskflagname = taskname
|
||||
if taskname.endswith("_setscene"):
|
||||
taskflagname = taskname.replace("_setscene", "")
|
||||
stamp_extrainfo = self.datacaches[mc].stamp_extrainfo[mcfn].get(taskflagname) or ""
|
||||
|
||||
return self.stampfile(stamp, mcfn, taskname, stamp_extrainfo)
|
||||
|
||||
def stampfile(self, stampbase, file_name, taskname, extrainfo):
|
||||
return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.')
|
||||
|
||||
def stampcleanmask_mcfn(self, taskname, mcfn):
|
||||
mc = bb.runqueue.mc_from_tid(mcfn)
|
||||
stamp = self.datacaches[mc].stamp[mcfn]
|
||||
if not stamp:
|
||||
return []
|
||||
|
||||
taskflagname = taskname
|
||||
if taskname.endswith("_setscene"):
|
||||
taskflagname = taskname.replace("_setscene", "")
|
||||
stamp_extrainfo = self.datacaches[mc].stamp_extrainfo[mcfn].get(taskflagname) or ""
|
||||
|
||||
return self.stampcleanmask(stamp, mcfn, taskname, stamp_extrainfo)
|
||||
|
||||
def stampcleanmask(self, stampbase, file_name, taskname, extrainfo):
|
||||
return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.')
|
||||
|
||||
def dump_sigtask(self, mcfn, task, stampbase, runtime):
|
||||
def dump_sigtask(self, fn, task, stampbase, runtime):
|
||||
return
|
||||
|
||||
def invalidate_task(self, task, mcfn):
|
||||
mc = bb.runqueue.mc_from_tid(mcfn)
|
||||
stamp = self.datacaches[mc].stamp[mcfn]
|
||||
bb.utils.remove(stamp)
|
||||
def invalidate_task(self, task, d, fn):
|
||||
bb.build.del_stamp(task, d, fn)
|
||||
|
||||
def dump_sigs(self, dataCache, options):
|
||||
return
|
||||
@@ -207,14 +128,41 @@ class SignatureGenerator(object):
|
||||
def set_setscene_tasks(self, setscene_tasks):
|
||||
return
|
||||
|
||||
@classmethod
|
||||
def get_data_caches(cls, dataCaches, mc):
|
||||
"""
|
||||
This function returns the datacaches that should be passed to signature
|
||||
generator functions. If the signature generator supports multiconfig
|
||||
caches, the entire dictionary of data caches is sent, otherwise a
special proxy is sent that supports both index access to all
multiconfigs, and also direct access for the default multiconfig.
|
||||
|
||||
The proxy class allows code in this class itself to always use
|
||||
multiconfig aware code (to ease maintenance), but derived classes that
|
||||
are unaware of multiconfig data caches can still access the default
|
||||
multiconfig as expected.
|
||||
|
||||
Do not override this function in derived classes; it will be removed in
|
||||
the future when support for multiconfig data caches is mandatory
|
||||
"""
|
||||
class DataCacheProxy(object):
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def __getitem__(self, key):
|
||||
return dataCaches[key]
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(dataCaches[mc], name)
|
||||
|
||||
if cls.supports_multiconfig_datacaches:
|
||||
return dataCaches
|
||||
|
||||
return DataCacheProxy()
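
To make the docstring above concrete, a small sketch with made-up multiconfig names and a toy stand-in for the real data caches (only the attribute used here is populated, and a BitBake environment is assumed for the import).

import types
from bb.siggen import SignatureGenerator

dataCaches = {
    "": types.SimpleNamespace(pkg_fn={"/recipes/zlib.bb": "zlib"}),
    "mymc": types.SimpleNamespace(pkg_fn={"/recipes/zlib.bb": "zlib"}),
}
caches = SignatureGenerator.get_data_caches(dataCaches, "")
print(caches.pkg_fn["/recipes/zlib.bb"])          # default multiconfig via attribute access
print(caches["mymc"].pkg_fn["/recipes/zlib.bb"])  # explicit multiconfig via index access
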
def exit(self):
|
||||
return
|
||||
|
||||
def build_pnid(mc, pn, taskname):
|
||||
if mc:
|
||||
return "mc:" + mc + ":" + pn + ":" + taskname
|
||||
return pn + ":" + taskname
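
For reference, the pnid strings this helper produces for a couple of made-up inputs:

print(build_pnid("", "zlib", "do_compile"))      # zlib:do_compile
print(build_pnid("mymc", "zlib", "do_compile"))  # mc:mymc:zlib:do_compile
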
class SignatureGeneratorBasic(SignatureGenerator):
|
||||
"""
|
||||
"""
|
||||
@@ -224,9 +172,12 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
self.basehash = {}
|
||||
self.taskhash = {}
|
||||
self.unihash = {}
|
||||
self.taskdeps = {}
|
||||
self.runtaskdeps = {}
|
||||
self.file_checksum_values = {}
|
||||
self.taints = {}
|
||||
self.gendeps = {}
|
||||
self.lookupcache = {}
|
||||
self.setscenetasks = set()
|
||||
self.basehash_ignore_vars = set((data.getVar("BB_BASEHASH_IGNORE_VARS") or "").split())
|
||||
self.taskhash_ignore_tasks = None
|
||||
@@ -250,15 +201,15 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
else:
|
||||
self.twl = None
|
||||
|
||||
def _build_data(self, mcfn, d):
|
||||
def _build_data(self, fn, d):
|
||||
|
||||
ignore_mismatch = ((d.getVar("BB_HASH_IGNORE_MISMATCH") or '') == '1')
|
||||
tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d, self.basehash_ignore_vars)
|
||||
|
||||
taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, self.basehash_ignore_vars, mcfn)
|
||||
taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, self.basehash_ignore_vars, fn)
|
||||
|
||||
for task in tasklist:
|
||||
tid = mcfn + ":" + task
|
||||
tid = fn + ":" + task
|
||||
if not ignore_mismatch and tid in self.basehash and self.basehash[tid] != basehash[tid]:
|
||||
bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (tid, self.basehash[tid], basehash[tid]))
|
||||
bb.error("The following commands may help:")
|
||||
@@ -269,7 +220,11 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
bb.error("%s -Sprintdiff\n" % cmd)
|
||||
self.basehash[tid] = basehash[tid]
|
||||
|
||||
return taskdeps, gendeps, lookupcache
|
||||
self.taskdeps[fn] = taskdeps
|
||||
self.gendeps[fn] = gendeps
|
||||
self.lookupcache[fn] = lookupcache
|
||||
|
||||
return taskdeps
|
||||
|
||||
def set_setscene_tasks(self, setscene_tasks):
|
||||
self.setscenetasks = set(setscene_tasks)
|
||||
@@ -277,42 +232,31 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
def finalise(self, fn, d, variant):
|
||||
|
||||
mc = d.getVar("__BBMULTICONFIG", False) or ""
|
||||
mcfn = fn
|
||||
if variant or mc:
|
||||
mcfn = bb.cache.realfn2virtual(fn, variant, mc)
|
||||
fn = bb.cache.realfn2virtual(fn, variant, mc)
|
||||
|
||||
try:
|
||||
taskdeps, gendeps, lookupcache = self._build_data(mcfn, d)
|
||||
taskdeps = self._build_data(fn, d)
|
||||
except bb.parse.SkipRecipe:
|
||||
raise
|
||||
except:
|
||||
bb.warn("Error during finalise of %s" % mcfn)
|
||||
bb.warn("Error during finalise of %s" % fn)
|
||||
raise
|
||||
|
||||
basehashes = {}
|
||||
for task in taskdeps:
|
||||
basehashes[task] = self.basehash[mcfn + ":" + task]
|
||||
|
||||
d.setVar("__siggen_basehashes", basehashes)
|
||||
d.setVar("__siggen_gendeps", gendeps)
|
||||
d.setVar("__siggen_varvals", lookupcache)
|
||||
d.setVar("__siggen_taskdeps", taskdeps)
|
||||
|
||||
#Slow but can be useful for debugging mismatched basehashes
|
||||
#self.setup_datacache_from_datastore(mcfn, d)
|
||||
#for task in taskdeps:
|
||||
# self.dump_sigtask(mcfn, task, d.getVar("STAMP"), False)
|
||||
#for task in self.taskdeps[fn]:
|
||||
# self.dump_sigtask(fn, task, d.getVar("STAMP"), False)
|
||||
|
||||
def setup_datacache_from_datastore(self, mcfn, d):
|
||||
super().setup_datacache_from_datastore(mcfn, d)
|
||||
for task in taskdeps:
|
||||
d.setVar("BB_BASEHASH:task-%s" % task, self.basehash[fn + ":" + task])
|
||||
|
||||
mc = bb.runqueue.mc_from_tid(mcfn)
|
||||
for attr in ["siggen_varvals", "siggen_taskdeps", "siggen_gendeps"]:
|
||||
if not hasattr(self.datacaches[mc], attr):
|
||||
setattr(self.datacaches[mc], attr, {})
|
||||
self.datacaches[mc].siggen_varvals[mcfn] = d.getVar("__siggen_varvals")
|
||||
self.datacaches[mc].siggen_taskdeps[mcfn] = d.getVar("__siggen_taskdeps")
|
||||
self.datacaches[mc].siggen_gendeps[mcfn] = d.getVar("__siggen_gendeps")
|
||||
def postparsing_clean_cache(self):
|
||||
#
|
||||
# After parsing we can remove some things from memory to reduce our memory footprint
|
||||
#
|
||||
self.gendeps = {}
|
||||
self.lookupcache = {}
|
||||
self.taskdeps = {}
|
||||
|
||||
def rundep_check(self, fn, recipename, task, dep, depname, dataCaches):
|
||||
# Return True if we should keep the dependency, False to drop it
|
||||
@@ -335,37 +279,38 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
|
||||
def prep_taskhash(self, tid, deps, dataCaches):
|
||||
|
||||
(mc, _, task, mcfn) = bb.runqueue.split_tid_mcfn(tid)
|
||||
(mc, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
|
||||
|
||||
self.basehash[tid] = dataCaches[mc].basetaskhash[tid]
|
||||
self.runtaskdeps[tid] = []
|
||||
self.file_checksum_values[tid] = []
|
||||
recipename = dataCaches[mc].pkg_fn[mcfn]
|
||||
recipename = dataCaches[mc].pkg_fn[fn]
|
||||
|
||||
self.tidtopn[tid] = recipename
|
||||
# save hashfn for deps into siginfo?
|
||||
for dep in deps:
|
||||
(depmc, _, deptask, depmcfn) = bb.runqueue.split_tid_mcfn(dep)
|
||||
dep_pn = dataCaches[depmc].pkg_fn[depmcfn]
|
||||
|
||||
if not self.rundep_check(mcfn, recipename, task, dep, dep_pn, dataCaches):
|
||||
for dep in sorted(deps, key=clean_basepath):
|
||||
(depmc, _, _, depmcfn) = bb.runqueue.split_tid_mcfn(dep)
|
||||
depname = dataCaches[depmc].pkg_fn[depmcfn]
|
||||
if not self.supports_multiconfig_datacaches and mc != depmc:
|
||||
# If the signature generator doesn't understand multiconfig
|
||||
# data caches, any dependency not in the same multiconfig must
|
||||
# be skipped for backward compatibility
|
||||
continue
|
||||
if not self.rundep_check(fn, recipename, task, dep, depname, dataCaches):
|
||||
continue
|
||||
|
||||
if dep not in self.taskhash:
|
||||
bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?" % dep)
|
||||
self.runtaskdeps[tid].append(dep)
|
||||
|
||||
dep_pnid = build_pnid(depmc, dep_pn, deptask)
|
||||
self.runtaskdeps[tid].append((dep_pnid, dep))
|
||||
|
||||
if task in dataCaches[mc].file_checksums[mcfn]:
|
||||
if task in dataCaches[mc].file_checksums[fn]:
|
||||
if self.checksum_cache:
|
||||
checksums = self.checksum_cache.get_checksums(dataCaches[mc].file_checksums[mcfn][task], recipename, self.localdirsexclude)
|
||||
checksums = self.checksum_cache.get_checksums(dataCaches[mc].file_checksums[fn][task], recipename, self.localdirsexclude)
|
||||
else:
|
||||
checksums = bb.fetch2.get_file_checksums(dataCaches[mc].file_checksums[mcfn][task], recipename, self.localdirsexclude)
|
||||
checksums = bb.fetch2.get_file_checksums(dataCaches[mc].file_checksums[fn][task], recipename, self.localdirsexclude)
|
||||
for (f,cs) in checksums:
|
||||
self.file_checksum_values[tid].append((f,cs))
|
||||
|
||||
taskdep = dataCaches[mc].task_deps[mcfn]
|
||||
taskdep = dataCaches[mc].task_deps[fn]
|
||||
if 'nostamp' in taskdep and task in taskdep['nostamp']:
|
||||
# Nostamp tasks need an implicit taint so that they force any dependent tasks to run
|
||||
if tid in self.taints and self.taints[tid].startswith("nostamp:"):
|
||||
@@ -376,30 +321,30 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
taint = str(uuid.uuid4())
|
||||
self.taints[tid] = "nostamp:" + taint
|
||||
|
||||
taint = self.read_taint(mcfn, task, dataCaches[mc].stamp[mcfn])
|
||||
taint = self.read_taint(fn, task, dataCaches[mc].stamp[fn])
|
||||
if taint:
|
||||
self.taints[tid] = taint
|
||||
logger.warning("%s is tainted from a forced run" % tid)
|
||||
|
||||
return set(dep for _, dep in self.runtaskdeps[tid])
|
||||
return
|
||||
|
||||
def get_taskhash(self, tid, deps, dataCaches):
|
||||
|
||||
data = self.basehash[tid]
|
||||
for dep in sorted(self.runtaskdeps[tid]):
|
||||
data += self.get_unihash(dep[1])
|
||||
for dep in self.runtaskdeps[tid]:
|
||||
data = data + self.get_unihash(dep)
|
||||
|
||||
for (f, cs) in sorted(self.file_checksum_values[tid], key=clean_checksum_file_path):
|
||||
for (f, cs) in self.file_checksum_values[tid]:
|
||||
if cs:
|
||||
if "/./" in f:
|
||||
data += "./" + f.split("/./")[1]
|
||||
data += cs
|
||||
data = data + "./" + f.split("/./")[1]
|
||||
data = data + cs
|
||||
|
||||
if tid in self.taints:
|
||||
if self.taints[tid].startswith("nostamp:"):
|
||||
data += self.taints[tid][8:]
|
||||
data = data + self.taints[tid][8:]
|
||||
else:
|
||||
data += self.taints[tid]
|
||||
data = data + self.taints[tid]
|
||||
|
||||
h = hashlib.sha256(data.encode("utf-8")).hexdigest()
|
||||
self.taskhash[tid] = h
|
||||
@@ -421,9 +366,9 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
def copy_unitaskhashes(self, targetdir):
|
||||
self.unihash_cache.copyfile(targetdir)
|
||||
|
||||
def dump_sigtask(self, mcfn, task, stampbase, runtime):
|
||||
tid = mcfn + ":" + task
|
||||
mc = bb.runqueue.mc_from_tid(mcfn)
|
||||
def dump_sigtask(self, fn, task, stampbase, runtime):
|
||||
|
||||
tid = fn + ":" + task
|
||||
referencestamp = stampbase
|
||||
if isinstance(runtime, str) and runtime.startswith("customfile"):
|
||||
sigfile = stampbase
|
||||
@@ -440,32 +385,32 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
data['task'] = task
|
||||
data['basehash_ignore_vars'] = self.basehash_ignore_vars
|
||||
data['taskhash_ignore_tasks'] = self.taskhash_ignore_tasks
|
||||
data['taskdeps'] = self.datacaches[mc].siggen_taskdeps[mcfn][task]
|
||||
data['taskdeps'] = self.taskdeps[fn][task]
|
||||
data['basehash'] = self.basehash[tid]
|
||||
data['gendeps'] = {}
|
||||
data['varvals'] = {}
|
||||
data['varvals'][task] = self.datacaches[mc].siggen_varvals[mcfn][task]
|
||||
for dep in self.datacaches[mc].siggen_taskdeps[mcfn][task]:
|
||||
data['varvals'][task] = self.lookupcache[fn][task]
|
||||
for dep in self.taskdeps[fn][task]:
|
||||
if dep in self.basehash_ignore_vars:
|
||||
continue
|
||||
data['gendeps'][dep] = self.datacaches[mc].siggen_gendeps[mcfn][dep]
|
||||
data['varvals'][dep] = self.datacaches[mc].siggen_varvals[mcfn][dep]
|
||||
data['gendeps'][dep] = self.gendeps[fn][dep]
|
||||
data['varvals'][dep] = self.lookupcache[fn][dep]
|
||||
|
||||
if runtime and tid in self.taskhash:
|
||||
data['runtaskdeps'] = [dep[0] for dep in sorted(self.runtaskdeps[tid])]
|
||||
data['runtaskdeps'] = self.runtaskdeps[tid]
|
||||
data['file_checksum_values'] = []
|
||||
for f,cs in sorted(self.file_checksum_values[tid], key=clean_checksum_file_path):
|
||||
for f,cs in self.file_checksum_values[tid]:
|
||||
if "/./" in f:
|
||||
data['file_checksum_values'].append(("./" + f.split("/./")[1], cs))
|
||||
else:
|
||||
data['file_checksum_values'].append((os.path.basename(f), cs))
|
||||
data['runtaskhashes'] = {}
|
||||
for dep in self.runtaskdeps[tid]:
|
||||
data['runtaskhashes'][dep[0]] = self.get_unihash(dep[1])
|
||||
for dep in data['runtaskdeps']:
|
||||
data['runtaskhashes'][dep] = self.get_unihash(dep)
|
||||
data['taskhash'] = self.taskhash[tid]
|
||||
data['unihash'] = self.get_unihash(tid)
|
||||
|
||||
taint = self.read_taint(mcfn, task, referencestamp)
|
||||
taint = self.read_taint(fn, task, referencestamp)
|
||||
if taint:
|
||||
data['taint'] = taint
|
||||
|
||||
@@ -496,6 +441,18 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
pass
|
||||
raise err
|
||||
|
||||
def dump_sigfn(self, fn, dataCaches, options):
|
||||
if fn in self.taskdeps:
|
||||
for task in self.taskdeps[fn]:
|
||||
tid = fn + ":" + task
|
||||
mc = bb.runqueue.mc_from_tid(tid)
|
||||
if tid not in self.taskhash:
|
||||
continue
|
||||
if dataCaches[mc].basetaskhash[tid] != self.basehash[tid]:
|
||||
bb.error("Bitbake's cached basehash does not match the one we just generated (%s)!" % tid)
|
||||
bb.error("The mismatched hashes were %s and %s" % (dataCaches[mc].basetaskhash[tid], self.basehash[tid]))
|
||||
self.dump_sigtask(fn, task, dataCaches[mc].stamp[fn], True)
|
||||
|
||||
class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
|
||||
name = "basichash"
|
||||
|
||||
@@ -506,11 +463,11 @@ class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
|
||||
# If task is not in basehash, then error
|
||||
return self.basehash[tid]
|
||||
|
||||
def stampfile(self, stampbase, mcfn, taskname, extrainfo, clean=False):
|
||||
if taskname.endswith("_setscene"):
|
||||
tid = mcfn + ":" + taskname[:-9]
|
||||
def stampfile(self, stampbase, fn, taskname, extrainfo, clean=False):
|
||||
if taskname != "do_setscene" and taskname.endswith("_setscene"):
|
||||
tid = fn + ":" + taskname[:-9]
|
||||
else:
|
||||
tid = mcfn + ":" + taskname
|
||||
tid = fn + ":" + taskname
|
||||
if clean:
|
||||
h = "*"
|
||||
else:
|
||||
@@ -518,107 +475,42 @@ class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
|
||||
|
||||
return ("%s.%s.%s.%s" % (stampbase, taskname, h, extrainfo)).rstrip('.')
|
||||
|
||||
def stampcleanmask(self, stampbase, mcfn, taskname, extrainfo):
|
||||
return self.stampfile(stampbase, mcfn, taskname, extrainfo, clean=True)
|
||||
def stampcleanmask(self, stampbase, fn, taskname, extrainfo):
|
||||
return self.stampfile(stampbase, fn, taskname, extrainfo, clean=True)
|
||||
|
||||
def invalidate_task(self, task, mcfn):
|
||||
bb.note("Tainting hash to force rebuild of task %s, %s" % (mcfn, task))
|
||||
|
||||
mc = bb.runqueue.mc_from_tid(mcfn)
|
||||
stamp = self.datacaches[mc].stamp[mcfn]
|
||||
|
||||
taintfn = stamp + '.' + task + '.taint'
|
||||
|
||||
import uuid
|
||||
bb.utils.mkdirhier(os.path.dirname(taintfn))
|
||||
# The specific content of the taint file is not really important,
|
||||
# we just need it to be random, so a random UUID is used
|
||||
with open(taintfn, 'w') as taintf:
|
||||
taintf.write(str(uuid.uuid4()))
|
||||
def invalidate_task(self, task, d, fn):
|
||||
bb.note("Tainting hash to force rebuild of task %s, %s" % (fn, task))
|
||||
bb.build.write_taint(task, d, fn)
|
||||
|
||||
class SignatureGeneratorUniHashMixIn(object):
|
||||
def __init__(self, data):
|
||||
self.extramethod = {}
|
||||
# NOTE: The cache only tracks hashes that exist. Hashes that don't
|
||||
# exist are always queries from the server since it is possible for
|
||||
# hashes to appear over time, but much less likely for them to
|
||||
# disappear
|
||||
self.unihash_exists_cache = set()
|
||||
self.username = None
|
||||
self.password = None
|
||||
self.env = {}
|
||||
|
||||
origenv = data.getVar("BB_ORIGENV")
|
||||
for e in HASHSERV_ENVVARS:
|
||||
value = data.getVar(e)
|
||||
if not value and origenv:
|
||||
value = origenv.getVar(e)
|
||||
if value:
|
||||
self.env[e] = value
|
||||
super().__init__(data)
|
||||
|
||||
def get_taskdata(self):
|
||||
return (self.server, self.method, self.extramethod, self.max_parallel, self.username, self.password, self.env) + super().get_taskdata()
|
||||
return (self.server, self.method, self.extramethod) + super().get_taskdata()
|
||||
|
||||
def set_taskdata(self, data):
|
||||
self.server, self.method, self.extramethod, self.max_parallel, self.username, self.password, self.env = data[:7]
|
||||
super().set_taskdata(data[7:])
|
||||
self.server, self.method, self.extramethod = data[:3]
|
||||
super().set_taskdata(data[3:])
|
||||
|
||||
def get_hashserv_creds(self):
|
||||
if self.username and self.password:
|
||||
return {
|
||||
"username": self.username,
|
||||
"password": self.password,
|
||||
}
|
||||
|
||||
return {}
|
||||
|
||||
@contextmanager
|
||||
def _client_env(self):
|
||||
orig_env = os.environ.copy()
|
||||
try:
|
||||
for k, v in self.env.items():
|
||||
os.environ[k] = v
|
||||
|
||||
yield
|
||||
finally:
|
||||
for k, v in self.env.items():
|
||||
if k in orig_env:
|
||||
os.environ[k] = orig_env[k]
|
||||
else:
|
||||
del os.environ[k]
|
||||
|
||||
@contextmanager
|
||||
def client(self):
|
||||
with self._client_env():
|
||||
if getattr(self, '_client', None) is None:
|
||||
self._client = hashserv.create_client(self.server, **self.get_hashserv_creds())
|
||||
yield self._client
|
||||
|
||||
@contextmanager
|
||||
def client_pool(self):
|
||||
with self._client_env():
|
||||
if getattr(self, '_client_pool', None) is None:
|
||||
self._client_pool = hashserv.client.ClientPool(self.server, self.max_parallel, **self.get_hashserv_creds())
|
||||
yield self._client_pool
|
||||
if getattr(self, '_client', None) is None:
|
||||
self._client = hashserv.create_client(self.server)
|
||||
return self._client
|
||||
|
||||
def reset(self, data):
|
||||
self.__close_clients()
|
||||
if getattr(self, '_client', None) is not None:
|
||||
self._client.close()
|
||||
self._client = None
|
||||
return super().reset(data)
|
||||
|
||||
def exit(self):
|
||||
self.__close_clients()
|
||||
if getattr(self, '_client', None) is not None:
|
||||
self._client.close()
|
||||
self._client = None
|
||||
return super().exit()
|
||||
|
||||
def __close_clients(self):
|
||||
with self._client_env():
|
||||
if getattr(self, '_client', None) is not None:
|
||||
self._client.close()
|
||||
self._client = None
|
||||
if getattr(self, '_client_pool', None) is not None:
|
||||
self._client_pool.close()
|
||||
self._client_pool = None
|
||||
|
||||
def get_stampfile_hash(self, tid):
|
||||
if tid in self.taskhash:
|
||||
# If a unique hash is reported, use it as the stampfile hash. This
|
||||
@@ -650,7 +542,7 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
return None
|
||||
return unihash
|
||||
|
||||
def get_cached_unihash(self, tid):
|
||||
def get_unihash(self, tid):
|
||||
taskhash = self.taskhash[tid]
|
||||
|
||||
# If its not a setscene task we can return
|
||||
@@ -665,108 +557,40 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
self.unihash[tid] = unihash
|
||||
return unihash
|
||||
|
||||
return None
# In the absence of being able to discover a unique hash from the
# server, make it be equivalent to the taskhash. The unique "hash" only
# really needs to be a unique string (not even necessarily a hash), but
# making it match the taskhash has a few advantages:
#
# 1) All of the sstate code that assumes hashes can be the same
# 2) It provides maximal compatibility with builders that don't use
# an equivalency server
# 3) The value is easy for multiple independent builders to derive the
# same unique hash from the same input. This means that if the
# independent builders find the same taskhash, but it isn't reported
# to the server, there is a better chance that they will agree on
# the unique hash.
unihash = taskhash

def _get_method(self, tid):
|
||||
method = self.method
|
||||
if tid in self.extramethod:
|
||||
method = method + self.extramethod[tid]
|
||||
|
||||
return method
|
||||
|
||||
def unihashes_exist(self, query):
|
||||
if len(query) == 0:
|
||||
return {}
|
||||
|
||||
uncached_query = {}
|
||||
result = {}
|
||||
for key, unihash in query.items():
|
||||
if unihash in self.unihash_exists_cache:
|
||||
result[key] = True
|
||||
else:
|
||||
uncached_query[key] = unihash
|
||||
|
||||
if self.max_parallel <= 1 or len(uncached_query) <= 1:
|
||||
# No parallelism required. Make the query serially with the single client
|
||||
with self.client() as client:
|
||||
uncached_result = {
|
||||
key: client.unihash_exists(value) for key, value in uncached_query.items()
|
||||
}
|
||||
else:
|
||||
with self.client_pool() as client_pool:
|
||||
uncached_result = client_pool.unihashes_exist(uncached_query)
|
||||
|
||||
for key, exists in uncached_result.items():
|
||||
if exists:
|
||||
self.unihash_exists_cache.add(query[key])
|
||||
result[key] = exists
|
||||
|
||||
return result
|
||||
|
||||
def get_unihash(self, tid):
|
||||
return self.get_unihashes([tid])[tid]
|
||||
|
||||
def get_unihashes(self, tids):
|
||||
"""
|
||||
For a iterable of tids, returns a dictionary that maps each tid to a
|
||||
unihash
|
||||
"""
|
||||
result = {}
|
||||
queries = {}
|
||||
query_result = {}
|
||||
|
||||
for tid in tids:
|
||||
unihash = self.get_cached_unihash(tid)
|
||||
if unihash:
|
||||
result[tid] = unihash
|
||||
else:
|
||||
queries[tid] = (self._get_method(tid), self.taskhash[tid])
|
||||
|
||||
if len(queries) == 0:
|
||||
return result
|
||||
|
||||
if self.max_parallel <= 1 or len(queries) <= 1:
|
||||
# No parallelism required. Make the query using a single client
|
||||
with self.client() as client:
|
||||
keys = list(queries.keys())
|
||||
unihashes = client.get_unihash_batch(queries[k] for k in keys)
|
||||
|
||||
for idx, k in enumerate(keys):
|
||||
query_result[k] = unihashes[idx]
|
||||
else:
|
||||
with self.client_pool() as client_pool:
|
||||
query_result = client_pool.get_unihashes(queries)
|
||||
|
||||
for tid, unihash in query_result.items():
|
||||
# In the absence of being able to discover a unique hash from the
|
||||
# server, make it be equivalent to the taskhash. The unique "hash" only
|
||||
# really needs to be a unique string (not even necessarily a hash), but
|
||||
# making it match the taskhash has a few advantages:
|
||||
#
|
||||
# 1) All of the sstate code that assumes hashes can be the same
|
||||
# 2) It provides maximal compatibility with builders that don't use
|
||||
# an equivalency server
|
||||
# 3) The value is easy for multiple independent builders to derive the
|
||||
# same unique hash from the same input. This means that if the
|
||||
# independent builders find the same taskhash, but it isn't reported
|
||||
# to the server, there is a better chance that they will agree on
|
||||
# the unique hash.
|
||||
taskhash = self.taskhash[tid]
|
||||
if unihash:
|
||||
try:
|
||||
method = self.method
|
||||
if tid in self.extramethod:
|
||||
method = method + self.extramethod[tid]
|
||||
data = self.client().get_unihash(method, self.taskhash[tid])
|
||||
if data:
|
||||
unihash = data
|
||||
# A unique hash equal to the taskhash is not very interesting,
|
||||
# so it is reported it at debug level 2. If they differ, that
|
||||
# is much more interesting, so it is reported at debug level 1
|
||||
hashequiv_logger.bbdebug((1, 2)[unihash == taskhash], 'Found unihash %s in place of %s for %s from %s' % (unihash, taskhash, tid, self.server))
|
||||
hashequiv_logger.debug((1, 2)[unihash == taskhash], 'Found unihash %s in place of %s for %s from %s' % (unihash, taskhash, tid, self.server))
|
||||
else:
|
||||
hashequiv_logger.debug2('No reported unihash for %s:%s from %s' % (tid, taskhash, self.server))
|
||||
unihash = taskhash
|
||||
except ConnectionError as e:
|
||||
bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
|
||||
|
||||
|
||||
self.set_unihash(tid, unihash)
|
||||
self.unihash[tid] = unihash
|
||||
result[tid] = unihash
|
||||
|
||||
return result
|
||||
self.set_unihash(tid, unihash)
|
||||
self.unihash[tid] = unihash
|
||||
return unihash
|
||||
|
||||
def report_unihash(self, path, task, d):
|
||||
import importlib
|
||||
@@ -775,8 +599,8 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
unihash = d.getVar('BB_UNIHASH')
|
||||
report_taskdata = d.getVar('SSTATE_HASHEQUIV_REPORT_TASKDATA') == '1'
|
||||
tempdir = d.getVar('T')
|
||||
mcfn = d.getVar('BB_FILENAME')
|
||||
tid = mcfn + ':do_' + task
|
||||
fn = d.getVar('BB_FILENAME')
|
||||
tid = fn + ':do_' + task
|
||||
key = tid + ':' + taskhash
|
||||
|
||||
if self.setscenetasks and tid not in self.setscenetasks:
|
||||
@@ -830,14 +654,12 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
if tid in self.extramethod:
|
||||
method = method + self.extramethod[tid]
|
||||
|
||||
with self.client() as client:
|
||||
data = client.report_unihash(taskhash, method, outhash, unihash, extra_data)
|
||||
|
||||
data = self.client().report_unihash(taskhash, method, outhash, unihash, extra_data)
|
||||
new_unihash = data['unihash']
|
||||
|
||||
if new_unihash != unihash:
|
||||
hashequiv_logger.debug('Task %s unihash changed %s -> %s by server %s' % (taskhash, unihash, new_unihash, self.server))
|
||||
bb.event.fire(bb.runqueue.taskUniHashUpdate(mcfn + ':do_' + task, new_unihash), d)
|
||||
bb.event.fire(bb.runqueue.taskUniHashUpdate(fn + ':do_' + task, new_unihash), d)
|
||||
self.set_unihash(tid, new_unihash)
|
||||
d.setVar('BB_UNIHASH', new_unihash)
|
||||
else:
|
||||
@@ -863,9 +685,7 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
if tid in self.extramethod:
|
||||
method = method + self.extramethod[tid]
|
||||
|
||||
with self.client() as client:
|
||||
data = client.report_unihash_equiv(taskhash, method, wanted_unihash, extra_data)
|
||||
|
||||
data = self.client().report_unihash_equiv(taskhash, method, wanted_unihash, extra_data)
|
||||
hashequiv_logger.verbose('Reported task %s as unihash %s to %s (%s)' % (tid, wanted_unihash, self.server, str(data)))
|
||||
|
||||
if data is None:
|
||||
@@ -898,20 +718,20 @@ class SignatureGeneratorTestEquivHash(SignatureGeneratorUniHashMixIn, SignatureG
|
||||
super().init_rundepcheck(data)
|
||||
self.server = data.getVar('BB_HASHSERVE')
|
||||
self.method = "sstate_output_hash"
|
||||
self.max_parallel = 1
|
||||
|
||||
def clean_checksum_file_path(file_checksum_tuple):
|
||||
f, cs = file_checksum_tuple
|
||||
if "/./" in f:
|
||||
return "./" + f.split("/./")[1]
|
||||
return f
|
||||
#
|
||||
# Dummy class used for bitbake-selftest
|
||||
#
|
||||
class SignatureGeneratorTestMulticonfigDepends(SignatureGeneratorBasicHash):
|
||||
name = "TestMulticonfigDepends"
|
||||
supports_multiconfig_datacaches = True
|
||||
|
||||
def dump_this_task(outfile, d):
|
||||
import bb.parse
|
||||
mcfn = d.getVar("BB_FILENAME")
|
||||
fn = d.getVar("BB_FILENAME")
|
||||
task = "do_" + d.getVar("BB_CURRENTTASK")
|
||||
referencestamp = bb.parse.siggen.stampfile_base(mcfn)
|
||||
bb.parse.siggen.dump_sigtask(mcfn, task, outfile, "customfile:" + referencestamp)
|
||||
referencestamp = bb.build.stamp_internal(task, d, None, True)
|
||||
bb.parse.siggen.dump_sigtask(fn, task, outfile, "customfile:" + referencestamp)
|
||||
|
||||
def init_colors(enable_color):
|
||||
"""Initialise colour dict for passing to compare_sigfiles()"""
|
||||
@@ -964,6 +784,39 @@ def list_inline_diff(oldlist, newlist, colors=None):
|
||||
ret.append(item)
|
||||
return '[%s]' % (', '.join(ret))
|
||||
|
||||
def clean_basepath(basepath):
basepath, dir, recipe_task = basepath.rsplit("/", 2)
cleaned = dir + '/' + recipe_task

if basepath[0] == '/':
return cleaned

if basepath.startswith("mc:") and basepath.count(':') >= 2:
mc, mc_name, basepath = basepath.split(":", 2)
mc_suffix = ':mc:' + mc_name
else:
mc_suffix = ''

# mc stuff now removed from basepath. Whatever was next, if present will be the first
# suffix. ':/', recipe path start, marks the end of this. Something like
# 'virtual:a[:b[:c]]:/path...' (b and c being optional)
if basepath[0] != '/':
cleaned += ':' + basepath.split(':/', 1)[0]

return cleaned + mc_suffix

def clean_basepaths(a):
|
||||
b = {}
|
||||
for x in a:
|
||||
b[clean_basepath(x)] = a[x]
|
||||
return b
|
||||
|
||||
def clean_basepaths_list(a):
|
||||
b = []
|
||||
for x in a:
|
||||
b.append(clean_basepath(x))
|
||||
return b
|
||||
|
||||
# Handled renamed fields
|
||||
def handle_renames(data):
|
||||
if 'basewhitelist' in data:
|
||||
@@ -994,18 +847,10 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
formatparams.update(values)
|
||||
return formatstr.format(**formatparams)
|
||||
|
||||
try:
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
except (TypeError, OSError) as err:
|
||||
bb.error("Failed to open sigdata file '%s': %s" % (a, str(err)))
|
||||
raise err
|
||||
try:
|
||||
with bb.compress.zstd.open(b, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
b_data = json.load(f, object_hook=SetDecoder)
|
||||
except (TypeError, OSError) as err:
|
||||
bb.error("Failed to open sigdata file '%s': %s" % (b, str(err)))
|
||||
raise err
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
with bb.compress.zstd.open(b, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
b_data = json.load(f, object_hook=SetDecoder)
|
||||
|
||||
for data in [a_data, b_data]:
|
||||
handle_renames(data)
|
||||
@@ -1140,11 +985,11 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
a = a_data['runtaskdeps'][idx]
|
||||
b = b_data['runtaskdeps'][idx]
|
||||
if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b] and not collapsed:
|
||||
changed.append("%s with hash %s\n changed to\n%s with hash %s" % (a, a_data['runtaskhashes'][a], b, b_data['runtaskhashes'][b]))
|
||||
changed.append("%s with hash %s\n changed to\n%s with hash %s" % (clean_basepath(a), a_data['runtaskhashes'][a], clean_basepath(b), b_data['runtaskhashes'][b]))
|
||||
|
||||
if changed:
|
||||
clean_a = a_data['runtaskdeps']
|
||||
clean_b = b_data['runtaskdeps']
|
||||
clean_a = clean_basepaths_list(a_data['runtaskdeps'])
|
||||
clean_b = clean_basepaths_list(b_data['runtaskdeps'])
|
||||
if clean_a != clean_b:
|
||||
output.append(color_format("{color_title}runtaskdeps changed:{color_default}\n%s") % list_inline_diff(clean_a, clean_b, colors))
|
||||
else:
|
||||
@@ -1153,8 +998,8 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
|
||||
|
||||
if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data:
|
||||
a = a_data['runtaskhashes']
|
||||
b = b_data['runtaskhashes']
|
||||
a = clean_basepaths(a_data['runtaskhashes'])
|
||||
b = clean_basepaths(b_data['runtaskhashes'])
|
||||
changed, added, removed = dict_diff(a, b)
|
||||
if added:
|
||||
for dep in sorted(added):
|
||||
@@ -1211,7 +1056,7 @@ def calc_basehash(sigdata):
|
||||
basedata = ''
|
||||
|
||||
alldeps = sigdata['taskdeps']
|
||||
for dep in sorted(alldeps):
|
||||
for dep in alldeps:
|
||||
basedata = basedata + dep
|
||||
val = sigdata['varvals'][dep]
|
||||
if val is not None:
|
||||
@@ -1243,12 +1088,8 @@ def calc_taskhash(sigdata):
|
||||
def dump_sigfile(a):
|
||||
output = []
|
||||
|
||||
try:
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
except (TypeError, OSError) as err:
|
||||
bb.error("Failed to open sigdata file '%s': %s" % (a, str(err)))
|
||||
raise err
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
|
||||
handle_renames(a_data)
|
||||
|
||||
|
||||
@@ -44,7 +44,6 @@ class VariableReferenceTest(ReferenceTest):
|
||||
def parseExpression(self, exp):
|
||||
parsedvar = self.d.expandWithRefs(exp, None)
|
||||
self.references = parsedvar.references
|
||||
self.execs = parsedvar.execs
|
||||
|
||||
def test_simple_reference(self):
|
||||
self.setEmptyVars(["FOO"])
|
||||
@@ -62,11 +61,6 @@ class VariableReferenceTest(ReferenceTest):
|
||||
self.parseExpression("${@d.getVar('BAR') + 'foo'}")
|
||||
self.assertReferences(set(["BAR"]))
|
||||
|
||||
def test_python_exec_reference(self):
|
||||
self.parseExpression("${@eval('3 * 5')}")
|
||||
self.assertReferences(set())
|
||||
self.assertExecs(set(["eval"]))
|
||||
|
||||
class ShellReferenceTest(ReferenceTest):
|
||||
|
||||
def parseExpression(self, exp):
|
||||
@@ -324,7 +318,7 @@ d.getVar(a(), False)
|
||||
"filename": "example.bb",
|
||||
})
|
||||
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
|
||||
self.assertEqual(deps, set(["somevar", "bar", "something", "inexpand", "test", "test2", "a"]))
|
||||
|
||||
@@ -371,7 +365,7 @@ esac
|
||||
self.d.setVarFlags("FOO", {"func": True})
|
||||
self.setEmptyVars(execs)
|
||||
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
|
||||
self.assertEqual(deps, set(["somevar", "inverted"] + execs))
|
||||
|
||||
@@ -381,7 +375,7 @@ esac
|
||||
self.d.setVar("FOO", "foo=oe_libinstall; eval $foo")
|
||||
self.d.setVarFlag("FOO", "vardeps", "oe_libinstall")
|
||||
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
|
||||
self.assertEqual(deps, set(["oe_libinstall"]))
|
||||
|
||||
@@ -390,7 +384,7 @@ esac
|
||||
self.d.setVar("FOO", "foo=oe_libinstall; eval $foo")
|
||||
self.d.setVarFlag("FOO", "vardeps", "${@'oe_libinstall'}")
|
||||
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
|
||||
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
|
||||
self.assertEqual(deps, set(["oe_libinstall"]))
|
||||
|
||||
@@ -405,7 +399,7 @@ esac
|
||||
# Check dependencies
|
||||
self.d.setVar('ANOTHERVAR', expr)
|
||||
self.d.setVar('TESTVAR', 'anothervalue testval testval2')
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
self.assertEqual(sorted(values.splitlines()),
|
||||
sorted([expr,
|
||||
'TESTVAR{anothervalue} = Set',
|
||||
@@ -424,49 +418,23 @@ esac
|
||||
self.d.setVar('ANOTHERVAR', varval)
|
||||
self.d.setVar('TESTVAR', 'anothervalue testval testval2')
|
||||
self.d.setVar('TESTVAR2', 'testval3')
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), set(["TESTVAR"]), self.d, self.d)
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(["TESTVAR"]), self.d)
|
||||
self.assertEqual(sorted(values.splitlines()), sorted([varval]))
|
||||
self.assertEqual(deps, set(["TESTVAR2"]))
|
||||
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval3', 'anothervalue'])
|
||||
|
||||
# Check the vardepsexclude flag is handled by contains functionality
|
||||
self.d.setVarFlag('ANOTHERVAR', 'vardepsexclude', 'TESTVAR')
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
self.assertEqual(sorted(values.splitlines()), sorted([varval]))
|
||||
self.assertEqual(deps, set(["TESTVAR2"]))
|
||||
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval3', 'anothervalue'])
|
||||
|
||||
def test_contains_vardeps_override_operators(self):
|
||||
# Check override operators handle dependencies correctly with the contains functionality
|
||||
expr_plain = 'testval'
|
||||
expr_prepend = '${@bb.utils.filter("TESTVAR1", "testval1", d)} '
|
||||
expr_append = ' ${@bb.utils.filter("TESTVAR2", "testval2", d)}'
|
||||
expr_remove = '${@bb.utils.contains("TESTVAR3", "no-testval", "testval", "", d)}'
|
||||
# Check dependencies
|
||||
self.d.setVar('ANOTHERVAR', expr_plain)
|
||||
self.d.prependVar('ANOTHERVAR', expr_prepend)
|
||||
self.d.appendVar('ANOTHERVAR', expr_append)
|
||||
self.d.setVar('ANOTHERVAR:remove', expr_remove)
|
||||
self.d.setVar('TESTVAR1', 'blah')
|
||||
self.d.setVar('TESTVAR2', 'testval2')
|
||||
self.d.setVar('TESTVAR3', 'no-testval')
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
|
||||
self.assertEqual(sorted(values.splitlines()),
|
||||
sorted([
|
||||
expr_prepend + expr_plain + expr_append,
|
||||
'_remove of ' + expr_remove,
|
||||
'TESTVAR1{testval1} = Unset',
|
||||
'TESTVAR2{testval2} = Set',
|
||||
'TESTVAR3{no-testval} = Set',
|
||||
]))
|
||||
# Check final value
|
||||
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval2'])
|
||||
|
||||
#Currently no wildcard support
|
||||
#def test_vardeps_wildcards(self):
|
||||
# self.d.setVar("oe_libinstall", "echo test")
|
||||
# self.d.setVar("FOO", "foo=oe_libinstall; eval $foo")
|
||||
# self.d.setVarFlag("FOO", "vardeps", "oe_*")
|
||||
# self.assertEqual(deps, set(["oe_libinstall"]))
|
||||
# self.assertEquals(deps, set(["oe_libinstall"]))
|
||||
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ class ProgressWatcher:
|
||||
def __init__(self):
|
||||
self._reports = []
|
||||
|
||||
def handle_event(self, event, d):
|
||||
def handle_event(self, event):
|
||||
self._reports.append((event.progress, event.rate))
|
||||
|
||||
def reports(self):
|
||||
|
||||
@@ -60,15 +60,6 @@ class DataExpansions(unittest.TestCase):
|
||||
val = self.d.expand("${@5*12}")
|
||||
self.assertEqual(str(val), "60")
|
||||
|
||||
def test_python_snippet_w_dict(self):
|
||||
val = self.d.expand("${@{ 'green': 1, 'blue': 2 }['green']}")
|
||||
self.assertEqual(str(val), "1")
|
||||
|
||||
def test_python_unexpanded_multi(self):
|
||||
self.d.setVar("bar", "${unsetvar}")
|
||||
val = self.d.expand("${@2*2},${foo},${@d.getVar('foo') + ' ${bar}'},${foo}")
|
||||
self.assertEqual(str(val), "4,value_of_foo,${@d.getVar('foo') + ' ${unsetvar}'},value_of_foo")
|
||||
|
||||
def test_expand_in_python_snippet(self):
|
||||
val = self.d.expand("${@'boo ' + '${foo}'}")
|
||||
self.assertEqual(str(val), "boo value_of_foo")
|
||||
@@ -77,18 +68,6 @@ class DataExpansions(unittest.TestCase):
|
||||
val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
|
||||
self.assertEqual(str(val), "value_of_foo value_of_bar")
|
||||
|
||||
def test_python_snippet_function_reference(self):
|
||||
self.d.setVar("TESTVAL", "testvalue")
|
||||
self.d.setVar("testfunc", 'd.getVar("TESTVAL")')
|
||||
context = bb.utils.get_context()
|
||||
context["testfunc"] = lambda d: d.getVar("TESTVAL")
|
||||
val = self.d.expand("${@testfunc(d)}")
|
||||
self.assertEqual(str(val), "testvalue")
|
||||
|
||||
def test_python_snippet_builtin_metadata(self):
|
||||
self.d.setVar("eval", "INVALID")
|
||||
self.d.expand("${@eval('3')}")
|
||||
|
||||
def test_python_unexpanded(self):
|
||||
self.d.setVar("bar", "${unsetvar}")
|
||||
val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
|
||||
@@ -395,16 +374,6 @@ class TestOverrides(unittest.TestCase):
|
||||
self.d.setVar("OVERRIDES", "foo:bar:some_val")
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue3")
|
||||
|
||||
# Test an override with _<numeric> in it based on a real world OE issue
|
||||
def test_underscore_override_2(self):
|
||||
self.d.setVar("TARGET_ARCH", "x86_64")
|
||||
self.d.setVar("PN", "test-${TARGET_ARCH}")
|
||||
self.d.setVar("VERSION", "1")
|
||||
self.d.setVar("VERSION:pn-test-${TARGET_ARCH}", "2")
|
||||
self.d.setVar("OVERRIDES", "pn-${PN}")
|
||||
bb.data.expandKeys(self.d)
|
||||
self.assertEqual(self.d.getVar("VERSION"), "2")
|
||||
|
||||
def test_remove_with_override(self):
|
||||
self.d.setVar("TEST:bar", "testvalue2")
|
||||
self.d.setVar("TEST:some_val", "testvalue3 testvalue5")
|
||||
@@ -426,6 +395,16 @@ class TestOverrides(unittest.TestCase):
|
||||
self.d.setVar("TEST:bar:append", "testvalue2")
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue2")
|
||||
|
||||
# Test an override with _<numeric> in it based on a real world OE issue
|
||||
def test_underscore_override(self):
|
||||
self.d.setVar("TARGET_ARCH", "x86_64")
|
||||
self.d.setVar("PN", "test-${TARGET_ARCH}")
|
||||
self.d.setVar("VERSION", "1")
|
||||
self.d.setVar("VERSION:pn-test-${TARGET_ARCH}", "2")
|
||||
self.d.setVar("OVERRIDES", "pn-${PN}")
|
||||
bb.data.expandKeys(self.d)
|
||||
self.assertEqual(self.d.getVar("VERSION"), "2")
|
||||
|
||||
def test_append_and_unused_override(self):
|
||||
# Had a bug where an unused override append could return "" instead of None
|
||||
self.d.setVar("BAR:append:unusedoverride", "testvalue2")
|
||||
|
||||
@@ -13,7 +13,6 @@ import pickle
|
||||
import threading
|
||||
import time
|
||||
import unittest
|
||||
import tempfile
|
||||
from unittest.mock import Mock
|
||||
from unittest.mock import call
|
||||
|
||||
@@ -158,7 +157,7 @@ class EventHandlingTest(unittest.TestCase):
|
||||
self._test_process.event_handler,
|
||||
event,
|
||||
None)
|
||||
self._test_process.event_handler.assert_called_once_with(event, None)
|
||||
self._test_process.event_handler.assert_called_once_with(event)
|
||||
|
||||
def test_fire_class_handlers(self):
|
||||
""" Test fire_class_handlers method """
|
||||
@@ -176,10 +175,10 @@ class EventHandlingTest(unittest.TestCase):
|
||||
bb.event.fire_class_handlers(event1, None)
|
||||
bb.event.fire_class_handlers(event2, None)
|
||||
bb.event.fire_class_handlers(event2, None)
|
||||
expected_event_handler1 = [call(event1, None)]
|
||||
expected_event_handler2 = [call(event1, None),
|
||||
call(event2, None),
|
||||
call(event2, None)]
|
||||
expected_event_handler1 = [call(event1)]
|
||||
expected_event_handler2 = [call(event1),
|
||||
call(event2),
|
||||
call(event2)]
|
||||
self.assertEqual(self._test_process.event_handler1.call_args_list,
|
||||
expected_event_handler1)
|
||||
self.assertEqual(self._test_process.event_handler2.call_args_list,
|
||||
@@ -206,7 +205,7 @@ class EventHandlingTest(unittest.TestCase):
|
||||
bb.event.fire_class_handlers(event2, None)
|
||||
bb.event.fire_class_handlers(event2, None)
|
||||
expected_event_handler1 = []
|
||||
expected_event_handler2 = [call(event1, None)]
|
||||
expected_event_handler2 = [call(event1)]
|
||||
self.assertEqual(self._test_process.event_handler1.call_args_list,
|
||||
expected_event_handler1)
|
||||
self.assertEqual(self._test_process.event_handler2.call_args_list,
|
||||
@@ -224,7 +223,7 @@ class EventHandlingTest(unittest.TestCase):
|
||||
self.assertEqual(result, bb.event.Registered)
|
||||
bb.event.fire_class_handlers(event1, None)
|
||||
bb.event.fire_class_handlers(event2, None)
|
||||
expected = [call(event1, None), call(event2, None)]
|
||||
expected = [call(event1), call(event2)]
|
||||
self.assertEqual(self._test_process.event_handler1.call_args_list,
|
||||
expected)
|
||||
|
||||
@@ -238,7 +237,7 @@ class EventHandlingTest(unittest.TestCase):
|
||||
self.assertEqual(result, bb.event.Registered)
|
||||
bb.event.fire_class_handlers(event1, None)
|
||||
bb.event.fire_class_handlers(event2, None)
|
||||
expected = [call(event1, None), call(event2, None), call(event1, None)]
|
||||
expected = [call(event1), call(event2), call(event1)]
|
||||
self.assertEqual(self._test_process.event_handler1.call_args_list,
|
||||
expected)
|
||||
|
||||
@@ -252,7 +251,7 @@ class EventHandlingTest(unittest.TestCase):
|
||||
self.assertEqual(result, bb.event.Registered)
|
||||
bb.event.fire_class_handlers(event1, None)
|
||||
bb.event.fire_class_handlers(event2, None)
|
||||
expected = [call(event1,None), call(event2, None), call(event1, None), call(event2, None)]
|
||||
expected = [call(event1), call(event2), call(event1), call(event2)]
|
||||
self.assertEqual(self._test_process.event_handler1.call_args_list,
|
||||
expected)
|
||||
|
||||
@@ -360,10 +359,9 @@ class EventHandlingTest(unittest.TestCase):
|
||||
|
||||
event1 = bb.event.ConfigParsed()
|
||||
bb.event.fire(event1, None)
|
||||
expected = [call(event1, None)]
|
||||
expected = [call(event1)]
|
||||
self.assertEqual(self._test_process.event_handler1.call_args_list,
|
||||
expected)
|
||||
expected = [call(event1)]
|
||||
self.assertEqual(self._test_ui1.event.send.call_args_list,
|
||||
expected)
|
||||
|
||||
@@ -452,9 +450,10 @@ class EventHandlingTest(unittest.TestCase):
|
||||
and disable threadlocks tests """
|
||||
bb.event.fire(bb.event.OperationStarted(), None)
|
||||
|
||||
def test_event_threadlock(self):
|
||||
def test_enable_threadlock(self):
|
||||
""" Test enable_threadlock method """
|
||||
self._set_threadlock_test_mockups()
|
||||
bb.event.enable_threadlock()
|
||||
self._set_and_run_threadlock_test_workers()
|
||||
# Calls to UI handlers should be in order as all the registered
|
||||
# handlers for the event coming from the first worker should be
|
||||
@@ -462,6 +461,20 @@ class EventHandlingTest(unittest.TestCase):
|
||||
self.assertEqual(self._threadlock_test_calls,
|
||||
["w1_ui1", "w1_ui2", "w2_ui1", "w2_ui2"])
|
||||
|
||||
|
||||
def test_disable_threadlock(self):
|
||||
""" Test disable_threadlock method """
|
||||
self._set_threadlock_test_mockups()
|
||||
bb.event.disable_threadlock()
|
||||
self._set_and_run_threadlock_test_workers()
|
||||
# Calls to UI handlers should be intertwined together. Thanks to the
|
||||
# delay in the registered handlers for the event coming from the first
|
||||
# worker, the event coming from the second worker starts being
|
||||
# processed before finishing handling the first worker event.
|
||||
self.assertEqual(self._threadlock_test_calls,
|
||||
["w1_ui1", "w2_ui1", "w1_ui2", "w2_ui2"])
|
||||
|
||||
|
||||
class EventClassesTest(unittest.TestCase):
|
||||
""" Event classes test class """
|
||||
|
||||
@@ -469,8 +482,6 @@ class EventClassesTest(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
bb.event.worker_pid = EventClassesTest._worker_pid
|
||||
self.d = bb.data.init()
|
||||
bb.parse.siggen = bb.siggen.init(self.d)
|
||||
|
||||
def test_Event(self):
|
||||
""" Test the Event base class """
|
||||
@@ -953,24 +964,3 @@ class EventClassesTest(unittest.TestCase):
|
||||
event = bb.event.FindSigInfoResult(result)
|
||||
self.assertEqual(event.result, result)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_lineno_in_eventhandler(self):
|
||||
# The error lineno is 5, not 4 since the first line is '\n'
|
||||
error_line = """
|
||||
# Comment line1
|
||||
# Comment line2
|
||||
python test_lineno_in_eventhandler() {
|
||||
This is an error line
|
||||
}
|
||||
addhandler test_lineno_in_eventhandler
|
||||
test_lineno_in_eventhandler[eventmask] = "bb.event.ConfigParsed"
|
||||
"""
|
||||
|
||||
with self.assertLogs() as logs:
|
||||
f = tempfile.NamedTemporaryFile(suffix = '.bb')
|
||||
f.write(bytes(error_line, "utf-8"))
|
||||
f.flush()
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
output = "".join(logs.output)
|
||||
self.assertTrue(" line 5\n" in output)
|
||||
|
||||
@@ -1,20 +0,0 @@
[removed file content: HTML directory index page "Index of /sources/libxml2/2.10/" (full markup omitted)]
@@ -1,40 +0,0 @@
[removed file content: HTML directory index page "Index of /sources/libxml2/2.9/" (full markup omitted)]
@@ -1,19 +0,0 @@
[removed file content: HTML directory index page "Index of /sources/libxml2/" (full markup omitted)]
@@ -6,7 +6,6 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import contextlib
|
||||
import unittest
|
||||
import hashlib
|
||||
import tempfile
|
||||
@@ -25,8 +24,7 @@ def skipIfNoNetwork():
|
||||
return lambda f: f
|
||||
|
||||
class TestTimeout(Exception):
|
||||
# Indicate to pytest that this is not a test suite
|
||||
__test__ = False
|
||||
pass
|
||||
|
||||
class Timeout():
|
||||
|
||||
@@ -308,21 +306,6 @@ class URITest(unittest.TestCase):
|
||||
'params': {"someparam" : "1"},
|
||||
'query': {},
|
||||
'relative': True
|
||||
},
|
||||
"https://www.innodisk.com/Download_file?9BE0BF6657;downloadfilename=EGPL-T101.zip": {
|
||||
'uri': 'https://www.innodisk.com/Download_file?9BE0BF6657;downloadfilename=EGPL-T101.zip',
|
||||
'scheme': 'https',
|
||||
'hostname': 'www.innodisk.com',
|
||||
'port': None,
|
||||
'hostport': 'www.innodisk.com',
|
||||
'path': '/Download_file',
|
||||
'userinfo': '',
|
||||
'userinfo': '',
|
||||
'username': '',
|
||||
'password': '',
|
||||
'params': {"downloadfilename" : "EGPL-T101.zip"},
|
||||
'query': {"9BE0BF6657": None},
|
||||
'relative': False
|
||||
}
|
||||
|
||||
}
|
||||
@@ -432,28 +415,18 @@ class FetcherTest(unittest.TestCase):
|
||||
|
||||
def git(self, cmd, cwd=None):
|
||||
if isinstance(cmd, str):
|
||||
cmd = 'git -c safe.bareRepository=all ' + cmd
|
||||
cmd = 'git ' + cmd
|
||||
else:
|
||||
cmd = ['git', '-c', 'safe.bareRepository=all'] + cmd
|
||||
cmd = ['git'] + cmd
|
||||
if cwd is None:
|
||||
cwd = self.gitdir
|
||||
return bb.process.run(cmd, cwd=cwd)[0]
|
||||
|
||||
def git_init(self, cwd=None):
|
||||
self.git('init', cwd=cwd)
|
||||
# Explicitly set initial branch to master as
|
||||
# a common setup is to use other default
|
||||
# branch than master.
|
||||
self.git(['checkout', '-b', 'master'], cwd=cwd)
|
||||
|
||||
try:
|
||||
self.git(['config', 'user.email'], cwd=cwd)
|
||||
except bb.process.ExecutionError:
|
||||
if not self.git(['config', 'user.email'], cwd=cwd):
|
||||
self.git(['config', 'user.email', 'you@example.com'], cwd=cwd)
|
||||
|
||||
try:
|
||||
self.git(['config', 'user.name'], cwd=cwd)
|
||||
except bb.process.ExecutionError:
|
||||
if not self.git(['config', 'user.name'], cwd=cwd):
|
||||
self.git(['config', 'user.name', 'Your Name'], cwd=cwd)
|
||||
|
||||
class MirrorUriTest(FetcherTest):
|
||||
@@ -562,7 +535,7 @@ class MirrorUriTest(FetcherTest):
class GitDownloadDirectoryNamingTest(FetcherTest):
def setUp(self):
super(GitDownloadDirectoryNamingTest, self).setUp()
self.recipe_url = "git://git.openembedded.org/bitbake;branch=master;protocol=https"
self.recipe_url = "git://git.openembedded.org/bitbake;branch=master"
self.recipe_dir = "git.openembedded.org.bitbake"
self.mirror_url = "git://github.com/openembedded/bitbake.git;protocol=https;branch=master"
self.mirror_dir = "github.com.openembedded.bitbake.git"
@@ -610,7 +583,7 @@ class GitDownloadDirectoryNamingTest(FetcherTest):
class TarballNamingTest(FetcherTest):
def setUp(self):
super(TarballNamingTest, self).setUp()
self.recipe_url = "git://git.openembedded.org/bitbake;branch=master;protocol=https"
self.recipe_url = "git://git.openembedded.org/bitbake;branch=master"
self.recipe_tarball = "git2_git.openembedded.org.bitbake.tar.gz"
self.mirror_url = "git://github.com/openembedded/bitbake.git;protocol=https;branch=master"
self.mirror_tarball = "git2_github.com.openembedded.bitbake.git.tar.gz"
@@ -644,7 +617,7 @@ class TarballNamingTest(FetcherTest):
class GitShallowTarballNamingTest(FetcherTest):
def setUp(self):
super(GitShallowTarballNamingTest, self).setUp()
self.recipe_url = "git://git.openembedded.org/bitbake;branch=master;protocol=https"
self.recipe_url = "git://git.openembedded.org/bitbake;branch=master"
self.recipe_tarball = "gitshallow_git.openembedded.org.bitbake_82ea737-1_master.tar.gz"
self.mirror_url = "git://github.com/openembedded/bitbake.git;protocol=https;branch=master"
self.mirror_tarball = "gitshallow_github.com.openembedded.bitbake.git_82ea737-1_master.tar.gz"
@@ -679,7 +652,7 @@ class GitShallowTarballNamingTest(FetcherTest):
class CleanTarballTest(FetcherTest):
def setUp(self):
super(CleanTarballTest, self).setUp()
self.recipe_url = "git://git.openembedded.org/bitbake;protocol=https"
self.recipe_url = "git://git.openembedded.org/bitbake"
self.recipe_tarball = "git2_git.openembedded.org.bitbake.tar.gz"

self.d.setVar('BB_GENERATE_MIRROR_TARBALLS', '1')
@@ -700,13 +673,11 @@ class CleanTarballTest(FetcherTest):
archive = tarfile.open(os.path.join(self.dldir, self.recipe_tarball))
self.assertNotEqual(len(archive.members), 0)
for member in archive.members:
if member.name == ".":
continue
self.assertEqual(member.uname, 'oe', "user name for %s differs" % member.name)
self.assertEqual(member.uid, 0, "uid for %s differs" % member.name)
self.assertEqual(member.gname, 'oe', "group name for %s differs" % member.name)
self.assertEqual(member.gid, 0, "gid for %s differs" % member.name)
self.assertEqual(member.mtime, mtime, "mtime for %s differs" % member.name)
self.assertEqual(member.uname, 'oe')
self.assertEqual(member.uid, 0)
self.assertEqual(member.gname, 'oe')
self.assertEqual(member.gid, 0)
self.assertEqual(member.mtime, mtime)


class FetcherLocalTest(FetcherTest):
@@ -814,7 +785,7 @@ class FetcherLocalTest(FetcherTest):

# Fetch and check revision
self.d.setVar("SRCREV", "AUTOINC")
self.d.setVar("__BBSRCREV_SEEN", "1")
self.d.setVar("__BBSEENSRCREV", "1")
url = "git://" + self.gitdir + ";branch=master;protocol=file;" + suffix
fetcher = bb.fetch.Fetch([url], self.d)
fetcher.download()
@@ -1040,25 +1011,25 @@ class FetcherNetworkTest(FetcherTest):
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch(self):
|
||||
url1 = url2 = "git://git.openembedded.org/bitbake;branch=master;protocol=https"
|
||||
url1 = url2 = "git://git.openembedded.org/bitbake;branch=master"
|
||||
self.gitfetcher(url1, url2)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_goodsrcrev(self):
|
||||
# SRCREV is set but matches rev= parameter
|
||||
url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5;branch=master;protocol=https"
|
||||
url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5;branch=master"
|
||||
self.gitfetcher(url1, url2)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_badsrcrev(self):
|
||||
# SRCREV is set but does not match rev= parameter
|
||||
url1 = url2 = "git://git.openembedded.org/bitbake;rev=dead05b0b4ba0959fe0624d2a4885d7b70426da5;branch=master;protocol=https"
|
||||
url1 = url2 = "git://git.openembedded.org/bitbake;rev=dead05b0b4ba0959fe0624d2a4885d7b70426da5;branch=master"
|
||||
self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_tagandrev(self):
|
||||
# SRCREV is set but does not match rev= parameter
|
||||
url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5;tag=270a05b0b4ba0959fe0624d2a4885d7b70426da5;protocol=https"
|
||||
url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5;tag=270a05b0b4ba0959fe0624d2a4885d7b70426da5"
|
||||
self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
@@ -1067,7 +1038,7 @@ class FetcherNetworkTest(FetcherTest):
|
||||
# `usehead=1' and instead fetch the specified SRCREV. See
|
||||
# test_local_gitfetch_usehead() for a positive use of the usehead
|
||||
# feature.
|
||||
url = "git://git.openembedded.org/bitbake;usehead=1;branch=master;protocol=https"
|
||||
url = "git://git.openembedded.org/bitbake;usehead=1;branch=master"
|
||||
self.assertRaises(bb.fetch.ParameterError, self.gitfetcher, url, url)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
@@ -1076,26 +1047,26 @@ class FetcherNetworkTest(FetcherTest):
|
||||
# `usehead=1' and instead fetch the specified SRCREV. See
|
||||
# test_local_gitfetch_usehead() for a positive use of the usehead
|
||||
# feature.
|
||||
url = "git://git.openembedded.org/bitbake;usehead=1;name=newName;branch=master;protocol=https"
|
||||
url = "git://git.openembedded.org/bitbake;usehead=1;name=newName;branch=master"
|
||||
self.assertRaises(bb.fetch.ParameterError, self.gitfetcher, url, url)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_finds_local_tarball_for_mirrored_url_when_previous_downloaded_by_the_recipe_url(self):
|
||||
recipeurl = "git://git.openembedded.org/bitbake;branch=master;protocol=https"
|
||||
mirrorurl = "git://someserver.org/bitbake;branch=master;protocol=https"
|
||||
recipeurl = "git://git.openembedded.org/bitbake;branch=master"
|
||||
mirrorurl = "git://someserver.org/bitbake;branch=master"
|
||||
self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake")
|
||||
self.gitfetcher(recipeurl, mirrorurl)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_finds_local_tarball_when_previous_downloaded_from_a_premirror(self):
|
||||
recipeurl = "git://someserver.org/bitbake;branch=master;protocol=https"
|
||||
recipeurl = "git://someserver.org/bitbake;branch=master"
|
||||
self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake")
|
||||
self.gitfetcher(recipeurl, recipeurl)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_finds_local_repository_when_premirror_rewrites_the_recipe_url(self):
|
||||
realurl = "https://git.openembedded.org/bitbake"
|
||||
recipeurl = "git://someserver.org/bitbake;protocol=https"
|
||||
realurl = "git://git.openembedded.org/bitbake"
|
||||
recipeurl = "git://someserver.org/bitbake"
|
||||
self.sourcedir = self.unpackdir.replace("unpacked", "sourcemirror.git")
|
||||
os.chdir(self.tempdir)
|
||||
self.git(['clone', realurl, self.sourcedir], cwd=self.tempdir)
|
||||
@@ -1105,9 +1076,9 @@ class FetcherNetworkTest(FetcherTest):
|
||||
@skipIfNoNetwork()
|
||||
def test_git_submodule(self):
|
||||
# URL with ssh submodules
|
||||
url = "gitsm://git.yoctoproject.org/git-submodule-test;branch=ssh-gitsm-tests;rev=049da4a6cb198d7c0302e9e8b243a1443cb809a7;branch=master;protocol=https"
|
||||
url = "gitsm://git.yoctoproject.org/git-submodule-test;branch=ssh-gitsm-tests;rev=049da4a6cb198d7c0302e9e8b243a1443cb809a7;branch=master"
|
||||
# Original URL (comment this if you have ssh access to git.yoctoproject.org)
|
||||
url = "gitsm://git.yoctoproject.org/git-submodule-test;branch=master;rev=a2885dd7d25380d23627e7544b7bbb55014b16ee;branch=master;protocol=https"
|
||||
url = "gitsm://git.yoctoproject.org/git-submodule-test;branch=master;rev=a2885dd7d25380d23627e7544b7bbb55014b16ee;branch=master"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# Previous cwd has been deleted
|
||||
@@ -1123,25 +1094,6 @@ class FetcherNetworkTest(FetcherTest):
|
||||
if os.path.exists(os.path.join(repo_path, 'bitbake-gitsm-test1')):
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'bitbake-gitsm-test1', 'bitbake')), msg='submodule of submodule missing')
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_git_submodule_restricted_network_premirrors(self):
|
||||
# this test is to ensure that premirrors will be tried in restricted network
|
||||
# that is, BB_ALLOWED_NETWORKS does not contain the domain the url uses
|
||||
url = "gitsm://github.com/grpc/grpc.git;protocol=https;name=grpc;branch=v1.60.x;rev=0ef13a7555dbaadd4633399242524129eef5e231"
|
||||
# create a download directory to be used as premirror later
|
||||
tempdir = tempfile.mkdtemp(prefix="bitbake-fetch-")
|
||||
dl_premirror = os.path.join(tempdir, "download-premirror")
|
||||
os.mkdir(dl_premirror)
|
||||
self.d.setVar("DL_DIR", dl_premirror)
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# now use the premirror in restricted network
|
||||
self.d.setVar("DL_DIR", self.dldir)
|
||||
self.d.setVar("PREMIRRORS", "gitsm://.*/.* gitsm://%s/git2/MIRRORNAME;protocol=file" % dl_premirror)
|
||||
self.d.setVar("BB_ALLOWED_NETWORKS", "*.some.domain")
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_git_submodule_dbus_broker(self):
|
||||
# The following external repositories have show failures in fetch and unpack operations
|
||||
@@ -1282,9 +1234,8 @@ class SVNTest(FetcherTest):
|
||||
cwd=repo_dir)
|
||||
|
||||
bb.process.run("svn co %s svnfetch_co" % self.repo_url, cwd=self.tempdir)
|
||||
# Github won't emulate SVN anymore (see https://github.blog/2023-01-20-sunsetting-subversion-support/)
|
||||
# Use still accessible svn repo (only trunk to avoid longer downloads)
|
||||
bb.process.run("svn propset svn:externals 'bitbake https://svn.apache.org/repos/asf/serf/trunk' .",
|
||||
# Github will emulate SVN. Use this to check if we're downloding...
|
||||
bb.process.run("svn propset svn:externals 'bitbake https://github.com/PhilipHazel/pcre2.git' .",
|
||||
cwd=os.path.join(self.tempdir, 'svnfetch_co', 'trunk'))
|
||||
bb.process.run("svn commit --non-interactive -m 'Add external'",
|
||||
cwd=os.path.join(self.tempdir, 'svnfetch_co', 'trunk'))
|
||||
@@ -1312,8 +1263,8 @@ class SVNTest(FetcherTest):
|
||||
|
||||
self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk')), msg="Missing trunk")
|
||||
self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk', 'README.md')), msg="Missing contents")
|
||||
self.assertFalse(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/protocols')), msg="External dir should NOT exist")
|
||||
self.assertFalse(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/protocols', 'fcgi_buckets.h')), msg="External fcgi_buckets.h should NOT exit")
|
||||
self.assertFalse(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/trunk')), msg="External dir should NOT exist")
|
||||
self.assertFalse(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/trunk', 'README')), msg="External README should NOT exit")
|
||||
|
||||
@skipIfNoSvn()
|
||||
def test_external_svn(self):
|
||||
@@ -1326,8 +1277,8 @@ class SVNTest(FetcherTest):
|
||||
|
||||
self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk')), msg="Missing trunk")
|
||||
self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk', 'README.md')), msg="Missing contents")
|
||||
self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/protocols')), msg="External dir should exist")
|
||||
self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/protocols', 'fcgi_buckets.h')), msg="External fcgi_buckets.h should exit")
|
||||
self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/trunk')), msg="External dir should exist")
|
||||
self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/trunk', 'README')), msg="External README should exit")
|
||||
|
||||
class TrustedNetworksTest(FetcherTest):
|
||||
def test_trusted_network(self):
|
||||
@@ -1378,10 +1329,9 @@ class URLHandle(unittest.TestCase):
|
||||
"http://www.google.com/index.html" : ('http', 'www.google.com', '/index.html', '', '', {}),
|
||||
"cvs://anoncvs@cvs.handhelds.org/cvs;module=familiar/dist/ipkg" : ('cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', '', {'module': 'familiar/dist/ipkg'}),
|
||||
"cvs://anoncvs:anonymous@cvs.handhelds.org/cvs;tag=V0-99-81;module=familiar/dist/ipkg" : ('cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', 'anonymous', collections.OrderedDict([('tag', 'V0-99-81'), ('module', 'familiar/dist/ipkg')])),
|
||||
"git://git.openembedded.org/bitbake;branch=@foo;protocol=https" : ('git', 'git.openembedded.org', '/bitbake', '', '', {'branch': '@foo', 'protocol' : 'https'}),
|
||||
"git://git.openembedded.org/bitbake;branch=@foo" : ('git', 'git.openembedded.org', '/bitbake', '', '', {'branch': '@foo'}),
|
||||
"file://somelocation;someparam=1": ('file', '', 'somelocation', '', '', {'someparam': '1'}),
|
||||
"https://somesite.com/somerepo.git;user=anyUser:idtoken=1234" : ('https', 'somesite.com', '/somerepo.git', '', '', {'user': 'anyUser:idtoken=1234'}),
|
||||
r'git://s.o-me_ONE:!#$%^&*()-_={}[]\|:?,.<>~`@git.openembedded.org/bitbake;branch=main;protocol=https': ('git', 'git.openembedded.org', '/bitbake', 's.o-me_ONE', r'!#$%^&*()-_={}[]\|:?,.<>~`', {'branch': 'main', 'protocol' : 'https'}),
|
||||
r'git://s.o-me_ONE:!#$%^&*()-_={}[]\|:?,.<>~`@git.openembedded.org/bitbake;branch=main': ('git', 'git.openembedded.org', '/bitbake', 's.o-me_ONE', r'!#$%^&*()-_={}[]\|:?,.<>~`', {'branch': 'main'}),
|
||||
}
|
||||
# we require a pathname to encodeurl but users can still pass such urls to
|
||||
# decodeurl and we need to handle them
|
||||
@@ -1405,39 +1355,37 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
|
||||
test_git_uris = {
|
||||
# version pattern "X.Y.Z"
|
||||
("mx-1.0", "git://github.com/clutter-project/mx.git;branch=mx-1.4;protocol=https", "9b1db6b8060bd00b121a692f942404a24ae2960f", "", "")
|
||||
("mx-1.0", "git://github.com/clutter-project/mx.git;branch=mx-1.4;protocol=https", "9b1db6b8060bd00b121a692f942404a24ae2960f", "")
|
||||
: "1.99.4",
|
||||
# version pattern "vX.Y"
|
||||
# mirror of git.infradead.org since network issues interfered with testing
|
||||
("mtd-utils", "git://git.yoctoproject.org/mtd-utils.git;branch=master;protocol=https", "ca39eb1d98e736109c64ff9c1aa2a6ecca222d8f", "", "")
|
||||
("mtd-utils", "git://git.yoctoproject.org/mtd-utils.git;branch=master", "ca39eb1d98e736109c64ff9c1aa2a6ecca222d8f", "")
|
||||
: "1.5.0",
|
||||
# version pattern "pkg_name-X.Y"
|
||||
# mirror of git://anongit.freedesktop.org/git/xorg/proto/presentproto since network issues interfered with testing
|
||||
("presentproto", "git://git.yoctoproject.org/bbfetchtests-presentproto;branch=master;protocol=https", "24f3a56e541b0a9e6c6ee76081f441221a120ef9", "", "")
|
||||
("presentproto", "git://git.yoctoproject.org/bbfetchtests-presentproto;branch=master", "24f3a56e541b0a9e6c6ee76081f441221a120ef9", "")
|
||||
: "1.0",
|
||||
# version pattern "pkg_name-vX.Y.Z"
|
||||
("dtc", "git://git.yoctoproject.org/bbfetchtests-dtc.git;branch=master;protocol=https", "65cc4d2748a2c2e6f27f1cf39e07a5dbabd80ebf", "", "")
|
||||
("dtc", "git://git.yoctoproject.org/bbfetchtests-dtc.git;branch=master", "65cc4d2748a2c2e6f27f1cf39e07a5dbabd80ebf", "")
|
||||
: "1.4.0",
|
||||
# combination version pattern
|
||||
("sysprof", "git://gitlab.gnome.org/GNOME/sysprof.git;protocol=https;branch=master", "cd44ee6644c3641507fb53b8a2a69137f2971219", "", "")
|
||||
("sysprof", "git://gitlab.gnome.org/GNOME/sysprof.git;protocol=https;branch=master", "cd44ee6644c3641507fb53b8a2a69137f2971219", "")
|
||||
: "1.2.0",
|
||||
("u-boot-mkimage", "git://source.denx.de/u-boot/u-boot.git;branch=master;protocol=https", "62c175fbb8a0f9a926c88294ea9f7e88eb898f6c", "", "")
|
||||
("u-boot-mkimage", "git://git.denx.de/u-boot.git;branch=master;protocol=git", "62c175fbb8a0f9a926c88294ea9f7e88eb898f6c", "")
|
||||
: "2014.01",
|
||||
# version pattern "yyyymmdd"
|
||||
("mobile-broadband-provider-info", "git://gitlab.gnome.org/GNOME/mobile-broadband-provider-info.git;protocol=https;branch=master", "4ed19e11c2975105b71b956440acdb25d46a347d", "", "")
|
||||
("mobile-broadband-provider-info", "git://gitlab.gnome.org/GNOME/mobile-broadband-provider-info.git;protocol=https;branch=master", "4ed19e11c2975105b71b956440acdb25d46a347d", "")
|
||||
: "20120614",
|
||||
# packages with a valid UPSTREAM_CHECK_GITTAGREGEX
|
||||
# mirror of git://anongit.freedesktop.org/xorg/driver/xf86-video-omap since network issues interfered with testing
|
||||
("xf86-video-omap", "git://git.yoctoproject.org/bbfetchtests-xf86-video-omap;branch=master;protocol=https", "ae0394e687f1a77e966cf72f895da91840dffb8f", r"(?P<pver>(\d+\.(\d\.?)*))", "")
|
||||
("xf86-video-omap", "git://git.yoctoproject.org/bbfetchtests-xf86-video-omap;branch=master", "ae0394e687f1a77e966cf72f895da91840dffb8f", r"(?P<pver>(\d+\.(\d\.?)*))")
|
||||
: "0.4.3",
|
||||
("build-appliance-image", "git://git.yoctoproject.org/poky;branch=master;protocol=https", "b37dd451a52622d5b570183a81583cc34c2ff555", r"(?P<pver>(([0-9][\.|_]?)+[0-9]))", "")
|
||||
("build-appliance-image", "git://git.yoctoproject.org/poky;branch=master", "b37dd451a52622d5b570183a81583cc34c2ff555", r"(?P<pver>(([0-9][\.|_]?)+[0-9]))")
|
||||
: "11.0.0",
|
||||
("chkconfig-alternatives-native", "git://github.com/kergoth/chkconfig;branch=sysroot;protocol=https", "cd437ecbd8986c894442f8fce1e0061e20f04dee", r"chkconfig\-(?P<pver>((\d+[\.\-_]*)+))", "")
|
||||
("chkconfig-alternatives-native", "git://github.com/kergoth/chkconfig;branch=sysroot;protocol=https", "cd437ecbd8986c894442f8fce1e0061e20f04dee", r"chkconfig\-(?P<pver>((\d+[\.\-_]*)+))")
|
||||
: "1.3.59",
|
||||
("remake", "git://github.com/rocky/remake.git;protocol=https;branch=master", "f05508e521987c8494c92d9c2871aec46307d51d", r"(?P<pver>(\d+\.(\d+\.)*\d*(\+dbg\d+(\.\d+)*)*))", "")
|
||||
("remake", "git://github.com/rocky/remake.git;protocol=https;branch=master", "f05508e521987c8494c92d9c2871aec46307d51d", r"(?P<pver>(\d+\.(\d+\.)*\d*(\+dbg\d+(\.\d+)*)*))")
|
||||
: "3.82+dbg0.9",
|
||||
("sysdig", "git://github.com/draios/sysdig.git;branch=dev;protocol=https", "4fb6288275f567f63515df0ff0a6518043ecfa9b", r"^(?P<pver>\d+(\.\d+)+)", "10.0.0")
|
||||
: "0.28.0",
|
||||
}
|
||||
|
||||
test_wget_uris = {
|
||||
@@ -1453,9 +1401,6 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
# http://www.cmake.org/files/v2.8/cmake-2.8.12.1.tar.gz
|
||||
("cmake", "/files/v2.8/cmake-2.8.12.1.tar.gz", "", "")
|
||||
: "2.8.12.1",
|
||||
# https://download.gnome.org/sources/libxml2/2.9/libxml2-2.9.14.tar.xz
|
||||
("libxml2", "/software/libxml2/2.9/libxml2-2.9.14.tar.xz", "", "")
|
||||
: "2.10.3",
|
||||
#
|
||||
# packages with versions only in current directory
|
||||
#
|
||||
@@ -1505,13 +1450,10 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
self.assertTrue(verstring, msg="Could not find upstream version for %s" % k[0])
|
||||
r = bb.utils.vercmp_string(v, verstring)
|
||||
self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring))
|
||||
if k[4]:
|
||||
r = bb.utils.vercmp_string(verstring, k[4])
|
||||
self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], verstring, k[4]))
|
||||
|
||||
def test_wget_latest_versionstring(self):
|
||||
testdata = os.path.dirname(os.path.abspath(__file__)) + "/fetch-testdata"
|
||||
server = HTTPService(testdata, host="127.0.0.1")
|
||||
server = HTTPService(testdata)
|
||||
server.start()
|
||||
port = server.port
|
||||
try:
|
||||
@@ -1519,10 +1461,10 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
self.d.setVar("PN", k[0])
|
||||
checkuri = ""
|
||||
if k[2]:
|
||||
checkuri = "http://127.0.0.1:%s/" % port + k[2]
|
||||
checkuri = "http://localhost:%s/" % port + k[2]
|
||||
self.d.setVar("UPSTREAM_CHECK_URI", checkuri)
|
||||
self.d.setVar("UPSTREAM_CHECK_REGEX", k[3])
|
||||
url = "http://127.0.0.1:%s/" % port + k[1]
|
||||
url = "http://localhost:%s/" % port + k[1]
|
||||
ud = bb.fetch2.FetchData(url, self.d)
|
||||
pupver = ud.method.latest_versionstring(ud, self.d)
|
||||
verstring = pupver[0]
|
||||
@@ -1709,7 +1651,7 @@ class GitShallowTest(FetcherTest):
|
||||
self.d.setVar('BB_GIT_SHALLOW', '1')
|
||||
self.d.setVar('BB_GENERATE_MIRROR_TARBALLS', '0')
|
||||
self.d.setVar('BB_GENERATE_SHALLOW_TARBALLS', '1')
|
||||
self.d.setVar("__BBSRCREV_SEEN", "1")
|
||||
self.d.setVar("__BBSEENSRCREV", "1")
|
||||
|
||||
def assertRefs(self, expected_refs, cwd=None):
|
||||
if cwd is None:
|
||||
@@ -2244,7 +2186,7 @@ class GitShallowTest(FetcherTest):
|
||||
self.d.setVar('SRCREV', 'e5939ff608b95cdd4d0ab0e1935781ab9a276ac0')
|
||||
self.d.setVar('BB_GIT_SHALLOW', '1')
|
||||
self.d.setVar('BB_GENERATE_SHALLOW_TARBALLS', '1')
|
||||
fetcher = bb.fetch.Fetch(["git://git.yoctoproject.org/fstests;branch=master;protocol=https"], self.d)
|
||||
fetcher = bb.fetch.Fetch(["git://git.yoctoproject.org/fstests;branch=master"], self.d)
|
||||
fetcher.download()
|
||||
|
||||
bb.utils.remove(self.dldir + "/*.tar.gz")
|
||||
@@ -2254,12 +2196,6 @@ class GitShallowTest(FetcherTest):
|
||||
self.assertIn("fstests.doap", dir)
|
||||
|
||||
class GitLfsTest(FetcherTest):
|
||||
def skipIfNoGitLFS():
|
||||
import shutil
|
||||
if not shutil.which('git-lfs'):
|
||||
return unittest.skip('git-lfs not installed')
|
||||
return lambda f: f
|
||||
|
||||
def setUp(self):
|
||||
FetcherTest.setUp(self)
|
||||
|
||||
@@ -2273,18 +2209,14 @@ class GitLfsTest(FetcherTest):
|
||||
|
||||
self.d.setVar('SRCREV', '${AUTOREV}')
|
||||
self.d.setVar('AUTOREV', '${@bb.fetch2.get_autorev(d)}')
|
||||
self.d.setVar("__BBSRCREV_SEEN", "1")
|
||||
self.d.setVar("__BBSEENSRCREV", "1")
|
||||
|
||||
bb.utils.mkdirhier(self.srcdir)
|
||||
self.git_init(cwd=self.srcdir)
|
||||
self.commit_file('.gitattributes', '*.mp3 filter=lfs -text')
|
||||
|
||||
def commit_file(self, filename, content):
|
||||
with open(os.path.join(self.srcdir, filename), "w") as f:
|
||||
f.write(content)
|
||||
self.git(["add", filename], cwd=self.srcdir)
|
||||
self.git(["commit", "-m", "Change"], cwd=self.srcdir)
|
||||
return self.git(["rev-parse", "HEAD"], cwd=self.srcdir).strip()
|
||||
with open(os.path.join(self.srcdir, '.gitattributes'), 'wt') as attrs:
|
||||
attrs.write('*.mp3 filter=lfs -text')
|
||||
self.git(['add', '.gitattributes'], cwd=self.srcdir)
|
||||
self.git(['commit', '-m', "attributes", '.gitattributes'], cwd=self.srcdir)
|
||||
|
||||
def fetch(self, uri=None, download=True):
|
||||
uris = self.d.getVar('SRC_URI').split()
|
||||
@@ -2297,148 +2229,55 @@ class GitLfsTest(FetcherTest):
|
||||
ud = fetcher.ud[uri]
|
||||
return fetcher, ud
|
||||
|
||||
def get_real_git_lfs_file(self):
|
||||
self.d.setVar('PATH', os.environ.get('PATH'))
|
||||
fetcher, ud = self.fetch()
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
unpacked_lfs_file = os.path.join(self.d.getVar('WORKDIR'), 'git', "Cat_poster_1.jpg")
|
||||
return unpacked_lfs_file
|
||||
|
||||
@skipIfNoGitLFS()
|
||||
def test_fetch_lfs_on_srcrev_change(self):
|
||||
"""Test if fetch downloads missing LFS objects when a different revision within an existing repository is requested"""
|
||||
self.git(["lfs", "install", "--local"], cwd=self.srcdir)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def hide_upstream_repository():
|
||||
"""Hide the upstream repository to make sure that git lfs cannot pull from it"""
|
||||
temp_name = self.srcdir + ".bak"
|
||||
os.rename(self.srcdir, temp_name)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
os.rename(temp_name, self.srcdir)
|
||||
|
||||
def fetch_and_verify(revision, filename, content):
|
||||
self.d.setVar('SRCREV', revision)
|
||||
fetcher, ud = self.fetch()
|
||||
|
||||
with hide_upstream_repository():
|
||||
workdir = self.d.getVar('WORKDIR')
|
||||
fetcher.unpack(workdir)
|
||||
|
||||
with open(os.path.join(workdir, "git", filename)) as f:
|
||||
self.assertEqual(f.read(), content)
|
||||
|
||||
commit_1 = self.commit_file("a.mp3", "version 1")
|
||||
commit_2 = self.commit_file("a.mp3", "version 2")
|
||||
|
||||
self.d.setVar('SRC_URI', "git://%s;protocol=file;lfs=1;branch=master" % self.srcdir)
|
||||
|
||||
# Seed the local download folder by fetching the latest commit and verifying that the LFS contents are
|
||||
# available even when the upstream repository disappears.
|
||||
fetch_and_verify(commit_2, "a.mp3", "version 2")
|
||||
# Verify that even when an older revision is fetched, the needed LFS objects are fetched into the download
|
||||
# folder.
|
||||
fetch_and_verify(commit_1, "a.mp3", "version 1")
|
||||
|
||||
@skipIfNoGitLFS()
|
||||
@skipIfNoNetwork()
|
||||
def test_real_git_lfs_repo_succeeds_without_lfs_param(self):
|
||||
self.d.setVar('SRC_URI', "git://gitlab.com/gitlab-examples/lfs.git;protocol=https;branch=master")
|
||||
f = self.get_real_git_lfs_file()
|
||||
self.assertTrue(os.path.exists(f))
|
||||
self.assertEqual("c0baab607a97839c9a328b4310713307", bb.utils.md5_file(f))
|
||||
|
||||
@skipIfNoGitLFS()
|
||||
@skipIfNoNetwork()
|
||||
def test_real_git_lfs_repo_succeeds(self):
|
||||
self.d.setVar('SRC_URI', "git://gitlab.com/gitlab-examples/lfs.git;protocol=https;branch=master;lfs=1")
|
||||
f = self.get_real_git_lfs_file()
|
||||
self.assertTrue(os.path.exists(f))
|
||||
self.assertEqual("c0baab607a97839c9a328b4310713307", bb.utils.md5_file(f))
|
||||
|
||||
@skipIfNoGitLFS()
|
||||
@skipIfNoNetwork()
|
||||
def test_real_git_lfs_repo_skips(self):
|
||||
self.d.setVar('SRC_URI', "git://gitlab.com/gitlab-examples/lfs.git;protocol=https;branch=master;lfs=0")
|
||||
f = self.get_real_git_lfs_file()
|
||||
# This is the actual non-smudged placeholder file on the repo if git-lfs does not run
|
||||
lfs_file = (
|
||||
'version https://git-lfs.github.com/spec/v1\n'
|
||||
'oid sha256:34be66b1a39a1955b46a12588df9d5f6fc1da790e05cf01f3c7422f4bbbdc26b\n'
|
||||
'size 11423554\n'
|
||||
)
|
||||
|
||||
with open(f) as fh:
|
||||
self.assertEqual(lfs_file, fh.read())
|
||||
|
||||
@skipIfNoGitLFS()
|
||||
def test_lfs_enabled(self):
|
||||
import shutil
|
||||
|
||||
uri = 'git://%s;protocol=file;lfs=1;branch=master' % self.srcdir
|
||||
self.d.setVar('SRC_URI', uri)
|
||||
|
||||
# With git-lfs installed, test that we can fetch and unpack
|
||||
fetcher, ud = self.fetch()
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
# Careful: suppress initial attempt at downloading until
|
||||
# we know whether git-lfs is installed.
|
||||
fetcher, ud = self.fetch(uri=None, download=False)
|
||||
self.assertIsNotNone(ud.method._find_git_lfs)
|
||||
|
||||
# If git-lfs can be found, the unpack should be successful. Only
|
||||
# attempt this with the real live copy of git-lfs installed.
|
||||
if ud.method._find_git_lfs(self.d):
|
||||
fetcher.download()
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
|
||||
# If git-lfs cannot be found, the unpack should throw an error
|
||||
with self.assertRaises(bb.fetch2.FetchError):
|
||||
fetcher.download()
|
||||
ud.method._find_git_lfs = lambda d: False
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
|
||||
@skipIfNoGitLFS()
|
||||
def test_lfs_disabled(self):
|
||||
import shutil
|
||||
|
||||
uri = 'git://%s;protocol=file;lfs=0;branch=master' % self.srcdir
|
||||
self.d.setVar('SRC_URI', uri)
|
||||
|
||||
# Verify that the fetcher can survive even if the source
|
||||
# In contrast to test_lfs_enabled(), allow the implicit download
|
||||
# done by self.fetch() to occur here. The point of this test case
|
||||
# is to verify that the fetcher can survive even if the source
|
||||
# repository has Git LFS usage configured.
|
||||
fetcher, ud = self.fetch()
|
||||
self.assertIsNotNone(ud.method._find_git_lfs)
|
||||
|
||||
# If git-lfs can be found, the unpack should be successful. A
|
||||
# live copy of git-lfs is not required for this case, so
|
||||
# unconditionally forge its presence.
|
||||
ud.method._find_git_lfs = lambda d: True
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
|
||||
def test_lfs_enabled_not_installed(self):
|
||||
import shutil
|
||||
|
||||
uri = 'git://%s;protocol=file;lfs=1;branch=master' % self.srcdir
|
||||
self.d.setVar('SRC_URI', uri)
|
||||
|
||||
# Careful: suppress initial attempt at downloading
|
||||
fetcher, ud = self.fetch(uri=None, download=False)
|
||||
|
||||
# Artificially assert that git-lfs is not installed, so
|
||||
# we can verify a failure to unpack in it's absence.
|
||||
old_find_git_lfs = ud.method._find_git_lfs
|
||||
try:
|
||||
# If git-lfs cannot be found, the unpack should throw an error
|
||||
with self.assertRaises(bb.fetch2.FetchError):
|
||||
fetcher.download()
|
||||
ud.method._find_git_lfs = lambda d: False
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
finally:
|
||||
ud.method._find_git_lfs = old_find_git_lfs
|
||||
|
||||
def test_lfs_disabled_not_installed(self):
|
||||
import shutil
|
||||
|
||||
uri = 'git://%s;protocol=file;lfs=0;branch=master' % self.srcdir
|
||||
self.d.setVar('SRC_URI', uri)
|
||||
|
||||
# Careful: suppress initial attempt at downloading
|
||||
fetcher, ud = self.fetch(uri=None, download=False)
|
||||
|
||||
# Artificially assert that git-lfs is not installed, so
|
||||
# we can verify a failure to unpack in it's absence.
|
||||
old_find_git_lfs = ud.method._find_git_lfs
|
||||
try:
|
||||
# Even if git-lfs cannot be found, the unpack should be successful
|
||||
fetcher.download()
|
||||
ud.method._find_git_lfs = lambda d: False
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
finally:
|
||||
ud.method._find_git_lfs = old_find_git_lfs
|
||||
# If git-lfs cannot be found, the unpack should be successful
|
||||
ud.method._find_git_lfs = lambda d: False
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
|
||||
class GitURLWithSpacesTest(FetcherTest):
|
||||
test_git_urls = {
|
||||
@@ -2483,13 +2322,6 @@ class CrateTest(FetcherTest):
|
||||
d = self.d
|
||||
|
||||
fetcher = bb.fetch2.Fetch(uris, self.d)
|
||||
ud = fetcher.ud[fetcher.urls[0]]
|
||||
|
||||
self.assertIn("name", ud.parm)
|
||||
self.assertEqual(ud.parm["name"], "glob-0.2.11")
|
||||
self.assertIn("downloadfilename", ud.parm)
|
||||
self.assertEqual(ud.parm["downloadfilename"], "glob-0.2.11.crate")
|
||||
|
||||
fetcher.download()
|
||||
fetcher.unpack(self.tempdir)
|
||||
self.assertEqual(sorted(os.listdir(self.tempdir)), ['cargo_home', 'download' , 'unpacked'])
|
||||
@@ -2497,55 +2329,6 @@ class CrateTest(FetcherTest):
|
||||
self.assertTrue(os.path.exists(self.tempdir + "/cargo_home/bitbake/glob-0.2.11/.cargo-checksum.json"))
|
||||
self.assertTrue(os.path.exists(self.tempdir + "/cargo_home/bitbake/glob-0.2.11/src/lib.rs"))
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_crate_url_matching_recipe(self):
|
||||
|
||||
self.d.setVar('BP', 'glob-0.2.11')
|
||||
|
||||
uri = "crate://crates.io/glob/0.2.11"
|
||||
self.d.setVar('SRC_URI', uri)
|
||||
|
||||
uris = self.d.getVar('SRC_URI').split()
|
||||
d = self.d
|
||||
|
||||
fetcher = bb.fetch2.Fetch(uris, self.d)
|
||||
ud = fetcher.ud[fetcher.urls[0]]
|
||||
|
||||
self.assertIn("name", ud.parm)
|
||||
self.assertEqual(ud.parm["name"], "glob-0.2.11")
|
||||
self.assertIn("downloadfilename", ud.parm)
|
||||
self.assertEqual(ud.parm["downloadfilename"], "glob-0.2.11.crate")
|
||||
|
||||
fetcher.download()
|
||||
fetcher.unpack(self.tempdir)
|
||||
self.assertEqual(sorted(os.listdir(self.tempdir)), ['download', 'glob-0.2.11', 'unpacked'])
|
||||
self.assertEqual(sorted(os.listdir(self.tempdir + "/download")), ['glob-0.2.11.crate', 'glob-0.2.11.crate.done'])
|
||||
self.assertTrue(os.path.exists(self.tempdir + "/glob-0.2.11/src/lib.rs"))
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_crate_url_params(self):
|
||||
|
||||
uri = "crate://crates.io/aho-corasick/0.7.20;name=aho-corasick-renamed"
|
||||
self.d.setVar('SRC_URI', uri)
|
||||
|
||||
uris = self.d.getVar('SRC_URI').split()
|
||||
d = self.d
|
||||
|
||||
fetcher = bb.fetch2.Fetch(uris, self.d)
|
||||
ud = fetcher.ud[fetcher.urls[0]]
|
||||
|
||||
self.assertIn("name", ud.parm)
|
||||
self.assertEqual(ud.parm["name"], "aho-corasick-renamed")
|
||||
self.assertIn("downloadfilename", ud.parm)
|
||||
self.assertEqual(ud.parm["downloadfilename"], "aho-corasick-0.7.20.crate")
|
||||
|
||||
fetcher.download()
|
||||
fetcher.unpack(self.tempdir)
|
||||
self.assertEqual(sorted(os.listdir(self.tempdir)), ['cargo_home', 'download' , 'unpacked'])
|
||||
self.assertEqual(sorted(os.listdir(self.tempdir + "/download")), ['aho-corasick-0.7.20.crate', 'aho-corasick-0.7.20.crate.done'])
|
||||
self.assertTrue(os.path.exists(self.tempdir + "/cargo_home/bitbake/aho-corasick-0.7.20/.cargo-checksum.json"))
|
||||
self.assertTrue(os.path.exists(self.tempdir + "/cargo_home/bitbake/aho-corasick-0.7.20/src/lib.rs"))
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_crate_url_multi(self):
|
||||
|
||||
@@ -2556,19 +2339,6 @@ class CrateTest(FetcherTest):
|
||||
d = self.d
|
||||
|
||||
fetcher = bb.fetch2.Fetch(uris, self.d)
|
||||
ud = fetcher.ud[fetcher.urls[0]]
|
||||
|
||||
self.assertIn("name", ud.parm)
|
||||
self.assertEqual(ud.parm["name"], "glob-0.2.11")
|
||||
self.assertIn("downloadfilename", ud.parm)
|
||||
self.assertEqual(ud.parm["downloadfilename"], "glob-0.2.11.crate")
|
||||
|
||||
ud = fetcher.ud[fetcher.urls[1]]
|
||||
self.assertIn("name", ud.parm)
|
||||
self.assertEqual(ud.parm["name"], "time-0.1.35")
|
||||
self.assertIn("downloadfilename", ud.parm)
|
||||
self.assertEqual(ud.parm["downloadfilename"], "time-0.1.35.crate")
|
||||
|
||||
fetcher.download()
|
||||
fetcher.unpack(self.tempdir)
|
||||
self.assertEqual(sorted(os.listdir(self.tempdir)), ['cargo_home', 'download' , 'unpacked'])
|
||||
@@ -2578,18 +2348,6 @@ class CrateTest(FetcherTest):
|
||||
self.assertTrue(os.path.exists(self.tempdir + "/cargo_home/bitbake/time-0.1.35/.cargo-checksum.json"))
|
||||
self.assertTrue(os.path.exists(self.tempdir + "/cargo_home/bitbake/time-0.1.35/src/lib.rs"))
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_crate_incorrect_cksum(self):
|
||||
uri = "crate://crates.io/aho-corasick/0.7.20"
|
||||
self.d.setVar('SRC_URI', uri)
|
||||
self.d.setVarFlag("SRC_URI", "aho-corasick-0.7.20.sha256sum", hashlib.sha256("Invalid".encode("utf-8")).hexdigest())
|
||||
|
||||
uris = self.d.getVar('SRC_URI').split()
|
||||
|
||||
fetcher = bb.fetch2.Fetch(uris, self.d)
|
||||
with self.assertRaisesRegex(bb.fetch2.FetchError, "Fetcher failure for URL"):
|
||||
fetcher.download()
|
||||
|
||||
class NPMTest(FetcherTest):
|
||||
def skipIfNoNpm():
|
||||
import shutil
|
||||
@@ -2851,45 +2609,6 @@ class NPMTest(FetcherTest):
|
||||
self.assertTrue(os.path.exists(os.path.join(self.sdir, 'node_modules', 'array-flatten', 'node_modules', 'content-type', 'package.json')))
|
||||
self.assertTrue(os.path.exists(os.path.join(self.sdir, 'node_modules', 'array-flatten', 'node_modules', 'content-type', 'node_modules', 'cookie', 'package.json')))
|
||||
|
||||
@skipIfNoNpm()
|
||||
@skipIfNoNetwork()
|
||||
def test_npmsw_git(self):
|
||||
swfile = self.create_shrinkwrap_file({
|
||||
'dependencies': {
|
||||
'cookie': {
|
||||
'version': 'github:jshttp/cookie.git#aec1177c7da67e3b3273df96cf476824dbc9ae09',
|
||||
'from': 'github:jshttp/cookie.git'
|
||||
}
|
||||
}
|
||||
})
|
||||
fetcher = bb.fetch.Fetch(['npmsw://' + swfile], self.d)
|
||||
fetcher.download()
|
||||
self.assertTrue(os.path.exists(os.path.join(self.dldir, 'git2', 'github.com.jshttp.cookie.git')))
|
||||
|
||||
swfile = self.create_shrinkwrap_file({
|
||||
'dependencies': {
|
||||
'cookie': {
|
||||
'version': 'jshttp/cookie.git#aec1177c7da67e3b3273df96cf476824dbc9ae09',
|
||||
'from': 'jshttp/cookie.git'
|
||||
}
|
||||
}
|
||||
})
|
||||
fetcher = bb.fetch.Fetch(['npmsw://' + swfile], self.d)
|
||||
fetcher.download()
|
||||
self.assertTrue(os.path.exists(os.path.join(self.dldir, 'git2', 'github.com.jshttp.cookie.git')))
|
||||
|
||||
swfile = self.create_shrinkwrap_file({
|
||||
'dependencies': {
|
||||
'nodejs': {
|
||||
'version': 'gitlab:gitlab-examples/nodejs.git#892a1f16725e56cc3a2cb0d677be42935c8fc262',
|
||||
'from': 'gitlab:gitlab-examples/nodejs'
|
||||
}
|
||||
}
|
||||
})
|
||||
fetcher = bb.fetch.Fetch(['npmsw://' + swfile], self.d)
|
||||
fetcher.download()
|
||||
self.assertTrue(os.path.exists(os.path.join(self.dldir, 'git2', 'gitlab.com.gitlab-examples.nodejs.git')))
|
||||
|
||||
@skipIfNoNpm()
|
||||
@skipIfNoNetwork()
|
||||
def test_npmsw_dev(self):
|
||||
@@ -3097,9 +2816,9 @@ class NPMTest(FetcherTest):
|
||||
class GitSharedTest(FetcherTest):
|
||||
def setUp(self):
|
||||
super(GitSharedTest, self).setUp()
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake;branch=master;protocol=https"
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake;branch=master"
|
||||
self.d.setVar('SRCREV', '82ea737a0b42a8b53e11c9cde141e9e9c0bd8c40')
|
||||
self.d.setVar("__BBSRCREV_SEEN", "1")
|
||||
self.d.setVar("__BBSEENSRCREV", "1")
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_shared_unpack(self):
|
||||
@@ -3130,59 +2849,31 @@ class FetchPremirroronlyLocalTest(FetcherTest):
|
||||
os.mkdir(self.mirrordir)
|
||||
self.reponame = "bitbake"
|
||||
self.gitdir = os.path.join(self.tempdir, "git", self.reponame)
|
||||
self.recipe_url = "git://git.fake.repo/bitbake;branch=master;protocol=https"
|
||||
self.recipe_url = "git://git.fake.repo/bitbake"
|
||||
self.d.setVar("BB_FETCH_PREMIRRORONLY", "1")
|
||||
self.d.setVar("BB_NO_NETWORK", "1")
|
||||
self.d.setVar("PREMIRRORS", self.recipe_url + " " + "file://{}".format(self.mirrordir) + " \n")
|
||||
self.mirrorname = "git2_git.fake.repo.bitbake.tar.gz"
|
||||
self.mirrorfile = os.path.join(self.mirrordir, self.mirrorname)
|
||||
self.testfilename = "bitbake-fetch.test"
|
||||
|
||||
def make_git_repo(self):
|
||||
self.mirrorname = "git2_git.fake.repo.bitbake.tar.gz"
|
||||
recipeurl = "git:/git.fake.repo/bitbake"
|
||||
os.makedirs(self.gitdir)
|
||||
self.git_init(cwd=self.gitdir)
|
||||
self.git("init", self.gitdir)
|
||||
for i in range(0):
|
||||
self.git_new_commit()
|
||||
bb.process.run('tar -czvf {} .'.format(os.path.join(self.mirrordir, self.mirrorname)), cwd = self.gitdir)
|
||||
|
||||
def git_new_commit(self):
|
||||
import random
|
||||
testfilename = "bibake-fetch.test"
|
||||
os.unlink(os.path.join(self.mirrordir, self.mirrorname))
|
||||
branch = self.git("branch --show-current", self.gitdir).split()
|
||||
with open(os.path.join(self.gitdir, self.testfilename), "w") as testfile:
|
||||
testfile.write("File {} from branch {}; Useless random data {}".format(self.testfilename, branch, random.random()))
|
||||
self.git("add {}".format(self.testfilename), self.gitdir)
|
||||
self.git("commit -a -m \"This random commit {} in branch {}. I'm useless.\"".format(random.random(), branch), self.gitdir)
|
||||
with open(os.path.join(self.gitdir, testfilename), "w") as testfile:
|
||||
testfile.write("Useless random data {}".format(random.random()))
|
||||
self.git("add {}".format(testfilename), self.gitdir)
|
||||
self.git("commit -a -m \"This random commit {}. I'm useless.\"".format(random.random()), self.gitdir)
|
||||
bb.process.run('tar -czvf {} .'.format(os.path.join(self.mirrordir, self.mirrorname)), cwd = self.gitdir)
|
||||
return self.git("rev-parse HEAD", self.gitdir).strip()
|
||||
|
||||
def git_new_branch(self, name):
|
||||
self.git_new_commit()
|
||||
head = self.git("rev-parse HEAD", self.gitdir).strip()
|
||||
self.git("checkout -b {}".format(name), self.gitdir)
|
||||
newrev = self.git_new_commit()
|
||||
self.git("checkout {}".format(head), self.gitdir)
|
||||
return newrev
|
||||
|
||||
def test_mirror_multiple_fetches(self):
|
||||
self.make_git_repo()
|
||||
self.d.setVar("SRCREV", self.git_new_commit())
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
fetcher.download()
|
||||
fetcher.unpack(self.unpackdir)
|
||||
## New commit in premirror. it's not in the download_dir
|
||||
self.d.setVar("SRCREV", self.git_new_commit())
|
||||
fetcher2 = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
fetcher2.download()
|
||||
fetcher2.unpack(self.unpackdir)
|
||||
## New commit in premirror. it's not in the download_dir
|
||||
self.d.setVar("SRCREV", self.git_new_commit())
|
||||
fetcher3 = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
fetcher3.download()
|
||||
fetcher3.unpack(self.unpackdir)
|
||||
|
||||
|
||||
def test_mirror_commit_nonexistent(self):
|
||||
self.make_git_repo()
|
||||
self.d.setVar("SRCREV", "0"*40)
|
||||
@@ -3203,59 +2894,6 @@ class FetchPremirroronlyLocalTest(FetcherTest):
|
||||
with self.assertRaises(bb.fetch2.NetworkAccess):
|
||||
fetcher.download()
|
||||
|
||||
def test_mirror_tarball_multiple_branches(self):
|
||||
"""
|
||||
test if PREMIRRORS can handle multiple name/branches correctly
|
||||
both branches have required revisions
|
||||
"""
|
||||
self.make_git_repo()
|
||||
branch1rev = self.git_new_branch("testbranch1")
|
||||
branch2rev = self.git_new_branch("testbranch2")
|
||||
self.recipe_url = "git://git.fake.repo/bitbake;branch=testbranch1,testbranch2;protocol=https;name=branch1,branch2"
|
||||
self.d.setVar("SRCREV_branch1", branch1rev)
|
||||
self.d.setVar("SRCREV_branch2", branch2rev)
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
self.assertTrue(os.path.exists(self.mirrorfile), "Mirror file doesn't exist")
|
||||
fetcher.download()
|
||||
fetcher.unpack(os.path.join(self.tempdir, "unpacked"))
|
||||
unpacked = os.path.join(self.tempdir, "unpacked", "git", self.testfilename)
|
||||
self.assertTrue(os.path.exists(unpacked), "Repo has not been unpackaged properly!")
|
||||
with open(unpacked, 'r') as f:
|
||||
content = f.read()
|
||||
## We expect to see testbranch1 in the file, not master, not testbranch2
|
||||
self.assertTrue(content.find("testbranch1") != -1, "Wrong branch has been checked out!")
|
||||
|
||||
def test_mirror_tarball_multiple_branches_nobranch(self):
|
||||
"""
|
||||
test if PREMIRRORS can handle multiple name/branches correctly
|
||||
Unbalanced name/branches raises ParameterError
|
||||
"""
|
||||
self.make_git_repo()
|
||||
branch1rev = self.git_new_branch("testbranch1")
|
||||
branch2rev = self.git_new_branch("testbranch2")
|
||||
self.recipe_url = "git://git.fake.repo/bitbake;branch=testbranch1;protocol=https;name=branch1,branch2"
|
||||
self.d.setVar("SRCREV_branch1", branch1rev)
|
||||
self.d.setVar("SRCREV_branch2", branch2rev)
|
||||
with self.assertRaises(bb.fetch2.ParameterError):
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
|
||||
def test_mirror_tarball_multiple_branches_norev(self):
|
||||
"""
|
||||
test if PREMIRRORS can handle multiple name/branches correctly
|
||||
one of the branches specifies non existing SRCREV
|
||||
"""
|
||||
self.make_git_repo()
|
||||
branch1rev = self.git_new_branch("testbranch1")
|
||||
branch2rev = self.git_new_branch("testbranch2")
|
||||
self.recipe_url = "git://git.fake.repo/bitbake;branch=testbranch1,testbranch2;protocol=https;name=branch1,branch2"
|
||||
self.d.setVar("SRCREV_branch1", branch1rev)
|
||||
self.d.setVar("SRCREV_branch2", "0"*40)
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
self.assertTrue(os.path.exists(self.mirrorfile), "Mirror file doesn't exist")
|
||||
with self.assertRaises(bb.fetch2.NetworkAccess):
|
||||
fetcher.download()
|
||||
|
||||
|
||||
class FetchPremirroronlyNetworkTest(FetcherTest):
|
||||
|
||||
def setUp(self):
|
||||
@@ -3265,7 +2903,7 @@ class FetchPremirroronlyNetworkTest(FetcherTest):
|
||||
self.reponame = "fstests"
|
||||
self.clonedir = os.path.join(self.tempdir, "git")
|
||||
self.gitdir = os.path.join(self.tempdir, "git", "{}.git".format(self.reponame))
|
||||
self.recipe_url = "git://git.yoctoproject.org/fstests;protocol=https"
|
||||
self.recipe_url = "git://git.yoctoproject.org/fstests"
|
||||
self.d.setVar("BB_FETCH_PREMIRRORONLY", "1")
|
||||
self.d.setVar("BB_NO_NETWORK", "0")
|
||||
self.d.setVar("PREMIRRORS", self.recipe_url + " " + "file://{}".format(self.mirrordir) + " \n")
|
||||
@@ -3295,50 +2933,6 @@ class FetchPremirroronlyNetworkTest(FetcherTest):
|
||||
with self.assertRaises(bb.fetch2.NetworkAccess):
|
||||
fetcher.download()
|
||||
|
||||
class FetchPremirroronlyMercurialTest(FetcherTest):
|
||||
""" Test for premirrors with mercurial repos
|
||||
the test covers also basic hg:// clone (see fetch_and_create_tarball
|
||||
"""
|
||||
def skipIfNoHg():
|
||||
import shutil
|
||||
if not shutil.which('hg'):
|
||||
return unittest.skip('Mercurial not installed')
|
||||
return lambda f: f
|
||||
|
||||
def setUp(self):
|
||||
super(FetchPremirroronlyMercurialTest, self).setUp()
|
||||
self.mirrordir = os.path.join(self.tempdir, "mirrors")
|
||||
os.mkdir(self.mirrordir)
|
||||
self.reponame = "libgnt"
|
||||
self.clonedir = os.path.join(self.tempdir, "hg")
|
||||
self.recipe_url = "hg://keep.imfreedom.org/libgnt;module=libgnt"
|
||||
self.d.setVar("SRCREV", "53e8b422faaf")
|
||||
self.mirrorname = "hg_libgnt_keep.imfreedom.org_.libgnt.tar.gz"
|
||||
|
||||
def fetch_and_create_tarball(self):
|
||||
"""
|
||||
Ask bitbake to download repo and prepare mirror tarball for us
|
||||
"""
|
||||
self.d.setVar("BB_GENERATE_MIRROR_TARBALLS", "1")
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
fetcher.download()
|
||||
mirrorfile = os.path.join(self.d.getVar("DL_DIR"), self.mirrorname)
|
||||
self.assertTrue(os.path.exists(mirrorfile), "Mirror tarball {} has not been created".format(mirrorfile))
|
||||
## moving tarball to mirror directory
|
||||
os.rename(mirrorfile, os.path.join(self.mirrordir, self.mirrorname))
|
||||
self.d.setVar("BB_GENERATE_MIRROR_TARBALLS", "0")
|
||||
|
||||
|
||||
@skipIfNoNetwork()
|
||||
@skipIfNoHg()
|
||||
def test_premirror_mercurial(self):
|
||||
self.fetch_and_create_tarball()
|
||||
self.d.setVar("PREMIRRORS", self.recipe_url + " " + "file://{}".format(self.mirrordir) + " \n")
|
||||
self.d.setVar("BB_FETCH_PREMIRRORONLY", "1")
|
||||
self.d.setVar("BB_NO_NETWORK", "1")
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
fetcher.download()
|
||||
|
||||
class FetchPremirroronlyBrokenTarball(FetcherTest):
|
||||
|
||||
def setUp(self):
|
||||
@@ -3347,7 +2941,7 @@ class FetchPremirroronlyBrokenTarball(FetcherTest):
|
||||
os.mkdir(self.mirrordir)
|
||||
self.reponame = "bitbake"
|
||||
self.gitdir = os.path.join(self.tempdir, "git", self.reponame)
|
||||
self.recipe_url = "git://git.fake.repo/bitbake;protocol=https"
|
||||
self.recipe_url = "git://git.fake.repo/bitbake"
|
||||
self.d.setVar("BB_FETCH_PREMIRRORONLY", "1")
|
||||
self.d.setVar("BB_NO_NETWORK", "1")
|
||||
self.d.setVar("PREMIRRORS", self.recipe_url + " " + "file://{}".format(self.mirrordir) + " \n")
|
||||
@@ -3359,7 +2953,7 @@ class FetchPremirroronlyBrokenTarball(FetcherTest):
|
||||
import sys
|
||||
self.d.setVar("SRCREV", "0"*40)
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
with self.assertRaises(bb.fetch2.FetchError), self.assertLogs() as logs:
|
||||
with self.assertRaises(bb.fetch2.FetchError):
|
||||
fetcher.download()
|
||||
output = "".join(logs.output)
|
||||
self.assertFalse(" not a git repository (or any parent up to mount point /)" in output)
|
||||
stdout = sys.stdout.getvalue()
|
||||
self.assertFalse(" not a git repository (or any parent up to mount point /)" in stdout)
|
||||
|
||||
@@ -186,16 +186,14 @@ deltask ${EMPTYVAR}
"""
def test_parse_addtask_deltask(self):
import sys
f = self.parsehelper(self.addtask_deltask)
d = bb.parse.handle(f.name, self.d)['']

with self.assertLogs() as logs:
f = self.parsehelper(self.addtask_deltask)
d = bb.parse.handle(f.name, self.d)['']

output = "".join(logs.output)
self.assertTrue("addtask contained multiple 'before' keywords" in output)
self.assertTrue("addtask contained multiple 'after' keywords" in output)
self.assertTrue('addtask ignored: " do_patch"' in output)
#self.assertTrue('dependent task do_foo for do_patch does not exist' in output)
stdout = sys.stdout.getvalue()
self.assertTrue("addtask contained multiple 'before' keywords" in stdout)
self.assertTrue("addtask contained multiple 'after' keywords" in stdout)
self.assertTrue('addtask ignored: " do_patch"' in stdout)
#self.assertTrue('dependent task do_foo for do_patch does not exist' in stdout)
broken_multiline_comment = """
|
||||
# First line of comment \\
|
||||
@@ -220,124 +218,3 @@ VAR = " \\
|
||||
with self.assertRaises(bb.BBHandledException):
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
|
||||
at_sign_in_var_flag = """
|
||||
A[flag@.service] = "nonet"
|
||||
B[flag@.target] = "ntb"
|
||||
C[f] = "flag"
|
||||
|
||||
unset A[flag@.service]
|
||||
"""
|
||||
def test_parse_at_sign_in_var_flag(self):
|
||||
f = self.parsehelper(self.at_sign_in_var_flag)
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
self.assertEqual(d.getVar("A"), None)
|
||||
self.assertEqual(d.getVar("B"), None)
|
||||
self.assertEqual(d.getVarFlag("A","flag@.service"), None)
|
||||
self.assertEqual(d.getVarFlag("B","flag@.target"), "ntb")
|
||||
self.assertEqual(d.getVarFlag("C","f"), "flag")
|
||||
|
||||
def test_parse_invalid_at_sign_in_var_flag(self):
|
||||
invalid_at_sign = self.at_sign_in_var_flag.replace("B[f", "B[@f")
|
||||
f = self.parsehelper(invalid_at_sign)
|
||||
with self.assertRaises(bb.parse.ParseError):
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
export_function_recipe = """
|
||||
inherit someclass
|
||||
"""
|
||||
|
||||
export_function_recipe2 = """
|
||||
inherit someclass
|
||||
|
||||
do_compile () {
|
||||
false
|
||||
}
|
||||
|
||||
python do_compilepython () {
|
||||
bb.note("Something else")
|
||||
}
|
||||
|
||||
"""
|
||||
export_function_class = """
|
||||
someclass_do_compile() {
|
||||
true
|
||||
}
|
||||
|
||||
python someclass_do_compilepython () {
|
||||
bb.note("Something")
|
||||
}
|
||||
|
||||
EXPORT_FUNCTIONS do_compile do_compilepython
|
||||
"""
|
||||
|
||||
export_function_class2 = """
|
||||
secondclass_do_compile() {
|
||||
true
|
||||
}
|
||||
|
||||
python secondclass_do_compilepython () {
|
||||
bb.note("Something")
|
||||
}
|
||||
|
||||
EXPORT_FUNCTIONS do_compile do_compilepython
|
||||
"""
|
||||
|
||||
def test_parse_export_functions(self):
|
||||
def check_function_flags(d):
|
||||
self.assertEqual(d.getVarFlag("do_compile", "func"), 1)
|
||||
self.assertEqual(d.getVarFlag("do_compilepython", "func"), 1)
|
||||
self.assertEqual(d.getVarFlag("do_compile", "python"), None)
|
||||
self.assertEqual(d.getVarFlag("do_compilepython", "python"), "1")
|
||||
|
||||
with tempfile.TemporaryDirectory() as tempdir:
|
||||
self.d.setVar("__bbclasstype", "recipe")
|
||||
recipename = tempdir + "/recipe.bb"
|
||||
os.makedirs(tempdir + "/classes")
|
||||
with open(tempdir + "/classes/someclass.bbclass", "w") as f:
|
||||
f.write(self.export_function_class)
|
||||
f.flush()
|
||||
with open(tempdir + "/classes/secondclass.bbclass", "w") as f:
|
||||
f.write(self.export_function_class2)
|
||||
f.flush()
|
||||
|
||||
with open(recipename, "w") as f:
|
||||
f.write(self.export_function_recipe)
|
||||
f.flush()
|
||||
os.chdir(tempdir)
|
||||
d = bb.parse.handle(recipename, bb.data.createCopy(self.d))['']
|
||||
self.assertIn("someclass_do_compile", d.getVar("do_compile"))
|
||||
self.assertIn("someclass_do_compilepython", d.getVar("do_compilepython"))
|
||||
check_function_flags(d)
|
||||
|
||||
recipename2 = tempdir + "/recipe2.bb"
|
||||
with open(recipename2, "w") as f:
|
||||
f.write(self.export_function_recipe2)
|
||||
f.flush()
|
||||
|
||||
d = bb.parse.handle(recipename2, bb.data.createCopy(self.d))['']
|
||||
self.assertNotIn("someclass_do_compile", d.getVar("do_compile"))
|
||||
self.assertNotIn("someclass_do_compilepython", d.getVar("do_compilepython"))
|
||||
self.assertIn("false", d.getVar("do_compile"))
|
||||
self.assertIn("else", d.getVar("do_compilepython"))
|
||||
check_function_flags(d)
|
||||
|
||||
with open(recipename, "a+") as f:
|
||||
f.write("\ninherit secondclass\n")
|
||||
f.flush()
|
||||
with open(recipename2, "a+") as f:
|
||||
f.write("\ninherit secondclass\n")
|
||||
f.flush()
|
||||
|
||||
d = bb.parse.handle(recipename, bb.data.createCopy(self.d))['']
|
||||
self.assertIn("secondclass_do_compile", d.getVar("do_compile"))
|
||||
self.assertIn("secondclass_do_compilepython", d.getVar("do_compilepython"))
|
||||
check_function_flags(d)
|
||||
|
||||
d = bb.parse.handle(recipename2, bb.data.createCopy(self.d))['']
|
||||
self.assertNotIn("someclass_do_compile", d.getVar("do_compile"))
|
||||
self.assertNotIn("someclass_do_compilepython", d.getVar("do_compilepython"))
|
||||
self.assertIn("false", d.getVar("do_compile"))
|
||||
self.assertIn("else", d.getVar("do_compilepython"))
|
||||
check_function_flags(d)
|
||||
|
||||
|
||||
@@ -288,7 +288,7 @@ class RunQueueTests(unittest.TestCase):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
extraenv = {
|
||||
"BBMULTICONFIG" : "mc-1 mc_2",
|
||||
"BB_SIGNATURE_HANDLER" : "basichash",
|
||||
"BB_SIGNATURE_HANDLER" : "TestMulticonfigDepends",
|
||||
"EXTRA_BBFILES": "${COREBASE}/recipes/fails-mc/*.bb",
|
||||
}
|
||||
tasks = self.run_bitbakecmd(["bitbake", "mc:mc-1:f1"], tempdir, "", extraenv=extraenv, cleanup=True)
|
||||
|
||||
@@ -17,12 +17,75 @@ import bb.siggen

class SiggenTest(unittest.TestCase):

def test_build_pnid(self):
tests = {
('', 'helloworld', 'do_sometask') : 'helloworld:do_sometask',
('XX', 'helloworld', 'do_sometask') : 'mc:XX:helloworld:do_sometask',
}
def test_clean_basepath_simple_target_basepath(self):
basepath = '/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask'
expected_cleaned = 'helloworld/helloworld_1.2.3.bb:do_sometask'

for t in tests:
self.assertEqual(bb.siggen.build_pnid(*t), tests[t])
actual_cleaned = bb.siggen.clean_basepath(basepath)

self.assertEqual(actual_cleaned, expected_cleaned)

def test_clean_basepath_basic_virtual_basepath(self):
basepath = 'virtual:something:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask'
expected_cleaned = 'helloworld/helloworld_1.2.3.bb:do_sometask:virtual:something'

actual_cleaned = bb.siggen.clean_basepath(basepath)

self.assertEqual(actual_cleaned, expected_cleaned)

def test_clean_basepath_mc_basepath(self):
basepath = 'mc:somemachine:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask'
expected_cleaned = 'helloworld/helloworld_1.2.3.bb:do_sometask:mc:somemachine'

actual_cleaned = bb.siggen.clean_basepath(basepath)

self.assertEqual(actual_cleaned, expected_cleaned)

def test_clean_basepath_virtual_long_prefix_basepath(self):
basepath = 'virtual:something:A:B:C:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask'
expected_cleaned = 'helloworld/helloworld_1.2.3.bb:do_sometask:virtual:something:A:B:C'

actual_cleaned = bb.siggen.clean_basepath(basepath)

self.assertEqual(actual_cleaned, expected_cleaned)

def test_clean_basepath_mc_virtual_basepath(self):
basepath = 'mc:somemachine:virtual:something:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask'
expected_cleaned = 'helloworld/helloworld_1.2.3.bb:do_sometask:virtual:something:mc:somemachine'

actual_cleaned = bb.siggen.clean_basepath(basepath)

self.assertEqual(actual_cleaned, expected_cleaned)

def test_clean_basepath_mc_virtual_long_prefix_basepath(self):
basepath = 'mc:X:virtual:something:C:B:A:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask'
expected_cleaned = 'helloworld/helloworld_1.2.3.bb:do_sometask:virtual:something:C:B:A:mc:X'

actual_cleaned = bb.siggen.clean_basepath(basepath)

self.assertEqual(actual_cleaned, expected_cleaned)


# def test_clean_basepath_performance(self):
# input_basepaths = [
# 'mc:X:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask',
# 'mc:X:virtual:something:C:B:A:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask',
# 'virtual:something:C:B:A:/different/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask',
# 'virtual:something:A:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask',
# '/this/is/most/common/input/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask',
# '/and/should/be/tested/with/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask',
# '/more/weight/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask',
# ]

# time_start = time.time()

# i = 2000000
# while i >= 0:
# for basepath in input_basepaths:
# bb.siggen.clean_basepath(basepath)
# i -= 1

# elapsed = time.time() - time_start
# print('{} ({}s)'.format(self.id(), round(elapsed, 3)))

# self.assertTrue(False)

@@ -10,7 +10,6 @@
import logging
import os
import sys
import time
import atexit
import re
from collections import OrderedDict, defaultdict

@@ -325,11 +324,11 @@ class Tinfoil:
self.recipes_parsed = False
self.quiet = 0
self.oldhandlers = self.logger.handlers[:]
self.localhandlers = []
if setup_logging:
# This is the *client-side* logger, nothing to do with
# logging messages from the server
bb.msg.logger_create('BitBake', output)
self.localhandlers = []
for handler in self.logger.handlers:
if handler not in self.oldhandlers:
self.localhandlers.append(handler)

@@ -449,12 +448,6 @@ class Tinfoil:
self.run_actions(config_params)
self.recipes_parsed = True

def modified_files(self):
"""
Notify the server it needs to revalidate it's caches since the client has modified files
"""
self.run_command("revalidateCaches")

def run_command(self, command, *params, handle_events=True):
"""
Run a command on the server (as implemented in bb.command).
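
For context, a minimal sketch of how a tinfoil client might call the modified_files() helper that appears in the hunk above after it has edited metadata on disk; the config-only session and the variable lookup below are purely illustrative, not part of the change:

import bb.tinfoil

with bb.tinfoil.Tinfoil() as tinfoil:
    tinfoil.prepare(config_only=True)
    # ... the client edits conf/bblayers.conf or other metadata on disk here ...
    # Tell the cooker its caches may now be stale before issuing further commands.
    tinfoil.modified_files()
    print(tinfoil.config_data.getVar("BBLAYERS"))
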
@@ -736,7 +729,6 @@ class Tinfoil:

ret = self.run_command('buildTargets', targets, task)
if handle_events:
lastevent = time.time()
result = False
# Borrowed from knotty, instead somewhat hackily we use the helper
# as the object to store "shutdown" on

@@ -749,7 +741,6 @@ class Tinfoil:
try:
event = self.wait_event(0.25)
if event:
lastevent = time.time()
if event_callback and event_callback(event):
continue
if helper.eventHandler(event):

@@ -782,7 +773,7 @@ class Tinfoil:
if isinstance(event, bb.command.CommandCompleted):
result = True
break
if isinstance(event, (bb.command.CommandFailed, bb.command.CommandExit)):
if isinstance(event, bb.command.CommandFailed):
self.logger.error(str(event))
result = False
break

@@ -794,13 +785,10 @@ class Tinfoil:
self.logger.error(str(event))
result = False
break

elif helper.shutdown > 1:
break
termfilter.updateFooter()
if time.time() > (lastevent + (3*60)):
if not self.run_command('ping', handle_events=False):
print("\nUnable to ping server and no events, closing down...\n")
return False
except KeyboardInterrupt:
termfilter.clearFooter()
if helper.shutdown == 1:

@@ -1746,6 +1746,7 @@ class BuildInfoHelper(object):

buildname = self.server.runCommand(['getVariable', 'BUILDNAME'])[0]
machine = self.server.runCommand(['getVariable', 'MACHINE'])[0]
image_name = self.server.runCommand(['getVariable', 'IMAGE_NAME'])[0]

# location of the manifest files for this build;
# note that this file is only produced if an image is produced

@@ -1766,18 +1767,6 @@ class BuildInfoHelper(object):
# filter out anything which isn't an image target
image_targets = [target for target in targets if target.is_image]

if len(image_targets) > 0:
#if there are image targets retrieve image_name
image_name = self.server.runCommand(['getVariable', 'IMAGE_NAME'])[0]
if not image_name:
#When build target is an image and image_name is not found as an environment variable
logger.info("IMAGE_NAME not found, extracting from bitbake command")
cmd = self.server.runCommand(['getVariable','BB_CMDLINE'])[0]
#filter out tokens that are command line options
cmd = [token for token in cmd if not token.startswith('-')]
image_name = cmd[1].split(':', 1)[0] # remove everything after : in image name
logger.info("IMAGE_NAME found as : %s " % image_name)

for image_target in image_targets:
# this is set to True if we find at least one file relating to
# this target; if this remains False after the scan, we copy the

@@ -1,86 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
# This file re-uses code spread throughout other Bitbake source files.
|
||||
# As such, all other copyrights belong to their own right holders.
|
||||
#
|
||||
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import pickle
|
||||
import codecs
|
||||
|
||||
|
||||
class EventPlayer:
|
||||
"""Emulate a connection to a bitbake server."""
|
||||
|
||||
def __init__(self, eventfile, variables):
|
||||
self.eventfile = eventfile
|
||||
self.variables = variables
|
||||
self.eventmask = []
|
||||
|
||||
def waitEvent(self, _timeout):
|
||||
"""Read event from the file."""
|
||||
line = self.eventfile.readline().strip()
|
||||
if not line:
|
||||
return
|
||||
try:
|
||||
decodedline = json.loads(line)
|
||||
if 'allvariables' in decodedline:
|
||||
self.variables = decodedline['allvariables']
|
||||
return
|
||||
if not 'vars' in decodedline:
|
||||
raise ValueError
|
||||
event_str = decodedline['vars'].encode('utf-8')
|
||||
event = pickle.loads(codecs.decode(event_str, 'base64'))
|
||||
event_name = "%s.%s" % (event.__module__, event.__class__.__name__)
|
||||
if event_name not in self.eventmask:
|
||||
return
|
||||
return event
|
||||
except ValueError as err:
|
||||
print("Failed loading ", line)
|
||||
raise err
|
||||
|
||||
def runCommand(self, command_line):
|
||||
"""Emulate running a command on the server."""
|
||||
name = command_line[0]
|
||||
|
||||
if name == "getVariable":
|
||||
var_name = command_line[1]
|
||||
variable = self.variables.get(var_name)
|
||||
if variable:
|
||||
return variable['v'], None
|
||||
return None, "Missing variable %s" % var_name
|
||||
|
||||
elif name == "getAllKeysWithFlags":
|
||||
dump = {}
|
||||
flaglist = command_line[1]
|
||||
for key, val in self.variables.items():
|
||||
try:
|
||||
if not key.startswith("__"):
|
||||
dump[key] = {
|
||||
'v': val['v'],
|
||||
'history' : val['history'],
|
||||
}
|
||||
for flag in flaglist:
|
||||
dump[key][flag] = val[flag]
|
||||
except Exception as err:
|
||||
print(err)
|
||||
return (dump, None)
|
||||
|
||||
elif name == 'setEventMask':
|
||||
self.eventmask = command_line[-1]
|
||||
return True, None
|
||||
|
||||
else:
|
||||
raise Exception("Command %s not implemented" % command_line[0])
|
||||
|
||||
def getEventHandle(self):
|
||||
"""
|
||||
This method is called by toasterui.
|
||||
The return value is passed to self.runCommand but not used there.
|
||||
"""
|
||||
pass
|
||||
@@ -179,7 +179,7 @@ class TerminalFilter(object):
new[3] = new[3] & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSADRAIN, new)
curses.setupterm()
if curses.tigetnum("colors") > 2 and os.environ.get('NO_COLOR', '') == '':
if curses.tigetnum("colors") > 2:
for h in handlers:
try:
h.formatter.enable_color()
@@ -420,11 +420,6 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
except bb.BBHandledException:
|
||||
drain_events_errorhandling(eventHandler)
|
||||
return 1
|
||||
except Exception as e:
|
||||
# bitbake-server comms failure
|
||||
early_logger = bb.msg.logger_create('bitbake', sys.stdout)
|
||||
early_logger.fatal("Attempting to set server environment: %s", e)
|
||||
return 1
|
||||
|
||||
if params.options.quiet == 0:
|
||||
console_loglevel = loglevel
|
||||
@@ -590,12 +585,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
return
|
||||
|
||||
llevel, debug_domains = bb.msg.constructLogOptions()
|
||||
try:
|
||||
server.runCommand(["setEventMask", server.getEventHandle(), llevel, debug_domains, _evt_list])
|
||||
except (BrokenPipeError, EOFError) as e:
|
||||
# bitbake-server comms failure
|
||||
logger.fatal("Attempting to set event mask: %s", e)
|
||||
return 1
|
||||
server.runCommand(["setEventMask", server.getEventHandle(), llevel, debug_domains, _evt_list])
|
||||
|
||||
# The logging_tree module is *extremely* helpful in debugging logging
|
||||
# domains. Uncomment here to dump the logging tree when bitbake starts
|
||||
@@ -604,11 +594,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
|
||||
universe = False
|
||||
if not params.observe_only:
|
||||
try:
|
||||
params.updateFromServer(server)
|
||||
except Exception as e:
|
||||
logger.fatal("Fetching command line: %s", e)
|
||||
return 1
|
||||
params.updateFromServer(server)
|
||||
cmdline = params.parseActions()
|
||||
if not cmdline:
|
||||
print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
|
||||
@@ -619,12 +605,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
if cmdline['action'][0] == "buildTargets" and "universe" in cmdline['action'][1]:
|
||||
universe = True
|
||||
|
||||
try:
|
||||
ret, error = server.runCommand(cmdline['action'])
|
||||
except (BrokenPipeError, EOFError) as e:
|
||||
# bitbake-server comms failure
|
||||
logger.fatal("Command '{}' failed: %s".format(cmdline), e)
|
||||
return 1
|
||||
ret, error = server.runCommand(cmdline['action'])
|
||||
if error:
|
||||
logger.error("Command '%s' failed: %s" % (cmdline, error))
|
||||
return 1
|
||||
@@ -644,38 +625,25 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
|
||||
printintervaldelta = 10 * 60 # 10 minutes
|
||||
printinterval = printintervaldelta
|
||||
pinginterval = 1 * 60 # 1 minute
|
||||
lastevent = lastprint = time.time()
|
||||
lastprint = time.time()
|
||||
|
||||
termfilter = tf(main, helper, console_handlers, params.options.quiet)
|
||||
atexit.register(termfilter.finish)
|
||||
|
||||
# shutdown levels
|
||||
# 0 - normal operation
|
||||
# 1 - no new task execution, let current running tasks finish
|
||||
# 2 - interrupting currently executing tasks
|
||||
# 3 - we're done, exit
|
||||
while main.shutdown < 3:
|
||||
while True:
|
||||
try:
|
||||
if (lastprint + printinterval) <= time.time():
|
||||
termfilter.keepAlive(printinterval)
|
||||
printinterval += printintervaldelta
|
||||
event = eventHandler.waitEvent(0)
|
||||
if event is None:
|
||||
if (lastevent + pinginterval) <= time.time():
|
||||
ret, error = server.runCommand(["ping"])
|
||||
if error or not ret:
|
||||
termfilter.clearFooter()
|
||||
print("No reply after pinging server (%s, %s), exiting." % (str(error), str(ret)))
|
||||
return_value = 3
|
||||
main.shutdown = 3
|
||||
lastevent = time.time()
|
||||
if main.shutdown > 1:
|
||||
break
|
||||
if not parseprogress:
|
||||
termfilter.updateFooter()
|
||||
event = eventHandler.waitEvent(0.25)
|
||||
if event is None:
|
||||
continue
|
||||
lastevent = time.time()
|
||||
helper.eventHandler(event)
|
||||
if isinstance(event, bb.runqueue.runQueueExitWait):
|
||||
if not main.shutdown:
|
||||
@@ -780,15 +748,15 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
if event.error:
|
||||
errors = errors + 1
|
||||
logger.error(str(event))
|
||||
main.shutdown = 3
|
||||
main.shutdown = 2
|
||||
continue
|
||||
if isinstance(event, bb.command.CommandExit):
|
||||
if not return_value:
|
||||
return_value = event.exitcode
|
||||
main.shutdown = 3
|
||||
main.shutdown = 2
|
||||
continue
|
||||
if isinstance(event, (bb.command.CommandCompleted, bb.cooker.CookerExit)):
|
||||
main.shutdown = 3
|
||||
main.shutdown = 2
|
||||
continue
|
||||
if isinstance(event, bb.event.MultipleProviders):
|
||||
logger.info(str(event))
|
||||
@@ -873,26 +841,15 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
|
||||
logger.error("Unknown event: %s", event)
|
||||
|
||||
except (BrokenPipeError, EOFError) as e:
|
||||
# bitbake-server comms failure, don't attempt further comms and exit
|
||||
logger.fatal("Executing event: %s", e)
|
||||
return_value = 1
|
||||
errors = errors + 1
|
||||
main.shutdown = 3
|
||||
except EnvironmentError as ioerror:
|
||||
termfilter.clearFooter()
|
||||
# ignore interrupted io
|
||||
if ioerror.args[0] == 4:
|
||||
continue
|
||||
sys.stderr.write(str(ioerror))
|
||||
main.shutdown = 2
|
||||
if not params.observe_only:
|
||||
try:
|
||||
_, error = server.runCommand(["stateForceShutdown"])
|
||||
except (BrokenPipeError, EOFError) as e:
|
||||
# bitbake-server comms failure, don't attempt further comms and exit
|
||||
logger.fatal("Unable to force shutdown: %s", e)
|
||||
main.shutdown = 3
|
||||
_, error = server.runCommand(["stateForceShutdown"])
|
||||
main.shutdown = 2
|
||||
except KeyboardInterrupt:
|
||||
termfilter.clearFooter()
|
||||
if params.observe_only:
|
||||
@@ -901,13 +858,9 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
|
||||
def state_force_shutdown():
|
||||
print("\nSecond Keyboard Interrupt, stopping...\n")
|
||||
try:
|
||||
_, error = server.runCommand(["stateForceShutdown"])
|
||||
if error:
|
||||
logger.error("Unable to cleanly stop: %s" % error)
|
||||
except (BrokenPipeError, EOFError) as e:
|
||||
# bitbake-server comms failure
|
||||
logger.fatal("Unable to cleanly stop: %s", e)
|
||||
_, error = server.runCommand(["stateForceShutdown"])
|
||||
if error:
|
||||
logger.error("Unable to cleanly stop: %s" % error)
|
||||
|
||||
if not params.observe_only and main.shutdown == 1:
|
||||
state_force_shutdown()
|
||||
@@ -920,9 +873,6 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
_, error = server.runCommand(["stateShutdown"])
|
||||
if error:
|
||||
logger.error("Unable to cleanly shutdown: %s" % error)
|
||||
except (BrokenPipeError, EOFError) as e:
|
||||
# bitbake-server comms failure
|
||||
logger.fatal("Unable to cleanly shutdown: %s", e)
|
||||
except KeyboardInterrupt:
|
||||
state_force_shutdown()
|
||||
|
||||
@@ -930,14 +880,9 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
except Exception as e:
|
||||
import traceback
|
||||
sys.stderr.write(traceback.format_exc())
|
||||
main.shutdown = 2
|
||||
if not params.observe_only:
|
||||
try:
|
||||
_, error = server.runCommand(["stateForceShutdown"])
|
||||
except (BrokenPipeError, EOFError) as e:
|
||||
# bitbake-server comms failure, don't attempt further comms and exit
|
||||
logger.fatal("Unable to force shutdown: %s", e)
|
||||
main.shudown = 3
|
||||
_, error = server.runCommand(["stateForceShutdown"])
|
||||
main.shutdown = 2
|
||||
return_value = 1
|
||||
try:
|
||||
termfilter.clearFooter()
|
||||
|
||||
@@ -227,9 +227,6 @@ class NCursesUI:
shutdown = 0

try:
if not params.observe_only:
params.updateToServer(server, os.environ.copy())

params.updateFromServer(server)
cmdline = params.parseActions()
if not cmdline:

@@ -177,7 +177,7 @@ class gtkthread(threading.Thread):
quit = threading.Event()
def __init__(self, shutdown):
threading.Thread.__init__(self)
self.daemon = True
self.setDaemon(True)
self.shutdown = shutdown
if not Gtk.init_check()[0]:
sys.stderr.write("Gtk+ init failed. Make sure DISPLAY variable is set.\n")

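Both this hunk and the BBUIEventQueue hunk further down swap between the deprecated Thread.setDaemon() call and the daemon attribute. A standalone illustration of the two spellings, independent of the BitBake classes:

import threading

def worker():
    pass

t = threading.Thread(target=worker)
t.daemon = True       # preferred spelling
# t.setDaemon(True)   # deprecated alias, as seen in the hunk above
t.start()
t.join()
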
File diff suppressed because it is too large
@@ -385,7 +385,7 @@ def main(server, eventHandler, params):
main.shutdown = 1

logger.info("ToasterUI build done, brbe: %s", brbe)
break
continue

if isinstance(event, (bb.command.CommandCompleted,
bb.command.CommandFailed,

@@ -65,27 +65,35 @@ class BBUIEventQueue:
self.server = server

self.t = threading.Thread()
self.t.daemon = True
self.t.setDaemon(True)
self.t.run = self.startCallbackHandler
self.t.start()

def getEvent(self):
with bb.utils.lock_timeout(self.eventQueueLock):
if not self.eventQueue:
return None
item = self.eventQueue.pop(0)
if not self.eventQueue:
self.eventQueueNotify.clear()
return item

self.eventQueueLock.acquire()

if not self.eventQueue:
self.eventQueueLock.release()
return None

item = self.eventQueue.pop(0)

if not self.eventQueue:
self.eventQueueNotify.clear()

self.eventQueueLock.release()
return item

def waitEvent(self, delay):
self.eventQueueNotify.wait(delay)
return self.getEvent()

def queue_event(self, event):
with bb.utils.lock_timeout(self.eventQueueLock):
self.eventQueue.append(event)
self.eventQueueNotify.set()
self.eventQueueLock.acquire()
self.eventQueue.append(event)
self.eventQueueNotify.set()
self.eventQueueLock.release()

def send_event(self, event):
self.queue_event(pickle.loads(event))

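The lock_timeout-based getEvent()/queue_event() shown above wrap queue access in a context manager instead of manual acquire()/release() pairs. A standalone sketch of the same notify-and-drain pattern using plain threading primitives (not the BitBake classes themselves):

import threading

class TinyEventQueue:
    def __init__(self):
        self._queue = []
        self._lock = threading.Lock()
        self._notify = threading.Event()

    def put(self, item):
        # Mirror of queue_event(): append under the lock, then wake waiters.
        with self._lock:
            self._queue.append(item)
            self._notify.set()

    def get(self):
        # Mirror of getEvent(): pop under the lock, clear the flag when empty.
        with self._lock:
            if not self._queue:
                return None
            item = self._queue.pop(0)
            if not self._queue:
                self._notify.clear()
            return item

    def wait(self, delay):
        # Mirror of waitEvent(): block up to `delay` seconds for an item.
        self._notify.wait(delay)
        return self.get()
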
@@ -13,7 +13,6 @@ import errno
|
||||
import logging
|
||||
import bb
|
||||
import bb.msg
|
||||
import locale
|
||||
import multiprocessing
|
||||
import fcntl
|
||||
import importlib
|
||||
@@ -50,7 +49,7 @@ def clean_context():
|
||||
|
||||
def get_context():
|
||||
return _context
|
||||
|
||||
|
||||
|
||||
def set_context(ctx):
|
||||
_context = ctx
|
||||
@@ -212,8 +211,8 @@ def explode_dep_versions2(s, *, sort=True):
|
||||
inversion = True
|
||||
# This list is based on behavior and supported comparisons from deb, opkg and rpm.
|
||||
#
|
||||
# Even though =<, <<, ==, !=, =>, and >> may not be supported,
|
||||
# we list each possibly valid item.
|
||||
# Even though =<, <<, ==, !=, =>, and >> may not be supported,
|
||||
# we list each possibly valid item.
|
||||
# The build system is responsible for validation of what it supports.
|
||||
if i.startswith(('<=', '=<', '<<', '==', '!=', '>=', '=>', '>>')):
|
||||
lastcmp = i[0:2]
|
||||
@@ -347,7 +346,7 @@ def _print_exception(t, value, tb, realfile, text, context):
|
||||
exception = traceback.format_exception_only(t, value)
|
||||
error.append('Error executing a python function in %s:\n' % realfile)
|
||||
|
||||
# Strip 'us' from the stack (better_exec call) unless that was where the
|
||||
# Strip 'us' from the stack (better_exec call) unless that was where the
|
||||
# error came from
|
||||
if tb.tb_next is not None:
|
||||
tb = tb.tb_next
|
||||
@@ -604,25 +603,11 @@ def preserved_envvars():
|
||||
v = [
|
||||
'BBPATH',
|
||||
'BB_PRESERVE_ENV',
|
||||
'BB_ENV_PASSTHROUGH',
|
||||
'BB_ENV_PASSTHROUGH_ADDITIONS',
|
||||
]
|
||||
return v + preserved_envvars_exported()
|
||||
|
||||
def check_system_locale():
|
||||
"""Make sure the required system locale are available and configured"""
|
||||
default_locale = locale.getlocale(locale.LC_CTYPE)
|
||||
|
||||
try:
|
||||
locale.setlocale(locale.LC_CTYPE, ("en_US", "UTF-8"))
|
||||
except:
|
||||
sys.exit("Please make sure locale 'en_US.UTF-8' is available on your system")
|
||||
else:
|
||||
locale.setlocale(locale.LC_CTYPE, default_locale)
|
||||
|
||||
if sys.getfilesystemencoding() != "utf-8":
|
||||
sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\n"
|
||||
"Python can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
|
||||
|
||||
def filter_environment(good_vars):
|
||||
"""
|
||||
Create a pristine environment for bitbake. This will remove variables that
|
||||
@@ -745,9 +730,9 @@ def prunedir(topdir, ionice=False):
|
||||
# but thats possibly insane and suffixes is probably going to be small
|
||||
#
|
||||
def prune_suffix(var, suffixes, d):
|
||||
"""
|
||||
"""
|
||||
See if var ends with any of the suffixes listed and
|
||||
remove it if found
|
||||
remove it if found
|
||||
"""
|
||||
for suffix in suffixes:
|
||||
if suffix and var.endswith(suffix):
|
||||
@@ -758,8 +743,7 @@ def mkdirhier(directory):
|
||||
"""Create a directory like 'mkdir -p', but does not complain if
|
||||
directory already exists like os.makedirs
|
||||
"""
|
||||
if '${' in str(directory):
|
||||
bb.fatal("Directory name {} contains unexpanded bitbake variable. This may cause build failures and WORKDIR polution.".format(directory))
|
||||
|
||||
try:
|
||||
os.makedirs(directory)
|
||||
except OSError as e:
|
||||
@@ -1001,16 +985,13 @@ def umask(new_mask):
os.umask(current_mask)

def to_boolean(string, default=None):
"""
"""
Check input string and return boolean value True/False/None
depending upon the checks
depending upon the checks
"""
if not string:
return default

if isinstance(string, int):
return string != 0

normalized = string.lower()
if normalized in ("y", "yes", "1", "true"):
return True
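
The variant of to_boolean() shown above with the isinstance(string, int) check accepts integer inputs directly. A short illustration of that behaviour, assuming an environment where bb.utils is importable:

from bb.utils import to_boolean

assert to_boolean("yes") is True
assert to_boolean("", default=None) is None   # falsy input returns the default
assert to_boolean(1) is True                  # integers are handled directly
assert to_boolean(0) is None                  # 0 is falsy, so the default wins
assert to_boolean(0, default=False) is False
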
@@ -1142,10 +1123,7 @@ def get_referenced_vars(start_expr, d):


def cpu_count():
try:
return len(os.sched_getaffinity(0))
except OSError:
return multiprocessing.cpu_count()
return multiprocessing.cpu_count()

def nonblockingfd(fd):
fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)

@@ -1701,11 +1679,25 @@ def disable_network(uid=None, gid=None):
f.write("%s %s 1" % (gid, gid))

def export_proxies(d):
from bb.fetch2 import get_fetcher_environment
""" export common proxies variables from datastore to environment """
newenv = get_fetcher_environment(d)
for v in newenv:
os.environ[v] = newenv[v]
import os

variables = ['http_proxy', 'HTTP_PROXY', 'https_proxy', 'HTTPS_PROXY',
'ftp_proxy', 'FTP_PROXY', 'no_proxy', 'NO_PROXY',
'GIT_PROXY_COMMAND']
exported = False

for v in variables:
if v in os.environ.keys():
exported = True
else:
v_proxy = d.getVar(v)
if v_proxy is not None:
os.environ[v] = v_proxy
exported = True

return exported


def load_plugins(logger, plugins, pluginpath):
def load_plugin(name):
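
The get_fetcher_environment-based version of export_proxies() shown above simply copies the fetcher proxy environment from the datastore into os.environ. A hedged usage sketch; the datastore d and the helper name run_with_proxies are illustrative, not part of the change:

import os
import bb.utils

def run_with_proxies(d):
    # Push http_proxy/https_proxy/no_proxy and friends from the datastore
    # into the process environment before spawning tools (git, wget, ...)
    # that read their proxy settings from the environment.
    bb.utils.export_proxies(d)
    return os.environ.get("https_proxy")
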

@@ -1830,39 +1822,3 @@ def mkstemp(suffix=None, prefix=None, dir=None, text=False):
else:
prefix = tempfile.gettempprefix() + entropy
return tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir, text=text)

def path_is_descendant(descendant, ancestor):
"""
Returns True if the path `descendant` is a descendant of `ancestor`
(including being equivalent to `ancestor` itself). Otherwise returns False.
Correctly accounts for symlinks, bind mounts, etc. by using
os.path.samestat() to compare paths

May raise any exception that os.stat() raises
"""

ancestor_stat = os.stat(ancestor)

# Recurse up each directory component of the descendant to see if it is
# equivalent to the ancestor
check_dir = os.path.abspath(descendant).rstrip("/")
while check_dir:
check_stat = os.stat(check_dir)
if os.path.samestat(check_stat, ancestor_stat):
return True
check_dir = os.path.dirname(check_dir).rstrip("/")

return False

# If we don't have a timeout of some kind and a process/thread exits badly (for example
# OOM killed) and held a lock, we'd just hang in the lock futex forever. It is better
# we exit at some point than hang. 5 minutes with no progress means we're probably deadlocked.
@contextmanager
def lock_timeout(lock):
held = lock.acquire(timeout=5*60)
try:
if not held:
os._exit(1)
yield held
finally:
lock.release()

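For reference, a minimal sketch of how the lock_timeout() context manager and path_is_descendant() helper shown above are meant to be called; shared_lock and the helper names here are illustrative only:

import threading
import bb.utils

shared_lock = threading.Lock()

def append_item(queue, item):
    # Acquire with the built-in 5 minute timeout; the helper aborts the
    # process rather than hanging forever if the lock can never be taken.
    with bb.utils.lock_timeout(shared_lock):
        queue.append(item)

def inside_build_tree(path, topdir):
    # Compares stat() results, so symlinks and bind mounts are handled
    # correctly instead of comparing path strings.
    return bb.utils.path_is_descendant(path, topdir)
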
@@ -1,126 +0,0 @@
|
||||
#! /usr/bin/env python3
|
||||
#
|
||||
# Copyright 2023 by Garmin Ltd. or its subsidiaries
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
import sys
|
||||
import ctypes
|
||||
import os
|
||||
import errno
|
||||
|
||||
libc = ctypes.CDLL("libc.so.6", use_errno=True)
|
||||
fsencoding = sys.getfilesystemencoding()
|
||||
|
||||
|
||||
libc.listxattr.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_size_t]
|
||||
libc.llistxattr.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_size_t]
|
||||
|
||||
|
||||
def listxattr(path, follow=True):
|
||||
func = libc.listxattr if follow else libc.llistxattr
|
||||
|
||||
os_path = os.fsencode(path)
|
||||
|
||||
while True:
|
||||
length = func(os_path, None, 0)
|
||||
|
||||
if length < 0:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err), str(path))
|
||||
|
||||
if length == 0:
|
||||
return []
|
||||
|
||||
arr = ctypes.create_string_buffer(length)
|
||||
|
||||
read_length = func(os_path, arr, length)
|
||||
if read_length != length:
|
||||
# Race!
|
||||
continue
|
||||
|
||||
return [a.decode(fsencoding) for a in arr.raw.split(b"\x00") if a]
|
||||
|
||||
|
||||
libc.getxattr.argtypes = [
|
||||
ctypes.c_char_p,
|
||||
ctypes.c_char_p,
|
||||
ctypes.c_char_p,
|
||||
ctypes.c_size_t,
|
||||
]
|
||||
libc.lgetxattr.argtypes = [
|
||||
ctypes.c_char_p,
|
||||
ctypes.c_char_p,
|
||||
ctypes.c_char_p,
|
||||
ctypes.c_size_t,
|
||||
]
|
||||
|
||||
|
||||
def getxattr(path, name, follow=True):
|
||||
func = libc.getxattr if follow else libc.lgetxattr
|
||||
|
||||
os_path = os.fsencode(path)
|
||||
os_name = os.fsencode(name)
|
||||
|
||||
while True:
|
||||
length = func(os_path, os_name, None, 0)
|
||||
|
||||
if length < 0:
|
||||
err = ctypes.get_errno()
|
||||
if err == errno.ENODATA:
|
||||
return None
|
||||
raise OSError(err, os.strerror(err), str(path))
|
||||
|
||||
if length == 0:
|
||||
return ""
|
||||
|
||||
arr = ctypes.create_string_buffer(length)
|
||||
|
||||
read_length = func(os_path, os_name, arr, length)
|
||||
if read_length != length:
|
||||
# Race!
|
||||
continue
|
||||
|
||||
return arr.raw
|
||||
|
||||
|
||||
def get_all_xattr(path, follow=True):
|
||||
attrs = {}
|
||||
|
||||
names = listxattr(path, follow)
|
||||
|
||||
for name in names:
|
||||
value = getxattr(path, name, follow)
|
||||
if value is None:
|
||||
# This can happen if a value is erased after listxattr is called,
|
||||
# so ignore it
|
||||
continue
|
||||
attrs[name] = value
|
||||
|
||||
return attrs
|
||||
|
||||
|
||||
def main():
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("path", help="File Path", type=Path)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
attrs = get_all_xattr(args.path)
|
||||
|
||||
for name, value in attrs.items():
|
||||
try:
|
||||
value = value.decode(fsencoding)
|
||||
except UnicodeDecodeError:
|
||||
pass
|
||||
|
||||
print(f"{name} = {value}")
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
@@ -11,7 +11,6 @@ import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
from bb.cookerdata import findTopdir
|
||||
import bb.utils
|
||||
|
||||
from bblayers.common import LayerPlugin
|
||||
@@ -38,7 +37,7 @@ class ActionPlugin(LayerPlugin):
|
||||
sys.stderr.write("Specified layer directory %s doesn't contain a conf/layer.conf file\n" % layerdir)
|
||||
return 1
|
||||
|
||||
bblayers_conf = os.path.join(findTopdir(),'conf', 'bblayers.conf')
|
||||
bblayers_conf = os.path.join('conf', 'bblayers.conf')
|
||||
if not os.path.exists(bblayers_conf):
|
||||
sys.stderr.write("Unable to find bblayers.conf\n")
|
||||
return 1
|
||||
@@ -50,14 +49,12 @@ class ActionPlugin(LayerPlugin):
|
||||
|
||||
try:
|
||||
notadded, _ = bb.utils.edit_bblayers_conf(bblayers_conf, layerdirs, None)
|
||||
self.tinfoil.modified_files()
|
||||
if not (args.force or notadded):
|
||||
try:
|
||||
self.tinfoil.run_command('parseConfiguration')
|
||||
except (bb.tinfoil.TinfoilUIException, bb.BBHandledException):
|
||||
# Restore the back up copy of bblayers.conf
|
||||
shutil.copy2(backup, bblayers_conf)
|
||||
self.tinfoil.modified_files()
|
||||
bb.fatal("Parse failure with the specified layer added, exiting.")
|
||||
else:
|
||||
for item in notadded:
|
||||
@@ -68,7 +65,7 @@ class ActionPlugin(LayerPlugin):
|
||||
|
||||
def do_remove_layer(self, args):
|
||||
"""Remove one or more layers from bblayers.conf."""
|
||||
bblayers_conf = os.path.join(findTopdir() ,'conf', 'bblayers.conf')
|
||||
bblayers_conf = os.path.join('conf', 'bblayers.conf')
|
||||
if not os.path.exists(bblayers_conf):
|
||||
sys.stderr.write("Unable to find bblayers.conf\n")
|
||||
return 1
|
||||
@@ -83,7 +80,6 @@ class ActionPlugin(LayerPlugin):
|
||||
layerdir = os.path.abspath(item)
|
||||
layerdirs.append(layerdir)
|
||||
(_, notremoved) = bb.utils.edit_bblayers_conf(bblayers_conf, None, layerdirs)
|
||||
self.tinfoil.modified_files()
|
||||
if notremoved:
|
||||
for item in notremoved:
|
||||
sys.stderr.write("No layers matching %s found in BBLAYERS\n" % item)
|
||||
@@ -243,9 +239,6 @@ build results (as the layer priority order has effectively changed).
|
||||
if not entry_found:
|
||||
logger.warning("File %s does not match the flattened layer's BBFILES setting, you may need to edit conf/layer.conf or move the file elsewhere" % f1full)
|
||||
|
||||
self.tinfoil.modified_files()
|
||||
|
||||
|
||||
def get_file_layer(self, filename):
|
||||
layerdir = self.get_file_layerdir(filename)
|
||||
if layerdir:
|
||||
|
||||
@@ -29,12 +29,12 @@ class QueryPlugin(LayerPlugin):
|
||||
|
||||
def do_show_layers(self, args):
|
||||
"""show current configured layers."""
|
||||
logger.plain("%s %s %s" % ("layer".ljust(20), "path".ljust(70), "priority"))
|
||||
logger.plain('=' * 104)
|
||||
logger.plain("%s %s %s" % ("layer".ljust(20), "path".ljust(40), "priority"))
|
||||
logger.plain('=' * 74)
|
||||
for layer, _, regex, pri in self.tinfoil.cooker.bbfile_config_priorities:
|
||||
layerdir = self.bbfile_collections.get(layer, None)
|
||||
layername = layer
|
||||
logger.plain("%s %s %s" % (layername.ljust(20), layerdir.ljust(70), pri))
|
||||
layername = self.get_layer_name(layerdir)
|
||||
logger.plain("%s %s %d" % (layername.ljust(20), layerdir.ljust(40), pri))
|
||||
|
||||
def version_str(self, pe, pv, pr = None):
|
||||
verstr = "%s" % pv
|
||||
@@ -282,10 +282,7 @@ Lists recipes with the bbappends that apply to them as subitems.
|
||||
else:
|
||||
logger.plain('=== Appended recipes ===')
|
||||
|
||||
|
||||
cooker_data = self.tinfoil.cooker.recipecaches[args.mc]
|
||||
|
||||
pnlist = list(cooker_data.pkg_pn.keys())
|
||||
pnlist = list(self.tinfoil.cooker_data.pkg_pn.keys())
|
||||
pnlist.sort()
|
||||
appends = False
|
||||
for pn in pnlist:
|
||||
@@ -298,7 +295,7 @@ Lists recipes with the bbappends that apply to them as subitems.
|
||||
if not found:
|
||||
continue
|
||||
|
||||
if self.show_appends_for_pn(pn, cooker_data, args.mc):
|
||||
if self.show_appends_for_pn(pn):
|
||||
appends = True
|
||||
|
||||
if not args.pnspec and self.show_appends_for_skipped():
|
||||
@@ -307,10 +304,8 @@ Lists recipes with the bbappends that apply to them as subitems.
|
||||
if not appends:
|
||||
logger.plain('No append files found')
|
||||
|
||||
def show_appends_for_pn(self, pn, cooker_data, mc):
|
||||
filenames = cooker_data.pkg_pn[pn]
|
||||
if mc:
|
||||
pn = "mc:%s:%s" % (mc, pn)
|
||||
def show_appends_for_pn(self, pn):
|
||||
filenames = self.tinfoil.cooker_data.pkg_pn[pn]
|
||||
|
||||
best = self.tinfoil.find_best_provider(pn)
|
||||
best_filename = os.path.basename(best[3])
|
||||
@@ -535,7 +530,6 @@ NOTE: .bbappend files can impact the dependencies.
|
||||
|
||||
parser_show_appends = self.add_command(sp, 'show-appends', self.do_show_appends)
|
||||
parser_show_appends.add_argument('pnspec', nargs='*', help='optional recipe name specification (wildcards allowed, enclose in quotes to avoid shell expansion)')
|
||||
parser_show_appends.add_argument('--mc', help='use specified multiconfig', default='')
|
||||
|
||||
parser_show_cross_depends = self.add_command(sp, 'show-cross-depends', self.do_show_cross_depends)
|
||||
parser_show_cross_depends.add_argument('-f', '--filenames', help='show full file path', action='store_true')
|
||||
|
||||
@@ -585,7 +585,7 @@ class SiblingTest(TreeTest):
</html>'''
# All that whitespace looks good but makes the tests more
# difficult. Get rid of it.
markup = re.compile(r"\n\s*").sub("", markup)
markup = re.compile("\n\s*").sub("", markup)
self.tree = self.soup(markup)

@@ -392,7 +392,19 @@ class SourceGenerator(NodeVisitor):
def visit_Name(self, node):
self.write(node.id)

def visit_Str(self, node):
self.write(repr(node.s))

def visit_Bytes(self, node):
self.write(repr(node.s))

def visit_Num(self, node):
self.write(repr(node.n))

def visit_Constant(self, node):
# Python 3.8 deprecated visit_Num(), visit_Str(), visit_Bytes(),
# visit_NameConstant() and visit_Ellipsis(). They can be removed once we
# require 3.8+.
self.write(repr(node.value))

def visit_Tuple(self, node):

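The visit_Constant() method shown in the hunk exists because Python 3.8 folded ast.Num, ast.Str and ast.Bytes into ast.Constant. A standalone NodeVisitor illustrating the same idea (not the SourceGenerator class itself):

import ast

class ConstantPrinter(ast.NodeVisitor):
    def visit_Constant(self, node):
        # On Python 3.8+ every literal arrives here as ast.Constant.
        print(repr(node.value))

ConstantPrinter().visit(ast.parse("x = 42\ny = 'hello'"))
# prints 42, then 'hello'
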
@@ -5,102 +5,151 @@
|
||||
|
||||
import asyncio
|
||||
from contextlib import closing
|
||||
import re
|
||||
import sqlite3
|
||||
import itertools
|
||||
import json
|
||||
from collections import namedtuple
|
||||
from urllib.parse import urlparse
|
||||
from bb.asyncrpc.client import parse_address, ADDR_TYPE_UNIX, ADDR_TYPE_WS
|
||||
|
||||
User = namedtuple("User", ("username", "permissions"))
|
||||
UNIX_PREFIX = "unix://"
|
||||
|
||||
def create_server(
|
||||
addr,
|
||||
dbname,
|
||||
*,
|
||||
sync=True,
|
||||
upstream=None,
|
||||
read_only=False,
|
||||
db_username=None,
|
||||
db_password=None,
|
||||
anon_perms=None,
|
||||
admin_username=None,
|
||||
admin_password=None,
|
||||
):
|
||||
def sqlite_engine():
|
||||
from .sqlite import DatabaseEngine
|
||||
ADDR_TYPE_UNIX = 0
|
||||
ADDR_TYPE_TCP = 1
|
||||
|
||||
return DatabaseEngine(dbname, sync)
|
||||
# The Python async server defaults to a 64K receive buffer, so we hardcode our
|
||||
# maximum chunk size. It would be better if the client and server reported to
|
||||
# each other what the maximum chunk sizes were, but that will slow down the
|
||||
# connection setup with a round trip delay so I'd rather not do that unless it
|
||||
# is necessary
|
||||
DEFAULT_MAX_CHUNK = 32 * 1024
|
||||
|
||||
def sqlalchemy_engine():
|
||||
from .sqlalchemy import DatabaseEngine
|
||||
UNIHASH_TABLE_DEFINITION = (
|
||||
("method", "TEXT NOT NULL", "UNIQUE"),
|
||||
("taskhash", "TEXT NOT NULL", "UNIQUE"),
|
||||
("unihash", "TEXT NOT NULL", ""),
|
||||
)
|
||||
|
||||
return DatabaseEngine(dbname, db_username, db_password)
|
||||
UNIHASH_TABLE_COLUMNS = tuple(name for name, _, _ in UNIHASH_TABLE_DEFINITION)
|
||||
|
||||
from . import server
|
||||
OUTHASH_TABLE_DEFINITION = (
|
||||
("method", "TEXT NOT NULL", "UNIQUE"),
|
||||
("taskhash", "TEXT NOT NULL", "UNIQUE"),
|
||||
("outhash", "TEXT NOT NULL", "UNIQUE"),
|
||||
("created", "DATETIME", ""),
|
||||
|
||||
if "://" in dbname:
|
||||
db_engine = sqlalchemy_engine()
|
||||
# Optional fields
|
||||
("owner", "TEXT", ""),
|
||||
("PN", "TEXT", ""),
|
||||
("PV", "TEXT", ""),
|
||||
("PR", "TEXT", ""),
|
||||
("task", "TEXT", ""),
|
||||
("outhash_siginfo", "TEXT", ""),
|
||||
)
|
||||
|
||||
OUTHASH_TABLE_COLUMNS = tuple(name for name, _, _ in OUTHASH_TABLE_DEFINITION)
|
||||
|
||||
def _make_table(cursor, name, definition):
|
||||
cursor.execute('''
|
||||
CREATE TABLE IF NOT EXISTS {name} (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
{fields}
|
||||
UNIQUE({unique})
|
||||
)
|
||||
'''.format(
|
||||
name=name,
|
||||
fields=" ".join("%s %s," % (name, typ) for name, typ, _ in definition),
|
||||
unique=", ".join(name for name, _, flags in definition if "UNIQUE" in flags)
|
||||
))
|
||||
|
||||
|
||||
def setup_database(database, sync=True):
|
||||
db = sqlite3.connect(database)
|
||||
db.row_factory = sqlite3.Row
|
||||
|
||||
with closing(db.cursor()) as cursor:
|
||||
_make_table(cursor, "unihashes_v2", UNIHASH_TABLE_DEFINITION)
|
||||
_make_table(cursor, "outhashes_v2", OUTHASH_TABLE_DEFINITION)
|
||||
|
||||
cursor.execute('PRAGMA journal_mode = WAL')
|
||||
cursor.execute('PRAGMA synchronous = %s' % ('NORMAL' if sync else 'OFF'))
|
||||
|
||||
# Drop old indexes
|
||||
cursor.execute('DROP INDEX IF EXISTS taskhash_lookup')
|
||||
cursor.execute('DROP INDEX IF EXISTS outhash_lookup')
|
||||
cursor.execute('DROP INDEX IF EXISTS taskhash_lookup_v2')
|
||||
cursor.execute('DROP INDEX IF EXISTS outhash_lookup_v2')
|
||||
|
||||
# TODO: Upgrade from tasks_v2?
|
||||
cursor.execute('DROP TABLE IF EXISTS tasks_v2')
|
||||
|
||||
# Create new indexes
|
||||
cursor.execute('CREATE INDEX IF NOT EXISTS taskhash_lookup_v3 ON unihashes_v2 (method, taskhash)')
|
||||
cursor.execute('CREATE INDEX IF NOT EXISTS outhash_lookup_v3 ON outhashes_v2 (method, outhash)')
|
||||
|
||||
return db
|
||||
|
||||
|
||||
def parse_address(addr):
|
||||
if addr.startswith(UNIX_PREFIX):
|
||||
return (ADDR_TYPE_UNIX, (addr[len(UNIX_PREFIX):],))
|
||||
else:
|
||||
db_engine = sqlite_engine()
|
||||
m = re.match(r'\[(?P<host>[^\]]*)\]:(?P<port>\d+)$', addr)
|
||||
if m is not None:
|
||||
host = m.group('host')
|
||||
port = m.group('port')
|
||||
else:
|
||||
host, port = addr.split(':')
|
||||
|
||||
if anon_perms is None:
|
||||
anon_perms = server.DEFAULT_ANON_PERMS
|
||||
return (ADDR_TYPE_TCP, (host, int(port)))
|
||||
|
||||
s = server.Server(
|
||||
db_engine,
|
||||
upstream=upstream,
|
||||
read_only=read_only,
|
||||
anon_perms=anon_perms,
|
||||
admin_username=admin_username,
|
||||
admin_password=admin_password,
|
||||
)
|
||||
|
||||
def chunkify(msg, max_chunk):
|
||||
if len(msg) < max_chunk - 1:
|
||||
yield ''.join((msg, "\n"))
|
||||
else:
|
||||
yield ''.join((json.dumps({
|
||||
'chunk-stream': None
|
||||
}), "\n"))
|
||||
|
||||
args = [iter(msg)] * (max_chunk - 1)
|
||||
for m in map(''.join, itertools.zip_longest(*args, fillvalue='')):
|
||||
yield ''.join(itertools.chain(m, "\n"))
|
||||
yield "\n"
|
||||
|
||||
|
||||
def create_server(addr, dbname, *, sync=True, upstream=None, read_only=False):
|
||||
from . import server
|
||||
db = setup_database(dbname, sync=sync)
|
||||
s = server.Server(db, upstream=upstream, read_only=read_only)
|
||||
|
||||
(typ, a) = parse_address(addr)
|
||||
if typ == ADDR_TYPE_UNIX:
|
||||
s.start_unix_server(*a)
|
||||
elif typ == ADDR_TYPE_WS:
|
||||
url = urlparse(a[0])
|
||||
s.start_websocket_server(url.hostname, url.port)
|
||||
else:
|
||||
s.start_tcp_server(*a)
|
||||
|
||||
return s
|
||||
|
||||
|
||||
def create_client(addr, username=None, password=None):
|
||||
def create_client(addr):
|
||||
from . import client
|
||||
c = client.Client()
|
||||
|
||||
c = client.Client(username, password)
|
||||
(typ, a) = parse_address(addr)
|
||||
if typ == ADDR_TYPE_UNIX:
|
||||
c.connect_unix(*a)
|
||||
else:
|
||||
c.connect_tcp(*a)
|
||||
|
||||
try:
|
||||
(typ, a) = parse_address(addr)
|
||||
if typ == ADDR_TYPE_UNIX:
|
||||
c.connect_unix(*a)
|
||||
elif typ == ADDR_TYPE_WS:
|
||||
c.connect_websocket(*a)
|
||||
else:
|
||||
c.connect_tcp(*a)
|
||||
return c
|
||||
except Exception as e:
|
||||
c.close()
|
||||
raise e
|
||||
return c
|
||||
|
||||
|
||||
async def create_async_client(addr, username=None, password=None):
|
||||
async def create_async_client(addr):
|
||||
from . import client
|
||||
c = client.AsyncClient()
|
||||
|
||||
c = client.AsyncClient(username, password)
|
||||
(typ, a) = parse_address(addr)
|
||||
if typ == ADDR_TYPE_UNIX:
|
||||
await c.connect_unix(*a)
|
||||
else:
|
||||
await c.connect_tcp(*a)
|
||||
|
||||
try:
|
||||
(typ, a) = parse_address(addr)
|
||||
if typ == ADDR_TYPE_UNIX:
|
||||
await c.connect_unix(*a)
|
||||
elif typ == ADDR_TYPE_WS:
|
||||
await c.connect_websocket(*a)
|
||||
else:
|
||||
await c.connect_tcp(*a)
|
||||
|
||||
return c
|
||||
except Exception as e:
|
||||
await c.close()
|
||||
raise e
|
||||
return c
|
||||
|
||||
@@ -5,430 +5,117 @@
|
||||
|
||||
import logging
|
||||
import socket
|
||||
import asyncio
|
||||
import bb.asyncrpc
|
||||
import json
|
||||
from . import create_async_client
|
||||
|
||||
|
||||
logger = logging.getLogger("hashserv.client")
|
||||
|
||||
|
||||
class Batch(object):
|
||||
def __init__(self):
|
||||
self.done = False
|
||||
self.cond = asyncio.Condition()
|
||||
self.pending = []
|
||||
self.results = []
|
||||
self.sent_count = 0
|
||||
|
||||
async def recv(self, socket):
|
||||
while True:
|
||||
async with self.cond:
|
||||
await self.cond.wait_for(lambda: self.pending or self.done)
|
||||
|
||||
if not self.pending:
|
||||
if self.done:
|
||||
return
|
||||
continue
|
||||
|
||||
r = await socket.recv()
|
||||
self.results.append(r)
|
||||
|
||||
async with self.cond:
|
||||
self.pending.pop(0)
|
||||
|
||||
async def send(self, socket, msgs):
|
||||
try:
|
||||
# In the event of a restart due to a reconnect, all in-flight
|
||||
# messages need to be resent first to keep to result count in sync
|
||||
for m in self.pending:
|
||||
await socket.send(m)
|
||||
|
||||
for m in msgs:
|
||||
# Add the message to the pending list before attempting to send
|
||||
# it so that if the send fails it will be retried
|
||||
async with self.cond:
|
||||
self.pending.append(m)
|
||||
self.cond.notify()
|
||||
self.sent_count += 1
|
||||
|
||||
await socket.send(m)
|
||||
|
||||
finally:
|
||||
async with self.cond:
|
||||
self.done = True
|
||||
self.cond.notify()
|
||||
|
||||
async def process(self, socket, msgs):
|
||||
await asyncio.gather(
|
||||
self.recv(socket),
|
||||
self.send(socket, msgs),
|
||||
)
|
||||
|
||||
if len(self.results) != self.sent_count:
|
||||
raise ValueError(
|
||||
f"Expected result count {len(self.results)}. Expected {self.sent_count}"
|
||||
)
|
||||
|
||||
return self.results
|
||||
|
||||
|
||||
class AsyncClient(bb.asyncrpc.AsyncClient):
|
||||
MODE_NORMAL = 0
|
||||
MODE_GET_STREAM = 1
|
||||
MODE_EXIST_STREAM = 2
|
||||
|
||||
def __init__(self, username=None, password=None):
|
||||
super().__init__("OEHASHEQUIV", "1.1", logger)
|
||||
def __init__(self):
|
||||
super().__init__('OEHASHEQUIV', '1.1', logger)
|
||||
self.mode = self.MODE_NORMAL
|
||||
self.username = username
|
||||
self.password = password
|
||||
self.saved_become_user = None
|
||||
|
||||
async def setup_connection(self):
|
||||
await super().setup_connection()
|
||||
cur_mode = self.mode
|
||||
self.mode = self.MODE_NORMAL
|
||||
if self.username:
|
||||
# Save off become user temporarily because auth() resets it
|
||||
become = self.saved_become_user
|
||||
await self.auth(self.username, self.password)
|
||||
|
||||
if become:
|
||||
await self.become_user(become)
|
||||
|
||||
async def send_stream_batch(self, mode, msgs):
|
||||
"""
|
||||
Does a "batch" process of stream messages. This sends the query
|
||||
messages as fast as possible, and simultaneously attempts to read the
|
||||
messages back. This helps to mitigate the effects of latency to the
|
||||
hash equivalence server be allowing multiple queries to be "in-flight"
|
||||
at once
|
||||
|
||||
The implementation does more complicated tracking using a count of sent
|
||||
messages so that `msgs` can be a generator function (i.e. its length is
|
||||
unknown)
|
||||
|
||||
"""
|
||||
|
||||
b = Batch()
|
||||
await self._set_mode(cur_mode)
|
||||
|
||||
async def send_stream(self, msg):
|
||||
async def proc():
|
||||
nonlocal b
|
||||
|
||||
await self._set_mode(mode)
|
||||
return await b.process(self.socket, msgs)
|
||||
self.writer.write(("%s\n" % msg).encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
l = await self.reader.readline()
|
||||
if not l:
|
||||
raise ConnectionError("Connection closed")
|
||||
return l.decode("utf-8").rstrip()
|
||||
|
||||
return await self._send_wrapper(proc)
|
||||
|
||||
async def invoke(self, *args, **kwargs):
|
||||
# It's OK if connection errors cause a failure here, because the mode
|
||||
# is also reset to normal on a new connection
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return await super().invoke(*args, **kwargs)
|
||||
|
||||
async def _set_mode(self, new_mode):
|
||||
async def stream_to_normal():
|
||||
await self.socket.send("END")
|
||||
return await self.socket.recv()
|
||||
|
||||
async def normal_to_stream(command):
|
||||
r = await self.invoke({command: None})
|
||||
if new_mode == self.MODE_NORMAL and self.mode == self.MODE_GET_STREAM:
|
||||
r = await self.send_stream("END")
|
||||
if r != "ok":
|
||||
raise ConnectionError(
|
||||
f"Unable to transition to stream mode: Bad response from server {r!r}"
|
||||
)
|
||||
|
||||
self.logger.debug("Mode is now %s", command)
|
||||
|
||||
if new_mode == self.mode:
|
||||
return
|
||||
|
||||
self.logger.debug("Transitioning mode %s -> %s", self.mode, new_mode)
|
||||
|
||||
# Always transition to normal mode before switching to any other mode
|
||||
if self.mode != self.MODE_NORMAL:
|
||||
r = await self._send_wrapper(stream_to_normal)
|
||||
raise ConnectionError("Bad response from server %r" % r)
|
||||
elif new_mode == self.MODE_GET_STREAM and self.mode == self.MODE_NORMAL:
|
||||
r = await self.send_message({"get-stream": None})
|
||||
if r != "ok":
|
||||
self.check_invoke_error(r)
|
||||
raise ConnectionError(
|
||||
f"Unable to transition to normal mode: Bad response from server {r!r}"
|
||||
)
|
||||
self.logger.debug("Mode is now normal")
|
||||
|
||||
if new_mode == self.MODE_GET_STREAM:
|
||||
await normal_to_stream("get-stream")
|
||||
elif new_mode == self.MODE_EXIST_STREAM:
|
||||
await normal_to_stream("exists-stream")
|
||||
elif new_mode != self.MODE_NORMAL:
|
||||
raise Exception("Undefined mode transition {self.mode!r} -> {new_mode!r}")
|
||||
raise ConnectionError("Bad response from server %r" % r)
|
||||
elif new_mode != self.mode:
|
||||
raise Exception(
|
||||
"Undefined mode transition %r -> %r" % (self.mode, new_mode)
|
||||
)
|
||||
|
||||
self.mode = new_mode
|
||||
|
||||
async def get_unihash(self, method, taskhash):
|
||||
r = await self.get_unihash_batch([(method, taskhash)])
|
||||
return r[0]
|
||||
|
||||
async def get_unihash_batch(self, args):
|
||||
result = await self.send_stream_batch(
|
||||
self.MODE_GET_STREAM,
|
||||
(f"{method} {taskhash}" for method, taskhash in args),
|
||||
)
|
||||
return [r if r else None for r in result]
|
||||
await self._set_mode(self.MODE_GET_STREAM)
|
||||
r = await self.send_stream("%s %s" % (method, taskhash))
|
||||
if not r:
|
||||
return None
|
||||
return r
|
||||
|
||||
async def report_unihash(self, taskhash, method, outhash, unihash, extra={}):
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
m = extra.copy()
|
||||
m["taskhash"] = taskhash
|
||||
m["method"] = method
|
||||
m["outhash"] = outhash
|
||||
m["unihash"] = unihash
|
||||
return await self.invoke({"report": m})
|
||||
return await self.send_message({"report": m})
|
||||
|
||||
async def report_unihash_equiv(self, taskhash, method, unihash, extra={}):
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
m = extra.copy()
|
||||
m["taskhash"] = taskhash
|
||||
m["method"] = method
|
||||
m["unihash"] = unihash
|
||||
return await self.invoke({"report-equiv": m})
|
||||
return await self.send_message({"report-equiv": m})
|
||||
|
||||
async def get_taskhash(self, method, taskhash, all_properties=False):
|
||||
return await self.invoke(
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return await self.send_message(
|
||||
{"get": {"taskhash": taskhash, "method": method, "all": all_properties}}
|
||||
)
|
||||
|
||||
async def unihash_exists(self, unihash):
|
||||
r = await self.unihash_exists_batch([unihash])
|
||||
return r[0]
|
||||
|
||||
async def unihash_exists_batch(self, unihashes):
|
||||
result = await self.send_stream_batch(self.MODE_EXIST_STREAM, unihashes)
|
||||
return [r == "true" for r in result]
|
||||
|
||||
async def get_outhash(self, method, outhash, taskhash, with_unihash=True):
|
||||
return await self.invoke(
|
||||
{
|
||||
"get-outhash": {
|
||||
"outhash": outhash,
|
||||
"taskhash": taskhash,
|
||||
"method": method,
|
||||
"with_unihash": with_unihash,
|
||||
}
|
||||
}
|
||||
async def get_outhash(self, method, outhash, taskhash):
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return await self.send_message(
|
||||
{"get-outhash": {"outhash": outhash, "taskhash": taskhash, "method": method}}
|
||||
)
|
||||
|
||||
async def get_stats(self):
|
||||
return await self.invoke({"get-stats": None})
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return await self.send_message({"get-stats": None})
|
||||
|
||||
async def reset_stats(self):
|
||||
return await self.invoke({"reset-stats": None})
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return await self.send_message({"reset-stats": None})
|
||||
|
||||
async def backfill_wait(self):
|
||||
return (await self.invoke({"backfill-wait": None}))["tasks"]
|
||||
|
||||
async def remove(self, where):
|
||||
return await self.invoke({"remove": {"where": where}})
|
||||
|
||||
async def clean_unused(self, max_age):
|
||||
return await self.invoke({"clean-unused": {"max_age_seconds": max_age}})
|
||||
|
||||
async def auth(self, username, token):
|
||||
result = await self.invoke({"auth": {"username": username, "token": token}})
|
||||
self.username = username
|
||||
self.password = token
|
||||
self.saved_become_user = None
|
||||
return result
|
||||
|
||||
async def refresh_token(self, username=None):
|
||||
m = {}
|
||||
if username:
|
||||
m["username"] = username
|
||||
result = await self.invoke({"refresh-token": m})
|
||||
if (
|
||||
self.username
|
||||
and not self.saved_become_user
|
||||
and result["username"] == self.username
|
||||
):
|
||||
self.password = result["token"]
|
||||
return result
|
||||
|
||||
async def set_user_perms(self, username, permissions):
|
||||
return await self.invoke(
|
||||
{"set-user-perms": {"username": username, "permissions": permissions}}
|
||||
)
|
||||
|
||||
async def get_user(self, username=None):
|
||||
m = {}
|
||||
if username:
|
||||
m["username"] = username
|
||||
return await self.invoke({"get-user": m})
|
||||
|
||||
async def get_all_users(self):
|
||||
return (await self.invoke({"get-all-users": {}}))["users"]
|
||||
|
||||
async def new_user(self, username, permissions):
|
||||
return await self.invoke(
|
||||
{"new-user": {"username": username, "permissions": permissions}}
|
||||
)
|
||||
|
||||
async def delete_user(self, username):
|
||||
return await self.invoke({"delete-user": {"username": username}})
|
||||
|
||||
async def become_user(self, username):
|
||||
result = await self.invoke({"become-user": {"username": username}})
|
||||
if username == self.username:
|
||||
self.saved_become_user = None
|
||||
else:
|
||||
self.saved_become_user = username
|
||||
return result
|
||||
|
||||
async def get_db_usage(self):
|
||||
return (await self.invoke({"get-db-usage": {}}))["usage"]
|
||||
|
||||
async def get_db_query_columns(self):
|
||||
return (await self.invoke({"get-db-query-columns": {}}))["columns"]
|
||||
|
||||
async def gc_status(self):
|
||||
return await self.invoke({"gc-status": {}})
|
||||
|
||||
async def gc_mark(self, mark, where):
|
||||
"""
|
||||
Starts a new garbage collection operation identified by "mark". If
|
||||
garbage collection is already in progress with "mark", the collection
|
||||
is continued.
|
||||
|
||||
All unihash entries that match the "where" clause are marked to be
|
||||
kept. In addition, any new entries added to the database after this
|
||||
command will be automatically marked with "mark"
|
||||
"""
|
||||
return await self.invoke({"gc-mark": {"mark": mark, "where": where}})
|
||||
|
||||
async def gc_sweep(self, mark):
|
||||
"""
|
||||
Finishes garbage collection for "mark". All unihash entries that have
|
||||
not been marked will be deleted.
|
||||
|
||||
It is recommended to clean unused outhash entries after running this to
|
||||
cleanup any dangling outhashes
|
||||
"""
|
||||
return await self.invoke({"gc-sweep": {"mark": mark}})
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return (await self.send_message({"backfill-wait": None}))["tasks"]
|
||||
|
||||
|
||||
class Client(bb.asyncrpc.Client):
|
||||
def __init__(self, username=None, password=None):
|
||||
self.username = username
|
||||
self.password = password
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self._add_methods(
|
||||
"connect_tcp",
|
||||
"connect_websocket",
|
||||
"get_unihash",
|
||||
"get_unihash_batch",
|
||||
"report_unihash",
|
||||
"report_unihash_equiv",
|
||||
"get_taskhash",
|
||||
"unihash_exists",
|
||||
"unihash_exists_batch",
|
||||
"get_outhash",
|
||||
"get_stats",
|
||||
"reset_stats",
|
||||
"backfill_wait",
|
||||
"remove",
|
||||
"clean_unused",
|
||||
"auth",
|
||||
"refresh_token",
|
||||
"set_user_perms",
|
||||
"get_user",
|
||||
"get_all_users",
|
||||
"new_user",
|
||||
"delete_user",
|
||||
"become_user",
|
||||
"get_db_usage",
|
||||
"get_db_query_columns",
|
||||
"gc_status",
|
||||
"gc_mark",
|
||||
"gc_sweep",
|
||||
)
|
||||
|
||||
def _get_async_client(self):
|
||||
return AsyncClient(self.username, self.password)
|
||||
|
||||
|
||||
class ClientPool(bb.asyncrpc.ClientPool):
|
||||
def __init__(
|
||||
self,
|
||||
address,
|
||||
max_clients,
|
||||
*,
|
||||
username=None,
|
||||
password=None,
|
||||
become=None,
|
||||
):
|
||||
super().__init__(max_clients)
|
||||
self.address = address
|
||||
self.username = username
|
||||
self.password = password
|
||||
self.become = become
|
||||
|
||||
async def _new_client(self):
|
||||
client = await create_async_client(
|
||||
self.address,
|
||||
username=self.username,
|
||||
password=self.password,
|
||||
)
|
||||
if self.become:
|
||||
await client.become_user(self.become)
|
||||
return client
|
||||
|
||||
def _run_key_tasks(self, queries, call):
|
||||
results = {key: None for key in queries.keys()}
|
||||
|
||||
def make_task(key, args):
|
||||
async def task(client):
|
||||
nonlocal results
|
||||
unihash = await call(client, args)
|
||||
results[key] = unihash
|
||||
|
||||
return task
|
||||
|
||||
def gen_tasks():
|
||||
for key, args in queries.items():
|
||||
yield make_task(key, args)
|
||||
|
||||
self.run_tasks(gen_tasks())
|
||||
return results
|
||||
|
||||
def get_unihashes(self, queries):
|
||||
"""
|
||||
Query multiple unihashes in parallel.
|
||||
|
||||
The queries argument is a dictionary with arbitrary key. The values
|
||||
must be a tuple of (method, taskhash).
|
||||
|
||||
Returns a dictionary with a corresponding key for each input key, and
|
||||
the value is the queried unihash (which might be none if the query
|
||||
failed)
|
||||
"""
|
||||
|
||||
async def call(client, args):
|
||||
method, taskhash = args
|
||||
return await client.get_unihash(method, taskhash)
|
||||
|
||||
return self._run_key_tasks(queries, call)
|
||||
|
||||
def unihashes_exist(self, queries):
|
||||
"""
|
||||
Query multiple unihash existence checks in parallel.
|
||||
|
||||
The queries argument is a dictionary with arbitrary key. The values
|
||||
must be a unihash.
|
||||
|
||||
Returns a dictionary with a corresponding key for each input key, and
|
||||
the value is True or False if the unihash is known by the server (or
|
||||
None if there was a failure)
|
||||
"""
|
||||
|
||||
async def call(client, unihash):
|
||||
return await client.unihash_exists(unihash)
|
||||
|
||||
return self._run_key_tasks(queries, call)
|
||||
return AsyncClient()
|
||||
|
||||
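A minimal usage sketch for the batch queries above, assuming the client module is importable as hashserv.client; the socket path, method names and hashes are placeholder values, not part of this diff.

from hashserv.client import ClientPool   # assumed import path

queries = {
    "task-a": ("example.do_compile", "aa" * 20),   # (method, taskhash) placeholders
    "task-b": ("example.do_install", "bb" * 20),
}

pool = ClientPool("unix:///tmp/hashserve.sock", max_clients=4)
results = pool.get_unihashes(queries)
for key, unihash in results.items():
    print(key, unihash)   # unihash is None when that particular query failed
# (pool shutdown/cleanup is omitted in this sketch)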
@@ -1,598 +0,0 @@
|
||||
#! /usr/bin/env python3
|
||||
#
|
||||
# Copyright (C) 2023 Garmin Ltd.
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from . import User
|
||||
|
||||
from sqlalchemy.ext.asyncio import create_async_engine
|
||||
from sqlalchemy.pool import NullPool
|
||||
from sqlalchemy import (
|
||||
MetaData,
|
||||
Column,
|
||||
Table,
|
||||
Text,
|
||||
Integer,
|
||||
UniqueConstraint,
|
||||
DateTime,
|
||||
Index,
|
||||
select,
|
||||
insert,
|
||||
exists,
|
||||
literal,
|
||||
and_,
|
||||
delete,
|
||||
update,
|
||||
func,
|
||||
inspect,
|
||||
)
|
||||
import sqlalchemy.engine
|
||||
from sqlalchemy.orm import declarative_base
|
||||
from sqlalchemy.exc import IntegrityError
|
||||
from sqlalchemy.dialects.postgresql import insert as postgres_insert
|
||||
|
||||
Base = declarative_base()
|
||||
|
||||
|
||||
class UnihashesV3(Base):
|
||||
__tablename__ = "unihashes_v3"
|
||||
id = Column(Integer, primary_key=True, autoincrement=True)
|
||||
method = Column(Text, nullable=False)
|
||||
taskhash = Column(Text, nullable=False)
|
||||
unihash = Column(Text, nullable=False)
|
||||
gc_mark = Column(Text, nullable=False)
|
||||
|
||||
__table_args__ = (
|
||||
UniqueConstraint("method", "taskhash"),
|
||||
Index("taskhash_lookup_v4", "method", "taskhash"),
|
||||
Index("unihash_lookup_v1", "unihash"),
|
||||
)
|
||||
|
||||
|
||||
class OuthashesV2(Base):
|
||||
__tablename__ = "outhashes_v2"
|
||||
id = Column(Integer, primary_key=True, autoincrement=True)
|
||||
method = Column(Text, nullable=False)
|
||||
taskhash = Column(Text, nullable=False)
|
||||
outhash = Column(Text, nullable=False)
|
||||
created = Column(DateTime)
|
||||
owner = Column(Text)
|
||||
PN = Column(Text)
|
||||
PV = Column(Text)
|
||||
PR = Column(Text)
|
||||
task = Column(Text)
|
||||
outhash_siginfo = Column(Text)
|
||||
|
||||
__table_args__ = (
|
||||
UniqueConstraint("method", "taskhash", "outhash"),
|
||||
Index("outhash_lookup_v3", "method", "outhash"),
|
||||
)
|
||||
|
||||
|
||||
class Users(Base):
|
||||
__tablename__ = "users"
|
||||
id = Column(Integer, primary_key=True, autoincrement=True)
|
||||
username = Column(Text, nullable=False)
|
||||
token = Column(Text, nullable=False)
|
||||
permissions = Column(Text)
|
||||
|
||||
__table_args__ = (UniqueConstraint("username"),)
|
||||
|
||||
|
||||
class Config(Base):
|
||||
__tablename__ = "config"
|
||||
id = Column(Integer, primary_key=True, autoincrement=True)
|
||||
name = Column(Text, nullable=False)
|
||||
value = Column(Text)
|
||||
__table_args__ = (
|
||||
UniqueConstraint("name"),
|
||||
Index("config_lookup", "name"),
|
||||
)
|
||||
|
||||
|
||||
#
|
||||
# Old table versions
|
||||
#
|
||||
DeprecatedBase = declarative_base()
|
||||
|
||||
|
||||
class UnihashesV2(DeprecatedBase):
|
||||
__tablename__ = "unihashes_v2"
|
||||
id = Column(Integer, primary_key=True, autoincrement=True)
|
||||
method = Column(Text, nullable=False)
|
||||
taskhash = Column(Text, nullable=False)
|
||||
unihash = Column(Text, nullable=False)
|
||||
|
||||
__table_args__ = (
|
||||
UniqueConstraint("method", "taskhash"),
|
||||
Index("taskhash_lookup_v3", "method", "taskhash"),
|
||||
)
|
||||
|
||||
|
||||
class DatabaseEngine(object):
|
||||
def __init__(self, url, username=None, password=None):
|
||||
self.logger = logging.getLogger("hashserv.sqlalchemy")
|
||||
self.url = sqlalchemy.engine.make_url(url)
|
||||
|
||||
if username is not None:
|
||||
self.url = self.url.set(username=username)
|
||||
|
||||
if password is not None:
|
||||
self.url = self.url.set(password=password)
|
||||
|
||||
async def create(self):
|
||||
def check_table_exists(conn, name):
|
||||
return inspect(conn).has_table(name)
|
||||
|
||||
self.logger.info("Using database %s", self.url)
|
||||
if self.url.drivername == 'postgresql+psycopg':
|
||||
# Psycopg 3 (psycopg) driver can handle async connection pooling
|
||||
self.engine = create_async_engine(self.url, max_overflow=-1)
|
||||
else:
|
||||
self.engine = create_async_engine(self.url, poolclass=NullPool)
|
||||
|
||||
async with self.engine.begin() as conn:
|
||||
# Create tables
|
||||
self.logger.info("Creating tables...")
|
||||
await conn.run_sync(Base.metadata.create_all)
|
||||
|
||||
if await conn.run_sync(check_table_exists, UnihashesV2.__tablename__):
|
||||
self.logger.info("Upgrading Unihashes V2 -> V3...")
|
||||
statement = insert(UnihashesV3).from_select(
|
||||
["id", "method", "unihash", "taskhash", "gc_mark"],
|
||||
select(
|
||||
UnihashesV2.id,
|
||||
UnihashesV2.method,
|
||||
UnihashesV2.unihash,
|
||||
UnihashesV2.taskhash,
|
||||
literal("").label("gc_mark"),
|
||||
),
|
||||
)
|
||||
self.logger.debug("%s", statement)
|
||||
await conn.execute(statement)
|
||||
|
||||
await conn.run_sync(Base.metadata.drop_all, [UnihashesV2.__table__])
|
||||
self.logger.info("Upgrade complete")
|
||||
|
||||
def connect(self, logger):
|
||||
return Database(self.engine, logger)
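A hedged sketch of driving this engine directly; the URL and the aiosqlite driver are illustrative assumptions, not part of this diff. The point is that create() picks its pooling strategy from the driver name and connect() hands back the Database wrapper defined below.

import asyncio
from hashserv.sqlalchemy import DatabaseEngine   # assumed import path

async def main():
    # "postgresql+psycopg" would take the psycopg-3 branch (max_overflow=-1);
    # any other async driver, e.g. sqlite+aiosqlite, falls back to NullPool.
    engine = DatabaseEngine("sqlite+aiosqlite:///hashserv.db")
    await engine.create()                 # creates tables, upgrades V2 -> V3 if present
    async with engine.connect(engine.logger) as db:
        print(await db.get_usage())       # row counts per table

asyncio.run(main())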
|
||||
|
||||
|
||||
def map_row(row):
|
||||
if row is None:
|
||||
return None
|
||||
return dict(**row._mapping)
|
||||
|
||||
|
||||
def map_user(row):
|
||||
if row is None:
|
||||
return None
|
||||
return User(
|
||||
username=row.username,
|
||||
permissions=set(row.permissions.split()),
|
||||
)
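The permissions column is a single space-separated TEXT field; a small sketch of the round-trip, with placeholder permission names.

stored = " ".join(["@read", "@report"])   # what set_user_perms() writes
recovered = set(stored.split())           # what map_user() reconstructs
assert recovered == {"@read", "@report"}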
|
||||
|
||||
|
||||
def _make_condition_statement(table, condition):
|
||||
where = {}
|
||||
for c in table.__table__.columns:
|
||||
if c.key in condition and condition[c.key] is not None:
|
||||
where[c] = condition[c.key]
|
||||
|
||||
return [(k == v) for k, v in where.items()]
|
||||
|
||||
|
||||
class Database(object):
|
||||
def __init__(self, engine, logger):
|
||||
self.engine = engine
|
||||
self.db = None
|
||||
self.logger = logger
|
||||
|
||||
async def __aenter__(self):
|
||||
self.db = await self.engine.connect()
|
||||
return self
|
||||
|
||||
async def __aexit__(self, exc_type, exc_value, traceback):
|
||||
await self.close()
|
||||
|
||||
async def close(self):
|
||||
await self.db.close()
|
||||
self.db = None
|
||||
|
||||
async def _execute(self, statement):
|
||||
self.logger.debug("%s", statement)
|
||||
return await self.db.execute(statement)
|
||||
|
||||
async def _set_config(self, name, value):
|
||||
while True:
|
||||
result = await self._execute(
|
||||
update(Config).where(Config.name == name).values(value=value)
|
||||
)
|
||||
|
||||
if result.rowcount == 0:
|
||||
self.logger.debug("Config '%s' not found. Adding it", name)
|
||||
try:
|
||||
await self._execute(insert(Config).values(name=name, value=value))
|
||||
except IntegrityError:
|
||||
# Race. Try again
|
||||
continue
|
||||
|
||||
break
|
||||
|
||||
def _get_config_subquery(self, name, default=None):
|
||||
if default is not None:
|
||||
return func.coalesce(
|
||||
select(Config.value).where(Config.name == name).scalar_subquery(),
|
||||
default,
|
||||
)
|
||||
return select(Config.value).where(Config.name == name).scalar_subquery()
|
||||
|
||||
async def _get_config(self, name):
|
||||
result = await self._execute(select(Config.value).where(Config.name == name))
|
||||
row = result.first()
|
||||
if row is None:
|
||||
return None
|
||||
return row.value
|
||||
|
||||
async def get_unihash_by_taskhash_full(self, method, taskhash):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
select(
|
||||
OuthashesV2,
|
||||
UnihashesV3.unihash.label("unihash"),
|
||||
)
|
||||
.join(
|
||||
UnihashesV3,
|
||||
and_(
|
||||
UnihashesV3.method == OuthashesV2.method,
|
||||
UnihashesV3.taskhash == OuthashesV2.taskhash,
|
||||
),
|
||||
)
|
||||
.where(
|
||||
OuthashesV2.method == method,
|
||||
OuthashesV2.taskhash == taskhash,
|
||||
)
|
||||
.order_by(
|
||||
OuthashesV2.created.asc(),
|
||||
)
|
||||
.limit(1)
|
||||
)
|
||||
return map_row(result.first())
|
||||
|
||||
async def get_unihash_by_outhash(self, method, outhash):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
select(OuthashesV2, UnihashesV3.unihash.label("unihash"))
|
||||
.join(
|
||||
UnihashesV3,
|
||||
and_(
|
||||
UnihashesV3.method == OuthashesV2.method,
|
||||
UnihashesV3.taskhash == OuthashesV2.taskhash,
|
||||
),
|
||||
)
|
||||
.where(
|
||||
OuthashesV2.method == method,
|
||||
OuthashesV2.outhash == outhash,
|
||||
)
|
||||
.order_by(
|
||||
OuthashesV2.created.asc(),
|
||||
)
|
||||
.limit(1)
|
||||
)
|
||||
return map_row(result.first())
|
||||
|
||||
async def unihash_exists(self, unihash):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
select(UnihashesV3).where(UnihashesV3.unihash == unihash).limit(1)
|
||||
)
|
||||
|
||||
return result.first() is not None
|
||||
|
||||
async def get_outhash(self, method, outhash):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
select(OuthashesV2)
|
||||
.where(
|
||||
OuthashesV2.method == method,
|
||||
OuthashesV2.outhash == outhash,
|
||||
)
|
||||
.order_by(
|
||||
OuthashesV2.created.asc(),
|
||||
)
|
||||
.limit(1)
|
||||
)
|
||||
return map_row(result.first())
|
||||
|
||||
async def get_equivalent_for_outhash(self, method, outhash, taskhash):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
select(
|
||||
OuthashesV2.taskhash.label("taskhash"),
|
||||
UnihashesV3.unihash.label("unihash"),
|
||||
)
|
||||
.join(
|
||||
UnihashesV3,
|
||||
and_(
|
||||
UnihashesV3.method == OuthashesV2.method,
|
||||
UnihashesV3.taskhash == OuthashesV2.taskhash,
|
||||
),
|
||||
)
|
||||
.where(
|
||||
OuthashesV2.method == method,
|
||||
OuthashesV2.outhash == outhash,
|
||||
OuthashesV2.taskhash != taskhash,
|
||||
)
|
||||
.order_by(
|
||||
OuthashesV2.created.asc(),
|
||||
)
|
||||
.limit(1)
|
||||
)
|
||||
return map_row(result.first())
|
||||
|
||||
async def get_equivalent(self, method, taskhash):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
select(
|
||||
UnihashesV3.unihash,
|
||||
UnihashesV3.method,
|
||||
UnihashesV3.taskhash,
|
||||
).where(
|
||||
UnihashesV3.method == method,
|
||||
UnihashesV3.taskhash == taskhash,
|
||||
)
|
||||
)
|
||||
return map_row(result.first())
|
||||
|
||||
async def remove(self, condition):
|
||||
async def do_remove(table):
|
||||
where = _make_condition_statement(table, condition)
|
||||
if where:
|
||||
async with self.db.begin():
|
||||
result = await self._execute(delete(table).where(*where))
|
||||
return result.rowcount
|
||||
|
||||
return 0
|
||||
|
||||
count = 0
|
||||
count += await do_remove(UnihashesV3)
|
||||
count += await do_remove(OuthashesV2)
|
||||
|
||||
return count
|
||||
|
||||
async def get_current_gc_mark(self):
|
||||
async with self.db.begin():
|
||||
return await self._get_config("gc-mark")
|
||||
|
||||
async def gc_status(self):
|
||||
async with self.db.begin():
|
||||
gc_mark_subquery = self._get_config_subquery("gc-mark", "")
|
||||
|
||||
result = await self._execute(
|
||||
select(func.count())
|
||||
.select_from(UnihashesV3)
|
||||
.where(UnihashesV3.gc_mark == gc_mark_subquery)
|
||||
)
|
||||
keep_rows = result.scalar()
|
||||
|
||||
result = await self._execute(
|
||||
select(func.count())
|
||||
.select_from(UnihashesV3)
|
||||
.where(UnihashesV3.gc_mark != gc_mark_subquery)
|
||||
)
|
||||
remove_rows = result.scalar()
|
||||
|
||||
return (keep_rows, remove_rows, await self._get_config("gc-mark"))
|
||||
|
||||
async def gc_mark(self, mark, condition):
|
||||
async with self.db.begin():
|
||||
await self._set_config("gc-mark", mark)
|
||||
|
||||
where = _make_condition_statement(UnihashesV3, condition)
|
||||
if not where:
|
||||
return 0
|
||||
|
||||
result = await self._execute(
|
||||
update(UnihashesV3)
|
||||
.values(gc_mark=self._get_config_subquery("gc-mark", ""))
|
||||
.where(*where)
|
||||
)
|
||||
return result.rowcount
|
||||
|
||||
async def gc_sweep(self):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
delete(UnihashesV3).where(
|
||||
# A sneaky conditional that provides some errant use
|
||||
# protection: If the config mark is NULL, this will not
|
||||
# match any rows because no default is specified in the
|
||||
# select statement
|
||||
UnihashesV3.gc_mark
|
||||
!= self._get_config_subquery("gc-mark")
|
||||
)
|
||||
)
|
||||
await self._set_config("gc-mark", None)
|
||||
|
||||
return result.rowcount
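Taken together, gc_mark() and gc_sweep() form a mark-and-sweep pass over unihashes_v3: rows stamped with the current mark survive, everything else is deleted. A hedged sketch of the calling sequence, with placeholder mark and condition values:

async def collect_garbage(db, live_unihashes):
    # "db" is an open Database from this module; the mark string is arbitrary
    mark = "gc-2024-01-01"
    for unihash in live_unihashes:
        # stamp each row we want to keep with the current mark
        await db.gc_mark(mark, {"unihash": unihash})
    # delete every row whose gc_mark differs from the stored "gc-mark" config;
    # because no default is supplied there, a cleared mark deletes nothing
    return await db.gc_sweep()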
|
||||
|
||||
async def clean_unused(self, oldest):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
delete(OuthashesV2).where(
|
||||
OuthashesV2.created < oldest,
|
||||
~(
|
||||
select(UnihashesV3.id)
|
||||
.where(
|
||||
UnihashesV3.method == OuthashesV2.method,
|
||||
UnihashesV3.taskhash == OuthashesV2.taskhash,
|
||||
)
|
||||
.limit(1)
|
||||
.exists()
|
||||
),
|
||||
)
|
||||
)
|
||||
return result.rowcount
|
||||
|
||||
async def insert_unihash(self, method, taskhash, unihash):
|
||||
# Postgres specific ignore on insert duplicate
|
||||
if self.engine.name == "postgresql":
|
||||
statement = (
|
||||
postgres_insert(UnihashesV3)
|
||||
.values(
|
||||
method=method,
|
||||
taskhash=taskhash,
|
||||
unihash=unihash,
|
||||
gc_mark=self._get_config_subquery("gc-mark", ""),
|
||||
)
|
||||
.on_conflict_do_nothing(index_elements=("method", "taskhash"))
|
||||
)
|
||||
else:
|
||||
statement = insert(UnihashesV3).values(
|
||||
method=method,
|
||||
taskhash=taskhash,
|
||||
unihash=unihash,
|
||||
gc_mark=self._get_config_subquery("gc-mark", ""),
|
||||
)
|
||||
|
||||
try:
|
||||
async with self.db.begin():
|
||||
result = await self._execute(statement)
|
||||
return result.rowcount != 0
|
||||
except IntegrityError:
|
||||
self.logger.debug(
|
||||
"%s, %s, %s already in unihash database", method, taskhash, unihash
|
||||
)
|
||||
return False
|
||||
|
||||
async def insert_outhash(self, data):
|
||||
outhash_columns = set(c.key for c in OuthashesV2.__table__.columns)
|
||||
|
||||
data = {k: v for k, v in data.items() if k in outhash_columns}
|
||||
|
||||
if "created" in data and not isinstance(data["created"], datetime):
|
||||
data["created"] = datetime.fromisoformat(data["created"])
|
||||
|
||||
# Postgres specific ignore on insert duplicate
|
||||
if self.engine.name == "postgresql":
|
||||
statement = (
|
||||
postgres_insert(OuthashesV2)
|
||||
.values(**data)
|
||||
.on_conflict_do_nothing(
|
||||
index_elements=("method", "taskhash", "outhash")
|
||||
)
|
||||
)
|
||||
else:
|
||||
statement = insert(OuthashesV2).values(**data)
|
||||
|
||||
try:
|
||||
async with self.db.begin():
|
||||
result = await self._execute(statement)
|
||||
return result.rowcount != 0
|
||||
except IntegrityError:
|
||||
self.logger.debug(
|
||||
"%s, %s already in outhash database", data["method"], data["outhash"]
|
||||
)
|
||||
return False
|
||||
|
||||
async def _get_user(self, username):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
select(
|
||||
Users.username,
|
||||
Users.permissions,
|
||||
Users.token,
|
||||
).where(
|
||||
Users.username == username,
|
||||
)
|
||||
)
|
||||
return result.first()
|
||||
|
||||
async def lookup_user_token(self, username):
|
||||
row = await self._get_user(username)
|
||||
if not row:
|
||||
return None, None
|
||||
return map_user(row), row.token
|
||||
|
||||
async def lookup_user(self, username):
|
||||
return map_user(await self._get_user(username))
|
||||
|
||||
async def set_user_token(self, username, token):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
update(Users)
|
||||
.where(
|
||||
Users.username == username,
|
||||
)
|
||||
.values(
|
||||
token=token,
|
||||
)
|
||||
)
|
||||
return result.rowcount != 0
|
||||
|
||||
async def set_user_perms(self, username, permissions):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
update(Users)
|
||||
.where(Users.username == username)
|
||||
.values(permissions=" ".join(permissions))
|
||||
)
|
||||
return result.rowcount != 0
|
||||
|
||||
async def get_all_users(self):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
select(
|
||||
Users.username,
|
||||
Users.permissions,
|
||||
)
|
||||
)
|
||||
return [map_user(row) for row in result]
|
||||
|
||||
async def new_user(self, username, permissions, token):
|
||||
try:
|
||||
async with self.db.begin():
|
||||
await self._execute(
|
||||
insert(Users).values(
|
||||
username=username,
|
||||
permissions=" ".join(permissions),
|
||||
token=token,
|
||||
)
|
||||
)
|
||||
return True
|
||||
except IntegrityError as e:
|
||||
self.logger.debug("Cannot create new user %s: %s", username, e)
|
||||
return False
|
||||
|
||||
async def delete_user(self, username):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
delete(Users).where(Users.username == username)
|
||||
)
|
||||
return result.rowcount != 0
|
||||
|
||||
async def get_usage(self):
|
||||
usage = {}
|
||||
async with self.db.begin() as session:
|
||||
for name, table in Base.metadata.tables.items():
|
||||
result = await self._execute(
|
||||
statement=select(func.count()).select_from(table)
|
||||
)
|
||||
usage[name] = {
|
||||
"rows": result.scalar(),
|
||||
}
|
||||
|
||||
return usage
|
||||
|
||||
async def get_query_columns(self):
|
||||
columns = set()
|
||||
for table in (UnihashesV3, OuthashesV2):
|
||||
for c in table.__table__.columns:
|
||||
if not isinstance(c.type, Text):
|
||||
continue
|
||||
columns.add(c.key)
|
||||
|
||||
return list(columns)
|
||||
@@ -1,562 +0,0 @@
|
||||
#! /usr/bin/env python3
|
||||
#
|
||||
# Copyright (C) 2023 Garmin Ltd.
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
import sqlite3
|
||||
import logging
|
||||
from contextlib import closing
|
||||
from . import User
|
||||
|
||||
logger = logging.getLogger("hashserv.sqlite")
|
||||
|
||||
UNIHASH_TABLE_DEFINITION = (
|
||||
("method", "TEXT NOT NULL", "UNIQUE"),
|
||||
("taskhash", "TEXT NOT NULL", "UNIQUE"),
|
||||
("unihash", "TEXT NOT NULL", ""),
|
||||
("gc_mark", "TEXT NOT NULL", ""),
|
||||
)
|
||||
|
||||
UNIHASH_TABLE_COLUMNS = tuple(name for name, _, _ in UNIHASH_TABLE_DEFINITION)
|
||||
|
||||
OUTHASH_TABLE_DEFINITION = (
|
||||
("method", "TEXT NOT NULL", "UNIQUE"),
|
||||
("taskhash", "TEXT NOT NULL", "UNIQUE"),
|
||||
("outhash", "TEXT NOT NULL", "UNIQUE"),
|
||||
("created", "DATETIME", ""),
|
||||
# Optional fields
|
||||
("owner", "TEXT", ""),
|
||||
("PN", "TEXT", ""),
|
||||
("PV", "TEXT", ""),
|
||||
("PR", "TEXT", ""),
|
||||
("task", "TEXT", ""),
|
||||
("outhash_siginfo", "TEXT", ""),
|
||||
)
|
||||
|
||||
OUTHASH_TABLE_COLUMNS = tuple(name for name, _, _ in OUTHASH_TABLE_DEFINITION)
|
||||
|
||||
USERS_TABLE_DEFINITION = (
|
||||
("username", "TEXT NOT NULL", "UNIQUE"),
|
||||
("token", "TEXT NOT NULL", ""),
|
||||
("permissions", "TEXT NOT NULL", ""),
|
||||
)
|
||||
|
||||
USERS_TABLE_COLUMNS = tuple(name for name, _, _ in USERS_TABLE_DEFINITION)
|
||||
|
||||
|
||||
CONFIG_TABLE_DEFINITION = (
|
||||
("name", "TEXT NOT NULL", "UNIQUE"),
|
||||
("value", "TEXT", ""),
|
||||
)
|
||||
|
||||
CONFIG_TABLE_COLUMNS = tuple(name for name, _, _ in CONFIG_TABLE_DEFINITION)
|
||||
|
||||
|
||||
def _make_table(cursor, name, definition):
|
||||
cursor.execute(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS {name} (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
{fields}
|
||||
UNIQUE({unique})
|
||||
)
|
||||
""".format(
|
||||
name=name,
|
||||
fields=" ".join("%s %s," % (name, typ) for name, typ, _ in definition),
|
||||
unique=", ".join(
|
||||
name for name, _, flags in definition if "UNIQUE" in flags
|
||||
),
|
||||
)
|
||||
)
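For reference, the statement this helper expands to for the unihashes_v3 definition above looks like the following (whitespace tidied for readability):

UNIHASHES_V3_DDL = """
CREATE TABLE IF NOT EXISTS unihashes_v3 (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    method TEXT NOT NULL, taskhash TEXT NOT NULL,
    unihash TEXT NOT NULL, gc_mark TEXT NOT NULL,
    UNIQUE(method, taskhash)
)
"""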
|
||||
|
||||
|
||||
def map_user(row):
|
||||
if row is None:
|
||||
return None
|
||||
return User(
|
||||
username=row["username"],
|
||||
permissions=set(row["permissions"].split()),
|
||||
)
|
||||
|
||||
|
||||
def _make_condition_statement(columns, condition):
|
||||
where = {}
|
||||
for c in columns:
|
||||
if c in condition and condition[c] is not None:
|
||||
where[c] = condition[c]
|
||||
|
||||
return where, " AND ".join("%s=:%s" % (k, k) for k in where.keys())
|
||||
|
||||
|
||||
def _get_sqlite_version(cursor):
|
||||
cursor.execute("SELECT sqlite_version()")
|
||||
|
||||
version = []
|
||||
for v in cursor.fetchone()[0].split("."):
|
||||
try:
|
||||
version.append(int(v))
|
||||
except ValueError:
|
||||
version.append(v)
|
||||
|
||||
return tuple(version)
|
||||
|
||||
|
||||
def _schema_table_name(version):
|
||||
if version >= (3, 33):
|
||||
return "sqlite_schema"
|
||||
|
||||
return "sqlite_master"
|
||||
|
||||
|
||||
class DatabaseEngine(object):
|
||||
def __init__(self, dbname, sync):
|
||||
self.dbname = dbname
|
||||
self.logger = logger
|
||||
self.sync = sync
|
||||
|
||||
async def create(self):
|
||||
db = sqlite3.connect(self.dbname)
|
||||
db.row_factory = sqlite3.Row
|
||||
|
||||
with closing(db.cursor()) as cursor:
|
||||
_make_table(cursor, "unihashes_v3", UNIHASH_TABLE_DEFINITION)
|
||||
_make_table(cursor, "outhashes_v2", OUTHASH_TABLE_DEFINITION)
|
||||
_make_table(cursor, "users", USERS_TABLE_DEFINITION)
|
||||
_make_table(cursor, "config", CONFIG_TABLE_DEFINITION)
|
||||
|
||||
cursor.execute("PRAGMA journal_mode = WAL")
|
||||
cursor.execute(
|
||||
"PRAGMA synchronous = %s" % ("NORMAL" if self.sync else "OFF")
|
||||
)
|
||||
|
||||
# Drop old indexes
|
||||
cursor.execute("DROP INDEX IF EXISTS taskhash_lookup")
|
||||
cursor.execute("DROP INDEX IF EXISTS outhash_lookup")
|
||||
cursor.execute("DROP INDEX IF EXISTS taskhash_lookup_v2")
|
||||
cursor.execute("DROP INDEX IF EXISTS outhash_lookup_v2")
|
||||
cursor.execute("DROP INDEX IF EXISTS taskhash_lookup_v3")
|
||||
|
||||
# TODO: Upgrade from tasks_v2?
|
||||
cursor.execute("DROP TABLE IF EXISTS tasks_v2")
|
||||
|
||||
# Create new indexes
|
||||
cursor.execute(
|
||||
"CREATE INDEX IF NOT EXISTS taskhash_lookup_v4 ON unihashes_v3 (method, taskhash)"
|
||||
)
|
||||
cursor.execute(
|
||||
"CREATE INDEX IF NOT EXISTS unihash_lookup_v1 ON unihashes_v3 (unihash)"
|
||||
)
|
||||
cursor.execute(
|
||||
"CREATE INDEX IF NOT EXISTS outhash_lookup_v3 ON outhashes_v2 (method, outhash)"
|
||||
)
|
||||
cursor.execute("CREATE INDEX IF NOT EXISTS config_lookup ON config (name)")
|
||||
|
||||
sqlite_version = _get_sqlite_version(cursor)
|
||||
|
||||
cursor.execute(
|
||||
f"""
|
||||
SELECT name FROM {_schema_table_name(sqlite_version)} WHERE type = 'table' AND name = 'unihashes_v2'
|
||||
"""
|
||||
)
|
||||
if cursor.fetchone():
|
||||
self.logger.info("Upgrading Unihashes V2 -> V3...")
|
||||
cursor.execute(
|
||||
"""
|
||||
INSERT INTO unihashes_v3 (id, method, unihash, taskhash, gc_mark)
|
||||
SELECT id, method, unihash, taskhash, '' FROM unihashes_v2
|
||||
"""
|
||||
)
|
||||
cursor.execute("DROP TABLE unihashes_v2")
|
||||
db.commit()
|
||||
self.logger.info("Upgrade complete")
|
||||
|
||||
def connect(self, logger):
|
||||
return Database(logger, self.dbname, self.sync)
|
||||
|
||||
|
||||
class Database(object):
|
||||
def __init__(self, logger, dbname, sync):
|
||||
self.dbname = dbname
|
||||
self.logger = logger
|
||||
|
||||
self.db = sqlite3.connect(self.dbname)
|
||||
self.db.row_factory = sqlite3.Row
|
||||
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute("PRAGMA journal_mode = WAL")
|
||||
cursor.execute(
|
||||
"PRAGMA synchronous = %s" % ("NORMAL" if sync else "OFF")
|
||||
)
|
||||
|
||||
self.sqlite_version = _get_sqlite_version(cursor)
|
||||
|
||||
async def __aenter__(self):
|
||||
return self
|
||||
|
||||
async def __aexit__(self, exc_type, exc_value, traceback):
|
||||
await self.close()
|
||||
|
||||
async def _set_config(self, cursor, name, value):
|
||||
cursor.execute(
|
||||
"""
|
||||
INSERT OR REPLACE INTO config (id, name, value) VALUES
|
||||
((SELECT id FROM config WHERE name=:name), :name, :value)
|
||||
""",
|
||||
{
|
||||
"name": name,
|
||||
"value": value,
|
||||
},
|
||||
)
|
||||
|
||||
async def _get_config(self, cursor, name):
|
||||
cursor.execute(
|
||||
"SELECT value FROM config WHERE name=:name",
|
||||
{
|
||||
"name": name,
|
||||
},
|
||||
)
|
||||
row = cursor.fetchone()
|
||||
if row is None:
|
||||
return None
|
||||
return row["value"]
|
||||
|
||||
async def close(self):
|
||||
self.db.close()
|
||||
|
||||
async def get_unihash_by_taskhash_full(self, method, taskhash):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT *, unihashes_v3.unihash AS unihash FROM outhashes_v2
|
||||
INNER JOIN unihashes_v3 ON unihashes_v3.method=outhashes_v2.method AND unihashes_v3.taskhash=outhashes_v2.taskhash
|
||||
WHERE outhashes_v2.method=:method AND outhashes_v2.taskhash=:taskhash
|
||||
ORDER BY outhashes_v2.created ASC
|
||||
LIMIT 1
|
||||
""",
|
||||
{
|
||||
"method": method,
|
||||
"taskhash": taskhash,
|
||||
},
|
||||
)
|
||||
return cursor.fetchone()
|
||||
|
||||
async def get_unihash_by_outhash(self, method, outhash):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT *, unihashes_v3.unihash AS unihash FROM outhashes_v2
|
||||
INNER JOIN unihashes_v3 ON unihashes_v3.method=outhashes_v2.method AND unihashes_v3.taskhash=outhashes_v2.taskhash
|
||||
WHERE outhashes_v2.method=:method AND outhashes_v2.outhash=:outhash
|
||||
ORDER BY outhashes_v2.created ASC
|
||||
LIMIT 1
|
||||
""",
|
||||
{
|
||||
"method": method,
|
||||
"outhash": outhash,
|
||||
},
|
||||
)
|
||||
return cursor.fetchone()
|
||||
|
||||
async def unihash_exists(self, unihash):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT * FROM unihashes_v3 WHERE unihash=:unihash
|
||||
LIMIT 1
|
||||
""",
|
||||
{
|
||||
"unihash": unihash,
|
||||
},
|
||||
)
|
||||
return cursor.fetchone() is not None
|
||||
|
||||
async def get_outhash(self, method, outhash):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT * FROM outhashes_v2
|
||||
WHERE outhashes_v2.method=:method AND outhashes_v2.outhash=:outhash
|
||||
ORDER BY outhashes_v2.created ASC
|
||||
LIMIT 1
|
||||
""",
|
||||
{
|
||||
"method": method,
|
||||
"outhash": outhash,
|
||||
},
|
||||
)
|
||||
return cursor.fetchone()
|
||||
|
||||
async def get_equivalent_for_outhash(self, method, outhash, taskhash):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT outhashes_v2.taskhash AS taskhash, unihashes_v3.unihash AS unihash FROM outhashes_v2
|
||||
INNER JOIN unihashes_v3 ON unihashes_v3.method=outhashes_v2.method AND unihashes_v3.taskhash=outhashes_v2.taskhash
|
||||
-- Select any matching output hash except the one we just inserted
|
||||
WHERE outhashes_v2.method=:method AND outhashes_v2.outhash=:outhash AND outhashes_v2.taskhash!=:taskhash
|
||||
-- Pick the oldest hash
|
||||
ORDER BY outhashes_v2.created ASC
|
||||
LIMIT 1
|
||||
""",
|
||||
{
|
||||
"method": method,
|
||||
"outhash": outhash,
|
||||
"taskhash": taskhash,
|
||||
},
|
||||
)
|
||||
return cursor.fetchone()
|
||||
|
||||
async def get_equivalent(self, method, taskhash):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"SELECT taskhash, method, unihash FROM unihashes_v3 WHERE method=:method AND taskhash=:taskhash",
|
||||
{
|
||||
"method": method,
|
||||
"taskhash": taskhash,
|
||||
},
|
||||
)
|
||||
return cursor.fetchone()
|
||||
|
||||
async def remove(self, condition):
|
||||
def do_remove(columns, table_name, cursor):
|
||||
where, clause = _make_condition_statement(columns, condition)
|
||||
if where:
|
||||
query = f"DELETE FROM {table_name} WHERE {clause}"
|
||||
cursor.execute(query, where)
|
||||
return cursor.rowcount
|
||||
|
||||
return 0
|
||||
|
||||
count = 0
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
count += do_remove(OUTHASH_TABLE_COLUMNS, "outhashes_v2", cursor)
|
||||
count += do_remove(UNIHASH_TABLE_COLUMNS, "unihashes_v3", cursor)
|
||||
self.db.commit()
|
||||
|
||||
return count
|
||||
|
||||
async def get_current_gc_mark(self):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
return await self._get_config(cursor, "gc-mark")
|
||||
|
||||
async def gc_status(self):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT COUNT() FROM unihashes_v3 WHERE
|
||||
gc_mark=COALESCE((SELECT value FROM config WHERE name='gc-mark'), '')
|
||||
"""
|
||||
)
|
||||
keep_rows = cursor.fetchone()[0]
|
||||
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT COUNT() FROM unihashes_v3 WHERE
|
||||
gc_mark!=COALESCE((SELECT value FROM config WHERE name='gc-mark'), '')
|
||||
"""
|
||||
)
|
||||
remove_rows = cursor.fetchone()[0]
|
||||
|
||||
current_mark = await self._get_config(cursor, "gc-mark")
|
||||
|
||||
return (keep_rows, remove_rows, current_mark)
|
||||
|
||||
async def gc_mark(self, mark, condition):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
await self._set_config(cursor, "gc-mark", mark)
|
||||
|
||||
where, clause = _make_condition_statement(UNIHASH_TABLE_COLUMNS, condition)
|
||||
|
||||
new_rows = 0
|
||||
if where:
|
||||
cursor.execute(
|
||||
f"""
|
||||
UPDATE unihashes_v3 SET
|
||||
gc_mark=COALESCE((SELECT value FROM config WHERE name='gc-mark'), '')
|
||||
WHERE {clause}
|
||||
""",
|
||||
where,
|
||||
)
|
||||
new_rows = cursor.rowcount
|
||||
|
||||
self.db.commit()
|
||||
return new_rows
|
||||
|
||||
async def gc_sweep(self):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
# NOTE: COALESCE is not used in this query so that if the current
|
||||
# mark is NULL, nothing will happen
|
||||
cursor.execute(
|
||||
"""
|
||||
DELETE FROM unihashes_v3 WHERE
|
||||
gc_mark!=(SELECT value FROM config WHERE name='gc-mark')
|
||||
"""
|
||||
)
|
||||
count = cursor.rowcount
|
||||
await self._set_config(cursor, "gc-mark", None)
|
||||
|
||||
self.db.commit()
|
||||
return count
|
||||
|
||||
async def clean_unused(self, oldest):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
DELETE FROM outhashes_v2 WHERE created<:oldest AND NOT EXISTS (
|
||||
SELECT unihashes_v3.id FROM unihashes_v3 WHERE unihashes_v3.method=outhashes_v2.method AND unihashes_v3.taskhash=outhashes_v2.taskhash LIMIT 1
|
||||
)
|
||||
""",
|
||||
{
|
||||
"oldest": oldest,
|
||||
},
|
||||
)
|
||||
self.db.commit()
|
||||
return cursor.rowcount
|
||||
|
||||
async def insert_unihash(self, method, taskhash, unihash):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
prevrowid = cursor.lastrowid
|
||||
cursor.execute(
|
||||
"""
|
||||
INSERT OR IGNORE INTO unihashes_v3 (method, taskhash, unihash, gc_mark) VALUES
|
||||
(
|
||||
:method,
|
||||
:taskhash,
|
||||
:unihash,
|
||||
COALESCE((SELECT value FROM config WHERE name='gc-mark'), '')
|
||||
)
|
||||
""",
|
||||
{
|
||||
"method": method,
|
||||
"taskhash": taskhash,
|
||||
"unihash": unihash,
|
||||
},
|
||||
)
|
||||
self.db.commit()
|
||||
return cursor.lastrowid != prevrowid
|
||||
|
||||
async def insert_outhash(self, data):
|
||||
data = {k: v for k, v in data.items() if k in OUTHASH_TABLE_COLUMNS}
|
||||
keys = sorted(data.keys())
|
||||
query = "INSERT OR IGNORE INTO outhashes_v2 ({fields}) VALUES({values})".format(
|
||||
fields=", ".join(keys),
|
||||
values=", ".join(":" + k for k in keys),
|
||||
)
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
prevrowid = cursor.lastrowid
|
||||
cursor.execute(query, data)
|
||||
self.db.commit()
|
||||
return cursor.lastrowid != prevrowid
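A short sketch of what this method builds for a minimal report, and why lastrowid is captured before the execute: the code relies on INSERT OR IGNORE leaving lastrowid untouched when the row already exists, so the comparison doubles as an "actually inserted" flag. Values below are placeholders.

data = {"method": "example_method", "taskhash": "aa" * 20, "outhash": "bb" * 20}
keys = sorted(data.keys())
query = "INSERT OR IGNORE INTO outhashes_v2 ({fields}) VALUES({values})".format(
    fields=", ".join(keys),
    values=", ".join(":" + k for k in keys),
)
# -> INSERT OR IGNORE INTO outhashes_v2 (method, outhash, taskhash)
#    VALUES(:method, :outhash, :taskhash)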
|
||||
|
||||
def _get_user(self, username):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT username, permissions, token FROM users WHERE username=:username
|
||||
""",
|
||||
{
|
||||
"username": username,
|
||||
},
|
||||
)
|
||||
return cursor.fetchone()
|
||||
|
||||
async def lookup_user_token(self, username):
|
||||
row = self._get_user(username)
|
||||
if row is None:
|
||||
return None, None
|
||||
return map_user(row), row["token"]
|
||||
|
||||
async def lookup_user(self, username):
|
||||
return map_user(self._get_user(username))
|
||||
|
||||
async def set_user_token(self, username, token):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
UPDATE users SET token=:token WHERE username=:username
|
||||
""",
|
||||
{
|
||||
"username": username,
|
||||
"token": token,
|
||||
},
|
||||
)
|
||||
self.db.commit()
|
||||
return cursor.rowcount != 0
|
||||
|
||||
async def set_user_perms(self, username, permissions):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
UPDATE users SET permissions=:permissions WHERE username=:username
|
||||
""",
|
||||
{
|
||||
"username": username,
|
||||
"permissions": " ".join(permissions),
|
||||
},
|
||||
)
|
||||
self.db.commit()
|
||||
return cursor.rowcount != 0
|
||||
|
||||
async def get_all_users(self):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute("SELECT username, permissions FROM users")
|
||||
return [map_user(r) for r in cursor.fetchall()]
|
||||
|
||||
async def new_user(self, username, permissions, token):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
try:
|
||||
cursor.execute(
|
||||
"""
|
||||
INSERT INTO users (username, token, permissions) VALUES (:username, :token, :permissions)
|
||||
""",
|
||||
{
|
||||
"username": username,
|
||||
"token": token,
|
||||
"permissions": " ".join(permissions),
|
||||
},
|
||||
)
|
||||
self.db.commit()
|
||||
return True
|
||||
except sqlite3.IntegrityError:
|
||||
return False
|
||||
|
||||
async def delete_user(self, username):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
DELETE FROM users WHERE username=:username
|
||||
""",
|
||||
{
|
||||
"username": username,
|
||||
},
|
||||
)
|
||||
self.db.commit()
|
||||
return cursor.rowcount != 0
|
||||
|
||||
async def get_usage(self):
|
||||
usage = {}
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
f"""
|
||||
SELECT name FROM {_schema_table_name(self.sqlite_version)} WHERE type = 'table' AND name NOT LIKE 'sqlite_%'
|
||||
"""
|
||||
)
|
||||
for row in cursor.fetchall():
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT COUNT() FROM %s
|
||||
"""
|
||||
% row["name"],
|
||||
)
|
||||
usage[row["name"]] = {
|
||||
"rows": cursor.fetchone()[0],
|
||||
}
|
||||
return usage
|
||||
|
||||
async def get_query_columns(self):
|
||||
columns = set()
|
||||
for name, typ, _ in UNIHASH_TABLE_DEFINITION + OUTHASH_TABLE_DEFINITION:
|
||||
if typ.startswith("TEXT"):
|
||||
columns.add(name)
|
||||
return list(columns)
|
||||