Mirror of https://git.yoctoproject.org/poky (synced 2026-02-15 13:13:02 +01:00)

Compare commits: scarthgap-... to yocto-4.2 (43 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 21790e71d5 | |
| | b8007d3c22 | |
| | bca7ec652f | |
| | f73e712b6b | |
| | 60012ae54a | |
| | 45ccdcfcbc | |
| | 8b3b075dd5 | |
| | c3248e0da1 | |
| | 65cc65fa8d | |
| | 410290c2f5 | |
| | ea2feb23bc | |
| | eb292619e7 | |
| | b93e695de6 | |
| | 338bc72e4d | |
| | 3c0b78802d | |
| | 23d946b9ba | |
| | 1b9bcc7b19 | |
| | 1d4d5371ec | |
| | 4f833991c2 | |
| | e55e243f84 | |
| | 20c58a6cb2 | |
| | c3c439d62a | |
| | bdf37e43b0 | |
| | 958d52f37b | |
| | 42a6d47754 | |
| | 64111246ce | |
| | b1b4ad9a80 | |
| | e9af582acd | |
| | 0a75b4afc8 | |
| | d109d6452f | |
| | 18d1bcefec | |
| | 1000c4f2c0 | |
| | 801734bc6c | |
| | a91fb4ff74 | |
| | 54f3339f38 | |
| | 50c5035dc8 | |
| | c078df73b9 | |
| | c570cf1733 | |
| | 39428da6b6 | |
| | acf268757f | |
| | 4bb775aecb | |
| | 7ebcf1477a | |
| | fe76a450eb | |
.gitignore (vendored, 1 line changed)

@@ -36,4 +36,3 @@ _toaster_clones/
downloads/
sstate-cache/
toaster.sqlite
.vscode/
Makefile (new file, 35 lines)

@@ -0,0 +1,35 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = _build
DESTDIR = final

ifeq ($(shell if which $(SPHINXBUILD) >/dev/null 2>&1; then echo 1; else echo 0; fi),0)
$(error "The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed")
endif

# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile.sphinx clean publish

publish: Makefile.sphinx html singlehtml
rm -rf $(BUILDDIR)/$(DESTDIR)/
mkdir -p $(BUILDDIR)/$(DESTDIR)/
cp -r $(BUILDDIR)/html/* $(BUILDDIR)/$(DESTDIR)/
cp $(BUILDDIR)/singlehtml/index.html $(BUILDDIR)/$(DESTDIR)/singleindex.html
sed -i -e 's@index.html#@singleindex.html#@g' $(BUILDDIR)/$(DESTDIR)/singleindex.html

clean:
@rm -rf $(BUILDDIR)

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile.sphinx
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
@@ -16,13 +16,9 @@ which can be found at:
Contributing
------------

Please refer to our contributor guide here: https://docs.yoctoproject.org/dev/contributor-guide/
for full details on how to submit changes.

As a quick guide, patches should be sent to openembedded-core@lists.openembedded.org
The git command to do that would be:

git send-email -M -1 --to openembedded-core@lists.openembedded.org
Please refer to
https://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
for guidelines on how to submit patches.

Mailing list:
SECURITY.md (22 lines changed)

@@ -1,22 +0,0 @@
How to Report a Potential Vulnerability?
========================================

If you would like to report a public issue (for example, one with a released
CVE number), please report it using the
[https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security Security Bugzilla]

If you are dealing with a not-yet released or urgent issue, please send a
message to security AT yoctoproject DOT org, including as many details as
possible: the layer or software module affected, the recipe and its version,
and any example code, if available.

Branches maintained with security fixes
---------------------------------------

See [https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS Stable release and LTS]
for detailed info regarding the policies and maintenance of Stable branches.

The [https://wiki.yoctoproject.org/wiki/Releases Release page] contains a list of all
releases of the Yocto Project. Versions in grey are no longer actively maintained with
security patches, but well-tested patches may still be accepted for them for
significant issues.
@@ -18,19 +18,16 @@ Bitbake requires Python version 3.8 or newer.
Contributing
------------

Please refer to our contributor guide here: https://docs.yoctoproject.org/contributor-guide/
for full details on how to submit changes.

As a quick guide, patches should be sent to bitbake-devel@lists.openembedded.org
The git command to do that would be:
Please refer to
https://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
for guidelines on how to submit patches, just note that the latter documentation is intended
for OpenEmbedded (and its core) not bitbake patches (bitbake-devel@lists.openembedded.org)
but in general main guidelines apply. Once the commit(s) have been created, the way to send
the patch is through git-send-email. For example, to send the last commit (HEAD) on current
branch, type:

git send-email -M -1 --to bitbake-devel@lists.openembedded.org

If you're sending a patch related to the BitBake manual, make sure you copy
the Yocto Project documentation mailing list:

git send-email -M -1 --to bitbake-devel@lists.openembedded.org --cc docs@lists.yoctoproject.org

Mailing list:

https://lists.openembedded.org/g/bitbake-devel

@@ -48,7 +45,8 @@ it has so many corner cases. The datastore has many tests too. Testing with the
recommended before submitting patches, particularly to the fetcher and datastore. We also
appreciate new test cases and may require them for more obscure issues.

To run the tests "zstd" and "git" must be installed.
To run the tests "zstd" and "git" must be installed. Git must be correctly configured, in
particular the user.email and user.name values must be set.

The assumption is made that this testsuite is run from an initialized OpenEmbedded build
environment (i.e. `source oe-init-build-env` is used). If this is not the case, run the

@@ -56,8 +54,3 @@ testsuite as follows:

export PATH=$(pwd)/bin:$PATH
bin/bitbake-selftest

The testsuite can alternatively be executed using pytest, e.g. obtained from PyPI (in this
case, the PATH is configured automatically):

pytest
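As an aside to the testing notes above: the same modules can also be driven from plain unittest once BitBake's lib/ directory is importable. The sketch below is not part of the patch; the checkout layout and the choice of bb.tests.data are assumptions.

```python
# A minimal sketch (not from the patch): run one BitBake test module with
# unittest, assuming this file sits next to a bitbake checkout whose lib/
# directory holds the bb package. Paths are illustrative assumptions.
import os
import sys
import unittest

BITBAKE_LIB = os.path.join(os.path.dirname(__file__), "bitbake", "lib")  # assumed layout
sys.path.insert(0, BITBAKE_LIB)

if __name__ == "__main__":
    # bb.tests.data exercises the datastore, which the README singles out
    # as being well covered by tests.
    suite = unittest.defaultTestLoader.loadTestsFromName("bb.tests.data")
    unittest.TextTestRunner(verbosity=2).run(suite)
```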
@@ -1,24 +0,0 @@
How to Report a Potential Vulnerability?
========================================

If you would like to report a public issue (for example, one with a released
CVE number), please report it using the
[https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security Security Bugzilla].
If you have a patch ready, submit it following the same procedure as any other
patch as described in README.md.

If you are dealing with a not-yet released or urgent issue, please send a
message to security AT yoctoproject DOT org, including as many details as
possible: the layer or software module affected, the recipe and its version,
and any example code, if available.

Branches maintained with security fixes
---------------------------------------

See [https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS Stable release and LTS]
for detailed info regarding the policies and maintenance of Stable branches.

The [https://wiki.yoctoproject.org/wiki/Releases Release page] contains a list of all
releases of the Yocto Project. Versions in grey are no longer actively maintained with
security patches, but well-tested patches may still be accepted for them for
significant issues.
@@ -27,7 +27,7 @@ from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException

bb.utils.check_system_locale()

__version__ = "2.8.0"
__version__ = "2.4.0"

if __name__ == "__main__":
if __version__ != bb.__version__:
@@ -72,17 +72,13 @@ def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
elif sig2 not in sigfiles:
logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig2))
sys.exit(1)

latestfiles = [sigfiles[sig1]['path'], sigfiles[sig2]['path']]
latestfiles = [sigfiles[sig1], sigfiles[sig2]]
else:
sigfiles = find_siginfo(bbhandler, pn, taskname)
latestsigs = sorted(sigfiles.keys(), key=lambda h: sigfiles[h]['time'])[-2:]
if not latestsigs:
filedates = find_siginfo(bbhandler, pn, taskname)
latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-2:]
if not latestfiles:
logger.error('No sigdata files found matching %s %s' % (pn, taskname))
sys.exit(1)
latestfiles = [sigfiles[latestsigs[0]]['path']]
if len(latestsigs) > 1:
latestfiles.append(sigfiles[latestsigs[1]]['path'])

return latestfiles

@@ -100,7 +96,7 @@ def recursecb(key, hash1, hash2):
elif hash2 not in hashfiles:
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash2))
else:
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1]['path'], hashfiles[hash2]['path'], recursecb, color=color)
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, color=color)
for change in out2:
for line in change.splitlines():
recout.append(' ' + line)
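The hunks above change what find_siginfo() hands back: each entry now appears to carry a dict with 'path' and 'time' keys rather than a bare path or date, and the two most recent signatures are picked by sorting on 'time'. A minimal, self-contained sketch of that selection logic, using made-up data rather than BitBake's real API:

```python
# Self-contained sketch of the selection logic implied by the hunks above:
# find_siginfo() now appears to return {taskhash: {"path": ..., "time": ...}}
# instead of {path: mtime}. Dummy data below; not BitBake's real API.
sigfiles = {
    "aaaa1111": {"path": "stamps/foo.do_compile.sigdata.aaaa1111", "time": 100.0},
    "bbbb2222": {"path": "stamps/foo.do_compile.sigdata.bbbb2222", "time": 250.0},
    "cccc3333": {"path": "stamps/foo.do_compile.sigdata.cccc3333", "time": 175.0},
}

# Pick the two most recently written signatures (newest last), then map the
# chosen hashes back to their file paths, mirroring the new code path.
latestsigs = sorted(sigfiles, key=lambda h: sigfiles[h]["time"])[-2:]
latestfiles = [sigfiles[h]["path"] for h in latestsigs]

assert latestfiles == [
    "stamps/foo.do_compile.sigdata.cccc3333",
    "stamps/foo.do_compile.sigdata.bbbb2222",
]
print(latestfiles)
```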
@@ -26,35 +26,26 @@ if __name__ == "__main__":
parser.add_argument('-f', '--flag', help='Specify a variable flag to query (with --value)', default=None)
parser.add_argument('--value', help='Only report the value, no history and no variable name', action="store_true")
parser.add_argument('-q', '--quiet', help='Silence bitbake server logging', action="store_true")
parser.add_argument('--ignore-undefined', help='Suppress any errors related to undefined variables', action="store_true")
args = parser.parse_args()

if not args.value:
if args.unexpand:
sys.exit("--unexpand only makes sense with --value")
if args.unexpand and not args.value:
print("--unexpand only makes sense with --value")
sys.exit(1)

if args.flag:
sys.exit("--flag only makes sense with --value")
if args.flag and not args.value:
print("--flag only makes sense with --value")
sys.exit(1)

quiet = args.quiet or args.value
with bb.tinfoil.Tinfoil(tracking=True, setup_logging=not quiet) as tinfoil:
with bb.tinfoil.Tinfoil(tracking=True, setup_logging=not args.quiet) as tinfoil:
if args.recipe:
tinfoil.prepare(quiet=3 if quiet else 2)
tinfoil.prepare(quiet=2)
d = tinfoil.parse_recipe(args.recipe)
else:
tinfoil.prepare(quiet=2, config_only=True)
d = tinfoil.config_data

value = None
if args.flag:
value = d.getVarFlag(args.variable, args.flag, expand=not args.unexpand)
if value is None and not args.ignore_undefined:
sys.exit(f"The flag '{args.flag}' is not defined for variable '{args.variable}'")
else:
value = d.getVar(args.variable, expand=not args.unexpand)
if value is None and not args.ignore_undefined:
sys.exit(f"The variable '{args.variable}' is not defined")
if args.value:
print(str(value if value is not None else ""))
print(str(d.getVarFlag(args.variable, args.flag, expand=(not args.unexpand))))
elif args.value:
print(str(d.getVar(args.variable, expand=(not args.unexpand))))
else:
bb.data.emit_var(args.variable, d=d, all=True)
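A small sketch of the validation pattern the new bitbake-getvar code relies on: sys.exit() with a string writes the message to stderr and exits with status 1, so the earlier print()/sys.exit(1) pairs collapse into a single call. The toy parser below only mirrors a few of the script's options; it is not the real tool.

```python
# Minimal sketch of the option-validation pattern used in the new
# bitbake-getvar code: passing a string to sys.exit() prints it to stderr
# and exits with status 1. Run e.g.: python sketch.py DISTRO --value
import argparse
import sys

parser = argparse.ArgumentParser(description="toy bitbake-getvar-like CLI")
parser.add_argument("variable")
parser.add_argument("--unexpand", action="store_true")
parser.add_argument("-f", "--flag", default=None)
parser.add_argument("--value", action="store_true")
args = parser.parse_args()

if not args.value:
    if args.unexpand:
        sys.exit("--unexpand only makes sense with --value")
    if args.flag:
        sys.exit("--flag only makes sense with --value")

print(f"would query {args.variable!r} (flag={args.flag!r}, expand={not args.unexpand})")
```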
@@ -14,8 +14,6 @@ import sys
import threading
import time
import warnings
import netrc
import json
warnings.simplefilter("default")

try:

@@ -38,42 +36,18 @@ except ImportError:
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib'))

import hashserv
import bb.asyncrpc

DEFAULT_ADDRESS = 'unix://./hashserve.sock'
METHOD = 'stress.test.method'

def print_user(u):
print(f"Username: {u['username']}")
if "permissions" in u:
print("Permissions: " + " ".join(u["permissions"]))
if "token" in u:
print(f"Token: {u['token']}")

def main():
def handle_get(args, client):
result = client.get_taskhash(args.method, args.taskhash, all_properties=True)
if not result:
return 0

print(json.dumps(result, sort_keys=True, indent=4))
return 0

def handle_get_outhash(args, client):
result = client.get_outhash(args.method, args.outhash, args.taskhash)
if not result:
return 0

print(json.dumps(result, sort_keys=True, indent=4))
return 0

def handle_stats(args, client):
if args.reset:
s = client.reset_stats()
else:
s = client.get_stats()
print(json.dumps(s, sort_keys=True, indent=4))
pprint.pprint(s)
return 0

def handle_stress(args, client):

@@ -82,24 +56,25 @@ def main():
nonlocal missed_hashes
nonlocal max_time

with hashserv.create_client(args.address) as client:
for i in range(args.requests):
taskhash = hashlib.sha256()
taskhash.update(args.taskhash_seed.encode('utf-8'))
taskhash.update(str(i).encode('utf-8'))
client = hashserv.create_client(args.address)

start_time = time.perf_counter()
l = client.get_unihash(METHOD, taskhash.hexdigest())
elapsed = time.perf_counter() - start_time
for i in range(args.requests):
taskhash = hashlib.sha256()
taskhash.update(args.taskhash_seed.encode('utf-8'))
taskhash.update(str(i).encode('utf-8'))

with lock:
if l:
found_hashes += 1
else:
missed_hashes += 1
start_time = time.perf_counter()
l = client.get_unihash(METHOD, taskhash.hexdigest())
elapsed = time.perf_counter() - start_time

max_time = max(elapsed, max_time)
pbar.update()
with lock:
if l:
found_hashes += 1
else:
missed_hashes += 1

max_time = max(elapsed, max_time)
pbar.update()

max_time = 0
found_hashes = 0

@@ -138,114 +113,12 @@ def main():
with lock:
pbar.update()

def handle_remove(args, client):
where = {k: v for k, v in args.where}
if where:
result = client.remove(where)
print("Removed %d row(s)" % (result["count"]))
else:
print("No query specified")

def handle_clean_unused(args, client):
result = client.clean_unused(args.max_age)
print("Removed %d rows" % (result["count"]))
return 0

def handle_refresh_token(args, client):
r = client.refresh_token(args.username)
print_user(r)

def handle_set_user_permissions(args, client):
r = client.set_user_perms(args.username, args.permissions)
print_user(r)

def handle_get_user(args, client):
r = client.get_user(args.username)
print_user(r)

def handle_get_all_users(args, client):
users = client.get_all_users()
print("{username:20}| {permissions}".format(username="Username", permissions="Permissions"))
print(("-" * 20) + "+" + ("-" * 20))
for u in users:
print("{username:20}| {permissions}".format(username=u["username"], permissions=" ".join(u["permissions"])))

def handle_new_user(args, client):
r = client.new_user(args.username, args.permissions)
print_user(r)

def handle_delete_user(args, client):
r = client.delete_user(args.username)
print_user(r)

def handle_get_db_usage(args, client):
usage = client.get_db_usage()
print(usage)
tables = sorted(usage.keys())
print("{name:20}| {rows:20}".format(name="Table name", rows="Rows"))
print(("-" * 20) + "+" + ("-" * 20))
for t in tables:
print("{name:20}| {rows:<20}".format(name=t, rows=usage[t]["rows"]))
print()

total_rows = sum(t["rows"] for t in usage.values())
print(f"Total rows: {total_rows}")

def handle_get_db_query_columns(args, client):
columns = client.get_db_query_columns()
print("\n".join(sorted(columns)))

def handle_gc_status(args, client):
result = client.gc_status()
if not result["mark"]:
print("No Garbage collection in progress")
return 0

print("Current Mark: %s" % result["mark"])
print("Total hashes to keep: %d" % result["keep"])
print("Total hashes to remove: %s" % result["remove"])
return 0

def handle_gc_mark(args, client):
where = {k: v for k, v in args.where}
result = client.gc_mark(args.mark, where)
print("New hashes marked: %d" % result["count"])
return 0

def handle_gc_sweep(args, client):
result = client.gc_sweep(args.mark)
print("Removed %d rows" % result["count"])
return 0

def handle_unihash_exists(args, client):
result = client.unihash_exists(args.unihash)
if args.quiet:
return 0 if result else 1

print("true" if result else "false")
return 0

parser = argparse.ArgumentParser(description='Hash Equivalence Client')
parser.add_argument('--address', default=DEFAULT_ADDRESS, help='Server address (default "%(default)s")')
parser.add_argument('--log', default='WARNING', help='Set logging level')
parser.add_argument('--login', '-l', metavar="USERNAME", help="Authenticate as USERNAME")
parser.add_argument('--password', '-p', metavar="TOKEN", help="Authenticate using token TOKEN")
parser.add_argument('--become', '-b', metavar="USERNAME", help="Impersonate user USERNAME (if allowed) when performing actions")
parser.add_argument('--no-netrc', '-n', action="store_false", dest="netrc", help="Do not use .netrc")

subparsers = parser.add_subparsers()

get_parser = subparsers.add_parser('get', help="Get the unihash for a taskhash")
get_parser.add_argument("method", help="Method to query")
get_parser.add_argument("taskhash", help="Task hash to query")
get_parser.set_defaults(func=handle_get)

get_outhash_parser = subparsers.add_parser('get-outhash', help="Get output hash information")
get_outhash_parser.add_argument("method", help="Method to query")
get_outhash_parser.add_argument("outhash", help="Output hash to query")
get_outhash_parser.add_argument("taskhash", help="Task hash to query")
get_outhash_parser.set_defaults(func=handle_get_outhash)

stats_parser = subparsers.add_parser('stats', help='Show server stats')
stats_parser.add_argument('--reset', action='store_true',
help='Reset server stats')

@@ -264,64 +137,6 @@ def main():
help='Include string in outhash')
stress_parser.set_defaults(func=handle_stress)

remove_parser = subparsers.add_parser('remove', help="Remove hash entries")
remove_parser.add_argument("--where", "-w", metavar="KEY VALUE", nargs=2, action="append", default=[],
help="Remove entries from table where KEY == VALUE")
remove_parser.set_defaults(func=handle_remove)

clean_unused_parser = subparsers.add_parser('clean-unused', help="Remove unused database entries")
clean_unused_parser.add_argument("max_age", metavar="SECONDS", type=int, help="Remove unused entries older than SECONDS old")
clean_unused_parser.set_defaults(func=handle_clean_unused)

refresh_token_parser = subparsers.add_parser('refresh-token', help="Refresh auth token")
refresh_token_parser.add_argument("--username", "-u", help="Refresh the token for another user (if authorized)")
refresh_token_parser.set_defaults(func=handle_refresh_token)

set_user_perms_parser = subparsers.add_parser('set-user-perms', help="Set new permissions for user")
set_user_perms_parser.add_argument("--username", "-u", help="Username", required=True)
set_user_perms_parser.add_argument("permissions", metavar="PERM", nargs="*", default=[], help="New permissions")
set_user_perms_parser.set_defaults(func=handle_set_user_permissions)

get_user_parser = subparsers.add_parser('get-user', help="Get user")
get_user_parser.add_argument("--username", "-u", help="Username")
get_user_parser.set_defaults(func=handle_get_user)

get_all_users_parser = subparsers.add_parser('get-all-users', help="List all users")
get_all_users_parser.set_defaults(func=handle_get_all_users)

new_user_parser = subparsers.add_parser('new-user', help="Create new user")
new_user_parser.add_argument("--username", "-u", help="Username", required=True)
new_user_parser.add_argument("permissions", metavar="PERM", nargs="*", default=[], help="New permissions")
new_user_parser.set_defaults(func=handle_new_user)

delete_user_parser = subparsers.add_parser('delete-user', help="Delete user")
delete_user_parser.add_argument("--username", "-u", help="Username", required=True)
delete_user_parser.set_defaults(func=handle_delete_user)

db_usage_parser = subparsers.add_parser('get-db-usage', help="Database Usage")
db_usage_parser.set_defaults(func=handle_get_db_usage)

db_query_columns_parser = subparsers.add_parser('get-db-query-columns', help="Show columns that can be used in database queries")
db_query_columns_parser.set_defaults(func=handle_get_db_query_columns)

gc_status_parser = subparsers.add_parser("gc-status", help="Show garbage collection status")
gc_status_parser.set_defaults(func=handle_gc_status)

gc_mark_parser = subparsers.add_parser('gc-mark', help="Mark hashes to be kept for garbage collection")
gc_mark_parser.add_argument("mark", help="Mark for this garbage collection operation")
gc_mark_parser.add_argument("--where", "-w", metavar="KEY VALUE", nargs=2, action="append", default=[],
help="Keep entries in table where KEY == VALUE")
gc_mark_parser.set_defaults(func=handle_gc_mark)

gc_sweep_parser = subparsers.add_parser('gc-sweep', help="Perform garbage collection and delete any entries that are not marked")
gc_sweep_parser.add_argument("mark", help="Mark for this garbage collection operation")
gc_sweep_parser.set_defaults(func=handle_gc_sweep)

unihash_exists_parser = subparsers.add_parser('unihash-exists', help="Check if a unihash is known to the server")
unihash_exists_parser.add_argument("--quiet", action="store_true", help="Don't print status. Instead, exit with 0 if unihash exists and 1 if it does not")
unihash_exists_parser.add_argument("unihash", help="Unihash to check")
unihash_exists_parser.set_defaults(func=handle_unihash_exists)

args = parser.parse_args()

logger = logging.getLogger('hashserv')

@@ -335,30 +150,11 @@ def main():
console.setLevel(level)
logger.addHandler(console)

login = args.login
password = args.password

if login is None and args.netrc:
try:
n = netrc.netrc()
auth = n.authenticators(args.address)
if auth is not None:
login, _, password = auth
except FileNotFoundError:
pass
except netrc.NetrcParseError as e:
sys.stderr.write(f"Error parsing {e.filename}:{e.lineno}: {e.msg}\n")

func = getattr(args, 'func', None)
if func:
try:
with hashserv.create_client(args.address, login, password) as client:
if args.become:
client.become_user(args.become)
return func(args, client)
except bb.asyncrpc.InvokeError as e:
print(f"ERROR: {e}")
return 1
client = hashserv.create_client(args.address)

return func(args, client)

return 0
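The bulk of this refactor splits each sub-command into a handle_*() function wired up with set_defaults(func=...), with main() looking the handler up via getattr(args, "func", None). A minimal sketch of that dispatch pattern with a stand-in client (not the real hashserv API):

```python
# Sketch of the dispatch pattern the refactored bitbake-hashclient uses:
# every sub-command registers a handler via set_defaults(func=...). The
# client below is a stand-in object that returns canned data.
import argparse
import json


class FakeClient:
    """Stand-in for the real hash equivalence client."""
    def get_stats(self):
        return {"requests": 42}


def handle_stats(args, client):
    print(json.dumps(client.get_stats(), sort_keys=True, indent=4))
    return 0


def main(argv=None):
    parser = argparse.ArgumentParser(description="toy hash client")
    subparsers = parser.add_subparsers()

    stats_parser = subparsers.add_parser("stats", help="Show server stats")
    stats_parser.set_defaults(func=handle_stats)

    args = parser.parse_args(argv)
    func = getattr(args, "func", None)
    if func is None:
        parser.print_help()
        return 1
    return func(args, FakeClient())


if __name__ == "__main__":
    raise SystemExit(main())
```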
@@ -11,161 +11,56 @@ import logging
import argparse
import sqlite3
import warnings

warnings.simplefilter("default")

sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), "lib"))
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib'))

import hashserv
from hashserv.server import DEFAULT_ANON_PERMS

VERSION = "1.0.0"

DEFAULT_BIND = "unix://./hashserve.sock"
DEFAULT_BIND = 'unix://./hashserve.sock'

def main():
parser = argparse.ArgumentParser(
description="Hash Equivalence Reference Server. Version=%s" % VERSION,
formatter_class=argparse.RawTextHelpFormatter,
epilog="""
The bind address may take one of the following formats:
unix://PATH - Bind to unix domain socket at PATH
ws://ADDRESS:PORT - Bind to websocket on ADDRESS:PORT
ADDRESS:PORT - Bind to raw TCP socket on ADDRESS:PORT
parser = argparse.ArgumentParser(description='Hash Equivalence Reference Server. Version=%s' % VERSION,
epilog='''The bind address is the path to a unix domain socket if it is
prefixed with "unix://". Otherwise, it is an IP address
and port in form ADDRESS:PORT. To bind to all addresses, leave
the ADDRESS empty, e.g. "--bind :8686". To bind to a specific
IPv6 address, enclose the address in "[]", e.g.
"--bind [::1]:8686"'''
)

To bind to all addresses, leave the ADDRESS empty, e.g. "--bind :8686" or
"--bind ws://:8686". To bind to a specific IPv6 address, enclose the address in
"[]", e.g. "--bind [::1]:8686" or "--bind ws://[::1]:8686"

Note that the default Anonymous permissions are designed to not break existing
server instances when upgrading, but are not particularly secure defaults. If
you want to use authentication, it is recommended that you use "--anon-perms
@read" to only give anonymous users read access, or "--anon-perms @none" to
give un-authenticated users no access at all.

Setting "--anon-perms @all" or "--anon-perms @user-admin" is not allowed, since
this would allow anonymous users to manage all users accounts, which is a bad
idea.

If you are using user authentication, you should run your server in websockets
mode with an SSL terminating load balancer in front of it (as this server does
not implement SSL). Otherwise all usernames and passwords will be transmitted
in the clear. When configured this way, clients can connect using a secure
websocket, as in "wss://SERVER:PORT"

The following permissions are supported by the server:

@none - No permissions
@read - The ability to read equivalent hashes from the server
@report - The ability to report equivalent hashes to the server
@db-admin - Manage the hash database(s). This includes cleaning the
database, removing hashes, etc.
@user-admin - The ability to manage user accounts. This includes, creating
users, deleting users, resetting login tokens, and assigning
permissions.
@all - All possible permissions, including any that may be added
in the future
""",
)

parser.add_argument(
"-b",
"--bind",
default=os.environ.get("HASHSERVER_BIND", DEFAULT_BIND),
help='Bind address (default $HASHSERVER_BIND, "%(default)s")',
)
parser.add_argument(
"-d",
"--database",
default=os.environ.get("HASHSERVER_DB", "./hashserv.db"),
help='Database file (default $HASHSERVER_DB, "%(default)s")',
)
parser.add_argument(
"-l",
"--log",
default=os.environ.get("HASHSERVER_LOG_LEVEL", "WARNING"),
help='Set logging level (default $HASHSERVER_LOG_LEVEL, "%(default)s")',
)
parser.add_argument(
"-u",
"--upstream",
default=os.environ.get("HASHSERVER_UPSTREAM", None),
help="Upstream hashserv to pull hashes from ($HASHSERVER_UPSTREAM)",
)
parser.add_argument(
"-r",
"--read-only",
action="store_true",
help="Disallow write operations from clients ($HASHSERVER_READ_ONLY)",
)
parser.add_argument(
"--db-username",
default=os.environ.get("HASHSERVER_DB_USERNAME", None),
help="Database username ($HASHSERVER_DB_USERNAME)",
)
parser.add_argument(
"--db-password",
default=os.environ.get("HASHSERVER_DB_PASSWORD", None),
help="Database password ($HASHSERVER_DB_PASSWORD)",
)
parser.add_argument(
"--anon-perms",
metavar="PERM[,PERM[,...]]",
default=os.environ.get("HASHSERVER_ANON_PERMS", ",".join(DEFAULT_ANON_PERMS)),
help='Permissions to give anonymous users (default $HASHSERVER_ANON_PERMS, "%(default)s")',
)
parser.add_argument(
"--admin-user",
default=os.environ.get("HASHSERVER_ADMIN_USER", None),
help="Create default admin user with name ADMIN_USER ($HASHSERVER_ADMIN_USER)",
)
parser.add_argument(
"--admin-password",
default=os.environ.get("HASHSERVER_ADMIN_PASSWORD", None),
help="Create default admin user with password ADMIN_PASSWORD ($HASHSERVER_ADMIN_PASSWORD)",
)
parser.add_argument('-b', '--bind', default=DEFAULT_BIND, help='Bind address (default "%(default)s")')
parser.add_argument('-d', '--database', default='./hashserv.db', help='Database file (default "%(default)s")')
parser.add_argument('-l', '--log', default='WARNING', help='Set logging level')
parser.add_argument('-u', '--upstream', help='Upstream hashserv to pull hashes from')
parser.add_argument('-r', '--read-only', action='store_true', help='Disallow write operations from clients')

args = parser.parse_args()

logger = logging.getLogger("hashserv")
logger = logging.getLogger('hashserv')

level = getattr(logging, args.log.upper(), None)
if not isinstance(level, int):
raise ValueError("Invalid log level: %s (Try ERROR/WARNING/INFO/DEBUG)" % args.log)
raise ValueError('Invalid log level: %s' % args.log)

logger.setLevel(level)
console = logging.StreamHandler()
console.setLevel(level)
logger.addHandler(console)

read_only = (os.environ.get("HASHSERVER_READ_ONLY", "0") == "1") or args.read_only
if "," in args.anon_perms:
anon_perms = args.anon_perms.split(",")
else:
anon_perms = args.anon_perms.split()

server = hashserv.create_server(
args.bind,
args.database,
upstream=args.upstream,
read_only=read_only,
db_username=args.db_username,
db_password=args.db_password,
anon_perms=anon_perms,
admin_username=args.admin_user,
admin_password=args.admin_password,
)
server = hashserv.create_server(args.bind, args.database, upstream=args.upstream, read_only=args.read_only)
server.serve_forever()
return 0

if __name__ == "__main__":
if __name__ == '__main__':
try:
ret = main()
except Exception:
ret = 1
import traceback

traceback.print_exc()
sys.exit(ret)
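Two small patterns from the new argument handling above, shown in isolation: defaults sourced from environment variables, and the anonymous-permissions string split on commas or whitespace. The default permission value below is illustrative, not the server's real DEFAULT_ANON_PERMS.

```python
# Sketch of two patterns from the new bitbake-hashserv argument handling:
# environment-variable defaults and the comma-or-whitespace permissions split.
# Variable names mirror the script; the default values are illustrative.
import argparse
import os

DEFAULT_BIND = "unix://./hashserve.sock"

parser = argparse.ArgumentParser(description="toy hash server front end")
parser.add_argument(
    "-b",
    "--bind",
    default=os.environ.get("HASHSERVER_BIND", DEFAULT_BIND),
    help='Bind address (default $HASHSERVER_BIND, "%(default)s")',
)
parser.add_argument(
    "--anon-perms",
    default=os.environ.get("HASHSERVER_ANON_PERMS", "@read"),
    help="Permissions to give anonymous users",
)
args = parser.parse_args([])  # parse an empty argv for the demo

# "," wins if present, otherwise fall back to whitespace splitting,
# matching the new main().
if "," in args.anon_perms:
    anon_perms = args.anon_perms.split(",")
else:
    anon_perms = args.anon_perms.split()

print(args.bind, anon_perms)
```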
@@ -7,77 +7,49 @@

import os
import sys,logging
import argparse
import optparse
import warnings
warnings.simplefilter("default")

sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), "lib"))
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),'lib'))

import prserv
import prserv.serv

VERSION = "1.1.0"
__version__="1.0.0"

PRHOST_DEFAULT="0.0.0.0"
PRHOST_DEFAULT='0.0.0.0'
PRPORT_DEFAULT=8585

def main():
parser = argparse.ArgumentParser(
description="BitBake PR Server. Version=%s" % VERSION,
formatter_class=argparse.RawTextHelpFormatter)
parser = optparse.OptionParser(
version="Bitbake PR Service Core version %s, %%prog version %s" % (prserv.__version__, __version__),
usage = "%prog < --start | --stop > [options]")

parser.add_argument(
"-f",
"--file",
default="prserv.sqlite3",
help="database filename (default: prserv.sqlite3)",
)
parser.add_argument(
"-l",
"--log",
default="prserv.log",
help="log filename(default: prserv.log)",
)
parser.add_argument(
"--loglevel",
default="INFO",
help="logging level, i.e. CRITICAL, ERROR, WARNING, INFO, DEBUG",
)
parser.add_argument(
"--start",
action="store_true",
help="start daemon",
)
parser.add_argument(
"--stop",
action="store_true",
help="stop daemon",
)
parser.add_argument(
"--host",
help="ip address to bind",
default=PRHOST_DEFAULT,
)
parser.add_argument(
"--port",
type=int,
default=PRPORT_DEFAULT,
help="port number (default: 8585)",
)
parser.add_argument(
"-r",
"--read-only",
action="store_true",
help="open database in read-only mode",
)
parser.add_option("-f", "--file", help="database filename(default: prserv.sqlite3)", action="store",
dest="dbfile", type="string", default="prserv.sqlite3")
parser.add_option("-l", "--log", help="log filename(default: prserv.log)", action="store",
dest="logfile", type="string", default="prserv.log")
parser.add_option("--loglevel", help="logging level, i.e. CRITICAL, ERROR, WARNING, INFO, DEBUG",
action = "store", type="string", dest="loglevel", default = "INFO")
parser.add_option("--start", help="start daemon",
action="store_true", dest="start")
parser.add_option("--stop", help="stop daemon",
action="store_true", dest="stop")
parser.add_option("--host", help="ip address to bind", action="store",
dest="host", type="string", default=PRHOST_DEFAULT)
parser.add_option("--port", help="port number(default: 8585)", action="store",
dest="port", type="int", default=PRPORT_DEFAULT)
parser.add_option("-r", "--read-only", help="open database in read-only mode",
action="store_true")

args = parser.parse_args()
prserv.init_logger(os.path.abspath(args.log), args.loglevel)
options, args = parser.parse_args(sys.argv)
prserv.init_logger(os.path.abspath(options.logfile),options.loglevel)

if args.start:
ret=prserv.serv.start_daemon(args.file, args.host, args.port, os.path.abspath(args.log), args.read_only)
elif args.stop:
ret=prserv.serv.stop_daemon(args.host, args.port)
if options.start:
ret=prserv.serv.start_daemon(options.dbfile, options.host, options.port,os.path.abspath(options.logfile), options.read_only)
elif options.stop:
ret=prserv.serv.stop_daemon(options.host, options.port)
else:
ret=parser.print_help()
return ret
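A compact sketch of the optparse-to-argparse move above; the optparse counterparts are kept as comments and only a subset of the options is mirrored. The parsing behaviour it relies on (argparse reads sys.argv[1:] itself and returns a single namespace) is standard library behaviour, not something added by the patch.

```python
# Sketch of the optparse -> argparse migration in bitbake-prserv, reduced to
# a few representative options.
import argparse

PRHOST_DEFAULT = "0.0.0.0"
PRPORT_DEFAULT = 8585

parser = argparse.ArgumentParser(description="toy PR server CLI")
# optparse: parser.add_option("-f", "--file", dest="dbfile", type="string", default="prserv.sqlite3")
parser.add_argument("-f", "--file", default="prserv.sqlite3",
                    help="database filename (default: prserv.sqlite3)")
# optparse: parser.add_option("--start", action="store_true", dest="start")
parser.add_argument("--start", action="store_true", help="start daemon")
parser.add_argument("--stop", action="store_true", help="stop daemon")
parser.add_argument("--host", default=PRHOST_DEFAULT, help="ip address to bind")
parser.add_argument("--port", type=int, default=PRPORT_DEFAULT, help="port number (default: 8585)")

# optparse returned (options, leftover_args); argparse returns one namespace.
args = parser.parse_args(["--start", "--port", "8686"])
print(args.start, args.stop, args.host, args.port, args.file)
```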
@@ -91,19 +91,19 @@ def worker_fire_prepickled(event):
worker_thread_exit = False

def worker_flush(worker_queue):
worker_queue_int = bytearray()
worker_queue_int = b""
global worker_pipe, worker_thread_exit

while True:
try:
worker_queue_int.extend(worker_queue.get(True, 1))
worker_queue_int = worker_queue_int + worker_queue.get(True, 1)
except queue.Empty:
pass
while (worker_queue_int or not worker_queue.empty()):
try:
(_, ready, _) = select.select([], [worker_pipe], [], 1)
if not worker_queue.empty():
worker_queue_int.extend(worker_queue.get())
worker_queue_int = worker_queue_int + worker_queue.get()
written = os.write(worker_pipe, worker_queue_int)
worker_queue_int = worker_queue_int[written:]
except (IOError, OSError) as e:

@@ -151,7 +151,6 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
taskhash = runtask['taskhash']
unihash = runtask['unihash']
appends = runtask['appends']
layername = runtask['layername']
taskdepdata = runtask['taskdepdata']
quieterrors = runtask['quieterrors']
# We need to setup the environment BEFORE the fork, since

@@ -183,7 +182,7 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not dry_run:
fakeroot = True
envvars = (runtask['fakerootenv'] or "").split()
for key, value in (var.split('=',1) for var in envvars):
for key, value in (var.split('=') for var in envvars):
envbackup[key] = os.environ.get(key)
os.environ[key] = value
fakeenv[key] = value

@@ -195,7 +194,7 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
(fn, taskname, ', '.join(fakedirs)))
else:
envvars = (runtask['fakerootnoenv'] or "").split()
for key, value in (var.split('=',1) for var in envvars):
for key, value in (var.split('=') for var in envvars):
envbackup[key] = os.environ.get(key)
os.environ[key] = value
fakeenv[key] = value
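The two hunks above change var.split('=') to var.split('=', 1) when parsing the fakeroot environment. A toy illustration of why the bounded split matters when a value itself contains '='; the variable names and values here are made up, not the real FAKEROOTENV contents.

```python
# Why split('=', 1) instead of split('='): a value containing '=' would make
# the unbounded split produce more than two fields. Illustrative values only.
envvars = "PSEUDO_DISABLED=0 EXTRA_OPTS=name=value".split()

parsed = {}
for key, value in (var.split("=", 1) for var in envvars):
    parsed[key] = value
assert parsed == {"PSEUDO_DISABLED": "0", "EXTRA_OPTS": "name=value"}

try:
    dict(var.split("=") for var in envvars)  # the old, unbounded form
except ValueError as exc:
    print("old split('=') fails:", exc)
```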
@@ -237,13 +236,11 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
# Let SIGHUP exit as SIGTERM
signal.signal(signal.SIGHUP, sigterm_handler)

# No stdin & stdout
# stdout is used as a status report channel and must not be used by child processes.
dumbio = os.open(os.devnull, os.O_RDWR)
os.dup2(dumbio, sys.stdin.fileno())
os.dup2(dumbio, sys.stdout.fileno())
# No stdin
newsi = os.open(os.devnull, os.O_RDWR)
os.dup2(newsi, sys.stdin.fileno())

if umask is not None:
if umask:
os.umask(umask)

try:

@@ -265,7 +262,7 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
bb.parse.siggen.set_taskhashes(workerdata["newhashes"])
ret = 0

the_data = databuilder.parseRecipe(fn, appends, layername)
the_data = databuilder.parseRecipe(fn, appends)
the_data.setVar('BB_TASKHASH', taskhash)
the_data.setVar('BB_UNIHASH', unihash)
bb.parse.siggen.setup_datacache_from_datastore(fn, the_data)

@@ -307,10 +304,6 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
if not quieterrors:
logger.critical(traceback.format_exc())
os._exit(1)

sys.stdout.flush()
sys.stderr.flush()

try:
if dry_run:
return 0

@@ -352,12 +345,12 @@ class runQueueWorkerPipe():
if pipeout:
pipeout.close()
bb.utils.nonblockingfd(self.input)
self.queue = bytearray()
self.queue = b""

def read(self):
start = len(self.queue)
try:
self.queue.extend(self.input.read(102400) or b"")
self.queue = self.queue + (self.input.read(102400) or b"")
except (OSError, IOError) as e:
if e.errno != errno.EAGAIN:
raise

@@ -385,7 +378,7 @@ class BitbakeWorker(object):
def __init__(self, din):
self.input = din
bb.utils.nonblockingfd(self.input)
self.queue = bytearray()
self.queue = b""
self.cookercfg = None
self.databuilder = None
self.data = None

@@ -419,7 +412,7 @@ class BitbakeWorker(object):
if len(r) == 0:
# EOF on pipe, server must have terminated
self.sigterm_exception(signal.SIGTERM, None)
self.queue.extend(r)
self.queue = self.queue + r
except (OSError, IOError):
pass
if len(self.queue):

@@ -439,30 +432,18 @@ class BitbakeWorker(object):
while self.process_waitpid():
continue

def handle_item(self, item, func):
opening_tag = b"<" + item + b">"
if not self.queue.startswith(opening_tag):
return

tag_len = len(opening_tag)
if len(self.queue) < tag_len + 4:
# we need to receive more data
return
header = self.queue[tag_len:tag_len + 4]
payload_len = int.from_bytes(header, 'big')
# closing tag has length (tag_len + 1)
if len(self.queue) < tag_len * 2 + 1 + payload_len:
# we need to receive more data
return

index = self.queue.find(b"</" + item + b">")
if index != -1:
try:
func(self.queue[(tag_len + 4):index])
except pickle.UnpicklingError:
workerlog_write("Unable to unpickle data: %s\n" % ":".join("{:02x}".format(c) for c in self.queue))
raise
self.queue = self.queue[(index + len(b"</") + len(item) + len(b">")):]
if self.queue.startswith(b"<" + item + b">"):
index = self.queue.find(b"</" + item + b">")
while index != -1:
try:
func(self.queue[(len(item) + 2):index])
except pickle.UnpicklingError:
workerlog_write("Unable to unpickle data: %s\n" % ":".join("{:02x}".format(c) for c in self.queue))
raise
self.queue = self.queue[(index + len(item) + 3):]
index = self.queue.find(b"</" + item + b">")

def handle_cookercfg(self, data):
self.cookercfg = pickle.loads(data)
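The handle_item() rewrite above switches to length-prefixed framing: each message is an opening tag, a 4-byte big-endian payload length, the payload, then the closing tag. A self-contained sketch of that framing with a toy buffer; the real worker also locates the closing tag before unpickling, and reads from a pipe into self.queue rather than from a local variable.

```python
# Sketch of the length-prefixed framing used by the new handle_item():
# "<item>" + 4-byte big-endian length + payload + "</item>".
def encode(item: bytes, payload: bytes) -> bytes:
    return b"<" + item + b">" + len(payload).to_bytes(4, "big") + payload + b"</" + item + b">"


def decode(queue: bytearray, item: bytes):
    """Return (payload, remaining_queue), or (None, queue) if more data is needed."""
    opening_tag = b"<" + item + b">"
    tag_len = len(opening_tag)
    if not queue.startswith(opening_tag):
        return None, queue
    if len(queue) < tag_len + 4:
        return None, queue  # length header not complete yet
    payload_len = int.from_bytes(queue[tag_len:tag_len + 4], "big")
    # closing tag ("</item>") is one byte longer than the opening tag
    frame_len = tag_len + 4 + payload_len + tag_len + 1
    if len(queue) < frame_len:
        return None, queue  # payload not complete yet
    start = tag_len + 4
    payload = bytes(queue[start:start + payload_len])
    return payload, queue[frame_len:]


buf = bytearray(encode(b"event", b"hello") + encode(b"event", b"world"))
payload, buf = decode(buf, b"event")
assert payload == b"hello"
payload, buf = decode(buf, b"event")
assert payload == b"world" and not buf
print("framing round-trip ok")
```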
@@ -24,17 +24,15 @@ warnings.simplefilter("default")
version = 1.0

git_cmd = ['git', '-c', 'safe.bareRepository=all']

def main():
if sys.version_info < (3, 4, 0):
sys.exit('Python 3.4 or greater is required')

git_dir = check_output(git_cmd + ['rev-parse', '--git-dir']).rstrip()
git_dir = check_output(['git', 'rev-parse', '--git-dir']).rstrip()
shallow_file = os.path.join(git_dir, 'shallow')
if os.path.exists(shallow_file):
try:
check_output(git_cmd + ['fetch', '--unshallow'])
check_output(['git', 'fetch', '--unshallow'])
except subprocess.CalledProcessError:
try:
os.unlink(shallow_file)

@@ -43,21 +41,21 @@ def main():
raise

args = process_args()
revs = check_output(git_cmd + ['rev-list'] + args.revisions).splitlines()
revs = check_output(['git', 'rev-list'] + args.revisions).splitlines()

make_shallow(shallow_file, args.revisions, args.refs)

ref_revs = check_output(git_cmd + ['rev-list'] + args.refs).splitlines()
ref_revs = check_output(['git', 'rev-list'] + args.refs).splitlines()
remaining_history = set(revs) & set(ref_revs)
for rev in remaining_history:
if check_output(git_cmd + ['rev-parse', '{}^@'.format(rev)]):
if check_output(['git', 'rev-parse', '{}^@'.format(rev)]):
sys.exit('Error: %s was not made shallow' % rev)

filter_refs(args.refs)

if args.shrink:
shrink_repo(git_dir)
subprocess.check_call(git_cmd + ['fsck', '--unreachable'])
subprocess.check_call(['git', 'fsck', '--unreachable'])

def process_args():

@@ -74,12 +72,12 @@ def process_args():
args = parser.parse_args()

if args.refs:
args.refs = check_output(git_cmd + ['rev-parse', '--symbolic-full-name'] + args.refs).splitlines()
args.refs = check_output(['git', 'rev-parse', '--symbolic-full-name'] + args.refs).splitlines()
else:
args.refs = get_all_refs(lambda r, t, tt: t == 'commit' or tt == 'commit')

args.refs = list(filter(lambda r: not r.endswith('/HEAD'), args.refs))
args.revisions = check_output(git_cmd + ['rev-parse'] + ['%s^{}' % i for i in args.revisions]).splitlines()
args.revisions = check_output(['git', 'rev-parse'] + ['%s^{}' % i for i in args.revisions]).splitlines()
return args

@@ -97,7 +95,7 @@ def make_shallow(shallow_file, revisions, refs):

def get_all_refs(ref_filter=None):
"""Return all the existing refs in this repository, optionally filtering the refs."""
ref_output = check_output(git_cmd + ['for-each-ref', '--format=%(refname)\t%(objecttype)\t%(*objecttype)'])
ref_output = check_output(['git', 'for-each-ref', '--format=%(refname)\t%(objecttype)\t%(*objecttype)'])
ref_split = [tuple(iter_extend(l.rsplit('\t'), 3)) for l in ref_output.splitlines()]
if ref_filter:
ref_split = (e for e in ref_split if ref_filter(*e))

@@ -115,7 +113,7 @@ def filter_refs(refs):
all_refs = get_all_refs()
to_remove = set(all_refs) - set(refs)
if to_remove:
check_output(['xargs', '-0', '-n', '1'] + git_cmd + ['update-ref', '-d', '--no-deref'],
check_output(['xargs', '-0', '-n', '1', 'git', 'update-ref', '-d', '--no-deref'],
input=''.join(l + '\0' for l in to_remove))

@@ -128,7 +126,7 @@ def follow_history_intersections(revisions, refs):
if rev in seen:
continue

parents = check_output(git_cmd + ['rev-parse', '%s^@' % rev]).splitlines()
parents = check_output(['git', 'rev-parse', '%s^@' % rev]).splitlines()

yield rev
seen.add(rev)

@@ -136,12 +134,12 @@ def follow_history_intersections(revisions, refs):
if not parents:
continue

check_refs = check_output(git_cmd + ['merge-base', '--independent'] + sorted(refs)).splitlines()
check_refs = check_output(['git', 'merge-base', '--independent'] + sorted(refs)).splitlines()
for parent in parents:
for ref in check_refs:
print("Checking %s vs %s" % (parent, ref))
try:
merge_base = check_output(git_cmd + ['merge-base', parent, ref]).rstrip()
merge_base = check_output(['git', 'merge-base', parent, ref]).rstrip()
except subprocess.CalledProcessError:
continue
else:

@@ -161,14 +159,14 @@ def iter_except(func, exception, start=None):

def shrink_repo(git_dir):
"""Shrink the newly shallow repository, removing the unreachable objects."""
subprocess.check_call(git_cmd + ['reflog', 'expire', '--expire-unreachable=now', '--all'])
subprocess.check_call(git_cmd + ['repack', '-ad'])
subprocess.check_call(['git', 'reflog', 'expire', '--expire-unreachable=now', '--all'])
subprocess.check_call(['git', 'repack', '-ad'])
try:
os.unlink(os.path.join(git_dir, 'objects', 'info', 'alternates'))
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
subprocess.check_call(git_cmd + ['prune', '--expire', 'now'])
subprocess.check_call(['git', 'prune', '--expire', 'now'])

if __name__ == '__main__':
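The common thread in this file's hunks is a shared git_cmd prefix carrying -c safe.bareRepository=all so every git invocation gets the same configuration. A minimal sketch of the pattern; it assumes git is installed and is run inside a repository, otherwise check_output() raises CalledProcessError.

```python
# Sketch of the shared-prefix pattern applied throughout git-make-shallow:
# every call goes through git_cmd so the safe.bareRepository setting is
# applied consistently. Requires git and a repository to run.
import subprocess

git_cmd = ["git", "-c", "safe.bareRepository=all"]


def check_output(cmd, input=None):
    return subprocess.check_output(cmd, universal_newlines=True, input=input)


if __name__ == "__main__":
    git_dir = check_output(git_cmd + ["rev-parse", "--git-dir"]).rstrip()
    head = check_output(git_cmd + ["rev-parse", "HEAD"]).rstrip()
    print(f"git dir: {git_dir}, HEAD: {head}")
```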
@@ -84,7 +84,7 @@ webserverStartAll()
echo "Starting webserver..."

$MANAGE runserver --noreload "$ADDR_PORT" \
</dev/null >>${TOASTER_LOGS_DIR}/web.log 2>&1 \
</dev/null >>${BUILDDIR}/toaster_web.log 2>&1 \
& echo $! >${BUILDDIR}/.toastermain.pid

sleep 1

@@ -181,14 +181,6 @@ WEBSERVER=1
export TOASTER_BUILDSERVER=1
ADDR_PORT="localhost:8000"
TOASTERDIR=`dirname $BUILDDIR`
# ${BUILDDIR}/toaster_logs/ became the default location for toaster logs
# This is needed for implemented django-log-viewer: https://pypi.org/project/django-log-viewer/
# If the directory does not exist, create it.
TOASTER_LOGS_DIR="${BUILDDIR}/toaster_logs/"
if [ ! -d $TOASTER_LOGS_DIR ]
then
mkdir $TOASTER_LOGS_DIR
fi
unset CMD
for param in $*; do
case $param in

@@ -307,7 +299,7 @@ case $CMD in
export BITBAKE_UI='toasterui'
if [ $TOASTER_BUILDSERVER -eq 1 ] ; then
$MANAGE runbuilds \
</dev/null >>${TOASTER_LOGS_DIR}/toaster_runbuilds.log 2>&1 \
</dev/null >>${BUILDDIR}/toaster_runbuilds.log 2>&1 \
& echo $! >${BUILDDIR}/.runbuilds.pid
else
echo "Toaster build server not started."
@@ -30,23 +30,79 @@ sys.path.insert(0, join(dirname(dirname(abspath(__file__))), 'lib'))

import bb.cooker
from bb.ui import toasterui
from bb.ui import eventreplay

class EventPlayer:
"""Emulate a connection to a bitbake server."""

def __init__(self, eventfile, variables):
self.eventfile = eventfile
self.variables = variables
self.eventmask = []

def waitEvent(self, _timeout):
"""Read event from the file."""
line = self.eventfile.readline().strip()
if not line:
return
try:
event_str = json.loads(line)['vars'].encode('utf-8')
event = pickle.loads(codecs.decode(event_str, 'base64'))
event_name = "%s.%s" % (event.__module__, event.__class__.__name__)
if event_name not in self.eventmask:
return
return event
except ValueError as err:
print("Failed loading ", line)
raise err

def runCommand(self, command_line):
"""Emulate running a command on the server."""
name = command_line[0]

if name == "getVariable":
var_name = command_line[1]
variable = self.variables.get(var_name)
if variable:
return variable['v'], None
return None, "Missing variable %s" % var_name

elif name == "getAllKeysWithFlags":
dump = {}
flaglist = command_line[1]
for key, val in self.variables.items():
try:
if not key.startswith("__"):
dump[key] = {
'v': val['v'],
'history' : val['history'],
}
for flag in flaglist:
dump[key][flag] = val[flag]
except Exception as err:
print(err)
return (dump, None)

elif name == 'setEventMask':
self.eventmask = command_line[-1]
return True, None

else:
raise Exception("Command %s not implemented" % command_line[0])

def getEventHandle(self):
"""
This method is called by toasterui.
The return value is passed to self.runCommand but not used there.
"""
pass

def main(argv):
with open(argv[-1]) as eventfile:
# load variables from the first line
variables = None
while line := eventfile.readline().strip():
try:
variables = json.loads(line)['allvariables']
break
except (KeyError, json.JSONDecodeError):
continue
if not variables:
sys.exit("Cannot find allvariables entry in event log file %s" % argv[-1])
eventfile.seek(0)
variables = json.loads(eventfile.readline().strip())['allvariables']

params = namedtuple('ConfigParams', ['observe_only'])(True)
player = eventreplay.EventPlayer(eventfile, variables)
player = EventPlayer(eventfile, variables)

return toasterui.main(player, player, params)
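The new variable-loading loop in main() scans the event log for the first line that carries an allvariables entry instead of assuming it is the first line. A self-contained sketch with made-up log lines:

```python
# Sketch of the new variable-loading loop in toaster-eventreplay. The sample
# event-log lines below are invented for illustration.
import io
import json

eventlog = io.StringIO(
    '{"some_event": 1}\n'
    '{"allvariables": {"MACHINE": {"v": "qemux86-64"}}}\n'
    '{"another_event": 2}\n'
)

variables = None
while line := eventlog.readline().strip():
    try:
        variables = json.loads(line)["allvariables"]
        break
    except (KeyError, json.JSONDecodeError):
        continue

if not variables:
    raise SystemExit("Cannot find allvariables entry in event log")
print(variables["MACHINE"]["v"])
```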
@@ -40,7 +40,7 @@ set cpo&vim

let s:maxoff = 50 " maximum number of lines to look backwards for ()

function! GetBBPythonIndent(lnum)
function GetPythonIndent(lnum)

" If this line is explicitly joined: If the previous line was also joined,
" line it up with that one, otherwise add two 'shiftwidth'

@@ -257,7 +257,7 @@ let b:did_indent = 1
setlocal indentkeys+=0\"

function! BitbakeIndent(lnum)
function BitbakeIndent(lnum)
if !has('syntax_items')
return -1
endif

@@ -315,7 +315,7 @@ function! BitbakeIndent(lnum)
endif

if index(["bbPyDefRegion", "bbPyFuncRegion"], name) != -1
let ret = GetBBPythonIndent(a:lnum)
let ret = GetPythonIndent(a:lnum)
" Should normally always be indented by at least one shiftwidth; but allow
" return of -1 (defer to autoindent) or -2 (force indent to 0)
if ret == 0
@@ -63,14 +63,13 @@ syn region bbVarFlagFlag matchgroup=bbArrayBrackets start="\[" end="\]\s*

" Includes and requires
syn keyword bbInclude inherit include require contained
syn match bbIncludeRest ".*$" contained contains=bbString,bbVarDeref,bbVarPyValue
syn match bbIncludeRest ".*$" contained contains=bbString,bbVarDeref
syn match bbIncludeLine "^\(inherit\|include\|require\)\s\+" contains=bbInclude nextgroup=bbIncludeRest

" Add taks and similar
syn keyword bbStatement addtask deltask addhandler after before EXPORT_FUNCTIONS contained
syn match bbStatementRest /[^\\]*$/ skipwhite contained contains=bbStatement,bbVarDeref,bbVarPyValue
syn region bbStatementRestCont start=/.*\\$/ end=/^[^\\]*$/ contained contains=bbStatement,bbVarDeref,bbVarPyValue,bbContinue keepend
syn match bbStatementLine "^\(addtask\|deltask\|addhandler\|after\|before\|EXPORT_FUNCTIONS\)\s\+" contains=bbStatement nextgroup=bbStatementRest,bbStatementRestCont
syn match bbStatementRest ".*$" skipwhite contained contains=bbStatement
syn match bbStatementLine "^\(addtask\|deltask\|addhandler\|after\|before\|EXPORT_FUNCTIONS\)\s\+" contains=bbStatement nextgroup=bbStatementRest

" OE Important Functions
syn keyword bbOEFunctions do_fetch do_unpack do_patch do_configure do_compile do_stage do_install do_package contained

@@ -123,7 +122,6 @@ hi def link bbPyFlag Type
hi def link bbPyDef Statement
hi def link bbStatement Statement
hi def link bbStatementRest Identifier
hi def link bbStatementRestCont Identifier
hi def link bbOEFunctions Special
hi def link bbVarPyValue PreProc
hi def link bbOverrideOperator Operator
@@ -47,8 +47,8 @@ To install all required packages run:

To build the documentation locally, run:

$ cd doc
$ make html
$ cd documentation
$ make -f Makefile.sphinx html

The resulting HTML index page will be _build/html/index.html, and you
can browse your own copy of the locally generated documentation with
@@ -586,11 +586,10 @@ or possibly those defined in the metadata/signature handler itself. The
simplest parameter to pass is "none", which causes a set of signature
information to be written out into ``STAMPS_DIR`` corresponding to the
targets specified. The other currently available parameter is
"printdiff", which causes BitBake to try to establish the most recent
"printdiff", which causes BitBake to try to establish the closest
signature match it can (e.g. in the sstate cache) and then run
compare the matched signatures to determine the stamps and delta
where these two stamp trees diverge. This can be used to determine why
tasks need to be re-run in situations where that is not expected.
``bitbake-diffsigs`` over the matches to determine the stamps and delta
where these two stamp trees diverge.

.. note::
@@ -476,14 +476,6 @@ Here are some example URLs::
|
||||
easy to share metadata without removing passwords. SSH keys, ``~/.netrc``
|
||||
and ``~/.ssh/config`` files can be used as alternatives.
|
||||
|
||||
Using tags with the git fetcher may cause surprising behaviour. BitBake needs to
|
||||
resolve the tag to a specific revision and to do that, it has to connect to and use
|
||||
the upstream repository. This is because the revision the tags point at can change and
|
||||
we've seen cases of this happening in well known public repositories. This can mean
|
||||
many more network connections than expected and recipes may be reparsed at every build.
|
||||
Source mirrors will also be bypassed as the upstream repository is the only source
|
||||
of truth to resolve the revision accurately. For these reasons, whilst the fetcher
|
||||
can support tags, we recommend being specific about revisions in recipes.
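As a sketch (the repository URL and revision below are placeholders, not a real project), a recipe would pin an exact revision rather than a tag::

   SRC_URI = "git://git.example.com/project.git;protocol=https;branch=main"
   SRCREV = "0123456789abcdef0123456789abcdef01234567"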
|
||||
|
||||
.. _gitsm-fetcher:
|
||||
|
||||
@@ -696,41 +688,6 @@ Here is an example URL::
|
||||
|
||||
It can also be used when setting mirrors definitions using the :term:`PREMIRRORS` variable.
|
||||
|
||||
.. _gcp-fetcher:
|
||||
|
||||
GCP Fetcher (``gs://``)
|
||||
--------------------------
|
||||
|
||||
This submodule fetches data from a
|
||||
`Google Cloud Storage Bucket <https://cloud.google.com/storage/docs/buckets>`__.
|
||||
It uses the `Google Cloud Storage Python Client <https://cloud.google.com/python/docs/reference/storage/latest>`__
|
||||
to check the status of objects in the bucket and download them.
|
||||
The use of the Python client makes it substantially faster than using command
|
||||
line tools such as gsutil.
|
||||
|
||||
The fetcher requires the Google Cloud Storage Python Client to be installed, along
|
||||
with the gsutil tool.
|
||||
|
||||
The fetcher requires that the machine has valid credentials for accessing the
|
||||
chosen bucket. Instructions for authentication can be found in the
|
||||
`Google Cloud documentation <https://cloud.google.com/docs/authentication/provide-credentials-adc#local-dev>`__.
|
||||
|
||||
If used from the OpenEmbedded build system, the fetcher can be used for
|
||||
fetching sstate artifacts from a GCS bucket by specifying the
|
||||
``SSTATE_MIRRORS`` variable as shown below::
|
||||
|
||||
SSTATE_MIRRORS ?= "\
|
||||
file://.* gs://<bucket name>/PATH \
|
||||
"
|
||||
|
||||
The fetcher can also be used in recipes::
|
||||
|
||||
SRC_URI = "gs://<bucket name>/<foo_container>/<bar_file>"
|
||||
|
||||
However, the checksum of the file should also be provided::
|
||||
|
||||
SRC_URI[sha256sum] = "<sha256 string>"
|
||||
|
||||
.. _crate-fetcher:
|
||||
|
||||
Crate Fetcher (``crate://``)
|
||||
@@ -834,8 +791,6 @@ Fetch submodules also exist for the following:
|
||||
|
||||
- OSC (``osc://``)
|
||||
|
||||
- S3 (``s3://``)
|
||||
|
||||
- Secure FTP (``sftp://``)
|
||||
|
||||
- Secure Shell (``ssh://``)
|
||||
|
||||
@@ -209,12 +209,12 @@ Following is the complete "Hello World" example.
|
||||
|
||||
.. note::
|
||||
|
||||
Without a value for :term:`PN`, the variables :term:`STAMP`, :term:`T`, and :term:`B`, prevent more
|
||||
than one recipe from working. You can fix this by either setting :term:`PN` to
|
||||
Without a value for PN, the variables STAMP, T, and B, prevent more
|
||||
than one recipe from working. You can fix this by either setting PN to
|
||||
have a value similar to what OpenEmbedded and BitBake use in the default
|
||||
``bitbake.conf`` file (see previous example). Or, by manually updating each
|
||||
recipe to set :term:`PN`. You will also need to include :term:`PN` as part of the :term:`STAMP`,
|
||||
:term:`T`, and :term:`B` variable definitions in the ``local.conf`` file.
|
||||
bitbake.conf file (see previous example). Or, by manually updating each
|
||||
recipe to set PN. You will also need to include PN as part of the STAMP
|
||||
, T, and B variable definitions in the local.conf file.
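A minimal sketch of such a default, modelled on BitBake's own ``bitbake.conf``, derives ``PN`` from the recipe file name::

   PN = "${@bb.parse.vars_from_file(d.getVar('FILE', False), d)[0] or 'defaultpkgname'}"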
|
||||
|
||||
The ``TMPDIR`` variable establishes a directory that BitBake uses
|
||||
for build output and intermediate files other than the cached
|
||||
@@ -319,9 +319,9 @@ Following is the complete "Hello World" example.
|
||||
|
||||
.. note::
|
||||
|
||||
We are setting both ``LAYERSERIES_CORENAMES`` and :term:`LAYERSERIES_COMPAT` in this particular case, because we
|
||||
We are setting both LAYERSERIES_CORENAMES and LAYERSERIES_COMPAT in this particular case, because we
|
||||
are using bitbake without OpenEmbedded.
|
||||
You should usually just use :term:`LAYERSERIES_COMPAT` to specify the OE-Core versions for which your layer
|
||||
You should usually just use LAYERSERIES_COMPAT to specify the OE-Core versions for which your layer
|
||||
is compatible, and add the meta-openembedded layer to your project.
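For instance (the layer name is illustrative), a layer's ``conf/layer.conf`` would typically carry a line such as::

   LAYERSERIES_COMPAT_mylayer = "mickledore"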
|
||||
|
||||
You need to create the recipe file next. Inside your layer at the
|
||||
|
||||
@@ -1519,12 +1519,6 @@ functionality of the task:
|
||||
released. You can use this variable flag to accomplish mutual
|
||||
exclusion.
|
||||
|
||||
- ``[network]``: When set to "1", allows a task to access the network. By
|
||||
default, only the ``do_fetch`` task is granted network access. Recipes
|
||||
shouldn't access the network outside of ``do_fetch`` as it usually
|
||||
undermines fetcher source mirroring, image and licence manifests, software
|
||||
auditing and supply chain security.
|
||||
|
||||
- ``[noexec]``: When set to "1", marks the task as being empty, with
|
||||
no execution required. You can use the ``[noexec]`` flag to set up
|
||||
tasks as dependency placeholders, or to disable tasks defined
|
||||
|
||||
@@ -1,91 +0,0 @@
|
||||
.. SPDX-License-Identifier: CC-BY-2.5
|
||||
|
||||
================
|
||||
Variable Context
|
||||
================
|
||||
|
||||
|
|
||||
|
||||
Variables may only have an effect, or may only be usable, in certain contexts. Some
|
||||
should only be used in global files like ``.conf``, while others are intended only
|
||||
for local files like ``.bb``. This chapter aims to describe some important variable
|
||||
contexts.
|
||||
|
||||
.. _ref-varcontext-configuration:
|
||||
|
||||
BitBake's own configuration
|
||||
===========================
|
||||
|
||||
Variables starting with ``BB_`` usually configure the behaviour of BitBake itself.
|
||||
For example, one could configure:
|
||||
|
||||
- System resources, like disk space to be used (:term:`BB_DISKMON_DIRS`),
|
||||
or the number of tasks to be run in parallel by BitBake (:term:`BB_NUMBER_THREADS`).
|
||||
|
||||
- How the fetchers shall behave, e.g., :term:`BB_FETCH_PREMIRRORONLY` is used
|
||||
by BitBake to determine if BitBake's fetcher shall search only
|
||||
:term:`PREMIRRORS` for files.
|
||||
|
||||
Those variables are usually configured globally.
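A hedged illustration of such global settings in a configuration file::

   BB_NUMBER_THREADS = "8"
   BB_FETCH_PREMIRRORONLY = "1"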
|
||||
|
||||
BitBake configuration
|
||||
=====================
|
||||
|
||||
There are variables:
|
||||
|
||||
- Like :term:`B` or :term:`T`, that are used to specify directories used by
|
||||
BitBake during the build of a particular recipe. Those variables are
|
||||
specified in ``bitbake.conf``. Some, like :term:`B`, are quite often
|
||||
overwritten in recipes.
|
||||
|
||||
- Starting with ``FAKEROOT``, to configure how the ``fakeroot`` command is
|
||||
handled. Those are usually set by ``bitbake.conf`` and might get adapted in a
|
||||
``bbclass``.
|
||||
|
||||
- Detailing where BitBake will store and fetch information from, for
|
||||
data reuse between build runs like :term:`CACHE`, :term:`DL_DIR` or
|
||||
:term:`PERSISTENT_DIR`. Those are usually global.
|
||||
|
||||
|
||||
Layers and files
|
||||
================
|
||||
|
||||
Variables starting with ``LAYER`` configure how BitBake handles layers.
|
||||
Additionally, variables starting with ``BB`` configure how layers and files are
|
||||
handled. For example:
|
||||
|
||||
- :term:`LAYERDEPENDS` is used to configure on which layers a given layer
|
||||
depends.
|
||||
|
||||
- The configured layers are contained in :term:`BBLAYERS` and files in
|
||||
:term:`BBFILES`.
|
||||
|
||||
Those variables are often used in the files ``layer.conf`` and ``bblayers.conf``.
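A hedged sketch of how this looks in practice (the layer name and paths are placeholders)::

   # conf/layer.conf
   LAYERDEPENDS_mylayer = "core"

   # conf/bblayers.conf
   BBLAYERS ?= " \
       /path/to/meta \
       /path/to/meta-mylayer \
       "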
|
||||
|
||||
Recipes and packages
|
||||
====================
|
||||
|
||||
Variables handling recipes and packages can be split into:
|
||||
|
||||
- :term:`PN`, :term:`PV` or :term:`PF` for example, contain information about
|
||||
the name or revision of a recipe or package. Usually, the default set in
|
||||
``bitbake.conf`` is used, but those are from time to time overwritten in
|
||||
recipes.
|
||||
|
||||
- :term:`SUMMARY`, :term:`DESCRIPTION`, :term:`LICENSE` or :term:`HOMEPAGE`
|
||||
contain the expected information and should be set specifically for every
|
||||
recipe.
|
||||
|
||||
- In recipes, variables are also used to control build and runtime
|
||||
dependencies between recipes/packages with other recipes/packages. The
|
||||
most common should be: :term:`PROVIDES`, :term:`RPROVIDES`, :term:`DEPENDS`,
|
||||
and :term:`RDEPENDS`.
|
||||
|
||||
- There are further variables starting with ``SRC`` that specify the sources in
|
||||
a recipe like :term:`SRC_URI` or :term:`SRCDATE`. Those are also usually set
|
||||
in recipes.
|
||||
|
||||
- Which version or provider of a recipe should be given preference when
|
||||
multiple recipes would provide the same item, is controlled by variables
|
||||
starting with ``PREFERRED_``. Those are normally set in the configuration
|
||||
files of a ``MACHINE`` or ``DISTRO``.
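As an example (the recipe and provider names are only illustrative), such preferences are usually expressed as::

   PREFERRED_PROVIDER_virtual/kernel = "linux-yocto"
   PREFERRED_VERSION_linux-yocto = "6.1%"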
|
||||
@@ -424,7 +424,7 @@ overview of their function and contents.
|
||||
|
||||
Example usage::
|
||||
|
||||
BB_HASHSERVE_UPSTREAM = "hashserv.yoctoproject.org:8686"
|
||||
BB_HASHSERVE_UPSTREAM = "hashserv.yocto.io:8687"
|
||||
|
||||
:term:`BB_INVALIDCONF`
|
||||
Used in combination with the ``ConfigParsed`` event to trigger
|
||||
@@ -432,15 +432,6 @@ overview of their function and contents.
|
||||
``ConfigParsed`` event can set the variable to trigger the re-parse.
|
||||
You must be careful to avoid recursive loops with this functionality.
|
||||
|
||||
:term:`BB_LOADFACTOR_MAX`
|
||||
Setting this to a value will cause BitBake to check the system load
|
||||
average before executing new tasks. If the load average is above the
|
||||
number of CPUs multiplied by this factor, no new task will be started
|
||||
unless there is no task executing. A value of "1.5" has been found to
|
||||
work reasonably. This is helpful for systems which don't have pressure
|
||||
regulation enabled, which is more granular. Pressure values take
|
||||
precedence over loadfactor.
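Example usage (the value is only the suggestion given above)::

   BB_LOADFACTOR_MAX = "1.5"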
|
||||
|
||||
:term:`BB_LOGCONFIG`
|
||||
Specifies the name of a config file that contains the user logging
|
||||
configuration. See
|
||||
@@ -572,7 +563,7 @@ overview of their function and contents.
|
||||
:term:`BB_RUNFMT` variable is undefined and the run filenames get
|
||||
created using the following form::
|
||||
|
||||
run.{func}.{pid}
|
||||
run.{task}.{pid}
|
||||
|
||||
If you want to force run files to take a specific name, you can set this
|
||||
variable in a configuration file.
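For example (a sketch reusing the fields shown above)::

   BB_RUNFMT = "run.{task}.{pid}"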
|
||||
@@ -929,9 +920,9 @@ overview of their function and contents.
|
||||
section.
|
||||
|
||||
:term:`BBPATH`
|
||||
A colon-separated list used by BitBake to locate class (``.bbclass``)
|
||||
and configuration (``.conf``) files. This variable is analogous to the
|
||||
``PATH`` variable.
|
||||
Used by BitBake to locate class (``.bbclass``) and configuration
|
||||
(``.conf``) files. This variable is analogous to the ``PATH``
|
||||
variable.
|
||||
|
||||
If you run BitBake from a directory outside of the build directory,
|
||||
you must be sure to set :term:`BBPATH` to point to the build directory.
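For example (the path is a placeholder for your build directory)::

   $ export BBPATH="/path/to/build"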
|
||||
@@ -1081,11 +1072,6 @@ overview of their function and contents.
|
||||
environment variable. The value is a colon-separated list of
|
||||
directories that are searched left-to-right in order.
|
||||
|
||||
:term:`FILE_LAYERNAME`
|
||||
During parsing and task execution, this is set to the name of the
|
||||
layer containing the recipe file. Code can use this to identify which
|
||||
layer a recipe is from.
|
||||
|
||||
:term:`GITDIR`
|
||||
The directory in which a local copy of a Git repository is stored
|
||||
when it is cloned.
|
||||
@@ -1179,8 +1165,8 @@ overview of their function and contents.
|
||||
order.
|
||||
|
||||
:term:`OVERRIDES`
|
||||
A colon-separated list that BitBake uses to control what variables are
|
||||
overridden after BitBake parses recipes and configuration files.
|
||||
BitBake uses :term:`OVERRIDES` to control what variables are overridden
|
||||
after BitBake parses recipes and configuration files.
|
||||
|
||||
Following is a simple example that uses an overrides list based on
|
||||
machine architectures: OVERRIDES = "arm:x86:mips:powerpc" You can
|
||||
|
||||
@@ -13,7 +13,6 @@ BitBake User Manual
|
||||
bitbake-user-manual/bitbake-user-manual-intro
|
||||
bitbake-user-manual/bitbake-user-manual-execution
|
||||
bitbake-user-manual/bitbake-user-manual-metadata
|
||||
bitbake-user-manual/bitbake-user-manual-ref-variables-context
|
||||
bitbake-user-manual/bitbake-user-manual-fetching
|
||||
bitbake-user-manual/bitbake-user-manual-ref-variables
|
||||
bitbake-user-manual/bitbake-user-manual-hello
|
||||
|
||||
@@ -4,15 +4,15 @@
|
||||
BitBake Supported Release Manuals
|
||||
=================================
|
||||
|
||||
*******************************
|
||||
Release Series 4.2 (mickledore)
|
||||
*******************************
|
||||
*****************************
|
||||
Release Series 4.1 (langdale)
|
||||
*****************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.4 User Manual </bitbake/2.4/>`
|
||||
- :yocto_docs:`BitBake 2.2 User Manual </bitbake/2.2/>`
|
||||
|
||||
******************************
|
||||
Release Series 4.0 (kirkstone)
|
||||
******************************
|
||||
*****************************
|
||||
Release Series 4.0 (kirstone)
|
||||
*****************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.0 User Manual </bitbake/2.0/>`
|
||||
|
||||
@@ -26,12 +26,6 @@ Release Series 3.1 (dunfell)
|
||||
BitBake Outdated Release Manuals
|
||||
================================
|
||||
|
||||
*****************************
|
||||
Release Series 4.1 (langdale)
|
||||
*****************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.2 User Manual </bitbake/2.2/>`
|
||||
|
||||
******************************
|
||||
Release Series 3.4 (honister)
|
||||
******************************
|
||||
|
||||
@@ -9,19 +9,12 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
__version__ = "2.8.0"
|
||||
__version__ = "2.4.0"
|
||||
|
||||
import sys
|
||||
if sys.version_info < (3, 8, 0):
|
||||
raise RuntimeError("Sorry, python 3.8.0 or later is required for this version of bitbake")
|
||||
|
||||
if sys.version_info < (3, 10, 0):
|
||||
# With python 3.8 and 3.9, we see errors of "libgcc_s.so.1 must be installed for pthread_cancel to work"
|
||||
# https://stackoverflow.com/questions/64797838/libgcc-s-so-1-must-be-installed-for-pthread-cancel-to-work
|
||||
# https://bugs.ams1.psf.io/issue42888
|
||||
# so ensure libgcc_s is loaded early on
|
||||
import ctypes
|
||||
libgcc_s = ctypes.CDLL('libgcc_s.so.1')
|
||||
|
||||
class BBHandledException(Exception):
|
||||
"""
|
||||
@@ -36,7 +29,6 @@ class BBHandledException(Exception):
|
||||
|
||||
import os
|
||||
import logging
|
||||
from collections import namedtuple
|
||||
|
||||
|
||||
class NullHandler(logging.Handler):
|
||||
@@ -228,14 +220,3 @@ def deprecate_import(current, modulename, fromlist, renames = None):
|
||||
|
||||
setattr(sys.modules[current], newname, newobj)
|
||||
|
||||
TaskData = namedtuple("TaskData", [
|
||||
"pn",
|
||||
"taskname",
|
||||
"fn",
|
||||
"deps",
|
||||
"provides",
|
||||
"taskhash",
|
||||
"unihash",
|
||||
"hashfn",
|
||||
"taskhash_deps",
|
||||
])
|
||||
|
||||
@@ -1,215 +0,0 @@
|
||||
#! /usr/bin/env python3
|
||||
#
|
||||
# Copyright 2023 by Garmin Ltd. or its subsidiaries
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
|
||||
import sys
|
||||
import ctypes
|
||||
import os
|
||||
import errno
|
||||
import pwd
|
||||
import grp
|
||||
|
||||
libacl = ctypes.CDLL("libacl.so.1", use_errno=True)
|
||||
|
||||
|
||||
ACL_TYPE_ACCESS = 0x8000
|
||||
ACL_TYPE_DEFAULT = 0x4000
|
||||
|
||||
ACL_FIRST_ENTRY = 0
|
||||
ACL_NEXT_ENTRY = 1
|
||||
|
||||
ACL_UNDEFINED_TAG = 0x00
|
||||
ACL_USER_OBJ = 0x01
|
||||
ACL_USER = 0x02
|
||||
ACL_GROUP_OBJ = 0x04
|
||||
ACL_GROUP = 0x08
|
||||
ACL_MASK = 0x10
|
||||
ACL_OTHER = 0x20
|
||||
|
||||
ACL_READ = 0x04
|
||||
ACL_WRITE = 0x02
|
||||
ACL_EXECUTE = 0x01
|
||||
|
||||
acl_t = ctypes.c_void_p
|
||||
acl_entry_t = ctypes.c_void_p
|
||||
acl_permset_t = ctypes.c_void_p
|
||||
acl_perm_t = ctypes.c_uint
|
||||
|
||||
acl_tag_t = ctypes.c_int
|
||||
|
||||
libacl.acl_free.argtypes = [acl_t]
|
||||
|
||||
|
||||
def acl_free(acl):
|
||||
libacl.acl_free(acl)
|
||||
|
||||
|
||||
libacl.acl_get_file.restype = acl_t
|
||||
libacl.acl_get_file.argtypes = [ctypes.c_char_p, ctypes.c_uint]
|
||||
|
||||
|
||||
def acl_get_file(path, typ):
|
||||
acl = libacl.acl_get_file(os.fsencode(path), typ)
|
||||
if acl is None:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err), str(path))
|
||||
|
||||
return acl
|
||||
|
||||
|
||||
libacl.acl_get_entry.argtypes = [acl_t, ctypes.c_int, ctypes.c_void_p]
|
||||
|
||||
|
||||
def acl_get_entry(acl, entry_id):
|
||||
entry = acl_entry_t()
|
||||
ret = libacl.acl_get_entry(acl, entry_id, ctypes.byref(entry))
|
||||
if ret < 0:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err))
|
||||
|
||||
if ret == 0:
|
||||
return None
|
||||
|
||||
return entry
|
||||
|
||||
|
||||
libacl.acl_get_tag_type.argtypes = [acl_entry_t, ctypes.c_void_p]
|
||||
|
||||
|
||||
def acl_get_tag_type(entry_d):
|
||||
tag = acl_tag_t()
|
||||
ret = libacl.acl_get_tag_type(entry_d, ctypes.byref(tag))
|
||||
if ret < 0:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err))
|
||||
return tag.value
|
||||
|
||||
|
||||
libacl.acl_get_qualifier.restype = ctypes.c_void_p
|
||||
libacl.acl_get_qualifier.argtypes = [acl_entry_t]
|
||||
|
||||
|
||||
def acl_get_qualifier(entry_d):
|
||||
ret = libacl.acl_get_qualifier(entry_d)
|
||||
if ret is None:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err))
|
||||
return ctypes.c_void_p(ret)
|
||||
|
||||
|
||||
libacl.acl_get_permset.argtypes = [acl_entry_t, ctypes.c_void_p]
|
||||
|
||||
|
||||
def acl_get_permset(entry_d):
|
||||
permset = acl_permset_t()
|
||||
ret = libacl.acl_get_permset(entry_d, ctypes.byref(permset))
|
||||
if ret < 0:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err))
|
||||
|
||||
return permset
|
||||
|
||||
|
||||
libacl.acl_get_perm.argtypes = [acl_permset_t, acl_perm_t]
|
||||
|
||||
|
||||
def acl_get_perm(permset_d, perm):
|
||||
ret = libacl.acl_get_perm(permset_d, perm)
|
||||
if ret < 0:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err))
|
||||
return bool(ret)
|
||||
|
||||
|
||||
class Entry(object):
|
||||
def __init__(self, tag, qualifier, mode):
|
||||
self.tag = tag
|
||||
self.qualifier = qualifier
|
||||
self.mode = mode
|
||||
|
||||
def __str__(self):
|
||||
typ = ""
|
||||
qual = ""
|
||||
if self.tag == ACL_USER:
|
||||
typ = "user"
|
||||
qual = pwd.getpwuid(self.qualifier).pw_name
|
||||
elif self.tag == ACL_GROUP:
|
||||
typ = "group"
|
||||
qual = grp.getgrgid(self.qualifier).gr_name
|
||||
elif self.tag == ACL_USER_OBJ:
|
||||
typ = "user"
|
||||
elif self.tag == ACL_GROUP_OBJ:
|
||||
typ = "group"
|
||||
elif self.tag == ACL_MASK:
|
||||
typ = "mask"
|
||||
elif self.tag == ACL_OTHER:
|
||||
typ = "other"
|
||||
|
||||
r = "r" if self.mode & ACL_READ else "-"
|
||||
w = "w" if self.mode & ACL_WRITE else "-"
|
||||
x = "x" if self.mode & ACL_EXECUTE else "-"
|
||||
|
||||
return f"{typ}:{qual}:{r}{w}{x}"
|
||||
|
||||
|
||||
class ACL(object):
|
||||
def __init__(self, acl):
|
||||
self.acl = acl
|
||||
|
||||
def __del__(self):
|
||||
acl_free(self.acl)
|
||||
|
||||
def entries(self):
|
||||
entry_id = ACL_FIRST_ENTRY
|
||||
while True:
|
||||
entry = acl_get_entry(self.acl, entry_id)
|
||||
if entry is None:
|
||||
break
|
||||
|
||||
permset = acl_get_permset(entry)
|
||||
|
||||
mode = 0
|
||||
for m in (ACL_READ, ACL_WRITE, ACL_EXECUTE):
|
||||
if acl_get_perm(permset, m):
|
||||
mode |= m
|
||||
|
||||
qualifier = None
|
||||
tag = acl_get_tag_type(entry)
|
||||
|
||||
if tag == ACL_USER or tag == ACL_GROUP:
|
||||
qual = acl_get_qualifier(entry)
|
||||
qualifier = ctypes.cast(qual, ctypes.POINTER(ctypes.c_int))[0]
|
||||
|
||||
yield Entry(tag, qualifier, mode)
|
||||
|
||||
entry_id = ACL_NEXT_ENTRY
|
||||
|
||||
@classmethod
|
||||
def from_path(cls, path, typ):
|
||||
acl = acl_get_file(path, typ)
|
||||
return cls(acl)
|
||||
|
||||
|
||||
def main():
|
||||
import argparse
|
||||
import pwd
|
||||
import grp
|
||||
from pathlib import Path
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("path", help="File Path", type=Path)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
acl = ACL.from_path(args.path, ACL_TYPE_ACCESS)
|
||||
for entry in acl.entries():
|
||||
print(str(entry))
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
@@ -4,13 +4,30 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import itertools
|
||||
import json
|
||||
|
||||
from .client import AsyncClient, Client, ClientPool
|
||||
from .serv import AsyncServer, AsyncServerConnection
|
||||
from .connection import DEFAULT_MAX_CHUNK
|
||||
from .exceptions import (
|
||||
ClientError,
|
||||
ServerError,
|
||||
ConnectionClosedError,
|
||||
InvokeError,
|
||||
)
|
||||
# The Python async server defaults to a 64K receive buffer, so we hardcode our
|
||||
# maximum chunk size. It would be better if the client and server reported to
|
||||
# each other what the maximum chunk sizes were, but that will slow down the
|
||||
# connection setup with a round trip delay so I'd rather not do that unless it
|
||||
# is necessary
|
||||
DEFAULT_MAX_CHUNK = 32 * 1024
|
||||
|
||||
|
||||
def chunkify(msg, max_chunk):
|
||||
if len(msg) < max_chunk - 1:
|
||||
yield ''.join((msg, "\n"))
|
||||
else:
|
||||
yield ''.join((json.dumps({
|
||||
'chunk-stream': None
|
||||
}), "\n"))
|
||||
|
||||
args = [iter(msg)] * (max_chunk - 1)
|
||||
for m in map(''.join, itertools.zip_longest(*args, fillvalue='')):
|
||||
yield ''.join(itertools.chain(m, "\n"))
|
||||
yield "\n"
|
||||
|
||||
|
||||
from .client import AsyncClient, Client
|
||||
from .serv import AsyncServer, AsyncServerConnection, ClientError, ServerError
|
||||
|
||||
@@ -10,59 +10,22 @@ import json
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
import re
|
||||
import contextlib
|
||||
from threading import Thread
|
||||
from .connection import StreamConnection, WebsocketConnection, DEFAULT_MAX_CHUNK
|
||||
from .exceptions import ConnectionClosedError, InvokeError
|
||||
from . import chunkify, DEFAULT_MAX_CHUNK
|
||||
|
||||
UNIX_PREFIX = "unix://"
|
||||
WS_PREFIX = "ws://"
|
||||
WSS_PREFIX = "wss://"
|
||||
|
||||
ADDR_TYPE_UNIX = 0
|
||||
ADDR_TYPE_TCP = 1
|
||||
ADDR_TYPE_WS = 2
|
||||
|
||||
def parse_address(addr):
|
||||
if addr.startswith(UNIX_PREFIX):
|
||||
return (ADDR_TYPE_UNIX, (addr[len(UNIX_PREFIX) :],))
|
||||
elif addr.startswith(WS_PREFIX) or addr.startswith(WSS_PREFIX):
|
||||
return (ADDR_TYPE_WS, (addr,))
|
||||
else:
|
||||
m = re.match(r"\[(?P<host>[^\]]*)\]:(?P<port>\d+)$", addr)
|
||||
if m is not None:
|
||||
host = m.group("host")
|
||||
port = m.group("port")
|
||||
else:
|
||||
host, port = addr.split(":")
|
||||
|
||||
return (ADDR_TYPE_TCP, (host, int(port)))
|
||||
|
||||
class AsyncClient(object):
|
||||
def __init__(
|
||||
self,
|
||||
proto_name,
|
||||
proto_version,
|
||||
logger,
|
||||
timeout=30,
|
||||
server_headers=False,
|
||||
headers={},
|
||||
):
|
||||
self.socket = None
|
||||
def __init__(self, proto_name, proto_version, logger, timeout=30):
|
||||
self.reader = None
|
||||
self.writer = None
|
||||
self.max_chunk = DEFAULT_MAX_CHUNK
|
||||
self.proto_name = proto_name
|
||||
self.proto_version = proto_version
|
||||
self.logger = logger
|
||||
self.timeout = timeout
|
||||
self.needs_server_headers = server_headers
|
||||
self.server_headers = {}
|
||||
self.headers = headers
|
||||
|
||||
async def connect_tcp(self, address, port):
|
||||
async def connect_sock():
|
||||
reader, writer = await asyncio.open_connection(address, port)
|
||||
return StreamConnection(reader, writer, self.timeout, self.max_chunk)
|
||||
return await asyncio.open_connection(address, port)
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
@@ -77,63 +40,27 @@ class AsyncClient(object):
|
||||
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
|
||||
sock.connect(os.path.basename(path))
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
reader, writer = await asyncio.open_unix_connection(sock=sock)
|
||||
return StreamConnection(reader, writer, self.timeout, self.max_chunk)
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
async def connect_websocket(self, uri):
|
||||
import websockets
|
||||
|
||||
async def connect_sock():
|
||||
websocket = await websockets.connect(
|
||||
uri,
|
||||
ping_interval=None,
|
||||
open_timeout=self.timeout,
|
||||
)
|
||||
return WebsocketConnection(websocket, self.timeout)
|
||||
os.chdir(cwd)
|
||||
return await asyncio.open_unix_connection(sock=sock)
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
async def setup_connection(self):
|
||||
# Send headers
|
||||
await self.socket.send("%s %s" % (self.proto_name, self.proto_version))
|
||||
await self.socket.send(
|
||||
"needs-headers: %s" % ("true" if self.needs_server_headers else "false")
|
||||
)
|
||||
for k, v in self.headers.items():
|
||||
await self.socket.send("%s: %s" % (k, v))
|
||||
|
||||
# End of headers
|
||||
await self.socket.send("")
|
||||
|
||||
self.server_headers = {}
|
||||
if self.needs_server_headers:
|
||||
while True:
|
||||
line = await self.socket.recv()
|
||||
if not line:
|
||||
# End headers
|
||||
break
|
||||
tag, value = line.split(":", 1)
|
||||
self.server_headers[tag.lower()] = value.strip()
|
||||
|
||||
async def get_header(self, tag, default):
|
||||
await self.connect()
|
||||
return self.server_headers.get(tag, default)
|
||||
s = '%s %s\n\n' % (self.proto_name, self.proto_version)
|
||||
self.writer.write(s.encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
|
||||
async def connect(self):
|
||||
if self.socket is None:
|
||||
self.socket = await self._connect_sock()
|
||||
if self.reader is None or self.writer is None:
|
||||
(self.reader, self.writer) = await self._connect_sock()
|
||||
await self.setup_connection()
|
||||
|
||||
async def disconnect(self):
|
||||
if self.socket is not None:
|
||||
await self.socket.close()
|
||||
self.socket = None
|
||||
|
||||
async def close(self):
|
||||
await self.disconnect()
|
||||
self.reader = None
|
||||
|
||||
if self.writer is not None:
|
||||
self.writer.close()
|
||||
self.writer = None
|
||||
|
||||
async def _send_wrapper(self, proc):
|
||||
count = 0
|
||||
@@ -144,7 +71,6 @@ class AsyncClient(object):
|
||||
except (
|
||||
OSError,
|
||||
ConnectionError,
|
||||
ConnectionClosedError,
|
||||
json.JSONDecodeError,
|
||||
UnicodeDecodeError,
|
||||
) as e:
|
||||
@@ -156,27 +82,49 @@ class AsyncClient(object):
|
||||
await self.close()
|
||||
count += 1
|
||||
|
||||
def check_invoke_error(self, msg):
|
||||
if isinstance(msg, dict) and "invoke-error" in msg:
|
||||
raise InvokeError(msg["invoke-error"]["message"])
|
||||
async def send_message(self, msg):
|
||||
async def get_line():
|
||||
try:
|
||||
line = await asyncio.wait_for(self.reader.readline(), self.timeout)
|
||||
except asyncio.TimeoutError:
|
||||
raise ConnectionError("Timed out waiting for server")
|
||||
|
||||
if not line:
|
||||
raise ConnectionError("Connection closed")
|
||||
|
||||
line = line.decode("utf-8")
|
||||
|
||||
if not line.endswith("\n"):
|
||||
raise ConnectionError("Bad message %r" % (line))
|
||||
|
||||
return line
|
||||
|
||||
async def invoke(self, msg):
|
||||
async def proc():
|
||||
await self.socket.send_message(msg)
|
||||
return await self.socket.recv_message()
|
||||
for c in chunkify(json.dumps(msg), self.max_chunk):
|
||||
self.writer.write(c.encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
|
||||
result = await self._send_wrapper(proc)
|
||||
self.check_invoke_error(result)
|
||||
return result
|
||||
l = await get_line()
|
||||
|
||||
m = json.loads(l)
|
||||
if m and "chunk-stream" in m:
|
||||
lines = []
|
||||
while True:
|
||||
l = (await get_line()).rstrip("\n")
|
||||
if not l:
|
||||
break
|
||||
lines.append(l)
|
||||
|
||||
m = json.loads("".join(lines))
|
||||
|
||||
return m
|
||||
|
||||
return await self._send_wrapper(proc)
|
||||
|
||||
async def ping(self):
|
||||
return await self.invoke({"ping": {}})
|
||||
|
||||
async def __aenter__(self):
|
||||
return self
|
||||
|
||||
async def __aexit__(self, exc_type, exc_value, traceback):
|
||||
await self.close()
|
||||
return await self.send_message(
|
||||
{'ping': {}}
|
||||
)
|
||||
|
||||
|
||||
class Client(object):
|
||||
@@ -194,7 +142,7 @@ class Client(object):
|
||||
# required (but harmless) with it.
|
||||
asyncio.set_event_loop(self.loop)
|
||||
|
||||
self._add_methods("connect_tcp", "ping")
|
||||
self._add_methods('connect_tcp', 'ping')
|
||||
|
||||
@abc.abstractmethod
|
||||
def _get_async_client(self):
|
||||
@@ -223,95 +171,8 @@ class Client(object):
|
||||
def max_chunk(self, value):
|
||||
self.client.max_chunk = value
|
||||
|
||||
def disconnect(self):
|
||||
def close(self):
|
||||
self.loop.run_until_complete(self.client.close())
|
||||
|
||||
def close(self):
|
||||
if self.loop:
|
||||
self.loop.run_until_complete(self.client.close())
|
||||
if sys.version_info >= (3, 6):
|
||||
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
|
||||
self.loop.close()
|
||||
self.loop = None
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
self.close()
|
||||
return False
|
||||
|
||||
|
||||
class ClientPool(object):
|
||||
def __init__(self, max_clients):
|
||||
self.avail_clients = []
|
||||
self.num_clients = 0
|
||||
self.max_clients = max_clients
|
||||
self.loop = None
|
||||
self.client_condition = None
|
||||
|
||||
@abc.abstractmethod
|
||||
async def _new_client(self):
|
||||
raise NotImplementedError("Must be implemented in derived class")
|
||||
|
||||
def close(self):
|
||||
if self.client_condition:
|
||||
self.client_condition = None
|
||||
|
||||
if self.loop:
|
||||
self.loop.run_until_complete(self.__close_clients())
|
||||
if sys.version_info >= (3, 6):
|
||||
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
|
||||
self.loop.close()
|
||||
self.loop = None
|
||||
|
||||
def run_tasks(self, tasks):
|
||||
if not self.loop:
|
||||
self.loop = asyncio.new_event_loop()
|
||||
|
||||
thread = Thread(target=self.__thread_main, args=(tasks,))
|
||||
thread.start()
|
||||
thread.join()
|
||||
|
||||
@contextlib.asynccontextmanager
|
||||
async def get_client(self):
|
||||
async with self.client_condition:
|
||||
if self.avail_clients:
|
||||
client = self.avail_clients.pop()
|
||||
elif self.num_clients < self.max_clients:
|
||||
self.num_clients += 1
|
||||
client = await self._new_client()
|
||||
else:
|
||||
while not self.avail_clients:
|
||||
await self.client_condition.wait()
|
||||
client = self.avail_clients.pop()
|
||||
|
||||
try:
|
||||
yield client
|
||||
finally:
|
||||
async with self.client_condition:
|
||||
self.avail_clients.append(client)
|
||||
self.client_condition.notify()
|
||||
|
||||
def __thread_main(self, tasks):
|
||||
async def process_task(task):
|
||||
async with self.get_client() as client:
|
||||
await task(client)
|
||||
|
||||
asyncio.set_event_loop(self.loop)
|
||||
if not self.client_condition:
|
||||
self.client_condition = asyncio.Condition()
|
||||
tasks = [process_task(t) for t in tasks]
|
||||
self.loop.run_until_complete(asyncio.gather(*tasks))
|
||||
|
||||
async def __close_clients(self):
|
||||
for c in self.avail_clients:
|
||||
await c.close()
|
||||
self.avail_clients = []
|
||||
self.num_clients = 0
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
self.close()
|
||||
return False
|
||||
self.loop.close()
|
||||
|
||||
@@ -1,146 +0,0 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import asyncio
|
||||
import itertools
|
||||
import json
|
||||
from datetime import datetime
|
||||
from .exceptions import ClientError, ConnectionClosedError
|
||||
|
||||
|
||||
# The Python async server defaults to a 64K receive buffer, so we hardcode our
|
||||
# maximum chunk size. It would be better if the client and server reported to
|
||||
# each other what the maximum chunk sizes were, but that will slow down the
|
||||
# connection setup with a round trip delay so I'd rather not do that unless it
|
||||
# is necessary
|
||||
DEFAULT_MAX_CHUNK = 32 * 1024
|
||||
|
||||
|
||||
def chunkify(msg, max_chunk):
|
||||
if len(msg) < max_chunk - 1:
|
||||
yield "".join((msg, "\n"))
|
||||
else:
|
||||
yield "".join((json.dumps({"chunk-stream": None}), "\n"))
|
||||
|
||||
args = [iter(msg)] * (max_chunk - 1)
|
||||
for m in map("".join, itertools.zip_longest(*args, fillvalue="")):
|
||||
yield "".join(itertools.chain(m, "\n"))
|
||||
yield "\n"
|
||||
|
||||
|
||||
def json_serialize(obj):
|
||||
if isinstance(obj, datetime):
|
||||
return obj.isoformat()
|
||||
raise TypeError("Type %s not serializeable" % type(obj))
|
||||
|
||||
|
||||
class StreamConnection(object):
|
||||
def __init__(self, reader, writer, timeout, max_chunk=DEFAULT_MAX_CHUNK):
|
||||
self.reader = reader
|
||||
self.writer = writer
|
||||
self.timeout = timeout
|
||||
self.max_chunk = max_chunk
|
||||
|
||||
@property
|
||||
def address(self):
|
||||
return self.writer.get_extra_info("peername")
|
||||
|
||||
async def send_message(self, msg):
|
||||
for c in chunkify(json.dumps(msg, default=json_serialize), self.max_chunk):
|
||||
self.writer.write(c.encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
|
||||
async def recv_message(self):
|
||||
l = await self.recv()
|
||||
|
||||
m = json.loads(l)
|
||||
if not m:
|
||||
return m
|
||||
|
||||
if "chunk-stream" in m:
|
||||
lines = []
|
||||
while True:
|
||||
l = await self.recv()
|
||||
if not l:
|
||||
break
|
||||
lines.append(l)
|
||||
|
||||
m = json.loads("".join(lines))
|
||||
|
||||
return m
|
||||
|
||||
async def send(self, msg):
|
||||
self.writer.write(("%s\n" % msg).encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
|
||||
async def recv(self):
|
||||
if self.timeout < 0:
|
||||
line = await self.reader.readline()
|
||||
else:
|
||||
try:
|
||||
line = await asyncio.wait_for(self.reader.readline(), self.timeout)
|
||||
except asyncio.TimeoutError:
|
||||
raise ConnectionError("Timed out waiting for data")
|
||||
|
||||
if not line:
|
||||
raise ConnectionClosedError("Connection closed")
|
||||
|
||||
line = line.decode("utf-8")
|
||||
|
||||
if not line.endswith("\n"):
|
||||
raise ConnectionError("Bad message %r" % (line))
|
||||
|
||||
return line.rstrip()
|
||||
|
||||
async def close(self):
|
||||
self.reader = None
|
||||
if self.writer is not None:
|
||||
self.writer.close()
|
||||
self.writer = None
|
||||
|
||||
|
||||
class WebsocketConnection(object):
|
||||
def __init__(self, socket, timeout):
|
||||
self.socket = socket
|
||||
self.timeout = timeout
|
||||
|
||||
@property
|
||||
def address(self):
|
||||
return ":".join(str(s) for s in self.socket.remote_address)
|
||||
|
||||
async def send_message(self, msg):
|
||||
await self.send(json.dumps(msg, default=json_serialize))
|
||||
|
||||
async def recv_message(self):
|
||||
m = await self.recv()
|
||||
return json.loads(m)
|
||||
|
||||
async def send(self, msg):
|
||||
import websockets.exceptions
|
||||
|
||||
try:
|
||||
await self.socket.send(msg)
|
||||
except websockets.exceptions.ConnectionClosed:
|
||||
raise ConnectionClosedError("Connection closed")
|
||||
|
||||
async def recv(self):
|
||||
import websockets.exceptions
|
||||
|
||||
try:
|
||||
if self.timeout < 0:
|
||||
return await self.socket.recv()
|
||||
|
||||
try:
|
||||
return await asyncio.wait_for(self.socket.recv(), self.timeout)
|
||||
except asyncio.TimeoutError:
|
||||
raise ConnectionError("Timed out waiting for data")
|
||||
except websockets.exceptions.ConnectionClosed:
|
||||
raise ConnectionClosedError("Connection closed")
|
||||
|
||||
async def close(self):
|
||||
if self.socket is not None:
|
||||
await self.socket.close()
|
||||
self.socket = None
|
||||
@@ -1,21 +0,0 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
class ClientError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class InvokeError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ServerError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ConnectionClosedError(Exception):
|
||||
pass
|
||||
@@ -12,333 +12,241 @@ import signal
|
||||
import socket
|
||||
import sys
|
||||
import multiprocessing
|
||||
import logging
|
||||
from .connection import StreamConnection, WebsocketConnection
|
||||
from .exceptions import ClientError, ServerError, ConnectionClosedError, InvokeError
|
||||
from . import chunkify, DEFAULT_MAX_CHUNK
|
||||
|
||||
|
||||
class ClientLoggerAdapter(logging.LoggerAdapter):
|
||||
def process(self, msg, kwargs):
|
||||
return f"[Client {self.extra['address']}] {msg}", kwargs
|
||||
class ClientError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ServerError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class AsyncServerConnection(object):
|
||||
# If a handler returns this object (e.g. `return self.NO_RESPONSE`), no
|
||||
# return message will automatically be sent back to the client
|
||||
NO_RESPONSE = object()
|
||||
|
||||
def __init__(self, socket, proto_name, logger):
|
||||
self.socket = socket
|
||||
def __init__(self, reader, writer, proto_name, logger):
|
||||
self.reader = reader
|
||||
self.writer = writer
|
||||
self.proto_name = proto_name
|
||||
self.max_chunk = DEFAULT_MAX_CHUNK
|
||||
self.handlers = {
|
||||
"ping": self.handle_ping,
|
||||
'chunk-stream': self.handle_chunk,
|
||||
'ping': self.handle_ping,
|
||||
}
|
||||
self.logger = ClientLoggerAdapter(
|
||||
logger,
|
||||
{
|
||||
"address": socket.address,
|
||||
},
|
||||
)
|
||||
self.client_headers = {}
|
||||
|
||||
async def close(self):
|
||||
await self.socket.close()
|
||||
|
||||
async def handle_headers(self, headers):
|
||||
return {}
|
||||
self.logger = logger
|
||||
|
||||
async def process_requests(self):
|
||||
try:
|
||||
self.logger.info("Client %r connected" % (self.socket.address,))
|
||||
self.addr = self.writer.get_extra_info('peername')
|
||||
self.logger.debug('Client %r connected' % (self.addr,))
|
||||
|
||||
# Read protocol and version
|
||||
client_protocol = await self.socket.recv()
|
||||
client_protocol = await self.reader.readline()
|
||||
if not client_protocol:
|
||||
return
|
||||
|
||||
(client_proto_name, client_proto_version) = client_protocol.split()
|
||||
(client_proto_name, client_proto_version) = client_protocol.decode('utf-8').rstrip().split()
|
||||
if client_proto_name != self.proto_name:
|
||||
self.logger.debug("Rejecting invalid protocol %s" % (self.proto_name))
|
||||
self.logger.debug('Rejecting invalid protocol %s' % (self.proto_name))
|
||||
return
|
||||
|
||||
self.proto_version = tuple(int(v) for v in client_proto_version.split("."))
|
||||
self.proto_version = tuple(int(v) for v in client_proto_version.split('.'))
|
||||
if not self.validate_proto_version():
|
||||
self.logger.debug(
|
||||
"Rejecting invalid protocol version %s" % (client_proto_version)
|
||||
)
|
||||
self.logger.debug('Rejecting invalid protocol version %s' % (client_proto_version))
|
||||
return
|
||||
|
||||
# Read headers
|
||||
self.client_headers = {}
|
||||
# Read headers. Currently, no headers are implemented, so look for
|
||||
# an empty line to signal the end of the headers
|
||||
while True:
|
||||
header = await self.socket.recv()
|
||||
if not header:
|
||||
# Empty line. End of headers
|
||||
break
|
||||
tag, value = header.split(":", 1)
|
||||
self.client_headers[tag.lower()] = value.strip()
|
||||
line = await self.reader.readline()
|
||||
if not line:
|
||||
return
|
||||
|
||||
if self.client_headers.get("needs-headers", "false") == "true":
|
||||
for k, v in (await self.handle_headers(self.client_headers)).items():
|
||||
await self.socket.send("%s: %s" % (k, v))
|
||||
await self.socket.send("")
|
||||
line = line.decode('utf-8').rstrip()
|
||||
if not line:
|
||||
break
|
||||
|
||||
# Handle messages
|
||||
while True:
|
||||
d = await self.socket.recv_message()
|
||||
d = await self.read_message()
|
||||
if d is None:
|
||||
break
|
||||
try:
|
||||
response = await self.dispatch_message(d)
|
||||
except InvokeError as e:
|
||||
await self.socket.send_message(
|
||||
{"invoke-error": {"message": str(e)}}
|
||||
)
|
||||
break
|
||||
|
||||
if response is not self.NO_RESPONSE:
|
||||
await self.socket.send_message(response)
|
||||
|
||||
except ConnectionClosedError as e:
|
||||
self.logger.info(str(e))
|
||||
except (ClientError, ConnectionError) as e:
|
||||
await self.dispatch_message(d)
|
||||
await self.writer.drain()
|
||||
except ClientError as e:
|
||||
self.logger.error(str(e))
|
||||
finally:
|
||||
await self.close()
|
||||
self.writer.close()
|
||||
|
||||
async def dispatch_message(self, msg):
|
||||
for k in self.handlers.keys():
|
||||
if k in msg:
|
||||
self.logger.debug("Handling %s" % k)
|
||||
return await self.handlers[k](msg[k])
|
||||
self.logger.debug('Handling %s' % k)
|
||||
await self.handlers[k](msg[k])
|
||||
return
|
||||
|
||||
raise ClientError("Unrecognized command %r" % msg)
|
||||
|
||||
async def handle_ping(self, request):
|
||||
return {"alive": True}
|
||||
def write_message(self, msg):
|
||||
for c in chunkify(json.dumps(msg), self.max_chunk):
|
||||
self.writer.write(c.encode('utf-8'))
|
||||
|
||||
async def read_message(self):
|
||||
l = await self.reader.readline()
|
||||
if not l:
|
||||
return None
|
||||
|
||||
class StreamServer(object):
|
||||
def __init__(self, handler, logger):
|
||||
self.handler = handler
|
||||
self.logger = logger
|
||||
self.closed = False
|
||||
|
||||
async def handle_stream_client(self, reader, writer):
|
||||
# writer.transport.set_write_buffer_limits(0)
|
||||
socket = StreamConnection(reader, writer, -1)
|
||||
if self.closed:
|
||||
await socket.close()
|
||||
return
|
||||
|
||||
await self.handler(socket)
|
||||
|
||||
async def stop(self):
|
||||
self.closed = True
|
||||
|
||||
|
||||
class TCPStreamServer(StreamServer):
|
||||
def __init__(self, host, port, handler, logger):
|
||||
super().__init__(handler, logger)
|
||||
self.host = host
|
||||
self.port = port
|
||||
|
||||
def start(self, loop):
|
||||
self.server = loop.run_until_complete(
|
||||
asyncio.start_server(self.handle_stream_client, self.host, self.port)
|
||||
)
|
||||
|
||||
for s in self.server.sockets:
|
||||
self.logger.debug("Listening on %r" % (s.getsockname(),))
|
||||
# Newer python does this automatically. Do it manually here for
|
||||
# maximum compatibility
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_QUICKACK, 1)
|
||||
|
||||
# Enable keep alives. This prevents broken client connections
|
||||
# from persisting on the server for long periods of time.
|
||||
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 30)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 4)
|
||||
|
||||
name = self.server.sockets[0].getsockname()
|
||||
if self.server.sockets[0].family == socket.AF_INET6:
|
||||
self.address = "[%s]:%d" % (name[0], name[1])
|
||||
else:
|
||||
self.address = "%s:%d" % (name[0], name[1])
|
||||
|
||||
return [self.server.wait_closed()]
|
||||
|
||||
async def stop(self):
|
||||
await super().stop()
|
||||
self.server.close()
|
||||
|
||||
def cleanup(self):
|
||||
pass
|
||||
|
||||
|
||||
class UnixStreamServer(StreamServer):
|
||||
def __init__(self, path, handler, logger):
|
||||
super().__init__(handler, logger)
|
||||
self.path = path
|
||||
|
||||
def start(self, loop):
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
# Work around path length limits in AF_UNIX
|
||||
os.chdir(os.path.dirname(self.path))
|
||||
self.server = loop.run_until_complete(
|
||||
asyncio.start_unix_server(
|
||||
self.handle_stream_client, os.path.basename(self.path)
|
||||
)
|
||||
)
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
message = l.decode('utf-8')
|
||||
|
||||
self.logger.debug("Listening on %r" % self.path)
|
||||
self.address = "unix://%s" % os.path.abspath(self.path)
|
||||
return [self.server.wait_closed()]
|
||||
if not message.endswith('\n'):
|
||||
return None
|
||||
|
||||
async def stop(self):
|
||||
await super().stop()
|
||||
self.server.close()
|
||||
return json.loads(message)
|
||||
except (json.JSONDecodeError, UnicodeDecodeError) as e:
|
||||
self.logger.error('Bad message from client: %r' % message)
|
||||
raise e
|
||||
|
||||
def cleanup(self):
|
||||
os.unlink(self.path)
|
||||
async def handle_chunk(self, request):
|
||||
lines = []
|
||||
try:
|
||||
while True:
|
||||
l = await self.reader.readline()
|
||||
l = l.rstrip(b"\n").decode("utf-8")
|
||||
if not l:
|
||||
break
|
||||
lines.append(l)
|
||||
|
||||
msg = json.loads(''.join(lines))
|
||||
except (json.JSONDecodeError, UnicodeDecodeError) as e:
|
||||
self.logger.error('Bad message from client: %r' % lines)
|
||||
raise e
|
||||
|
||||
class WebsocketsServer(object):
|
||||
def __init__(self, host, port, handler, logger):
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.handler = handler
|
||||
self.logger = logger
|
||||
if 'chunk-stream' in msg:
|
||||
raise ClientError("Nested chunks are not allowed")
|
||||
|
||||
def start(self, loop):
|
||||
import websockets.server
|
||||
await self.dispatch_message(msg)
|
||||
|
||||
self.server = loop.run_until_complete(
|
||||
websockets.server.serve(
|
||||
self.client_handler,
|
||||
self.host,
|
||||
self.port,
|
||||
ping_interval=None,
|
||||
)
|
||||
)
|
||||
|
||||
for s in self.server.sockets:
|
||||
self.logger.debug("Listening on %r" % (s.getsockname(),))
|
||||
|
||||
# Enable keep alives. This prevents broken client connections
|
||||
# from persisting on the server for long periods of time.
|
||||
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 30)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 4)
|
||||
|
||||
name = self.server.sockets[0].getsockname()
|
||||
if self.server.sockets[0].family == socket.AF_INET6:
|
||||
self.address = "ws://[%s]:%d" % (name[0], name[1])
|
||||
else:
|
||||
self.address = "ws://%s:%d" % (name[0], name[1])
|
||||
|
||||
return [self.server.wait_closed()]
|
||||
|
||||
async def stop(self):
|
||||
self.server.close()
|
||||
|
||||
def cleanup(self):
|
||||
pass
|
||||
|
||||
async def client_handler(self, websocket):
|
||||
socket = WebsocketConnection(websocket, -1)
|
||||
await self.handler(socket)
|
||||
async def handle_ping(self, request):
|
||||
response = {'alive': True}
|
||||
self.write_message(response)
|
||||
|
||||
|
||||
class AsyncServer(object):
|
||||
def __init__(self, logger):
|
||||
self._cleanup_socket = None
|
||||
self.logger = logger
|
||||
self.start = None
|
||||
self.address = None
|
||||
self.loop = None
|
||||
self.run_tasks = []
|
||||
|
||||
def start_tcp_server(self, host, port):
|
||||
self.server = TCPStreamServer(host, port, self._client_handler, self.logger)
|
||||
def start_tcp():
|
||||
self.server = self.loop.run_until_complete(
|
||||
asyncio.start_server(self.handle_client, host, port)
|
||||
)
|
||||
|
||||
for s in self.server.sockets:
|
||||
self.logger.debug('Listening on %r' % (s.getsockname(),))
|
||||
# Newer python does this automatically. Do it manually here for
|
||||
# maximum compatibility
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_QUICKACK, 1)
|
||||
|
||||
# Enable keep alives. This prevents broken client connections
|
||||
# from persisting on the server for long periods of time.
|
||||
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 30)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 4)
|
||||
|
||||
name = self.server.sockets[0].getsockname()
|
||||
if self.server.sockets[0].family == socket.AF_INET6:
|
||||
self.address = "[%s]:%d" % (name[0], name[1])
|
||||
else:
|
||||
self.address = "%s:%d" % (name[0], name[1])
|
||||
|
||||
self.start = start_tcp
|
||||
|
||||
def start_unix_server(self, path):
|
||||
self.server = UnixStreamServer(path, self._client_handler, self.logger)
|
||||
def cleanup():
|
||||
os.unlink(path)
|
||||
|
||||
def start_websocket_server(self, host, port):
|
||||
self.server = WebsocketsServer(host, port, self._client_handler, self.logger)
|
||||
def start_unix():
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
# Work around path length limits in AF_UNIX
|
||||
os.chdir(os.path.dirname(path))
|
||||
self.server = self.loop.run_until_complete(
|
||||
asyncio.start_unix_server(self.handle_client, os.path.basename(path))
|
||||
)
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
|
||||
async def _client_handler(self, socket):
|
||||
address = socket.address
|
||||
self.logger.debug('Listening on %r' % path)
|
||||
|
||||
self._cleanup_socket = cleanup
|
||||
self.address = "unix://%s" % os.path.abspath(path)
|
||||
|
||||
self.start = start_unix
|
||||
|
||||
@abc.abstractmethod
|
||||
def accept_client(self, reader, writer):
|
||||
pass
|
||||
|
||||
async def handle_client(self, reader, writer):
|
||||
# writer.transport.set_write_buffer_limits(0)
|
||||
try:
|
||||
client = self.accept_client(socket)
|
||||
client = self.accept_client(reader, writer)
|
||||
await client.process_requests()
|
||||
except Exception as e:
|
||||
import traceback
|
||||
|
||||
self.logger.error(
|
||||
"Error from client %s: %s" % (address, str(e)), exc_info=True
|
||||
)
|
||||
self.logger.error('Error from client: %s' % str(e), exc_info=True)
|
||||
traceback.print_exc()
|
||||
finally:
|
||||
self.logger.debug("Client %s disconnected", address)
|
||||
await socket.close()
|
||||
writer.close()
|
||||
self.logger.debug('Client disconnected')
|
||||
|
||||
@abc.abstractmethod
|
||||
def accept_client(self, socket):
|
||||
pass
|
||||
|
||||
async def stop(self):
|
||||
self.logger.debug("Stopping server")
|
||||
await self.server.stop()
|
||||
|
||||
def start(self):
|
||||
tasks = self.server.start(self.loop)
|
||||
self.address = self.server.address
|
||||
return tasks
|
||||
def run_loop_forever(self):
|
||||
try:
|
||||
self.loop.run_forever()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
def signal_handler(self):
|
||||
self.logger.debug("Got exit signal")
|
||||
self.loop.create_task(self.stop())
|
||||
self.loop.stop()
|
||||
|
||||
def _serve_forever(self, tasks):
|
||||
def _serve_forever(self):
|
||||
try:
|
||||
self.loop.add_signal_handler(signal.SIGTERM, self.signal_handler)
|
||||
self.loop.add_signal_handler(signal.SIGINT, self.signal_handler)
|
||||
self.loop.add_signal_handler(signal.SIGQUIT, self.signal_handler)
|
||||
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGTERM])
|
||||
|
||||
self.loop.run_until_complete(asyncio.gather(*tasks))
|
||||
self.run_loop_forever()
|
||||
self.server.close()
|
||||
|
||||
self.logger.debug("Server shutting down")
|
||||
self.loop.run_until_complete(self.server.wait_closed())
|
||||
self.logger.debug('Server shutting down')
|
||||
finally:
|
||||
self.server.cleanup()
|
||||
if self._cleanup_socket is not None:
|
||||
self._cleanup_socket()
|
||||
|
||||
def serve_forever(self):
|
||||
"""
|
||||
Serve requests in the current process
|
||||
"""
|
||||
self._create_loop()
|
||||
tasks = self.start()
|
||||
self._serve_forever(tasks)
|
||||
self.loop.close()
|
||||
|
||||
def _create_loop(self):
|
||||
# Create loop and override any loop that may have existed in
|
||||
# a parent process. It is possible that the usecases of
|
||||
# serve_forever might be constrained enough to allow using
|
||||
# get_event_loop here, but better safe than sorry for now.
|
||||
self.loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(self.loop)
|
||||
self.start()
|
||||
self._serve_forever()
|
||||
|
||||
def serve_as_process(self, *, prefunc=None, args=(), log_level=None):
|
||||
def serve_as_process(self, *, prefunc=None, args=()):
|
||||
"""
|
||||
Serve requests in a child process
|
||||
"""
|
||||
|
||||
def run(queue):
|
||||
# Create loop and override any loop that may have existed
|
||||
# in a parent process. Without doing this and instead
|
||||
@@ -351,22 +259,18 @@ class AsyncServer(object):
|
||||
# more general, though, as any potential use of asyncio in
|
||||
# Cooker could create a loop that needs to replaced in this
|
||||
# new process.
|
||||
self._create_loop()
|
||||
self.loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(self.loop)
|
||||
try:
|
||||
self.address = None
|
||||
tasks = self.start()
|
||||
self.start()
|
||||
finally:
|
||||
# Always put the server address to wake up the parent task
|
||||
queue.put(self.address)
|
||||
queue.close()
|
||||
|
||||
if prefunc is not None:
|
||||
prefunc(self, *args)
|
||||
|
||||
if log_level is not None:
|
||||
self.logger.setLevel(log_level)
|
||||
|
||||
self._serve_forever(tasks)
|
||||
self._serve_forever()
|
||||
|
||||
if sys.version_info >= (3, 6):
|
||||
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
|
||||
|
||||
@@ -344,7 +344,9 @@ def virtualfn2realfn(virtualfn):
|
||||
"""
|
||||
mc = ""
|
||||
if virtualfn.startswith('mc:') and virtualfn.count(':') >= 2:
|
||||
(_, mc, virtualfn) = virtualfn.split(':', 2)
|
||||
elems = virtualfn.split(':')
|
||||
mc = elems[1]
|
||||
virtualfn = ":".join(elems[2:])
|
||||
|
||||
fn = virtualfn
|
||||
cls = ""
|
||||
@@ -367,7 +369,7 @@ def realfn2virtual(realfn, cls, mc):
|
||||
|
||||
def variant2virtual(realfn, variant):
|
||||
"""
|
||||
Convert a real filename + a variant to a virtual filename
|
||||
Convert a real filename + the associated subclass keyword to a virtual filename
|
||||
"""
|
||||
if variant == "":
|
||||
return realfn
|
||||
@@ -512,11 +514,11 @@ class Cache(object):
|
||||
|
||||
return len(self.depends_cache)
|
||||
|
||||
def parse(self, filename, appends, layername):
|
||||
def parse(self, filename, appends):
|
||||
"""Parse the specified filename, returning the recipe information"""
|
||||
self.logger.debug("Parsing %s", filename)
|
||||
infos = []
|
||||
datastores = self.databuilder.parseRecipeVariants(filename, appends, mc=self.mc, layername=layername)
|
||||
datastores = self.databuilder.parseRecipeVariants(filename, appends, mc=self.mc)
|
||||
depends = []
|
||||
variants = []
|
||||
# Process the "real" fn last so we can store variants list
|
||||
|
||||
@@ -62,7 +62,6 @@ def check_indent(codestr):
|
||||
modulecode_deps = {}
|
||||
|
||||
def add_module_functions(fn, functions, namespace):
|
||||
import os
|
||||
fstat = os.stat(fn)
|
||||
fixedhash = fn + ":" + str(fstat.st_size) + ":" + str(fstat.st_mtime)
|
||||
for f in functions:
|
||||
@@ -72,11 +71,6 @@ def add_module_functions(fn, functions, namespace):
|
||||
parser.parse_python(None, filename=fn, lineno=1, fixedhash=fixedhash+f)
|
||||
#bb.warn("Cached %s" % f)
|
||||
except KeyError:
|
||||
targetfn = inspect.getsourcefile(functions[f])
|
||||
if fn != targetfn:
|
||||
# Skip references to other modules outside this file
|
||||
#bb.warn("Skipping %s" % name)
|
||||
continue
|
||||
lines, lineno = inspect.getsourcelines(functions[f])
|
||||
src = "".join(lines)
|
||||
parser.parse_python(src, filename=fn, lineno=lineno, fixedhash=fixedhash+f)
|
||||
@@ -87,14 +81,14 @@ def add_module_functions(fn, functions, namespace):
|
||||
if e in functions:
|
||||
execs.remove(e)
|
||||
execs.add(namespace + "." + e)
|
||||
modulecode_deps[name] = [parser.references.copy(), execs, parser.var_execs.copy(), parser.contains.copy(), parser.extra]
|
||||
#bb.warn("%s: %s\nRefs:%s Execs: %s %s %s" % (name, fn, parser.references, parser.execs, parser.var_execs, parser.contains))
|
||||
modulecode_deps[name] = [parser.references.copy(), execs, parser.var_execs.copy(), parser.contains.copy()]
|
||||
#bb.warn("%s: %s\nRefs:%s Execs: %s %s %s" % (name, src, parser.references, parser.execs, parser.var_execs, parser.contains))
|
||||
|
||||
def update_module_dependencies(d):
|
||||
for mod in modulecode_deps:
|
||||
excludes = set((d.getVarFlag(mod, "vardepsexclude") or "").split())
|
||||
if excludes:
|
||||
modulecode_deps[mod] = [modulecode_deps[mod][0] - excludes, modulecode_deps[mod][1] - excludes, modulecode_deps[mod][2] - excludes, modulecode_deps[mod][3], modulecode_deps[mod][4]]
|
||||
modulecode_deps[mod] = [modulecode_deps[mod][0] - excludes, modulecode_deps[mod][1] - excludes, modulecode_deps[mod][2] - excludes, modulecode_deps[mod][3]]
|
||||
|
||||
# A custom getstate/setstate using tuples is actually worth 15% cachesize by
|
||||
# avoiding duplication of the attribute names!
|
||||
@@ -117,22 +111,21 @@ class SetCache(object):
|
||||
codecache = SetCache()
|
||||
|
||||
class pythonCacheLine(object):
|
||||
def __init__(self, refs, execs, contains, extra):
|
||||
def __init__(self, refs, execs, contains):
|
||||
self.refs = codecache.internSet(refs)
|
||||
self.execs = codecache.internSet(execs)
|
||||
self.contains = {}
|
||||
for c in contains:
|
||||
self.contains[c] = codecache.internSet(contains[c])
|
||||
self.extra = extra
|
||||
|
||||
def __getstate__(self):
|
||||
return (self.refs, self.execs, self.contains, self.extra)
|
||||
return (self.refs, self.execs, self.contains)
|
||||
|
||||
def __setstate__(self, state):
|
||||
(refs, execs, contains, extra) = state
|
||||
self.__init__(refs, execs, contains, extra)
|
||||
(refs, execs, contains) = state
|
||||
self.__init__(refs, execs, contains)
|
||||
def __hash__(self):
|
||||
l = (hash(self.refs), hash(self.execs), hash(self.extra))
|
||||
l = (hash(self.refs), hash(self.execs))
|
||||
for c in sorted(self.contains.keys()):
|
||||
l = l + (c, hash(self.contains[c]))
|
||||
return hash(l)
|
||||
@@ -161,7 +154,7 @@ class CodeParserCache(MultiProcessCache):
|
||||
# so that an existing cache gets invalidated. Additionally you'll need
|
||||
# to increment __cache_version__ in cache.py in order to ensure that old
|
||||
# recipe caches don't trigger "Taskhash mismatch" errors.
|
||||
CACHE_VERSION = 12
|
||||
CACHE_VERSION = 11
|
||||
|
||||
def __init__(self):
|
||||
MultiProcessCache.__init__(self)
|
||||
@@ -175,8 +168,8 @@ class CodeParserCache(MultiProcessCache):
|
||||
self.pythoncachelines = {}
|
||||
self.shellcachelines = {}
|
||||
|
||||
def newPythonCacheLine(self, refs, execs, contains, extra):
|
||||
cacheline = pythonCacheLine(refs, execs, contains, extra)
|
||||
def newPythonCacheLine(self, refs, execs, contains):
|
||||
cacheline = pythonCacheLine(refs, execs, contains)
|
||||
h = hash(cacheline)
|
||||
if h in self.pythoncachelines:
|
||||
return self.pythoncachelines[h]
|
||||
@@ -262,19 +255,19 @@ class PythonParser():
|
||||
def visit_Call(self, node):
|
||||
name = self.called_node_name(node.func)
|
||||
if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
|
||||
if isinstance(node.args[0], ast.Constant) and isinstance(node.args[0].value, str):
|
||||
varname = node.args[0].value
|
||||
if name in self.containsfuncs and isinstance(node.args[1], ast.Constant):
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
varname = node.args[0].s
|
||||
if name in self.containsfuncs and isinstance(node.args[1], ast.Str):
|
||||
if varname not in self.contains:
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname].add(node.args[1].value)
|
||||
elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Constant):
|
||||
self.contains[varname].add(node.args[1].s)
|
||||
elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Str):
|
||||
if varname not in self.contains:
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname].update(node.args[1].value.split())
|
||||
self.contains[varname].update(node.args[1].s.split())
|
||||
elif name.endswith(self.getvarflags):
|
||||
if isinstance(node.args[1], ast.Constant):
|
||||
self.references.add('%s[%s]' % (varname, node.args[1].value))
|
||||
if isinstance(node.args[1], ast.Str):
|
||||
self.references.add('%s[%s]' % (varname, node.args[1].s))
|
||||
else:
|
||||
self.warn(node.func, node.args[1])
|
||||
else:
|
||||
@@ -282,8 +275,8 @@ class PythonParser():
|
||||
else:
|
||||
self.warn(node.func, node.args[0])
|
||||
elif name and name.endswith(".expand"):
|
||||
if isinstance(node.args[0], ast.Constant):
|
||||
value = node.args[0].value
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
value = node.args[0].s
|
||||
d = bb.data.init()
|
||||
parser = d.expandWithRefs(value, self.name)
|
||||
self.references |= parser.references
|
||||
@@ -293,8 +286,8 @@ class PythonParser():
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname] |= parser.contains[varname]
|
||||
elif name in self.execfuncs:
|
||||
if isinstance(node.args[0], ast.Constant):
|
||||
self.var_execs.add(node.args[0].value)
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
self.var_execs.add(node.args[0].s)
|
||||
else:
|
||||
self.warn(node.func, node.args[0])
|
||||
elif name and isinstance(node.func, (ast.Name, ast.Attribute)):
|
||||
@@ -344,7 +337,6 @@ class PythonParser():
|
||||
self.contains = {}
|
||||
for i in codeparsercache.pythoncache[h].contains:
|
||||
self.contains[i] = set(codeparsercache.pythoncache[h].contains[i])
|
||||
self.extra = codeparsercache.pythoncache[h].extra
|
||||
return
|
||||
|
||||
if h in codeparsercache.pythoncacheextras:
|
||||
@@ -353,7 +345,6 @@ class PythonParser():
|
||||
self.contains = {}
|
||||
for i in codeparsercache.pythoncacheextras[h].contains:
|
||||
self.contains[i] = set(codeparsercache.pythoncacheextras[h].contains[i])
|
||||
self.extra = codeparsercache.pythoncacheextras[h].extra
|
||||
return
|
||||
|
||||
if fixedhash and not node:
|
||||
@@ -372,11 +363,8 @@ class PythonParser():
|
||||
self.visit_Call(n)
|
||||
|
||||
self.execs.update(self.var_execs)
|
||||
self.extra = None
|
||||
if fixedhash:
|
||||
self.extra = bbhash(str(node))
|
||||
|
||||
codeparsercache.pythoncacheextras[h] = codeparsercache.newPythonCacheLine(self.references, self.execs, self.contains, self.extra)
|
||||
codeparsercache.pythoncacheextras[h] = codeparsercache.newPythonCacheLine(self.references, self.execs, self.contains)
|
||||
|
||||
class ShellParser():
|
||||
def __init__(self, name, log):
|
||||
|
||||
@@ -65,7 +65,7 @@ class Command:
|
||||
command = commandline.pop(0)
|
||||
|
||||
# Ensure cooker is ready for commands
|
||||
if command not in ["updateConfig", "setFeatures", "ping"]:
|
||||
if command != "updateConfig" and command != "setFeatures":
|
||||
try:
|
||||
self.cooker.init_configdata()
|
||||
if not self.remotedatastores:
|
||||
@@ -85,6 +85,7 @@ class Command:
|
||||
if not hasattr(command_method, 'readonly') or not getattr(command_method, 'readonly'):
|
||||
return None, "Not able to execute not readonly commands in readonly mode"
|
||||
try:
|
||||
self.cooker.process_inotify_updates_apply()
|
||||
if getattr(command_method, 'needconfig', True):
|
||||
self.cooker.updateCacheSync()
|
||||
result = command_method(self, commandline)
|
||||
@@ -108,6 +109,7 @@ class Command:
|
||||
|
||||
def runAsyncCommand(self, _, process_server, halt):
|
||||
try:
|
||||
self.cooker.process_inotify_updates_apply()
|
||||
if self.cooker.state in (bb.cooker.state.error, bb.cooker.state.shutdown, bb.cooker.state.forceshutdown):
|
||||
# updateCache will trigger a shutdown of the parser
|
||||
# and then raise BBHandledException triggering an exit
|
||||
@@ -167,8 +169,6 @@ class CommandsSync:
|
||||
Allow a UI to check the server is still alive
|
||||
"""
|
||||
return "Still alive!"
|
||||
ping.needconfig = False
|
||||
ping.readonly = True
|
||||
|
||||
def stateShutdown(self, command, params):
|
||||
"""
|
||||
@@ -307,11 +307,6 @@ class CommandsSync:
|
||||
return ret
|
||||
getLayerPriorities.readonly = True
|
||||
|
||||
def revalidateCaches(self, command, params):
|
||||
"""Called by UI clients when metadata may have changed"""
|
||||
command.cooker.revalidateCaches()
|
||||
parseConfiguration.needconfig = False
|
||||
|
||||
def getRecipes(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
@@ -420,30 +415,15 @@ class CommandsSync:
|
||||
return command.cooker.recipecaches[mc].pkg_dp
|
||||
getDefaultPreference.readonly = True
|
||||
|
||||
|
||||
def getSkippedRecipes(self, command, params):
|
||||
"""
|
||||
Get the map of skipped recipes for the specified multiconfig/mc name (`params[0]`).
|
||||
|
||||
Invoked by `bb.tinfoil.Tinfoil.get_skipped_recipes`
|
||||
|
||||
:param command: Internally used parameter.
|
||||
:param params: Parameter array. params[0] is multiconfig/mc name. If not given, then default mc '' is assumed.
|
||||
:return: Dict whose keys are virtualfns and values are `bb.cooker.SkippedPackage`
|
||||
"""
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
|
||||
# Return list sorted by reverse priority order
|
||||
import bb.cache
|
||||
def sortkey(x):
|
||||
vfn, _ = x
|
||||
realfn, _, item_mc = bb.cache.virtualfn2realfn(vfn)
|
||||
return -command.cooker.collections[item_mc].calc_bbfile_priority(realfn)[0], vfn
|
||||
realfn, _, mc = bb.cache.virtualfn2realfn(vfn)
|
||||
return (-command.cooker.collections[mc].calc_bbfile_priority(realfn)[0], vfn)
|
||||
|
||||
skipdict = OrderedDict(sorted(command.cooker.skiplist_by_mc[mc].items(), key=sortkey))
|
||||
skipdict = OrderedDict(sorted(command.cooker.skiplist.items(), key=sortkey))
|
||||
return list(skipdict.items())
|
||||
getSkippedRecipes.readonly = True
|
||||
|
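As a point of reference, here is a minimal, hedged sketch of driving the getSkippedRecipes command above from a UI client via bb.tinfoil (which the docstring names as its caller). It assumes an initialised build environment, and the SkippedPackage attribute names used below are assumptions rather than guarantees, so they are read defensively.

# Hedged usage sketch: reach getSkippedRecipes through tinfoil.
# Assumes a configured build environment is already sourced.
import bb.tinfoil

with bb.tinfoil.Tinfoil() as tinfoil:
    # A full parse is needed so the cooker skip list is populated.
    tinfoil.prepare(config_only=False)
    skipped = tinfoil.get_skipped_recipes()
    # Keys are virtual filenames, values are SkippedPackage-like objects;
    # "skipreason" is an assumed attribute, hence getattr with a default.
    for virtualfn, skipinfo in list(skipped.items())[:10]:
        print(virtualfn, getattr(skipinfo, "skipreason", None))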
||||
@@ -565,8 +545,8 @@ class CommandsSync:
|
||||
and return a datastore object representing the environment
|
||||
for the recipe.
|
||||
"""
|
||||
virtualfn = params[0]
|
||||
(fn, cls, mc) = bb.cache.virtualfn2realfn(virtualfn)
|
||||
fn = params[0]
|
||||
mc = bb.runqueue.mc_from_tid(fn)
|
||||
appends = params[1]
|
||||
appendlist = params[2]
|
||||
if len(params) > 3:
|
||||
@@ -581,7 +561,6 @@ class CommandsSync:
|
||||
appendfiles = command.cooker.collections[mc].get_file_appends(fn)
|
||||
else:
|
||||
appendfiles = []
|
||||
layername = command.cooker.collections[mc].calc_bbfile_priority(fn)[2]
|
||||
# We are calling bb.cache locally here rather than on the server,
|
||||
# but that's OK because it doesn't actually need anything from
|
||||
# the server barring the global datastore (which we have a remote
|
||||
@@ -589,10 +568,10 @@ class CommandsSync:
|
||||
if config_data:
|
||||
# We have to use a different function here if we're passing in a datastore
|
||||
# NOTE: we took a copy above, so we don't do it here again
|
||||
envdata = command.cooker.databuilder._parse_recipe(config_data, fn, appendfiles, mc, layername)[cls]
|
||||
envdata = command.cooker.databuilder._parse_recipe(config_data, fn, appendfiles, mc)['']
|
||||
else:
|
||||
# Use the standard path
|
||||
envdata = command.cooker.databuilder.parseRecipe(virtualfn, appendfiles, layername)
|
||||
envdata = command.cooker.databuilder.parseRecipe(fn, appendfiles)
|
||||
idx = command.remotedatastores.store(envdata)
|
||||
return DataStoreConnectionHandle(idx)
|
||||
parseRecipeFile.readonly = True
|
||||
@@ -792,14 +771,7 @@ class CommandsAsync:
|
||||
(mc, pn) = bb.runqueue.split_mc(params[0])
|
||||
taskname = params[1]
|
||||
sigs = params[2]
|
||||
bb.siggen.check_siggen_version(bb.siggen)
|
||||
res = bb.siggen.find_siginfo(pn, taskname, sigs, command.cooker.databuilder.mcdata[mc])
|
||||
bb.event.fire(bb.event.FindSigInfoResult(res), command.cooker.databuilder.mcdata[mc])
|
||||
command.finishAsyncCommand()
|
||||
findSigInfo.needcache = False
|
||||
|
||||
def getTaskSignatures(self, command, params):
|
||||
res = command.cooker.getTaskSignatures(params[0], params[1])
|
||||
bb.event.fire(bb.event.GetTaskSignatureResult(res), command.cooker.data)
|
||||
command.finishAsyncCommand()
|
||||
getTaskSignatures.needcache = True
|
||||
|
||||
@@ -17,11 +17,12 @@ import threading
|
||||
from io import StringIO, UnsupportedOperation
|
||||
from contextlib import closing
|
||||
from collections import defaultdict, namedtuple
|
||||
import bb, bb.command
|
||||
import bb, bb.exceptions, bb.command
|
||||
from bb import utils, data, parse, event, cache, providers, taskdata, runqueue, build
|
||||
import queue
|
||||
import signal
|
||||
import prserv.serv
|
||||
import pyinotify
|
||||
import json
|
||||
import pickle
|
||||
import codecs
|
||||
@@ -102,15 +103,12 @@ class CookerFeatures(object):
|
||||
|
||||
class EventWriter:
|
||||
def __init__(self, cooker, eventfile):
|
||||
self.file_inited = None
|
||||
self.cooker = cooker
|
||||
self.eventfile = eventfile
|
||||
self.event_queue = []
|
||||
|
||||
def write_variables(self):
|
||||
with open(self.eventfile, "a") as f:
|
||||
f.write("%s\n" % json.dumps({ "allvariables" : self.cooker.getAllKeysWithFlags(["doc", "func"])}))
|
||||
|
||||
def send(self, event):
|
||||
def write_event(self, event):
|
||||
with open(self.eventfile, "a") as f:
|
||||
try:
|
||||
str_event = codecs.encode(pickle.dumps(event), 'base64').decode('utf-8')
|
||||
@@ -120,6 +118,28 @@ class EventWriter:
|
||||
import traceback
|
||||
print(err, traceback.format_exc())
|
||||
|
||||
def send(self, event):
|
||||
if self.file_inited:
|
||||
# we have the file, just write the event
|
||||
self.write_event(event)
|
||||
else:
|
||||
# init on bb.event.BuildStarted
|
||||
name = "%s.%s" % (event.__module__, event.__class__.__name__)
|
||||
if name in ("bb.event.BuildStarted", "bb.cooker.CookerExit"):
|
||||
with open(self.eventfile, "w") as f:
|
||||
f.write("%s\n" % json.dumps({ "allvariables" : self.cooker.getAllKeysWithFlags(["doc", "func"])}))
|
||||
|
||||
self.file_inited = True
|
||||
|
||||
# write pending events
|
||||
for evt in self.event_queue:
|
||||
self.write_event(evt)
|
||||
|
||||
# also write the current event
|
||||
self.write_event(event)
|
||||
else:
|
||||
# queue all events until the file is inited
|
||||
self.event_queue.append(event)
|
||||
|
||||
#============================================================================#
|
||||
# BBCooker
|
||||
@@ -131,11 +151,8 @@ class BBCooker:
|
||||
|
||||
def __init__(self, featureSet=None, server=None):
|
||||
self.recipecaches = None
|
||||
self.baseconfig_valid = False
|
||||
self.parsecache_valid = False
|
||||
self.eventlog = None
|
||||
# The skiplists, one per multiconfig
|
||||
self.skiplist_by_mc = defaultdict(dict)
|
||||
self.skiplist = {}
|
||||
self.featureset = CookerFeatures()
|
||||
if featureSet:
|
||||
for f in featureSet:
|
||||
@@ -154,9 +171,17 @@ class BBCooker:
|
||||
self.waitIdle = server.wait_for_idle
|
||||
|
||||
bb.debug(1, "BBCooker starting %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
|
||||
self.configwatched = {}
|
||||
self.parsewatched = {}
|
||||
self.configwatcher = None
|
||||
self.confignotifier = None
|
||||
|
||||
self.watchmask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_CREATE | pyinotify.IN_DELETE | \
|
||||
pyinotify.IN_DELETE_SELF | pyinotify.IN_MODIFY | pyinotify.IN_MOVE_SELF | \
|
||||
pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO
|
||||
|
||||
self.watcher = None
|
||||
self.notifier = None
|
||||
|
||||
# If being called by something like tinfoil, we need to clean cached data
|
||||
# which may now be invalid
|
||||
@@ -167,6 +192,8 @@ class BBCooker:
|
||||
self.hashserv = None
|
||||
self.hashservaddr = None
|
||||
|
||||
self.inotify_modified_files = []
|
||||
|
||||
# TOSTOP must not be set or our children will hang when they output
|
||||
try:
|
||||
fd = sys.stdout.fileno()
|
||||
@@ -190,37 +217,135 @@ class BBCooker:
|
||||
signal.signal(signal.SIGHUP, self.sigterm_exception)
|
||||
|
||||
bb.debug(1, "BBCooker startup complete %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
|
||||
self.inotify_threadlock = threading.Lock()
|
||||
|
||||
def init_configdata(self):
|
||||
if not hasattr(self, "data"):
|
||||
self.initConfigurationData()
|
||||
bb.debug(1, "BBCooker parsed base configuration %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
self.handlePRServ()
|
||||
|
||||
def _baseconfig_set(self, value):
|
||||
if value and not self.baseconfig_valid:
|
||||
bb.server.process.serverlog("Base config valid")
|
||||
elif not value and self.baseconfig_valid:
|
||||
bb.server.process.serverlog("Base config invalidated")
|
||||
self.baseconfig_valid = value
|
||||
def setupConfigWatcher(self):
|
||||
with bb.utils.lock_timeout(self.inotify_threadlock):
|
||||
if self.configwatcher:
|
||||
self.configwatcher.close()
|
||||
self.confignotifier = None
|
||||
self.configwatcher = None
|
||||
self.configwatcher = pyinotify.WatchManager()
|
||||
self.configwatcher.bbseen = set()
|
||||
self.configwatcher.bbwatchedfiles = set()
|
||||
self.confignotifier = pyinotify.Notifier(self.configwatcher, self.config_notifications)
|
||||
|
||||
def _parsecache_set(self, value):
|
||||
if value and not self.parsecache_valid:
|
||||
bb.server.process.serverlog("Parse cache valid")
|
||||
elif not value and self.parsecache_valid:
|
||||
bb.server.process.serverlog("Parse cache invalidated")
|
||||
self.parsecache_valid = value
|
||||
def setupParserWatcher(self):
|
||||
with bb.utils.lock_timeout(self.inotify_threadlock):
|
||||
if self.watcher:
|
||||
self.watcher.close()
|
||||
self.notifier = None
|
||||
self.watcher = None
|
||||
self.watcher = pyinotify.WatchManager()
|
||||
self.watcher.bbseen = set()
|
||||
self.watcher.bbwatchedfiles = set()
|
||||
self.notifier = pyinotify.Notifier(self.watcher, self.notifications)
|
||||
|
||||
def add_filewatch(self, deps, configwatcher=False):
|
||||
if configwatcher:
|
||||
watcher = self.configwatched
|
||||
else:
|
||||
watcher = self.parsewatched
|
||||
def process_inotify_updates(self):
|
||||
with bb.utils.lock_timeout(self.inotify_threadlock):
|
||||
for n in [self.confignotifier, self.notifier]:
|
||||
if n and n.check_events(timeout=0):
|
||||
# read notified events and enqueue them
|
||||
n.read_events()
|
||||
|
||||
def process_inotify_updates_apply(self):
|
||||
with bb.utils.lock_timeout(self.inotify_threadlock):
|
||||
for n in [self.confignotifier, self.notifier]:
|
||||
if n and n.check_events(timeout=0):
|
||||
n.read_events()
|
||||
n.process_events()
|
||||
|
||||
def config_notifications(self, event):
|
||||
if event.maskname == "IN_Q_OVERFLOW":
|
||||
bb.warn("inotify event queue overflowed, invalidating caches.")
|
||||
self.parsecache_valid = False
|
||||
self.baseconfig_valid = False
|
||||
bb.parse.clear_cache()
|
||||
return
|
||||
if not event.pathname in self.configwatcher.bbwatchedfiles:
|
||||
return
|
||||
if "IN_ISDIR" in event.maskname:
|
||||
if "IN_CREATE" in event.maskname or "IN_DELETE" in event.maskname:
|
||||
if event.pathname in self.configwatcher.bbseen:
|
||||
self.configwatcher.bbseen.remove(event.pathname)
|
||||
# Could remove all entries starting with the directory but for now...
|
||||
bb.parse.clear_cache()
|
||||
if not event.pathname in self.inotify_modified_files:
|
||||
self.inotify_modified_files.append(event.pathname)
|
||||
self.baseconfig_valid = False
|
||||
|
||||
def notifications(self, event):
|
||||
if event.maskname == "IN_Q_OVERFLOW":
|
||||
bb.warn("inotify event queue overflowed, invalidating caches.")
|
||||
self.parsecache_valid = False
|
||||
bb.parse.clear_cache()
|
||||
return
|
||||
if event.pathname.endswith("bitbake-cookerdaemon.log") \
|
||||
or event.pathname.endswith("bitbake.lock"):
|
||||
return
|
||||
if "IN_ISDIR" in event.maskname:
|
||||
if "IN_CREATE" in event.maskname or "IN_DELETE" in event.maskname:
|
||||
if event.pathname in self.watcher.bbseen:
|
||||
self.watcher.bbseen.remove(event.pathname)
|
||||
# Could remove all entries starting with the directory but for now...
|
||||
bb.parse.clear_cache()
|
||||
if not event.pathname in self.inotify_modified_files:
|
||||
self.inotify_modified_files.append(event.pathname)
|
||||
self.parsecache_valid = False
|
||||
|
||||
def add_filewatch(self, deps, watcher=None, dirs=False):
|
||||
if not watcher:
|
||||
watcher = self.watcher
|
||||
for i in deps:
|
||||
f = i[0]
|
||||
mtime = i[1]
|
||||
watcher[f] = mtime
|
||||
watcher.bbwatchedfiles.add(i[0])
|
||||
if dirs:
|
||||
f = i[0]
|
||||
else:
|
||||
f = os.path.dirname(i[0])
|
||||
if f in watcher.bbseen:
|
||||
continue
|
||||
watcher.bbseen.add(f)
|
||||
watchtarget = None
|
||||
while True:
|
||||
# We try and add watches for files that don't exist but if they did, would influence
|
||||
# the parser. The parent directory of these files may not exist, in which case we need
|
||||
# to watch any parent that does exist for changes.
|
||||
try:
|
||||
watcher.add_watch(f, self.watchmask, quiet=False)
|
||||
if watchtarget:
|
||||
watcher.bbwatchedfiles.add(watchtarget)
|
||||
break
|
||||
except pyinotify.WatchManagerError as e:
|
||||
if 'ENOENT' in str(e):
|
||||
watchtarget = f
|
||||
f = os.path.dirname(f)
|
||||
if f in watcher.bbseen:
|
||||
break
|
||||
watcher.bbseen.add(f)
|
||||
continue
|
||||
if 'ENOSPC' in str(e):
|
||||
providerlog.error("No space left on device or exceeds fs.inotify.max_user_watches?")
|
||||
providerlog.error("To check max_user_watches: sysctl -n fs.inotify.max_user_watches.")
|
||||
providerlog.error("To modify max_user_watches: sysctl -n -w fs.inotify.max_user_watches=<value>.")
|
||||
providerlog.error("Root privilege is required to modify max_user_watches.")
|
||||
raise
|
||||
|
||||
def handle_inotify_updates(self):
|
||||
# reload files for which we got notifications
|
||||
for p in self.inotify_modified_files:
|
||||
bb.parse.update_cache(p)
|
||||
if p in bb.parse.BBHandler.cached_statements:
|
||||
del bb.parse.BBHandler.cached_statements[p]
|
||||
self.inotify_modified_files = []
|
||||
|
||||
def sigterm_exception(self, signum, stackframe):
|
||||
if signum == signal.SIGTERM:
|
||||
@@ -251,7 +376,8 @@ class BBCooker:
|
||||
if mod not in self.orig_sysmodules:
|
||||
del sys.modules[mod]
|
||||
|
||||
self.configwatched = {}
|
||||
self.handle_inotify_updates()
|
||||
self.setupConfigWatcher()
|
||||
|
||||
# Need to preserve BB_CONSOLELOG over resets
|
||||
consolelog = None
|
||||
@@ -285,10 +411,6 @@ class BBCooker:
|
||||
self.data_hash = self.databuilder.data_hash
|
||||
self.extraconfigdata = {}
|
||||
|
||||
eventlog = self.data.getVar("BB_DEFAULT_EVENTLOG")
|
||||
if not self.configuration.writeeventlog and eventlog:
|
||||
self.setupEventLog(eventlog)
|
||||
|
||||
if consolelog:
|
||||
self.data.setVar("BB_CONSOLELOG", consolelog)
|
||||
|
||||
@@ -298,10 +420,10 @@ class BBCooker:
|
||||
self.disableDataTracking()
|
||||
|
||||
for mc in self.databuilder.mcdata.values():
|
||||
self.add_filewatch(mc.getVar("__base_depends", False), configwatcher=True)
|
||||
self.add_filewatch(mc.getVar("__base_depends", False), self.configwatcher)
|
||||
|
||||
self._baseconfig_set(True)
|
||||
self._parsecache_set(False)
|
||||
self.baseconfig_valid = True
|
||||
self.parsecache_valid = False
|
||||
|
||||
def handlePRServ(self):
|
||||
# Setup a PR Server based on the new configuration
|
||||
@@ -316,13 +438,13 @@ class BBCooker:
|
||||
dbfile = (self.data.getVar("PERSISTENT_DIR") or self.data.getVar("CACHE")) + "/hashserv.db"
|
||||
upstream = self.data.getVar("BB_HASHSERVE_UPSTREAM") or None
|
||||
if upstream:
|
||||
import socket
|
||||
try:
|
||||
with hashserv.create_client(upstream) as client:
|
||||
client.ping()
|
||||
except (ConnectionError, ImportError) as e:
|
||||
sock = socket.create_connection(upstream.split(":"), 5)
|
||||
sock.close()
|
||||
except socket.error as e:
|
||||
bb.warn("BB_HASHSERVE_UPSTREAM is not valid, unable to connect hash equivalence server at '%s': %s"
|
||||
% (upstream, repr(e)))
|
||||
upstream = None
|
||||
|
||||
self.hashservaddr = "unix://%s/hashserve.sock" % self.data.getVar("TOPDIR")
|
||||
self.hashserv = hashserv.create_server(
|
||||
@@ -331,7 +453,7 @@ class BBCooker:
|
||||
sync=False,
|
||||
upstream=upstream,
|
||||
)
|
||||
self.hashserv.serve_as_process(log_level=logging.WARNING)
|
||||
self.hashserv.serve_as_process()
|
||||
for mc in self.databuilder.mcdata:
|
||||
self.databuilder.mcorigdata[mc].setVar("BB_HASHSERVE", self.hashservaddr)
|
||||
self.databuilder.mcdata[mc].setVar("BB_HASHSERVE", self.hashservaddr)
|
||||
@@ -348,29 +470,6 @@ class BBCooker:
|
||||
if hasattr(self, "data"):
|
||||
self.data.disableTracking()
|
||||
|
||||
def revalidateCaches(self):
|
||||
bb.parse.clear_cache()
|
||||
|
||||
clean = True
|
||||
for f in self.configwatched:
|
||||
if not bb.parse.check_mtime(f, self.configwatched[f]):
|
||||
bb.server.process.serverlog("Found %s changed, invalid cache" % f)
|
||||
self._baseconfig_set(False)
|
||||
self._parsecache_set(False)
|
||||
clean = False
|
||||
break
|
||||
|
||||
if clean:
|
||||
for f in self.parsewatched:
|
||||
if not bb.parse.check_mtime(f, self.parsewatched[f]):
|
||||
bb.server.process.serverlog("Found %s changed, invalid cache" % f)
|
||||
self._parsecache_set(False)
|
||||
clean = False
|
||||
break
|
||||
|
||||
if not clean:
|
||||
bb.parse.BBHandler.cached_statements = {}
|
||||
|
||||
def parseConfiguration(self):
|
||||
self.updateCacheSync()
|
||||
|
||||
@@ -389,24 +488,8 @@ class BBCooker:
|
||||
self.recipecaches[mc] = bb.cache.CacheData(self.caches_array)
|
||||
|
||||
self.handleCollections(self.data.getVar("BBFILE_COLLECTIONS"))
|
||||
self.collections = {}
|
||||
for mc in self.multiconfigs:
|
||||
self.collections[mc] = CookerCollectFiles(self.bbfile_config_priorities, mc)
|
||||
|
||||
self._parsecache_set(False)
|
||||
|
||||
def setupEventLog(self, eventlog):
|
||||
if self.eventlog and self.eventlog[0] != eventlog:
|
||||
bb.event.unregister_UIHhandler(self.eventlog[1])
|
||||
self.eventlog = None
|
||||
if not self.eventlog or self.eventlog[0] != eventlog:
|
||||
# we log all events to a file if so directed
|
||||
# register the log file writer as UI Handler
|
||||
if not os.path.exists(os.path.dirname(eventlog)):
|
||||
bb.utils.mkdirhier(os.path.dirname(eventlog))
|
||||
writer = EventWriter(self, eventlog)
|
||||
EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event'])
|
||||
self.eventlog = (eventlog, bb.event.register_UIHhandler(EventLogWriteHandler(writer)), writer)
|
||||
self.parsecache_valid = False
|
||||
|
||||
def updateConfigOpts(self, options, environment, cmdline):
|
||||
self.ui_cmdline = cmdline
|
||||
@@ -427,7 +510,14 @@ class BBCooker:
|
||||
setattr(self.configuration, o, options[o])
|
||||
|
||||
if self.configuration.writeeventlog:
|
||||
self.setupEventLog(self.configuration.writeeventlog)
|
||||
if self.eventlog and self.eventlog[0] != self.configuration.writeeventlog:
|
||||
bb.event.unregister_UIHhandler(self.eventlog[1])
|
||||
if not self.eventlog or self.eventlog[0] != self.configuration.writeeventlog:
|
||||
# we log all events to a file if so directed
|
||||
# register the log file writer as UI Handler
|
||||
writer = EventWriter(self, self.configuration.writeeventlog)
|
||||
EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event'])
|
||||
self.eventlog = (self.configuration.writeeventlog, bb.event.register_UIHhandler(EventLogWriteHandler(writer)))
|
||||
|
||||
bb.msg.loggerDefaultLogLevel = self.configuration.default_loglevel
|
||||
bb.msg.loggerDefaultDomains = self.configuration.debug_domains
|
||||
@@ -457,7 +547,6 @@ class BBCooker:
|
||||
# Now update all the variables not in the datastore to match
|
||||
self.configuration.env = environment
|
||||
|
||||
self.revalidateCaches()
|
||||
if not clean:
|
||||
logger.debug("Base environment change, triggering reparse")
|
||||
self.reset()
|
||||
@@ -535,14 +624,13 @@ class BBCooker:
|
||||
|
||||
if fn:
|
||||
try:
|
||||
layername = self.collections[mc].calc_bbfile_priority(fn)[2]
|
||||
envdata = self.databuilder.parseRecipe(fn, self.collections[mc].get_file_appends(fn), layername)
|
||||
envdata = self.databuilder.parseRecipe(fn, self.collections[mc].get_file_appends(fn))
|
||||
except Exception as e:
|
||||
parselog.exception("Unable to read %s", fn)
|
||||
raise
|
||||
else:
|
||||
if not mc in self.databuilder.mcdata:
|
||||
bb.fatal('No multiconfig named "%s" found' % mc)
|
||||
bb.fatal('Not multiconfig named "%s" found' % mc)
|
||||
envdata = self.databuilder.mcdata[mc]
|
||||
data.expandKeys(envdata)
|
||||
parse.ast.runAnonFuncs(envdata)
|
||||
@@ -613,8 +701,8 @@ class BBCooker:
|
||||
localdata = {}
|
||||
|
||||
for mc in self.multiconfigs:
|
||||
taskdata[mc] = bb.taskdata.TaskData(halt, skiplist=self.skiplist_by_mc[mc], allowincomplete=allowincomplete)
|
||||
localdata[mc] = bb.data.createCopy(self.databuilder.mcdata[mc])
|
||||
taskdata[mc] = bb.taskdata.TaskData(halt, skiplist=self.skiplist, allowincomplete=allowincomplete)
|
||||
localdata[mc] = data.createCopy(self.databuilder.mcdata[mc])
|
||||
bb.data.expandKeys(localdata[mc])
|
||||
|
||||
current = 0
|
||||
@@ -934,7 +1022,7 @@ class BBCooker:
|
||||
for mc in self.multiconfigs:
|
||||
# First get list of recipes, including skipped
|
||||
recipefns = list(self.recipecaches[mc].pkg_fn.keys())
|
||||
recipefns.extend(self.skiplist_by_mc[mc].keys())
|
||||
recipefns.extend(self.skiplist.keys())
|
||||
|
||||
# Work out list of bbappends that have been applied
|
||||
applied_appends = []
|
||||
@@ -1274,8 +1362,8 @@ class BBCooker:
|
||||
if bf.startswith("/") or bf.startswith("../"):
|
||||
bf = os.path.abspath(bf)
|
||||
|
||||
collections = {mc: CookerCollectFiles(self.bbfile_config_priorities, mc)}
|
||||
filelist, masked, searchdirs = collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc])
|
||||
self.collections = {mc: CookerCollectFiles(self.bbfile_config_priorities, mc)}
|
||||
filelist, masked, searchdirs = self.collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc])
|
||||
try:
|
||||
os.stat(bf)
|
||||
bf = os.path.abspath(bf)
|
||||
@@ -1341,8 +1429,7 @@ class BBCooker:
|
||||
|
||||
bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.data_hash, self.caches_array)
|
||||
|
||||
layername = self.collections[mc].calc_bbfile_priority(fn)[2]
|
||||
infos = bb_caches[mc].parse(fn, self.collections[mc].get_file_appends(fn), layername)
|
||||
infos = bb_caches[mc].parse(fn, self.collections[mc].get_file_appends(fn))
|
||||
infos = dict(infos)
|
||||
|
||||
fn = bb.cache.realfn2virtual(fn, cls, mc)
|
||||
@@ -1387,8 +1474,6 @@ class BBCooker:
|
||||
buildname = self.databuilder.mcdata[mc].getVar("BUILDNAME")
|
||||
if fireevents:
|
||||
bb.event.fire(bb.event.BuildStarted(buildname, [item]), self.databuilder.mcdata[mc])
|
||||
if self.eventlog:
|
||||
self.eventlog[2].write_variables()
|
||||
bb.event.enable_heartbeat()
|
||||
|
||||
# Execute the runqueue
|
||||
@@ -1424,7 +1509,7 @@ class BBCooker:
|
||||
bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, item, failures, interrupted), self.databuilder.mcdata[mc])
|
||||
bb.event.disable_heartbeat()
|
||||
# We trashed self.recipecaches above
|
||||
self._parsecache_set(False)
|
||||
self.parsecache_valid = False
|
||||
self.configuration.limited_deps = False
|
||||
bb.parse.siggen.reset(self.data)
|
||||
if quietlog:
|
||||
@@ -1436,36 +1521,6 @@ class BBCooker:
|
||||
|
||||
self.idleCallBackRegister(buildFileIdle, rq)
|
||||
|
||||
def getTaskSignatures(self, target, tasks):
|
||||
sig = []
|
||||
getAllTaskSignatures = False
|
||||
|
||||
if not tasks:
|
||||
tasks = ["do_build"]
|
||||
getAllTaskSignatures = True
|
||||
|
||||
for task in tasks:
|
||||
taskdata, runlist = self.buildTaskData(target, task, self.configuration.halt)
|
||||
rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
|
||||
rq.rqdata.prepare()
|
||||
|
||||
for l in runlist:
|
||||
mc, pn, taskname, fn = l
|
||||
|
||||
taskdep = rq.rqdata.dataCaches[mc].task_deps[fn]
|
||||
for t in taskdep['tasks']:
|
||||
if t in taskdep['nostamp'] or "setscene" in t:
|
||||
continue
|
||||
tid = bb.runqueue.build_tid(mc, fn, t)
|
||||
|
||||
if t in task or getAllTaskSignatures:
|
||||
try:
|
||||
sig.append([pn, t, rq.rqdata.get_task_unihash(tid)])
|
||||
except KeyError:
|
||||
sig.append(self.getTaskSignatures(target, [t])[0])
|
||||
|
||||
return sig
|
||||
|
||||
def buildTargets(self, targets, task):
|
||||
"""
|
||||
Attempt to build the targets specified
|
||||
@@ -1531,8 +1586,6 @@ class BBCooker:
|
||||
|
||||
for mc in self.multiconfigs:
|
||||
bb.event.fire(bb.event.BuildStarted(buildname, ntargets), self.databuilder.mcdata[mc])
|
||||
if self.eventlog:
|
||||
self.eventlog[2].write_variables()
|
||||
bb.event.enable_heartbeat()
|
||||
|
||||
rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
|
||||
@@ -1543,13 +1596,7 @@ class BBCooker:
|
||||
|
||||
|
||||
def getAllKeysWithFlags(self, flaglist):
|
||||
def dummy_autorev(d):
|
||||
return
|
||||
|
||||
dump = {}
|
||||
# Horrible but for now we need to avoid any sideeffects of autorev being called
|
||||
saved = bb.fetch2.get_autorev
|
||||
bb.fetch2.get_autorev = dummy_autorev
|
||||
for k in self.data.keys():
|
||||
try:
|
||||
expand = True
|
||||
@@ -1569,7 +1616,6 @@ class BBCooker:
|
||||
dump[k][d] = None
|
||||
except Exception as e:
|
||||
print(e)
|
||||
bb.fetch2.get_autorev = saved
|
||||
return dump
|
||||
|
||||
|
||||
@@ -1577,6 +1623,8 @@ class BBCooker:
|
||||
if self.state == state.running:
|
||||
return
|
||||
|
||||
self.handle_inotify_updates()
|
||||
|
||||
if not self.baseconfig_valid:
|
||||
logger.debug("Reloading base configuration data")
|
||||
self.initConfigurationData()
|
||||
@@ -1597,8 +1645,7 @@ class BBCooker:
|
||||
self.updateCacheSync()
|
||||
|
||||
if self.state != state.parsing and not self.parsecache_valid:
|
||||
bb.server.process.serverlog("Parsing started")
|
||||
self.parsewatched = {}
|
||||
self.setupParserWatcher()
|
||||
|
||||
bb.parse.siggen.reset(self.data)
|
||||
self.parseConfiguration ()
|
||||
@@ -1613,22 +1660,25 @@ class BBCooker:
|
||||
for dep in self.configuration.extra_assume_provided:
|
||||
self.recipecaches[mc].ignored_dependencies.add(dep)
|
||||
|
||||
self.collections = {}
|
||||
|
||||
mcfilelist = {}
|
||||
total_masked = 0
|
||||
searchdirs = set()
|
||||
for mc in self.multiconfigs:
|
||||
self.collections[mc] = CookerCollectFiles(self.bbfile_config_priorities, mc)
|
||||
(filelist, masked, search) = self.collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc])
|
||||
|
||||
mcfilelist[mc] = filelist
|
||||
total_masked += masked
|
||||
searchdirs |= set(search)
|
||||
|
||||
# Add mtimes for directories searched for bb/bbappend files
|
||||
# Add inotify watches for directories searched for bb/bbappend files
|
||||
for dirent in searchdirs:
|
||||
self.add_filewatch([(dirent, bb.parse.cached_mtime_noerror(dirent))])
|
||||
self.add_filewatch([[dirent]], dirs=True)
|
||||
|
||||
self.parser = CookerParser(self, mcfilelist, total_masked)
|
||||
self._parsecache_set(True)
|
||||
self.parsecache_valid = True
|
||||
|
||||
self.state = state.parsing
|
||||
|
||||
@@ -1746,7 +1796,8 @@ class BBCooker:
|
||||
self.data = self.databuilder.data
|
||||
# In theory tinfoil could have modified the base data before parsing,
|
||||
# ideally need to track if anything did modify the datastore
|
||||
self._parsecache_set(False)
|
||||
self.parsecache_valid = False
|
||||
|
||||
|
||||
class CookerExit(bb.event.Event):
|
||||
"""
|
||||
@@ -1767,10 +1818,10 @@ class CookerCollectFiles(object):
|
||||
self.bbfile_config_priorities = sorted(priorities, key=lambda tup: tup[1], reverse=True)
|
||||
|
||||
def calc_bbfile_priority(self, filename):
|
||||
for layername, _, regex, pri in self.bbfile_config_priorities:
|
||||
for _, _, regex, pri in self.bbfile_config_priorities:
|
||||
if regex.match(filename):
|
||||
return pri, regex, layername
|
||||
return 0, None, None
|
||||
return pri, regex
|
||||
return 0, None
|
||||
|
||||
def get_bbfiles(self):
|
||||
"""Get list of default .bb files by reading out the current directory"""
|
||||
@@ -1789,7 +1840,7 @@ class CookerCollectFiles(object):
|
||||
for ignored in ('SCCS', 'CVS', '.svn'):
|
||||
if ignored in dirs:
|
||||
dirs.remove(ignored)
|
||||
found += [os.path.join(dir, f) for f in files if (f.endswith(('.bb', '.bbappend')))]
|
||||
found += [os.path.join(dir, f) for f in files if (f.endswith(['.bb', '.bbappend']))]
|
||||
|
||||
return found
|
||||
|
||||
@@ -1812,7 +1863,7 @@ class CookerCollectFiles(object):
|
||||
collectlog.error("no recipe files to build, check your BBPATH and BBFILES?")
|
||||
bb.event.fire(CookerExit(), eventdata)
|
||||
|
||||
# We need to track where we look so that we can know when the cache is invalid. There
|
||||
# We need to track where we look so that we can add inotify watches. There
|
||||
# is no nice way to do this, this is horrid. We intercept the os.listdir()
|
||||
# (or os.scandir() for python 3.6+) calls while we run glob().
|
||||
origlistdir = os.listdir
|
||||
@@ -1943,7 +1994,7 @@ class CookerCollectFiles(object):
|
||||
# Calculate priorities for each file
|
||||
for p in pkgfns:
|
||||
realfn, cls, mc = bb.cache.virtualfn2realfn(p)
|
||||
priorities[p], regex, _ = self.calc_bbfile_priority(realfn)
|
||||
priorities[p], regex = self.calc_bbfile_priority(realfn)
|
||||
if regex in unmatched_regex:
|
||||
matched_regex.add(regex)
|
||||
unmatched_regex.remove(regex)
|
||||
@@ -2080,7 +2131,7 @@ class Parser(multiprocessing.Process):
|
||||
self.results.close()
|
||||
self.results.join_thread()
|
||||
|
||||
def parse(self, mc, cache, filename, appends, layername):
|
||||
def parse(self, mc, cache, filename, appends):
|
||||
try:
|
||||
origfilter = bb.event.LogHandler.filter
|
||||
# Record the filename we're parsing into any events generated
|
||||
@@ -2094,10 +2145,11 @@ class Parser(multiprocessing.Process):
|
||||
bb.event.set_class_handlers(self.handlers.copy())
|
||||
bb.event.LogHandler.filter = parse_filter
|
||||
|
||||
return True, mc, cache.parse(filename, appends, layername)
|
||||
return True, mc, cache.parse(filename, appends)
|
||||
except Exception as exc:
|
||||
tb = sys.exc_info()[2]
|
||||
exc.recipe = filename
|
||||
exc.traceback = list(bb.exceptions.extract_traceback(tb, context=3))
|
||||
return True, None, exc
|
||||
# Need to turn BaseExceptions into Exceptions here so we gracefully shutdown
|
||||
# and for example a worker thread doesn't just exit on its own in response to
|
||||
@@ -2133,11 +2185,10 @@ class CookerParser(object):
|
||||
for mc in self.cooker.multiconfigs:
|
||||
for filename in self.mcfilelist[mc]:
|
||||
appends = self.cooker.collections[mc].get_file_appends(filename)
|
||||
layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2]
|
||||
if not self.bb_caches[mc].cacheValid(filename, appends):
|
||||
self.willparse.add((mc, self.bb_caches[mc], filename, appends, layername))
|
||||
self.willparse.add((mc, self.bb_caches[mc], filename, appends))
|
||||
else:
|
||||
self.fromcache.add((mc, self.bb_caches[mc], filename, appends, layername))
|
||||
self.fromcache.add((mc, self.bb_caches[mc], filename, appends))
|
||||
|
||||
self.total = len(self.fromcache) + len(self.willparse)
|
||||
self.toparse = len(self.willparse)
|
||||
@@ -2248,7 +2299,7 @@ class CookerParser(object):
|
||||
self.syncthread.join()
|
||||
|
||||
def load_cached(self):
|
||||
for mc, cache, filename, appends, layername in self.fromcache:
|
||||
for mc, cache, filename, appends in self.fromcache:
|
||||
infos = cache.loadCached(filename, appends)
|
||||
yield False, mc, infos
|
||||
|
||||
@@ -2298,12 +2349,8 @@ class CookerParser(object):
|
||||
return False
|
||||
except ParsingFailure as exc:
|
||||
self.error += 1
|
||||
|
||||
exc_desc = str(exc)
|
||||
if isinstance(exc, SystemExit) and not isinstance(exc.code, str):
|
||||
exc_desc = 'Exited with "%d"' % exc.code
|
||||
|
||||
logger.error('Unable to parse %s: %s' % (exc.recipe, exc_desc))
|
||||
logger.error('Unable to parse %s: %s' %
|
||||
(exc.recipe, bb.exceptions.to_string(exc.realexception)))
|
||||
self.shutdown(clean=False)
|
||||
return False
|
||||
except bb.parse.ParseError as exc:
|
||||
@@ -2312,33 +2359,20 @@ class CookerParser(object):
|
||||
self.shutdown(clean=False, eventmsg=str(exc))
|
||||
return False
|
||||
except bb.data_smart.ExpansionError as exc:
|
||||
def skip_frames(f, fn_prefix):
|
||||
while f and f.tb_frame.f_code.co_filename.startswith(fn_prefix):
|
||||
f = f.tb_next
|
||||
return f
|
||||
|
||||
self.error += 1
|
||||
bbdir = os.path.dirname(__file__) + os.sep
|
||||
etype, value, tb = sys.exc_info()
|
||||
|
||||
# Remove any frames where the code comes from bitbake. This
|
||||
# prevents deep (and pretty useless) backtraces for expansion error
|
||||
tb = skip_frames(tb, bbdir)
|
||||
cur = tb
|
||||
while cur:
|
||||
cur.tb_next = skip_frames(cur.tb_next, bbdir)
|
||||
cur = cur.tb_next
|
||||
|
||||
etype, value, _ = sys.exc_info()
|
||||
tb = list(itertools.dropwhile(lambda e: e.filename.startswith(bbdir), exc.traceback))
|
||||
logger.error('ExpansionError during parsing %s', value.recipe,
|
||||
exc_info=(etype, value, tb))
|
||||
self.shutdown(clean=False)
|
||||
return False
|
||||
except Exception as exc:
|
||||
self.error += 1
|
||||
_, value, _ = sys.exc_info()
|
||||
etype, value, tb = sys.exc_info()
|
||||
if hasattr(value, "recipe"):
|
||||
logger.error('Unable to parse %s' % value.recipe,
|
||||
exc_info=sys.exc_info())
|
||||
exc_info=(etype, value, exc.traceback))
|
||||
else:
|
||||
# Most likely, an exception occurred during raising an exception
|
||||
import traceback
|
||||
@@ -2359,7 +2393,7 @@ class CookerParser(object):
|
||||
for virtualfn, info_array in result:
|
||||
if info_array[0].skipped:
|
||||
self.skipped += 1
|
||||
self.cooker.skiplist_by_mc[mc][virtualfn] = SkippedPackage(info_array[0])
|
||||
self.cooker.skiplist[virtualfn] = SkippedPackage(info_array[0])
|
||||
self.bb_caches[mc].add_info(virtualfn, info_array, self.cooker.recipecaches[mc],
|
||||
parsed=parsed, watcher = self.cooker.add_filewatch)
|
||||
return True
|
||||
@@ -2368,10 +2402,9 @@ class CookerParser(object):
|
||||
bb.cache.SiggenRecipeInfo.reset()
|
||||
to_reparse = set()
|
||||
for mc in self.cooker.multiconfigs:
|
||||
layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2]
|
||||
to_reparse.add((mc, filename, self.cooker.collections[mc].get_file_appends(filename), layername))
|
||||
to_reparse.add((mc, filename, self.cooker.collections[mc].get_file_appends(filename)))
|
||||
|
||||
for mc, filename, appends, layername in to_reparse:
|
||||
infos = self.bb_caches[mc].parse(filename, appends, layername)
|
||||
for mc, filename, appends in to_reparse:
|
||||
infos = self.bb_caches[mc].parse(filename, appends)
|
||||
for vfn, info_array in infos:
|
||||
self.cooker.recipecaches[mc].add_from_recipeinfo(vfn, info_array)
|
||||
|
||||
@@ -494,19 +494,18 @@ class CookerDataBuilder(object):
|
||||
return data
|
||||
|
||||
@staticmethod
|
||||
def _parse_recipe(bb_data, bbfile, appends, mc, layername):
|
||||
def _parse_recipe(bb_data, bbfile, appends, mc=''):
|
||||
bb_data.setVar("__BBMULTICONFIG", mc)
|
||||
bb_data.setVar("FILE_LAYERNAME", layername)
|
||||
|
||||
bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
|
||||
bb.parse.cached_mtime_noerror(bbfile_loc)
|
||||
|
||||
if appends:
|
||||
bb_data.setVar('__BBAPPEND', " ".join(appends))
|
||||
bb_data = bb.parse.handle(bbfile, bb_data)
|
||||
return bb_data
|
||||
|
||||
return bb.parse.handle(bbfile, bb_data)
|
||||
|
||||
def parseRecipeVariants(self, bbfile, appends, virtonly=False, mc=None, layername=None):
|
||||
def parseRecipeVariants(self, bbfile, appends, virtonly=False, mc=None):
|
||||
"""
|
||||
Load and parse one .bb build file
|
||||
Return the data and whether parsing resulted in the file being skipped
|
||||
@@ -516,31 +515,32 @@ class CookerDataBuilder(object):
|
||||
(bbfile, virtual, mc) = bb.cache.virtualfn2realfn(bbfile)
|
||||
bb_data = self.mcdata[mc].createCopy()
|
||||
bb_data.setVar("__ONLYFINALISE", virtual or "default")
|
||||
return self._parse_recipe(bb_data, bbfile, appends, mc, layername)
|
||||
datastores = self._parse_recipe(bb_data, bbfile, appends, mc)
|
||||
return datastores
|
||||
|
||||
if mc is not None:
|
||||
bb_data = self.mcdata[mc].createCopy()
|
||||
return self._parse_recipe(bb_data, bbfile, appends, mc, layername)
|
||||
return self._parse_recipe(bb_data, bbfile, appends, mc)
|
||||
|
||||
bb_data = self.data.createCopy()
|
||||
datastores = self._parse_recipe(bb_data, bbfile, appends, '', layername)
|
||||
datastores = self._parse_recipe(bb_data, bbfile, appends)
|
||||
|
||||
for mc in self.mcdata:
|
||||
if not mc:
|
||||
continue
|
||||
bb_data = self.mcdata[mc].createCopy()
|
||||
newstores = self._parse_recipe(bb_data, bbfile, appends, mc, layername)
|
||||
newstores = self._parse_recipe(bb_data, bbfile, appends, mc)
|
||||
for ns in newstores:
|
||||
datastores["mc:%s:%s" % (mc, ns)] = newstores[ns]
|
||||
|
||||
return datastores
|
||||
|
||||
def parseRecipe(self, virtualfn, appends, layername):
|
||||
def parseRecipe(self, virtualfn, appends):
|
||||
"""
|
||||
Return a complete set of data for fn.
|
||||
To do this, we need to parse the file.
|
||||
"""
|
||||
logger.debug("Parsing %s (full)" % virtualfn)
|
||||
(fn, virtual, mc) = bb.cache.virtualfn2realfn(virtualfn)
|
||||
datastores = self.parseRecipeVariants(virtualfn, appends, virtonly=True, layername=layername)
|
||||
return datastores[virtual]
|
||||
bb_data = self.parseRecipeVariants(virtualfn, appends, virtonly=True)
|
||||
return bb_data[virtual]
|
||||
|
||||
@@ -285,7 +285,6 @@ def build_dependencies(key, keys, mod_funcs, shelldeps, varflagsexcl, ignored_va
|
||||
value += "\n_remove of %s" % r
|
||||
deps |= r2.references
|
||||
deps = deps | (keys & r2.execs)
|
||||
value = handle_contains(value, r2.contains, exclusions, d)
|
||||
return value
|
||||
|
||||
deps = set()
|
||||
@@ -293,7 +292,7 @@ def build_dependencies(key, keys, mod_funcs, shelldeps, varflagsexcl, ignored_va
|
||||
if key in mod_funcs:
|
||||
exclusions = set()
|
||||
moddep = bb.codeparser.modulecode_deps[key]
|
||||
value = handle_contains(moddep[4], moddep[3], exclusions, d)
|
||||
value = handle_contains("", moddep[3], exclusions, d)
|
||||
return frozenset((moddep[0] | keys & moddep[1]) - ignored_vars), value
|
||||
|
||||
if key[-1] == ']':
|
||||
|
||||
@@ -16,10 +16,7 @@ BitBake build tools.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import builtins
import copy
import re
import sys
import copy, re, sys, traceback
from collections.abc import MutableMapping
import logging
import hashlib
@@ -31,7 +28,7 @@ logger = logging.getLogger("BitBake.Data")
|
||||
|
||||
__setvar_keyword__ = [":append", ":prepend", ":remove"]
|
||||
__setvar_regexp__ = re.compile(r'(?P<base>.*?)(?P<keyword>:append|:prepend|:remove)(:(?P<add>[^A-Z]*))?$')
|
||||
__expand_var_regexp__ = re.compile(r"\${[a-zA-Z0-9\-_+./~:]+}")
|
||||
__expand_var_regexp__ = re.compile(r"\${[a-zA-Z0-9\-_+./~:]+?}")
|
||||
__expand_python_regexp__ = re.compile(r"\${@(?:{.*?}|.)+?}")
|
||||
__whitespace_split__ = re.compile(r'(\s)')
|
||||
__override_regexp__ = re.compile(r'[a-z0-9]+')
|
||||
@@ -153,21 +150,19 @@ class VariableParse:
|
||||
value = utils.better_eval(codeobj, DataContext(self.d), {'d' : self.d})
|
||||
return str(value)
|
||||
|
||||
class DataContext(dict):
|
||||
excluded = set([i for i in dir(builtins) if not i.startswith('_')] + ['oe'])
|
||||
|
||||
class DataContext(dict):
|
||||
def __init__(self, metadata, **kwargs):
|
||||
self.metadata = metadata
|
||||
dict.__init__(self, **kwargs)
|
||||
self['d'] = metadata
|
||||
self.context = set(bb.utils.get_context())
|
||||
|
||||
def __missing__(self, key):
|
||||
if key in self.excluded or key in self.context:
|
||||
# Skip commonly accessed invalid variables
|
||||
if key in ['bb', 'oe', 'int', 'bool', 'time', 'str', 'os']:
|
||||
raise KeyError(key)
|
||||
|
||||
value = self.metadata.getVar(key)
|
||||
if value is None:
|
||||
if value is None or self.metadata.getVarFlag(key, 'func', False):
|
||||
raise KeyError(key)
|
||||
else:
|
||||
return value
|
||||
@@ -272,9 +267,12 @@ class VariableHistory(object):
|
||||
return
|
||||
if 'op' not in loginfo or not loginfo['op']:
|
||||
loginfo['op'] = 'set'
|
||||
if 'detail' in loginfo:
|
||||
loginfo['detail'] = str(loginfo['detail'])
|
||||
if 'variable' not in loginfo or 'file' not in loginfo:
|
||||
raise ValueError("record() missing variable or file.")
|
||||
var = loginfo['variable']
|
||||
|
||||
if var not in self.variables:
|
||||
self.variables[var] = []
|
||||
if not isinstance(self.variables[var], list):
|
||||
@@ -333,8 +331,7 @@ class VariableHistory(object):
|
||||
flag = '[%s] ' % (event['flag'])
|
||||
else:
|
||||
flag = ''
|
||||
o.write("# %s %s:%s%s\n# %s\"%s\"\n" % \
|
||||
(event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', str(event['detail']))))
|
||||
o.write("# %s %s:%s%s\n# %s\"%s\"\n" % (event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', event['detail'])))
|
||||
if len(history) > 1:
|
||||
o.write("# pre-expansion value:\n")
|
||||
o.write('# "%s"\n' % (commentVal))
|
||||
@@ -388,7 +385,7 @@ class VariableHistory(object):
|
||||
if isset and event['op'] == 'set?':
|
||||
continue
|
||||
isset = True
|
||||
items = d.expand(str(event['detail'])).split()
|
||||
items = d.expand(event['detail']).split()
|
||||
for item in items:
|
||||
# This is a little crude but is belt-and-braces to avoid us
|
||||
# having to handle every possible operation type specifically
|
||||
@@ -580,9 +577,12 @@ class DataSmart(MutableMapping):
|
||||
else:
|
||||
loginfo['op'] = keyword
|
||||
self.varhistory.record(**loginfo)
|
||||
# todo make sure keyword is not __doc__ or __module__
|
||||
# pay the cookie monster
|
||||
|
||||
# more cookies for the cookie monster
|
||||
self._setvar_update_overrides(base, **loginfo)
|
||||
if ':' in var:
|
||||
self._setvar_update_overrides(base, **loginfo)
|
||||
|
||||
if base in self.overridevars:
|
||||
self._setvar_update_overridevars(var, value)
|
||||
@@ -635,7 +635,6 @@ class DataSmart(MutableMapping):
|
||||
nextnew.update(vardata.contains.keys())
|
||||
new = nextnew
|
||||
self.overrides = None
|
||||
self.expand_cache = {}
|
||||
|
||||
def _setvar_update_overrides(self, var, **loginfo):
|
||||
# aka pay the cookie monster
|
||||
|
||||
@@ -19,6 +19,7 @@ import sys
|
||||
import threading
|
||||
import traceback
|
||||
|
||||
import bb.exceptions
|
||||
import bb.utils
|
||||
|
||||
# This is the pid for which we should generate the event. This is set when
|
||||
@@ -256,15 +257,14 @@ def register(name, handler, mask=None, filename=None, lineno=None, data=None):
|
||||
# handle string containing python code
|
||||
if isinstance(handler, str):
|
||||
tmp = "def %s(e, d):\n%s" % (name, handler)
|
||||
# Inject empty lines to make code match lineno in filename
|
||||
if lineno is not None:
|
||||
tmp = "\n" * (lineno-1) + tmp
|
||||
try:
|
||||
code = bb.methodpool.compile_cache(tmp)
|
||||
if not code:
|
||||
if filename is None:
|
||||
filename = "%s(e, d)" % name
|
||||
code = compile(tmp, filename, "exec", ast.PyCF_ONLY_AST)
|
||||
if lineno is not None:
|
||||
ast.increment_lineno(code, lineno-1)
|
||||
code = compile(code, filename, "exec")
|
||||
bb.methodpool.compile_cache_add(tmp, code)
|
||||
except SyntaxError:
|
||||
@@ -758,7 +758,13 @@ class LogHandler(logging.Handler):

    def emit(self, record):
        if record.exc_info:
            record.bb_exc_formatted = traceback.format_exception(*record.exc_info)
            etype, value, tb = record.exc_info
            if hasattr(tb, 'tb_next'):
                tb = list(bb.exceptions.extract_traceback(tb, context=3))
            # Need to turn the value into something the logging system can pickle
            record.bb_exc_info = (etype, value, tb)
            record.bb_exc_formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
            value = str(value)
            record.exc_info = None
        fire(record, None)

@@ -851,14 +857,6 @@ class FindSigInfoResult(Event):
|
||||
Event.__init__(self)
|
||||
self.result = result
|
||||
|
||||
class GetTaskSignatureResult(Event):
|
||||
"""
|
||||
Event to return results from GetTaskSignatures command
|
||||
"""
|
||||
def __init__(self, sig):
|
||||
Event.__init__(self)
|
||||
self.sig = sig
|
||||
|
||||
class ParseError(Event):
|
||||
"""
|
||||
Event to indicate parse failed
|
||||
|
||||
96
bitbake/lib/bb/exceptions.py
Normal file
@@ -0,0 +1,96 @@
#
# Copyright BitBake Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#

import inspect
import traceback
import bb.namedtuple_with_abc
from collections import namedtuple


class TracebackEntry(namedtuple.abc):
    """Pickleable representation of a traceback entry"""
    _fields = 'filename lineno function args code_context index'
    _header = '  File "{0.filename}", line {0.lineno}, in {0.function}{0.args}'

    def format(self, formatter=None):
        if not self.code_context:
            return self._header.format(self) + '\n'

        formatted = [self._header.format(self) + ':\n']

        for lineindex, line in enumerate(self.code_context):
            if formatter:
                line = formatter(line)

            if lineindex == self.index:
                formatted.append('    >%s' % line)
            else:
                formatted.append('     %s' % line)
        return formatted

    def __str__(self):
        return ''.join(self.format())

def _get_frame_args(frame):
    """Get the formatted arguments and class (if available) for a frame"""
    arginfo = inspect.getargvalues(frame)

    try:
        if not arginfo.args:
            return '', None
    # There have been reports from the field of python 2.6 which doesn't
    # return a namedtuple here but simply a tuple so fallback gracefully if
    # args isn't present.
    except AttributeError:
        return '', None

    firstarg = arginfo.args[0]
    if firstarg == 'self':
        self = arginfo.locals['self']
        cls = self.__class__.__name__

        arginfo.args.pop(0)
        del arginfo.locals['self']
    else:
        cls = None

    formatted = inspect.formatargvalues(*arginfo)
    return formatted, cls

def extract_traceback(tb, context=1):
    frames = inspect.getinnerframes(tb, context)
    for frame, filename, lineno, function, code_context, index in frames:
        formatted_args, cls = _get_frame_args(frame)
        if cls:
            function = '%s.%s' % (cls, function)
        yield TracebackEntry(filename, lineno, function, formatted_args,
                             code_context, index)

def format_extracted(extracted, formatter=None, limit=None):
    if limit:
        extracted = extracted[-limit:]

    formatted = []
    for tracebackinfo in extracted:
        formatted.extend(tracebackinfo.format(formatter))
    return formatted


def format_exception(etype, value, tb, context=1, limit=None, formatter=None):
    formatted = ['Traceback (most recent call last):\n']

    if hasattr(tb, 'tb_next'):
        tb = extract_traceback(tb, context)

    formatted.extend(format_extracted(tb, formatter, limit))
    formatted.extend(traceback.format_exception_only(etype, value))
    return formatted

def to_string(exc):
    if isinstance(exc, SystemExit):
        if not isinstance(exc.code, str):
            return 'Exited with "%d"' % exc.code
    return str(exc)
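Below is a minimal, hedged usage sketch for the bb/exceptions.py helpers shown above, mirroring the way the bb/event.py hunk earlier in this diff consumes them; it assumes bitbake's lib/ directory is importable, and everything else in it is illustrative only.

# Hedged usage sketch for bb.exceptions (assumes bitbake's lib/ is on sys.path).
import sys
import bb.exceptions

try:
    raise ValueError("example failure")
except Exception:
    etype, value, tb = sys.exc_info()
    # Mirror bb/event.py: convert the live traceback into pickleable
    # TracebackEntry records, then format only the last few frames.
    entries = list(bb.exceptions.extract_traceback(tb, context=3))
    for line in bb.exceptions.format_exception(etype, value, entries, limit=5):
        sys.stderr.write(line)
    # to_string() special-cases SystemExit so bare exit codes print readably.
    print(bb.exceptions.to_string(value))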
@@ -290,12 +290,12 @@ class URI(object):

    def _param_str_split(self, string, elmdelim, kvdelim="="):
        ret = collections.OrderedDict()
        for k, v in [x.split(kvdelim, 1) if kvdelim in x else (x, None) for x in string.split(elmdelim) if x]:
        for k, v in [x.split(kvdelim, 1) for x in string.split(elmdelim) if x]:
            ret[k] = v
        return ret

    def _param_str_join(self, dict_, elmdelim, kvdelim="="):
        return elmdelim.join([kvdelim.join([k, v]) if v else k for k, v in dict_.items()])
        return elmdelim.join([kvdelim.join([k, v]) for k, v in dict_.items()])

    @property
    def hostport(self):
@@ -388,7 +388,7 @@ def decodeurl(url):
        if s:
            if not '=' in s:
                raise MalformedUrl(url, "The URL: '%s' is invalid: parameter %s does not specify a value (missing '=')" % (url, s))
            s1, s2 = s.split('=', 1)
            s1, s2 = s.split('=')
            p[s1] = s2

    return type, host, urllib.parse.unquote(path), user, pswd, p
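Both hunks above deal with splitting the ;key=value parameters carried on fetcher URLs; a small, hedged illustration of the resulting structure (the URL is made up):

    from bb.fetch2 import decodeurl

    url = "git://git.example.com/repo.git;protocol=https;nobranch=1"
    scheme, host, path, user, pswd, params = decodeurl(url)
    # scheme == 'git', host == 'git.example.com', path == '/repo.git'
    # params == {'protocol': 'https', 'nobranch': '1'}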
@@ -499,30 +499,30 @@ def fetcher_init(d):
|
||||
Calls before this must not hit the cache.
|
||||
"""
|
||||
|
||||
with bb.persist_data.persist('BB_URI_HEADREVS', d) as revs:
|
||||
try:
|
||||
# fetcher_init is called multiple times, so make sure we only save the
|
||||
# revs the first time it is called.
|
||||
if not bb.fetch2.saved_headrevs:
|
||||
bb.fetch2.saved_headrevs = dict(revs)
|
||||
except:
|
||||
pass
|
||||
revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
|
||||
try:
|
||||
# fetcher_init is called multiple times, so make sure we only save the
|
||||
# revs the first time it is called.
|
||||
if not bb.fetch2.saved_headrevs:
|
||||
bb.fetch2.saved_headrevs = dict(revs)
|
||||
except:
|
||||
pass
|
||||
|
||||
# When to drop SCM head revisions controlled by user policy
|
||||
srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear"
|
||||
if srcrev_policy == "cache":
|
||||
logger.debug("Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
elif srcrev_policy == "clear":
|
||||
logger.debug("Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
revs.clear()
|
||||
else:
|
||||
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
|
||||
# When to drop SCM head revisions controlled by user policy
|
||||
srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear"
|
||||
if srcrev_policy == "cache":
|
||||
logger.debug("Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
elif srcrev_policy == "clear":
|
||||
logger.debug("Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
revs.clear()
|
||||
else:
|
||||
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
|
||||
|
||||
_checksum_cache.init_cache(d.getVar("BB_CACHEDIR"))
|
||||
_checksum_cache.init_cache(d.getVar("BB_CACHEDIR"))
|
||||
|
||||
for m in methods:
|
||||
if hasattr(m, "init"):
|
||||
m.init(d)
|
||||
for m in methods:
|
||||
if hasattr(m, "init"):
|
||||
m.init(d)
|
||||
|
||||
def fetcher_parse_save():
|
||||
_checksum_cache.save_extras()
|
||||
@@ -536,8 +536,8 @@ def fetcher_compare_revisions(d):
|
||||
when bitbake was started and return true if they have changed.
|
||||
"""
|
||||
|
||||
with dict(bb.persist_data.persist('BB_URI_HEADREVS', d)) as headrevs:
|
||||
return headrevs != bb.fetch2.saved_headrevs
|
||||
headrevs = dict(bb.persist_data.persist('BB_URI_HEADREVS', d))
|
||||
return headrevs != bb.fetch2.saved_headrevs
|
||||
|
||||
def mirror_from_string(data):
|
||||
mirrors = (data or "").replace('\\n',' ').split()
|
||||
@@ -753,7 +753,7 @@ def get_autorev(d):
|
||||
d.setVar("__BBAUTOREV_SEEN", True)
|
||||
return "AUTOINC"
|
||||
|
||||
def _get_srcrev(d, method_name='sortable_revision'):
|
||||
def get_srcrev(d, method_name='sortable_revision'):
|
||||
"""
|
||||
Return the revision string, usually for use in the version string (PV) of the current package
|
||||
Most packages usually only have one SCM so we just pass on the call.
|
||||
@@ -774,7 +774,6 @@ def _get_srcrev(d, method_name='sortable_revision'):
|
||||
d.setVar("__BBINSRCREV", True)
|
||||
|
||||
scms = []
|
||||
revs = []
|
||||
fetcher = Fetch(d.getVar('SRC_URI').split(), d)
|
||||
urldata = fetcher.ud
|
||||
for u in urldata:
|
||||
@@ -782,19 +781,16 @@ def _get_srcrev(d, method_name='sortable_revision'):
|
||||
scms.append(u)
|
||||
|
||||
if not scms:
|
||||
d.delVar("__BBINSRCREV")
|
||||
return "", revs
|
||||
|
||||
raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI")
|
||||
|
||||
if len(scms) == 1 and len(urldata[scms[0]].names) == 1:
|
||||
autoinc, rev = getattr(urldata[scms[0]].method, method_name)(urldata[scms[0]], d, urldata[scms[0]].names[0])
|
||||
revs.append(rev)
|
||||
if len(rev) > 10:
|
||||
rev = rev[:10]
|
||||
d.delVar("__BBINSRCREV")
|
||||
if autoinc:
|
||||
return "AUTOINC+" + rev, revs
|
||||
return rev, revs
|
||||
return "AUTOINC+" + rev
|
||||
return rev
|
||||
|
||||
#
|
||||
# Mutiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT
|
||||
@@ -810,7 +806,6 @@ def _get_srcrev(d, method_name='sortable_revision'):
|
||||
ud = urldata[scm]
|
||||
for name in ud.names:
|
||||
autoinc, rev = getattr(ud.method, method_name)(ud, d, name)
|
||||
revs.append(rev)
|
||||
seenautoinc = seenautoinc or autoinc
|
||||
if len(rev) > 10:
|
||||
rev = rev[:10]
|
||||
@@ -828,21 +823,7 @@ def _get_srcrev(d, method_name='sortable_revision'):
|
||||
format = "AUTOINC+" + format
|
||||
|
||||
d.delVar("__BBINSRCREV")
|
||||
return format, revs
|
||||
|
||||
def get_hashvalue(d, method_name='sortable_revision'):
|
||||
pkgv, revs = _get_srcrev(d, method_name=method_name)
|
||||
return " ".join(revs)
|
||||
|
||||
def get_pkgv_string(d, method_name='sortable_revision'):
|
||||
pkgv, revs = _get_srcrev(d, method_name=method_name)
|
||||
return pkgv
|
||||
|
||||
def get_srcrev(d, method_name='sortable_revision'):
|
||||
pkgv, revs = _get_srcrev(d, method_name=method_name)
|
||||
if not pkgv:
|
||||
raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI")
|
||||
return pkgv
|
||||
return format
|
||||
|
||||
def localpath(url, d):
|
||||
fetcher = bb.fetch2.Fetch([url], d)
|
||||
@@ -872,12 +853,8 @@ FETCH_EXPORT_VARS = ['HOME', 'PATH',
|
||||
'AWS_PROFILE',
|
||||
'AWS_ACCESS_KEY_ID',
|
||||
'AWS_SECRET_ACCESS_KEY',
|
||||
'AWS_ROLE_ARN',
|
||||
'AWS_WEB_IDENTITY_TOKEN_FILE',
|
||||
'AWS_DEFAULT_REGION',
|
||||
'AWS_SESSION_TOKEN',
|
||||
'GIT_CACHE_PATH',
|
||||
'REMOTE_CONTAINERS_IPC',
|
||||
'SSL_CERT_DIR']
|
||||
|
||||
def get_fetcher_environment(d):
|
||||
@@ -943,10 +920,7 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
|
||||
elif e.stderr:
|
||||
output = "output:\n%s" % e.stderr
|
||||
else:
|
||||
if log:
|
||||
output = "see logfile for output"
|
||||
else:
|
||||
output = "no output"
|
||||
output = "no output"
|
||||
error_message = "Fetch command %s failed with exit code %s, %s" % (e.command, e.exitcode, output)
|
||||
except bb.process.CmdError as e:
|
||||
error_message = "Fetch command %s could not be run:\n%s" % (e.command, e.msg)
|
||||
@@ -1118,8 +1092,7 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):
|
||||
logger.debug("Mirror fetch failure for url %s (original url: %s)" % (ud.url, origud.url))
|
||||
logger.debug(str(e))
|
||||
try:
|
||||
if ud.method.cleanup_upon_failure():
|
||||
ud.method.clean(ud, ld)
|
||||
ud.method.clean(ud, ld)
|
||||
except UnboundLocalError:
|
||||
pass
|
||||
return False
|
||||
@@ -1261,7 +1234,7 @@ def get_checksum_file_list(d):
|
||||
ud = fetch.ud[u]
|
||||
if ud and isinstance(ud.method, local.Local):
|
||||
found = False
|
||||
paths = ud.method.localfile_searchpaths(ud, d)
|
||||
paths = ud.method.localpaths(ud, d)
|
||||
for f in paths:
|
||||
pth = ud.decodedurl
|
||||
if os.path.exists(f):
|
||||
@@ -1317,7 +1290,7 @@ class FetchData(object):
|
||||
|
||||
if checksum_name in self.parm:
|
||||
checksum_expected = self.parm[checksum_name]
|
||||
elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3", "az", "crate", "gs"]:
|
||||
elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3", "az", "crate"]:
|
||||
checksum_expected = None
|
||||
else:
|
||||
checksum_expected = d.getVarFlag("SRC_URI", checksum_name)
|
||||
@@ -1429,9 +1402,6 @@ class FetchMethod(object):
|
||||
Is localpath something that can be represented by a checksum?
|
||||
"""
|
||||
|
||||
# We cannot compute checksums for None
|
||||
if urldata.localpath is None:
|
||||
return False
|
||||
# We cannot compute checksums for directories
|
||||
if os.path.isdir(urldata.localpath):
|
||||
return False
|
||||
@@ -1444,12 +1414,6 @@ class FetchMethod(object):
|
||||
"""
|
||||
return False
|
||||
|
||||
def cleanup_upon_failure(self):
|
||||
"""
|
||||
When a fetch fails, should clean() be called?
|
||||
"""
|
||||
return True
|
||||
|
||||
def verify_donestamp(self, ud, d):
|
||||
"""
|
||||
Verify the donestamp file
|
||||
@@ -1592,7 +1556,6 @@ class FetchMethod(object):
|
||||
unpackdir = rootdir
|
||||
|
||||
if not unpack or not cmd:
|
||||
urldata.unpack_tracer.unpack("file-copy", unpackdir)
|
||||
# If file == dest, then avoid any copies, as we already put the file into dest!
|
||||
dest = os.path.join(unpackdir, os.path.basename(file))
|
||||
if file != dest and not (os.path.exists(dest) and os.path.samefile(file, dest)):
|
||||
@@ -1607,8 +1570,6 @@ class FetchMethod(object):
|
||||
destdir = urlpath.rsplit("/", 1)[0] + '/'
|
||||
bb.utils.mkdirhier("%s/%s" % (unpackdir, destdir))
|
||||
cmd = 'cp -fpPRH "%s" "%s"' % (file, destdir)
|
||||
else:
|
||||
urldata.unpack_tracer.unpack("archive-extract", unpackdir)
|
||||
|
||||
if not cmd:
|
||||
return
|
||||
@@ -1662,13 +1623,13 @@ class FetchMethod(object):
|
||||
if not hasattr(self, "_latest_revision"):
|
||||
raise ParameterError("The fetcher for this URL does not support _latest_revision", ud.url)
|
||||
|
||||
with bb.persist_data.persist('BB_URI_HEADREVS', d) as revs:
|
||||
key = self.generate_revision_key(ud, d, name)
|
||||
try:
|
||||
return revs[key]
|
||||
except KeyError:
|
||||
revs[key] = rev = self._latest_revision(ud, d, name)
|
||||
return rev
|
||||
revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
|
||||
key = self.generate_revision_key(ud, d, name)
|
||||
try:
|
||||
return revs[key]
|
||||
except KeyError:
|
||||
revs[key] = rev = self._latest_revision(ud, d, name)
|
||||
return rev
|
||||
|
||||
def sortable_revision(self, ud, d, name):
|
||||
latest_rev = self._build_revision(ud, d, name)
|
||||
@@ -1700,55 +1661,6 @@ class FetchMethod(object):
|
||||
"""
|
||||
return []
|
||||
|
||||
|
||||
class DummyUnpackTracer(object):
|
||||
"""
|
||||
Abstract API definition for a class that traces unpacked source files back
|
||||
to their respective upstream SRC_URI entries, for software composition
|
||||
analysis, license compliance and detailed SBOM generation purposes.
|
||||
User may load their own unpack tracer class (instead of the dummy
|
||||
one) by setting the BB_UNPACK_TRACER_CLASS config parameter.
|
||||
"""
|
||||
def start(self, unpackdir, urldata_dict, d):
|
||||
"""
|
||||
Start tracing the core Fetch.unpack process, using an index to map
|
||||
unpacked files to each SRC_URI entry.
|
||||
This method is called by Fetch.unpack and it may receive nested calls by
|
||||
gitsm and npmsw fetchers, that expand SRC_URI entries by adding implicit
|
||||
URLs and by recursively calling Fetch.unpack from new (nested) Fetch
|
||||
instances.
|
||||
"""
|
||||
return
|
||||
def start_url(self, url):
|
||||
"""Start tracing url unpack process.
|
||||
This method is called by Fetch.unpack before the fetcher-specific unpack
|
||||
method starts, and it may receive nested calls by gitsm and npmsw
|
||||
fetchers.
|
||||
"""
|
||||
return
|
||||
def unpack(self, unpack_type, destdir):
|
||||
"""
|
||||
Set unpack_type and destdir for current url.
|
||||
This method is called by the fetcher-specific unpack method after url
|
||||
tracing started.
|
||||
"""
|
||||
return
|
||||
def finish_url(self, url):
|
||||
"""Finish tracing url unpack process and update the file index.
|
||||
This method is called by Fetch.unpack after the fetcher-specific unpack
|
||||
method finished its job, and it may receive nested calls by gitsm
|
||||
and npmsw fetchers.
|
||||
"""
|
||||
return
|
||||
def complete(self):
|
||||
"""
|
||||
Finish tracing the Fetch.unpack process, and check if all nested
|
||||
Fecth.unpack calls (if any) have been completed; if so, save collected
|
||||
metadata.
|
||||
"""
|
||||
return
|
||||
|
||||
|
||||
class Fetch(object):
|
||||
def __init__(self, urls, d, cache = True, localonly = False, connection_cache = None):
|
||||
if localonly and cache:
|
||||
@@ -1769,30 +1681,10 @@ class Fetch(object):
|
||||
if key in urldata_cache:
|
||||
self.ud = urldata_cache[key]
|
||||
|
||||
# the unpack_tracer object needs to be made available to possible nested
|
||||
# Fetch instances (when those are created by gitsm and npmsw fetchers)
|
||||
# so we set it as a global variable
|
||||
global unpack_tracer
|
||||
try:
|
||||
unpack_tracer
|
||||
except NameError:
|
||||
class_path = d.getVar("BB_UNPACK_TRACER_CLASS")
|
||||
if class_path:
|
||||
# use user-defined unpack tracer class
|
||||
import importlib
|
||||
module_name, _, class_name = class_path.rpartition(".")
|
||||
module = importlib.import_module(module_name)
|
||||
class_ = getattr(module, class_name)
|
||||
unpack_tracer = class_()
|
||||
else:
|
||||
# fall back to the dummy/abstract class
|
||||
unpack_tracer = DummyUnpackTracer()
|
||||
|
||||
for url in urls:
|
||||
if url not in self.ud:
|
||||
try:
|
||||
self.ud[url] = FetchData(url, d, localonly)
|
||||
self.ud[url].unpack_tracer = unpack_tracer
|
||||
except NonLocalMethod:
|
||||
if localonly:
|
||||
self.ud[url] = None
|
||||
@@ -1895,7 +1787,7 @@ class Fetch(object):
|
||||
logger.debug(str(e))
|
||||
firsterr = e
|
||||
# Remove any incomplete fetch
|
||||
if not verified_stamp and m.cleanup_upon_failure():
|
||||
if not verified_stamp:
|
||||
m.clean(ud, self.d)
|
||||
logger.debug("Trying MIRRORS")
|
||||
mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
|
||||
@@ -1958,7 +1850,7 @@ class Fetch(object):
|
||||
ret = m.try_mirrors(self, ud, self.d, mirrors, True)
|
||||
|
||||
if not ret:
|
||||
raise FetchError("URL doesn't work", u)
|
||||
raise FetchError("URL %s doesn't work" % u, u)
|
||||
|
||||
def unpack(self, root, urls=None):
|
||||
"""
|
||||
@@ -1968,8 +1860,6 @@ class Fetch(object):
|
||||
if not urls:
|
||||
urls = self.urls
|
||||
|
||||
unpack_tracer.start(root, self.ud, self.d)
|
||||
|
||||
for u in urls:
|
||||
ud = self.ud[u]
|
||||
ud.setup_localpath(self.d)
|
||||
@@ -1977,15 +1867,11 @@ class Fetch(object):
|
||||
if ud.lockfile:
|
||||
lf = bb.utils.lockfile(ud.lockfile)
|
||||
|
||||
unpack_tracer.start_url(u)
|
||||
ud.method.unpack(ud, root, self.d)
|
||||
unpack_tracer.finish_url(u)
|
||||
|
||||
if ud.lockfile:
|
||||
bb.utils.unlockfile(lf)
|
||||
|
||||
unpack_tracer.complete()
|
||||
|
||||
def clean(self, urls=None):
|
||||
"""
|
||||
Clean files that the fetcher gets or places
|
||||
@@ -2087,7 +1973,6 @@ from . import npm
|
||||
from . import npmsw
|
||||
from . import az
|
||||
from . import crate
|
||||
from . import gcp
|
||||
|
||||
methods.append(local.Local())
|
||||
methods.append(wget.Wget())
|
||||
@@ -2109,4 +1994,3 @@ methods.append(npm.Npm())
|
||||
methods.append(npmsw.NpmShrinkWrap())
|
||||
methods.append(az.Az())
|
||||
methods.append(crate.Crate())
|
||||
methods.append(gcp.GCP())
|
||||
|
||||
@@ -59,11 +59,11 @@ class Crate(Wget):
|
||||
# version is expected to be the last token
|
||||
# but ignore possible url parameters which will be used
|
||||
# by the top fetcher class
|
||||
version = parts[-1].split(";")[0]
|
||||
version, _, _ = parts[len(parts) -1].partition(";")
|
||||
# second to last field is name
|
||||
name = parts[-2]
|
||||
name = parts[len(parts) - 2]
|
||||
# host (this is to allow custom crate registries to be specified
|
||||
host = '/'.join(parts[2:-2])
|
||||
host = '/'.join(parts[2:len(parts) - 2])
|
||||
|
||||
# if using upstream just fix it up nicely
|
||||
if host == 'crates.io':
|
||||
@@ -98,13 +98,11 @@ class Crate(Wget):
|
||||
save_cwd = os.getcwd()
|
||||
os.chdir(rootdir)
|
||||
|
||||
bp = d.getVar('BP')
|
||||
if bp == ud.parm.get('name'):
|
||||
pn = d.getVar('BPN')
|
||||
if pn == ud.parm.get('name'):
|
||||
cmd = "tar -xz --no-same-owner -f %s" % thefile
|
||||
ud.unpack_tracer.unpack("crate-extract", rootdir)
|
||||
else:
|
||||
cargo_bitbake = self._cargo_bitbake_path(rootdir)
|
||||
ud.unpack_tracer.unpack("cargo-extract", cargo_bitbake)
|
||||
|
||||
cmd = "tar -xz --no-same-owner -f %s -C %s" % (thefile, cargo_bitbake)
|
||||
|
||||
|
||||
@@ -1,102 +0,0 @@
"""
BitBake 'Fetch' implementation for Google Cloup Platform Storage.

Class for fetching files from Google Cloud Storage using the
Google Cloud Storage Python Client. The GCS Python Client must
be correctly installed, configured and authenticated prior to use.
Additionally, gsutil must also be installed.

"""

# Copyright (C) 2023, Snap Inc.
#
# Based in part on bb.fetch2.s3:
#    Copyright (C) 2017 Andre McCurdy
#
# SPDX-License-Identifier: GPL-2.0-only
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import bb
import urllib.parse, urllib.error
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import logger
from bb.fetch2 import runfetchcmd

class GCP(FetchMethod):
    """
    Class to fetch urls via GCP's Python API.
    """
    def __init__(self):
        self.gcp_client = None

    def supports(self, ud, d):
        """
        Check to see if a given url can be fetched with GCP.
        """
        return ud.type in ['gs']

    def recommends_checksum(self, urldata):
        return True

    def urldata_init(self, ud, d):
        if 'downloadfilename' in ud.parm:
            ud.basename = ud.parm['downloadfilename']
        else:
            ud.basename = os.path.basename(ud.path)

        ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
        ud.basecmd = "gsutil stat"

    def get_gcp_client(self):
        from google.cloud import storage
        self.gcp_client = storage.Client(project=None)

    def download(self, ud, d):
        """
        Fetch urls using the GCP API.
        Assumes localpath was called first.
        """
        logger.debug2(f"Trying to download gs://{ud.host}{ud.path} to {ud.localpath}")
        if self.gcp_client is None:
            self.get_gcp_client()

        bb.fetch2.check_network_access(d, ud.basecmd, f"gs://{ud.host}{ud.path}")
        runfetchcmd("%s %s" % (ud.basecmd, f"gs://{ud.host}{ud.path}"), d)

        # Path sometimes has leading slash, so strip it
        path = ud.path.lstrip("/")
        blob = self.gcp_client.bucket(ud.host).blob(path)
        blob.download_to_filename(ud.localpath)

        # Additional sanity checks copied from the wget class (although there
        # are no known issues which mean these are required, treat the GCP API
        # tool with a little healthy suspicion).
        if not os.path.exists(ud.localpath):
            raise FetchError(f"The GCP API returned success for gs://{ud.host}{ud.path} but {ud.localpath} doesn't exist?!")

        if os.path.getsize(ud.localpath) == 0:
            os.remove(ud.localpath)
            raise FetchError(f"The downloaded file for gs://{ud.host}{ud.path} resulted in a zero size file?! Deleting and failing since this isn't right.")

        return True

    def checkstatus(self, fetch, ud, d):
        """
        Check the status of a URL.
        """
        logger.debug2(f"Checking status of gs://{ud.host}{ud.path}")
        if self.gcp_client is None:
            self.get_gcp_client()

        bb.fetch2.check_network_access(d, ud.basecmd, f"gs://{ud.host}{ud.path}")
        runfetchcmd("%s %s" % (ud.basecmd, f"gs://{ud.host}{ud.path}"), d)

        # Path sometimes has leading slash, so strip it
        path = ud.path.lstrip("/")
        if self.gcp_client.bucket(ud.host).blob(path).exists() == False:
            raise FetchError(f"The GCP API reported that gs://{ud.host}{ud.path} does not exist")
        else:
            return True
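For reference, the fetcher listed above is the one selected for gs:// entries; a hypothetical recipe usage (bucket, object name and checksum are placeholders), noting that recommends_checksum() means a checksum is expected:

    SRC_URI = "gs://example-bucket/sources/foo-1.0.tar.gz"
    SRC_URI[sha256sum] = "<placeholder checksum>"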
@@ -48,23 +48,10 @@ Supported SRC_URI options are:
   instead of branch.
   The default is "0", set nobranch=1 if needed.

- subpath
   Limit the checkout to a specific subpath of the tree.
   By default, checkout the whole tree, set subpath=<path> if needed

- destsuffix
   The name of the path in which to place the checkout.
   By default, the path is git/, set destsuffix=<suffix> if needed

- usehead
   For local git:// urls to use the current branch HEAD as the revision for use with
   AUTOREV. Implies nobranch.

- lfs
    Enable the checkout to use LFS for large files. This will download all LFS files
    in the download step, as the unpack step does not have network access.
    The default is "1", set lfs=0 to skip.

"""

# Copyright (C) 2005 Richard Purdie
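These options are passed as ;key=value pairs on the git URL itself; a hypothetical recipe entry combining several of them:

    SRC_URI = "git://git.example.com/widget.git;protocol=https;branch=main;destsuffix=widget;subpath=src;lfs=0"
    SRCREV = "${AUTOREV}"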
@@ -78,7 +65,6 @@ import fnmatch
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
import bb
|
||||
@@ -87,7 +73,6 @@ from contextlib import contextmanager
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import logger
|
||||
from bb.fetch2 import trusted_network
|
||||
|
||||
|
||||
sha1_re = re.compile(r'^[0-9a-f]{40}$')
|
||||
@@ -150,9 +135,6 @@ class Git(FetchMethod):
|
||||
def supports_checksum(self, urldata):
|
||||
return False
|
||||
|
||||
def cleanup_upon_failure(self):
|
||||
return False
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
"""
|
||||
init git specific variable within url data
|
||||
@@ -262,7 +244,7 @@ class Git(FetchMethod):
|
||||
for name in ud.names:
|
||||
ud.unresolvedrev[name] = 'HEAD'
|
||||
|
||||
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c gc.autoDetach=false -c core.pager=cat -c safe.bareRepository=all"
|
||||
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c gc.autoDetach=false -c core.pager=cat"
|
||||
|
||||
write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0"
|
||||
ud.write_tarballs = write_tarballs != "0" or ud.rebaseable
|
||||
@@ -277,7 +259,7 @@ class Git(FetchMethod):
|
||||
ud.unresolvedrev[name] = ud.revisions[name]
|
||||
ud.revisions[name] = self.latest_revision(ud, d, name)
|
||||
|
||||
gitsrcname = '%s%s' % (ud.host.replace(':', '.'), ud.path.replace('/', '.').replace('*', '.').replace(' ','_').replace('(', '_').replace(')', '_'))
|
||||
gitsrcname = '%s%s' % (ud.host.replace(':', '.'), ud.path.replace('/', '.').replace('*', '.').replace(' ','_'))
|
||||
if gitsrcname.startswith('.'):
|
||||
gitsrcname = gitsrcname[1:]
|
||||
|
||||
@@ -328,10 +310,7 @@ class Git(FetchMethod):
|
||||
return ud.clonedir
|
||||
|
||||
def need_update(self, ud, d):
|
||||
return self.clonedir_need_update(ud, d) \
|
||||
or self.shallow_tarball_need_update(ud) \
|
||||
or self.tarball_need_update(ud) \
|
||||
or self.lfs_need_update(ud, d)
|
||||
return self.clonedir_need_update(ud, d) or self.shallow_tarball_need_update(ud) or self.tarball_need_update(ud)
|
||||
|
||||
def clonedir_need_update(self, ud, d):
|
||||
if not os.path.exists(ud.clonedir):
|
||||
@@ -343,15 +322,6 @@ class Git(FetchMethod):
|
||||
return True
|
||||
return False
|
||||
|
||||
def lfs_need_update(self, ud, d):
|
||||
if self.clonedir_need_update(ud, d):
|
||||
return True
|
||||
|
||||
for name in ud.names:
|
||||
if not self._lfs_objects_downloaded(ud, d, name, ud.clonedir):
|
||||
return True
|
||||
return False
|
||||
|
||||
def clonedir_need_shallow_revs(self, ud, d):
|
||||
for rev in ud.shallow_revs:
|
||||
try:
|
||||
@@ -371,16 +341,6 @@ class Git(FetchMethod):
|
||||
# is not possible
|
||||
if bb.utils.to_boolean(d.getVar("BB_FETCH_PREMIRRORONLY")):
|
||||
return True
|
||||
# If the url is not in trusted network, that is, BB_NO_NETWORK is set to 0
|
||||
# and BB_ALLOWED_NETWORKS does not contain the host that ud.url uses, then
|
||||
# we need to try premirrors first as using upstream is destined to fail.
|
||||
if not trusted_network(d, ud.url):
|
||||
return True
|
||||
# the following check is to ensure incremental fetch in downloads, this is
|
||||
# because the premirror might be old and does not contain the new rev required,
|
||||
# and this will cause a total removal and new clone. So if we can reach to
|
||||
# network, we prefer upstream over premirror, though the premirror might contain
|
||||
# the new rev.
|
||||
if os.path.exists(ud.clonedir):
|
||||
return False
|
||||
return True
|
||||
@@ -401,40 +361,12 @@ class Git(FetchMethod):
|
||||
else:
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
|
||||
runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=tmpdir)
|
||||
output = runfetchcmd("%s remote" % ud.basecmd, d, quiet=True, workdir=ud.clonedir)
|
||||
if 'mirror' in output:
|
||||
runfetchcmd("%s remote rm mirror" % ud.basecmd, d, workdir=ud.clonedir)
|
||||
runfetchcmd("%s remote add --mirror=fetch mirror %s" % (ud.basecmd, tmpdir), d, workdir=ud.clonedir)
|
||||
fetch_cmd = "LANG=C %s fetch -f --update-head-ok --progress mirror " % (ud.basecmd)
|
||||
fetch_cmd = "LANG=C %s fetch -f --progress %s " % (ud.basecmd, shlex.quote(tmpdir))
|
||||
runfetchcmd(fetch_cmd, d, workdir=ud.clonedir)
|
||||
repourl = self._get_repo_url(ud)
|
||||
|
||||
needs_clone = False
|
||||
if os.path.exists(ud.clonedir):
|
||||
# The directory may exist, but not be the top level of a bare git
|
||||
# repository in which case it needs to be deleted and re-cloned.
|
||||
try:
|
||||
# Since clones can be bare, use --absolute-git-dir instead of --show-toplevel
|
||||
output = runfetchcmd("LANG=C %s rev-parse --absolute-git-dir" % ud.basecmd, d, workdir=ud.clonedir)
|
||||
toplevel = output.rstrip()
|
||||
|
||||
if not bb.utils.path_is_descendant(toplevel, ud.clonedir):
|
||||
logger.warning("Top level directory '%s' is not a descendant of '%s'. Re-cloning", toplevel, ud.clonedir)
|
||||
needs_clone = True
|
||||
except bb.fetch2.FetchError as e:
|
||||
logger.warning("Unable to get top level for %s (not a git directory?): %s", ud.clonedir, e)
|
||||
needs_clone = True
|
||||
except FileNotFoundError as e:
|
||||
logger.warning("%s", e)
|
||||
needs_clone = True
|
||||
|
||||
if needs_clone:
|
||||
shutil.rmtree(ud.clonedir)
|
||||
else:
|
||||
needs_clone = True
|
||||
|
||||
# If the repo still doesn't exist, fallback to cloning it
|
||||
if needs_clone:
|
||||
if not os.path.exists(ud.clonedir):
|
||||
# We do this since git will use a "-l" option automatically for local urls where possible,
|
||||
# but it doesn't work when git/objects is a symlink, only works when it is a directory.
|
||||
if repourl.startswith("file://"):
|
||||
@@ -482,7 +414,7 @@ class Git(FetchMethod):
|
||||
if missing_rev:
|
||||
raise bb.fetch2.FetchError("Unable to find revision %s even from upstream" % missing_rev)
|
||||
|
||||
if self.lfs_need_update(ud, d):
|
||||
if self._contains_lfs(ud, d, ud.clonedir) and self._need_lfs(ud):
|
||||
# Unpack temporary working copy, use it to run 'git checkout' to force pre-fetching
|
||||
# of all LFS blobs needed at the srcrev.
|
||||
#
|
||||
@@ -505,8 +437,8 @@ class Git(FetchMethod):
|
||||
# Only do this if the unpack resulted in a .git/lfs directory being
|
||||
# created; this only happens if at least one blob needed to be
|
||||
# downloaded.
|
||||
if os.path.exists(os.path.join(ud.destdir, ".git", "lfs")):
|
||||
runfetchcmd("tar -cf - lfs | tar -xf - -C %s" % ud.clonedir, d, workdir="%s/.git" % ud.destdir)
|
||||
if os.path.exists(os.path.join(tmpdir, "git", ".git", "lfs")):
|
||||
runfetchcmd("tar -cf - lfs | tar -xf - -C %s" % ud.clonedir, d, workdir="%s/git/.git" % tmpdir)
|
||||
|
||||
def build_mirror_data(self, ud, d):
|
||||
|
||||
@@ -544,7 +476,7 @@ class Git(FetchMethod):
|
||||
|
||||
logger.info("Creating tarball of git repository")
|
||||
with create_atomic(ud.fullmirror) as tfile:
|
||||
mtime = runfetchcmd("{} log --all -1 --format=%cD".format(ud.basecmd), d,
|
||||
mtime = runfetchcmd("git log --all -1 --format=%cD", d,
|
||||
quiet=True, workdir=ud.clonedir)
|
||||
runfetchcmd("tar -czf %s --owner oe:0 --group oe:0 --mtime \"%s\" ."
|
||||
% (tfile, mtime), d, workdir=ud.clonedir)
|
||||
@@ -632,8 +564,6 @@ class Git(FetchMethod):
|
||||
destdir = ud.destdir = os.path.join(destdir, destsuffix)
|
||||
if os.path.exists(destdir):
|
||||
bb.utils.prunedir(destdir)
|
||||
if not ud.bareclone:
|
||||
ud.unpack_tracer.unpack("git", destdir)
|
||||
|
||||
need_lfs = self._need_lfs(ud)
|
||||
|
||||
@@ -672,8 +602,6 @@ class Git(FetchMethod):
|
||||
raise bb.fetch2.FetchError("Repository %s has LFS content, install git-lfs on host to download (or set lfs=0 to ignore it)" % (repourl))
|
||||
elif not need_lfs:
|
||||
bb.note("Repository %s has LFS content but it is not being fetched" % (repourl))
|
||||
else:
|
||||
runfetchcmd("%s lfs install --local" % ud.basecmd, d, workdir=destdir)
|
||||
|
||||
if not ud.nocheckout:
|
||||
if subpath:
|
||||
@@ -725,35 +653,6 @@ class Git(FetchMethod):
|
||||
raise bb.fetch2.FetchError("The command '%s' gave output with more then 1 line unexpectedly, output: '%s'" % (cmd, output))
|
||||
return output.split()[0] != "0"
|
||||
|
||||
def _lfs_objects_downloaded(self, ud, d, name, wd):
|
||||
"""
|
||||
Verifies whether the LFS objects for requested revisions have already been downloaded
|
||||
"""
|
||||
# Bail out early if this repository doesn't use LFS
|
||||
if not self._need_lfs(ud) or not self._contains_lfs(ud, d, wd):
|
||||
return True
|
||||
|
||||
# The Git LFS specification specifies ([1]) the LFS folder layout so it should be safe to check for file
|
||||
# existence.
|
||||
# [1] https://github.com/git-lfs/git-lfs/blob/main/docs/spec.md#intercepting-git
|
||||
cmd = "%s lfs ls-files -l %s" \
|
||||
% (ud.basecmd, ud.revisions[name])
|
||||
output = runfetchcmd(cmd, d, quiet=True, workdir=wd).rstrip()
|
||||
# Do not do any further matching if no objects are managed by LFS
|
||||
if not output:
|
||||
return True
|
||||
|
||||
# Match all lines beginning with the hexadecimal OID
|
||||
oid_regex = re.compile("^(([a-fA-F0-9]{2})([a-fA-F0-9]{2})[A-Fa-f0-9]+)")
|
||||
for line in output.split("\n"):
|
||||
oid = re.search(oid_regex, line)
|
||||
if not oid:
|
||||
bb.warn("git lfs ls-files output '%s' did not match expected format." % line)
|
||||
if not os.path.exists(os.path.join(wd, "lfs", "objects", oid.group(2), oid.group(3), oid.group(1))):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def _need_lfs(self, ud):
|
||||
return ud.parm.get("lfs", "1") == "1"
|
||||
|
||||
@@ -762,11 +661,8 @@ class Git(FetchMethod):
|
||||
Check if the repository has 'lfs' (large file) content
|
||||
"""
|
||||
|
||||
if ud.nobranch:
|
||||
# If no branch is specified, use the current git commit
|
||||
refname = self._build_revision(ud, d, ud.names[0])
|
||||
elif wd == ud.clonedir:
|
||||
# The bare clonedir doesn't use the remote names; it has the branch immediately.
|
||||
# The bare clonedir doesn't use the remote names; it has the branch immediately.
|
||||
if wd == ud.clonedir:
|
||||
refname = ud.branches[ud.names[0]]
|
||||
else:
|
||||
refname = "origin/%s" % ud.branches[ud.names[0]]
|
||||
@@ -871,42 +767,38 @@ class Git(FetchMethod):
|
||||
"""
|
||||
pupver = ('', '')
|
||||
|
||||
tagregex = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or r"(?P<pver>([0-9][\.|_]?)+)")
|
||||
try:
|
||||
output = self._lsremote(ud, d, "refs/tags/*")
|
||||
except (bb.fetch2.FetchError, bb.fetch2.NetworkAccess) as e:
|
||||
bb.note("Could not list remote: %s" % str(e))
|
||||
return pupver
|
||||
|
||||
rev_tag_re = re.compile(r"([0-9a-f]{40})\s+refs/tags/(.*)")
|
||||
pver_re = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or r"(?P<pver>([0-9][\.|_]?)+)")
|
||||
nonrel_re = re.compile(r"(alpha|beta|rc|final)+")
|
||||
|
||||
verstring = ""
|
||||
revision = ""
|
||||
for line in output.split("\n"):
|
||||
if not line:
|
||||
break
|
||||
|
||||
m = rev_tag_re.match(line)
|
||||
if not m:
|
||||
continue
|
||||
|
||||
(revision, tag) = m.groups()
|
||||
|
||||
tag_head = line.split("/")[-1]
|
||||
# Ignore non-released branches
|
||||
if nonrel_re.search(tag):
|
||||
m = re.search(r"(alpha|beta|rc|final)+", tag_head)
|
||||
if m:
|
||||
continue
|
||||
|
||||
# search for version in the line
|
||||
m = pver_re.search(tag)
|
||||
if not m:
|
||||
tag = tagregex.search(tag_head)
|
||||
if tag is None:
|
||||
continue
|
||||
|
||||
pver = m.group('pver').replace("_", ".")
|
||||
tag = tag.group('pver')
|
||||
tag = tag.replace("_", ".")
|
||||
|
||||
if verstring and bb.utils.vercmp(("0", pver, ""), ("0", verstring, "")) < 0:
|
||||
if verstring and bb.utils.vercmp(("0", tag, ""), ("0", verstring, "")) < 0:
|
||||
continue
|
||||
|
||||
verstring = pver
|
||||
verstring = tag
|
||||
revision = line.split()[0]
|
||||
pupver = (verstring, revision)
|
||||
|
||||
return pupver
|
||||
@@ -926,8 +818,9 @@ class Git(FetchMethod):
|
||||
commits = None
|
||||
else:
|
||||
if not os.path.exists(rev_file) or not os.path.getsize(rev_file):
|
||||
from pipes import quote
|
||||
commits = bb.fetch2.runfetchcmd(
|
||||
"git rev-list %s -- | wc -l" % shlex.quote(rev),
|
||||
"git rev-list %s -- | wc -l" % quote(rev),
|
||||
d, quiet=True).strip().lstrip('0')
|
||||
if commits:
|
||||
open(rev_file, "w").write("%d\n" % int(commits))
|
||||
|
||||
@@ -123,13 +123,6 @@ class GitSM(Git):
|
||||
url += ";name=%s" % module
|
||||
url += ";subpath=%s" % module
|
||||
url += ";nobranch=1"
|
||||
url += ";lfs=%s" % self._need_lfs(ud)
|
||||
# Note that adding "user=" here to give credentials to the
|
||||
# submodule is not supported. Since using SRC_URI to give git://
|
||||
# URL a password is not supported, one have to use one of the
|
||||
# recommended way (eg. ~/.netrc or SSH config) which does specify
|
||||
# the user (See comment in git.py).
|
||||
# So, we will not take patches adding "user=" support here.
|
||||
|
||||
ld = d.createCopy()
|
||||
# Not necessary to set SRC_URI, since we're passing the URI to
|
||||
@@ -147,19 +140,6 @@ class GitSM(Git):
|
||||
|
||||
return submodules != []
|
||||
|
||||
def call_process_submodules(self, ud, d, extra_check, subfunc):
|
||||
# If we're using a shallow mirror tarball it needs to be
|
||||
# unpacked temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and extra_check:
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
try:
|
||||
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
|
||||
self.process_submodules(ud, tmpdir, subfunc, d)
|
||||
finally:
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, subfunc, d)
|
||||
|
||||
def need_update(self, ud, d):
|
||||
if Git.need_update(self, ud, d):
|
||||
return True
|
||||
@@ -177,7 +157,15 @@ class GitSM(Git):
|
||||
logger.error('gitsm: submodule update check failed: %s %s' % (type(e).__name__, str(e)))
|
||||
need_update_result = True
|
||||
|
||||
self.call_process_submodules(ud, d, not os.path.exists(ud.clonedir), need_update_submodule)
|
||||
# If we're using a shallow mirror tarball it needs to be unpacked
|
||||
# temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and not os.path.exists(ud.clonedir):
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
|
||||
self.process_submodules(ud, tmpdir, need_update_submodule, d)
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, need_update_submodule, d)
|
||||
|
||||
if need_update_list:
|
||||
logger.debug('gitsm: Submodules requiring update: %s' % (' '.join(need_update_list)))
|
||||
@@ -200,7 +188,16 @@ class GitSM(Git):
|
||||
raise
|
||||
|
||||
Git.download(self, ud, d)
|
||||
self.call_process_submodules(ud, d, self.need_update(ud, d), download_submodule)
|
||||
|
||||
# If we're using a shallow mirror tarball it needs to be unpacked
|
||||
# temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and self.need_update(ud, d):
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
|
||||
self.process_submodules(ud, tmpdir, download_submodule, d)
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, download_submodule, d)
|
||||
|
||||
def unpack(self, ud, destdir, d):
|
||||
def unpack_submodules(ud, url, module, modpath, workdir, d):
|
||||
@@ -214,10 +211,6 @@ class GitSM(Git):
|
||||
|
||||
try:
|
||||
newfetch = Fetch([url], d, cache=False)
|
||||
# modpath is needed by unpack tracer to calculate submodule
|
||||
# checkout dir
|
||||
new_ud = newfetch.ud[url]
|
||||
new_ud.modpath = modpath
|
||||
newfetch.unpack(root=os.path.dirname(os.path.join(repo_conf, 'modules', module)))
|
||||
except Exception as e:
|
||||
logger.error('gitsm: submodule unpack failed: %s %s' % (type(e).__name__, str(e)))
|
||||
@@ -243,12 +236,10 @@ class GitSM(Git):
|
||||
ret = self.process_submodules(ud, ud.destdir, unpack_submodules, d)
|
||||
|
||||
if not ud.bareclone and ret:
|
||||
# All submodules should already be downloaded and configured in the tree. This simply
|
||||
# sets up the configuration and checks out the files. The main project config should
|
||||
# remain unmodified, and no download from the internet should occur. As such, lfs smudge
|
||||
# should also be skipped as these files were already smudged in the fetch stage if lfs
|
||||
# was enabled.
|
||||
runfetchcmd("GIT_LFS_SKIP_SMUDGE=1 %s submodule update --recursive --no-fetch" % (ud.basecmd), d, quiet=True, workdir=ud.destdir)
|
||||
# All submodules should already be downloaded and configured in the tree. This simply sets
|
||||
# up the configuration and checks out the files. The main project config should remain
|
||||
# unmodified, and no download from the internet should occur.
|
||||
runfetchcmd("%s submodule update --recursive --no-fetch" % (ud.basecmd), d, quiet=True, workdir=ud.destdir)
|
||||
|
||||
def implicit_urldata(self, ud, d):
|
||||
import shutil, subprocess, tempfile
|
||||
@@ -259,6 +250,14 @@ class GitSM(Git):
|
||||
newfetch = Fetch([url], d, cache=False)
|
||||
urldata.extend(newfetch.expanded_urldata())
|
||||
|
||||
self.call_process_submodules(ud, d, ud.method.need_update(ud, d), add_submodule)
|
||||
# If we're using a shallow mirror tarball it needs to be unpacked
|
||||
# temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and ud.method.need_update(ud, d):
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
subprocess.check_call("tar -xzf %s" % ud.fullshallow, cwd=tmpdir, shell=True)
|
||||
self.process_submodules(ud, tmpdir, add_submodule, d)
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, add_submodule, d)
|
||||
|
||||
return urldata
|
||||
|
||||
@@ -242,7 +242,6 @@ class Hg(FetchMethod):
|
||||
revflag = "-r %s" % ud.revision
|
||||
subdir = ud.parm.get("destsuffix", ud.module)
|
||||
codir = "%s/%s" % (destdir, subdir)
|
||||
ud.unpack_tracer.unpack("hg", codir)
|
||||
|
||||
scmdata = ud.parm.get("scmdata", "")
|
||||
if scmdata != "nokeep":
|
||||
|
||||
@@ -41,9 +41,9 @@ class Local(FetchMethod):
        """
        Return the local filename of a given url assuming a successful fetch.
        """
        return self.localfile_searchpaths(urldata, d)[-1]
        return self.localpaths(urldata, d)[-1]

    def localfile_searchpaths(self, urldata, d):
    def localpaths(self, urldata, d):
        """
        Return the local filename of a given url assuming a successful fetch.
        """
@@ -51,13 +51,11 @@ class Local(FetchMethod):
        path = urldata.decodedurl
        newpath = path
        if path[0] == "/":
            logger.debug2("Using absolute %s" % (path))
            return [path]
        filespath = d.getVar('FILESPATH')
        if filespath:
            logger.debug2("Searching for %s in paths:\n %s" % (path, "\n ".join(filespath.split(":"))))
            newpath, hist = bb.utils.which(filespath, path, history=True)
            logger.debug2("Using %s for %s" % (newpath, path))
            searched.extend(hist)
        return searched
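The FILESPATH walk above is what resolves plain file:// entries to files shipped next to a recipe; a hedged sketch of the underlying lookup (paths are invented):

    import bb.utils

    # FILESPATH is a colon-separated list of per-recipe search directories
    filespath = "/work/meta-example/recipes-foo/foo/files:/work/meta-example/recipes-foo/foo"
    # history=True returns the chosen path plus every candidate that was tried,
    # which is what localfile_searchpaths()/localpaths() hands back to the checksum code
    newpath, hist = bb.utils.which(filespath, "fix-build.patch", history=True)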
@@ -44,12 +44,9 @@ def npm_package(package):
    """Convert the npm package name to remove unsupported character"""
    # Scoped package names (with the @) use the same naming convention
    # as the 'npm pack' command.
    name = re.sub("/", "-", package)
    name = name.lower()
    name = re.sub(r"[^\-a-z0-9]", "", name)
    name = name.strip("-")
    return name

    if package.startswith("@"):
        return re.sub("/", "-", package[1:])
    return package

def npm_filename(package, version):
    """Get the filename of a npm package"""
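The two npm_package() variants above differ in how scoped names are normalised; tracing the newer implementation on made-up inputs:

    npm_package("@Angular/core")   # -> "angular-core"  ('@' dropped, '/' folded to '-', lowercased)
    npm_package("left-pad")        # -> "left-pad"      (already a valid name, unchanged)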
@@ -106,7 +103,6 @@ class NpmEnvironment(object):
|
||||
"""Run npm command in a controlled environment"""
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
d = bb.data.createCopy(self.d)
|
||||
d.setVar("PATH", d.getVar("PATH")) # PATH might contain $HOME - evaluate it before patching
|
||||
d.setVar("HOME", tmpdir)
|
||||
|
||||
if not workdir:
|
||||
@@ -298,7 +294,6 @@ class Npm(FetchMethod):
|
||||
destsuffix = ud.parm.get("destsuffix", "npm")
|
||||
destdir = os.path.join(rootdir, destsuffix)
|
||||
npm_unpack(ud.localpath, destdir, d)
|
||||
ud.unpack_tracer.unpack("npm", destdir)
|
||||
|
||||
def clean(self, ud, d):
|
||||
"""Clean any existing full or partial download"""
|
||||
|
||||
@@ -41,9 +41,8 @@ def foreach_dependencies(shrinkwrap, callback=None, dev=False):
    with:
        name = the package name (string)
        params = the package parameters (dictionary)
        destdir = the destination of the package (string)
        deptree = the package dependency tree (array of strings)
    """
    # For handling old style dependencies entries in shinkwrap files
    def _walk_deps(deps, deptree):
        for name in deps:
            subtree = [*deptree, name]
@@ -53,22 +52,9 @@ def foreach_dependencies(shrinkwrap, callback=None, dev=False):
                continue
            elif deps[name].get("bundled", False):
                continue
            destsubdirs = [os.path.join("node_modules", dep) for dep in subtree]
            destsuffix = os.path.join(*destsubdirs)
            callback(name, deps[name], destsuffix)
            callback(name, deps[name], subtree)

    # packages entry means new style shrinkwrap file, else use dependencies
    packages = shrinkwrap.get("packages", None)
    if packages is not None:
        for package in packages:
            if package != "":
                name = package.split('node_modules/')[-1]
                package_infos = packages.get(package, {})
                if dev == False and package_infos.get("dev", False):
                    continue
                callback(name, package_infos, package)
    else:
        _walk_deps(shrinkwrap.get("dependencies", {}), [])
    _walk_deps(shrinkwrap.get("dependencies", {}), [])

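To make the traversal above concrete, a trimmed, hypothetical shrinkwrap in the new "packages" style, fed straight to foreach_dependencies():

    shrinkwrap = {
        "packages": {
            "": {"name": "my-app"},
            "node_modules/abbrev": {"version": "1.1.1"},
            "node_modules/abbrev/node_modules/wrappy": {"version": "1.0.2", "dev": True},
        }
    }

    # The walker calls back once per non-empty key; the name is the text after
    # the last "node_modules/", so this prints "abbrev" and then "wrappy".
    foreach_dependencies(shrinkwrap, callback=lambda name, params, dest: print(name), dev=True)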
class NpmShrinkWrap(FetchMethod):
|
||||
"""Class to fetch all package from a shrinkwrap file"""
|
||||
@@ -89,10 +75,12 @@ class NpmShrinkWrap(FetchMethod):
|
||||
# Resolve the dependencies
|
||||
ud.deps = []
|
||||
|
||||
def _resolve_dependency(name, params, destsuffix):
|
||||
def _resolve_dependency(name, params, deptree):
|
||||
url = None
|
||||
localpath = None
|
||||
extrapaths = []
|
||||
destsubdirs = [os.path.join("node_modules", dep) for dep in deptree]
|
||||
destsuffix = os.path.join(*destsubdirs)
|
||||
unpack = True
|
||||
|
||||
integrity = params.get("integrity", None)
|
||||
@@ -191,9 +179,7 @@ class NpmShrinkWrap(FetchMethod):
|
||||
else:
|
||||
raise ParameterError("Unsupported dependency: %s" % name, ud.url)
|
||||
|
||||
# name is needed by unpack tracer for module mapping
|
||||
ud.deps.append({
|
||||
"name": name,
|
||||
"url": url,
|
||||
"localpath": localpath,
|
||||
"extrapaths": extrapaths,
|
||||
@@ -227,15 +213,13 @@ class NpmShrinkWrap(FetchMethod):
|
||||
@staticmethod
|
||||
def _foreach_proxy_method(ud, handle):
|
||||
returns = []
|
||||
#Check if there are dependencies before try to fetch them
|
||||
if len(ud.deps) > 0:
|
||||
for proxy_url in ud.proxy.urls:
|
||||
proxy_ud = ud.proxy.ud[proxy_url]
|
||||
proxy_d = ud.proxy.d
|
||||
proxy_ud.setup_localpath(proxy_d)
|
||||
lf = lockfile(proxy_ud.lockfile)
|
||||
returns.append(handle(proxy_ud.method, proxy_ud, proxy_d))
|
||||
unlockfile(lf)
|
||||
for proxy_url in ud.proxy.urls:
|
||||
proxy_ud = ud.proxy.ud[proxy_url]
|
||||
proxy_d = ud.proxy.d
|
||||
proxy_ud.setup_localpath(proxy_d)
|
||||
lf = lockfile(proxy_ud.lockfile)
|
||||
returns.append(handle(proxy_ud.method, proxy_ud, proxy_d))
|
||||
unlockfile(lf)
|
||||
return returns
|
||||
|
||||
def verify_donestamp(self, ud, d):
|
||||
@@ -272,7 +256,6 @@ class NpmShrinkWrap(FetchMethod):
|
||||
destsuffix = ud.parm.get("destsuffix")
|
||||
if destsuffix:
|
||||
destdir = os.path.join(rootdir, destsuffix)
|
||||
ud.unpack_tracer.unpack("npm-shrinkwrap", destdir)
|
||||
|
||||
bb.utils.mkdirhier(destdir)
|
||||
bb.utils.copyfile(ud.shrinkwrap_file,
|
||||
|
||||
@@ -87,10 +87,7 @@ class Wget(FetchMethod):
|
||||
if not ud.localfile:
|
||||
ud.localfile = d.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", "."))
|
||||
|
||||
self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 100"
|
||||
|
||||
if ud.type == 'ftp' or ud.type == 'ftps':
|
||||
self.basecmd += " --passive-ftp"
|
||||
self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30 --passive-ftp"
|
||||
|
||||
if not self.check_certs(d):
|
||||
self.basecmd += " --no-check-certificate"
|
||||
@@ -108,8 +105,7 @@ class Wget(FetchMethod):
|
||||
|
||||
fetchcmd = self.basecmd
|
||||
|
||||
dldir = os.path.realpath(d.getVar("DL_DIR"))
|
||||
localpath = os.path.join(dldir, ud.localfile) + ".tmp"
|
||||
localpath = os.path.join(d.getVar("DL_DIR"), ud.localfile) + ".tmp"
|
||||
bb.utils.mkdirhier(os.path.dirname(localpath))
|
||||
fetchcmd += " -O %s" % shlex.quote(localpath)
|
||||
|
||||
@@ -129,21 +125,12 @@ class Wget(FetchMethod):
|
||||
uri = ud.url.split(";")[0]
|
||||
if os.path.exists(ud.localpath):
|
||||
# file exists, but we didnt complete it.. trying again..
|
||||
fetchcmd += " -c -P " + dldir + " '" + uri + "'"
|
||||
fetchcmd += d.expand(" -c -P ${DL_DIR} '%s'" % uri)
|
||||
else:
|
||||
fetchcmd += " -P " + dldir + " '" + uri + "'"
|
||||
fetchcmd += d.expand(" -P ${DL_DIR} '%s'" % uri)
|
||||
|
||||
self._runwget(ud, d, fetchcmd, False)
|
||||
|
||||
# Sanity check since wget can pretend it succeed when it didn't
|
||||
# Also, this used to happen if sourceforge sent us to the mirror page
|
||||
if not os.path.exists(localpath):
|
||||
raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, localpath), uri)
|
||||
|
||||
if os.path.getsize(localpath) == 0:
|
||||
os.remove(localpath)
|
||||
raise FetchError("The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right." % (uri), uri)
|
||||
|
||||
# Try and verify any checksum now, meaning if it isn't correct, we don't remove the
|
||||
# original file, which might be a race (imagine two recipes referencing the same
|
||||
# source, one with an incorrect checksum)
|
||||
@@ -153,6 +140,15 @@ class Wget(FetchMethod):
|
||||
# Our lock prevents multiple writers but mirroring code may grab incomplete files
|
||||
os.rename(localpath, localpath[:-4])
|
||||
|
||||
# Sanity check since wget can pretend it succeed when it didn't
|
||||
# Also, this used to happen if sourceforge sent us to the mirror page
|
||||
if not os.path.exists(ud.localpath):
|
||||
raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, ud.localpath), uri)
|
||||
|
||||
if os.path.getsize(ud.localpath) == 0:
|
||||
os.remove(ud.localpath)
|
||||
raise FetchError("The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right." % (uri), uri)
|
||||
|
||||
return True
|
||||
|
||||
def checkstatus(self, fetch, ud, d, try_again=True):
|
||||
@@ -371,7 +367,7 @@ class Wget(FetchMethod):
|
||||
except (FileNotFoundError, netrc.NetrcParseError):
|
||||
pass
|
||||
|
||||
with opener.open(r, timeout=100) as response:
|
||||
with opener.open(r, timeout=30) as response:
|
||||
pass
|
||||
except (urllib.error.URLError, ConnectionResetError, TimeoutError) as e:
|
||||
if try_again:
|
||||
@@ -379,7 +375,7 @@ class Wget(FetchMethod):
|
||||
return self.checkstatus(fetch, ud, d, False)
|
||||
else:
|
||||
# debug for now to avoid spamming the logs in e.g. remote sstate searches
|
||||
logger.debug2("checkstatus() urlopen failed for %s: %s" % (uri,e))
|
||||
logger.debug2("checkstatus() urlopen failed: %s" % e)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
@@ -217,9 +217,7 @@ def create_bitbake_parser():
|
||||
"execution. The SIGNATURE_HANDLER parameter is passed to the "
|
||||
"handler. Two common values are none and printdiff but the handler "
|
||||
"may define more/less. none means only dump the signature, printdiff"
|
||||
" means recursively compare the dumped signature with the most recent"
|
||||
" one in a local build or sstate cache (can be used to find out why tasks re-run"
|
||||
" when that is not expected)")
|
||||
" means compare the dumped signature with the cached one.")
|
||||
|
||||
exec_group.add_argument("--revisions-changed", action="store_true",
|
||||
help="Set the exit code depending on whether upstream floating "
|
||||
|
||||
@@ -234,10 +234,9 @@ class diskMonitor:
                freeInode = st.f_favail

                if minInode and freeInode < minInode:
                    # Some filesystems use dynamic inodes so can't run out.
                    # This is reported by the inode count being 0 (btrfs) or the free
                    # inode count being -1 (cephfs).
                    if st.f_files == 0 or st.f_favail == -1:
                    # Some filesystems use dynamic inodes so can't run out
                    # (e.g. btrfs). This is reported by the inode count being 0.
                    if st.f_files == 0:
                        self.devDict[k][2] = None
                        continue
                    # Always show warning, the self.checked would always be False if the action is WARN

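The inode test above reads two statvfs fields; a quick, illustrative probe (any mounted path works):

    import os

    st = os.statvfs("/tmp")
    print(st.f_files, st.f_favail)   # total inodes, inodes free to unprivileged users
    # btrfs typically reports f_files == 0; the newer check also treats
    # f_favail == -1 (observed on cephfs) as "inodes cannot run out".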
@@ -89,6 +89,10 @@ class BBLogFormatter(logging.Formatter):
|
||||
msg = logging.Formatter.format(self, record)
|
||||
if hasattr(record, 'bb_exc_formatted'):
|
||||
msg += '\n' + ''.join(record.bb_exc_formatted)
|
||||
elif hasattr(record, 'bb_exc_info'):
|
||||
etype, value, tb = record.bb_exc_info
|
||||
formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
|
||||
msg += '\n' + ''.join(formatted)
|
||||
return msg
|
||||
|
||||
def colorize(self, record):
|
||||
@@ -226,7 +230,7 @@ def logger_create(name, output=sys.stderr, level=logging.INFO, preserve_handlers
|
||||
console = logging.StreamHandler(output)
|
||||
console.addFilter(bb.msg.LogFilterShowOnce())
|
||||
format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
|
||||
if color == 'always' or (color == 'auto' and output.isatty() and os.environ.get('NO_COLOR', '') == ''):
|
||||
if color == 'always' or (color == 'auto' and output.isatty()):
|
||||
format.enable_color()
|
||||
console.setFormatter(format)
|
||||
if preserve_handlers:
|
||||
|
||||
@@ -49,32 +49,20 @@ class SkipPackage(SkipRecipe):
|
||||
__mtime_cache = {}
|
||||
def cached_mtime(f):
|
||||
if f not in __mtime_cache:
|
||||
res = os.stat(f)
|
||||
__mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
|
||||
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
|
||||
return __mtime_cache[f]
|
||||
|
||||
def cached_mtime_noerror(f):
|
||||
if f not in __mtime_cache:
|
||||
try:
|
||||
res = os.stat(f)
|
||||
__mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
|
||||
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
|
||||
except OSError:
|
||||
return 0
|
||||
return __mtime_cache[f]
|
||||
|
||||
def check_mtime(f, mtime):
|
||||
try:
|
||||
res = os.stat(f)
|
||||
current_mtime = (res.st_mtime_ns, res.st_size, res.st_ino)
|
||||
__mtime_cache[f] = current_mtime
|
||||
except OSError:
|
||||
current_mtime = 0
|
||||
return current_mtime == mtime
|
||||
|
||||
def update_mtime(f):
|
||||
try:
|
||||
res = os.stat(f)
|
||||
__mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
|
||||
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
|
||||
except OSError:
|
||||
if f in __mtime_cache:
|
||||
del __mtime_cache[f]
|
||||
|
||||
@@ -211,12 +211,10 @@ class ExportFuncsNode(AstNode):

    def eval(self, data):

        sentinel = " # Export function set\n"
        for func in self.n:
            calledfunc = self.classname + "_" + func

            basevar = data.getVar(func, False)
            if basevar and sentinel not in basevar:
            if data.getVar(func, False) and not data.getVarFlag(func, 'export_func', False):
                continue

            if data.getVar(func, False):
@@ -233,11 +231,12 @@ class ExportFuncsNode(AstNode):
                data.setVarFlag(func, "lineno", 1)

            if data.getVarFlag(calledfunc, "python", False):
                data.setVar(func, sentinel + " bb.build.exec_func('" + calledfunc + "', d)\n", parsing=True)
                data.setVar(func, " bb.build.exec_func('" + calledfunc + "', d)\n", parsing=True)
            else:
                if "-" in self.classname:
                    bb.fatal("The classname %s contains a dash character and is calling an sh function %s using EXPORT_FUNCTIONS. Since a dash is illegal in sh function names, this cannot work, please rename the class or don't use EXPORT_FUNCTIONS." % (self.classname, calledfunc))
                data.setVar(func, sentinel + " " + calledfunc + "\n", parsing=True)
                data.setVar(func, " " + calledfunc + "\n", parsing=True)
            data.setVarFlag(func, 'export_func', '1')

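The sentinel/wrapper generation above is what backs EXPORT_FUNCTIONS in class metadata; a hedged sketch of typical usage (class name and function body are invented):

    # myexample.bbclass (hypothetical)
    myexample_do_compile () {
        oe_runmake
    }
    EXPORT_FUNCTIONS do_compile

    # Recipes inheriting myexample get do_compile set to a one-line wrapper
    # that simply calls myexample_do_compile, which they may still override.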
class AddTaskNode(AstNode):
|
||||
def __init__(self, filename, lineno, func, before, after):
|
||||
@@ -314,16 +313,6 @@ class InheritNode(AstNode):
|
||||
def eval(self, data):
|
||||
bb.parse.BBHandler.inherit(self.classes, self.filename, self.lineno, data)
|
||||
|
||||
class InheritDeferredNode(AstNode):
|
||||
def __init__(self, filename, lineno, classes):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
self.inherit = (classes, filename, lineno)
|
||||
|
||||
def eval(self, data):
|
||||
inherits = data.getVar('__BBDEFINHERITS', False) or []
|
||||
inherits.append(self.inherit)
|
||||
data.setVar('__BBDEFINHERITS', inherits)
|
||||
|
||||
def handleInclude(statements, filename, lineno, m, force):
|
||||
statements.append(IncludeNode(filename, lineno, m.group(1), force))
|
||||
|
||||
@@ -374,10 +363,6 @@ def handleInherit(statements, filename, lineno, m):
|
||||
classes = m.group(1)
|
||||
statements.append(InheritNode(filename, lineno, classes))
|
||||
|
||||
def handleInheritDeferred(statements, filename, lineno, m):
|
||||
classes = m.group(1)
|
||||
statements.append(InheritDeferredNode(filename, lineno, classes))
|
||||
|
||||
def runAnonFuncs(d):
|
||||
code = []
|
||||
for funcname in d.getVar("__BBANONFUNCS", False) or []:
|
||||
@@ -444,14 +429,6 @@ def multi_finalize(fn, d):
|
||||
logger.debug("Appending .bbappend file %s to %s", append, fn)
|
||||
bb.parse.BBHandler.handle(append, d, True)
|
||||
|
||||
while True:
|
||||
inherits = d.getVar('__BBDEFINHERITS', False) or []
|
||||
if not inherits:
|
||||
break
|
||||
inherit, filename, lineno = inherits.pop(0)
|
||||
d.setVar('__BBDEFINHERITS', inherits)
|
||||
bb.parse.BBHandler.inherit(inherit, filename, lineno, d, deferred=True)
|
||||
|
||||
onlyfinalise = d.getVar("__ONLYFINALISE", False)
|
||||
|
||||
safe_d = d
|
||||
|
||||
@@ -21,7 +21,6 @@ from .ConfHandler import include, init

__func_start_regexp__ = re.compile(r"(((?P<py>python(?=(\s|\()))|(?P<fr>fakeroot(?=\s)))\s*)*(?P<func>[\w\.\-\+\{\}\$:]+)?\s*\(\s*\)\s*{$" )
__inherit_regexp__ = re.compile(r"inherit\s+(.+)" )
__inherit_def_regexp__ = re.compile(r"inherit_defer\s+(.+)" )
__export_func_regexp__ = re.compile(r"EXPORT_FUNCTIONS\s+(.+)" )
__addtask_regexp__ = re.compile(r"addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
__deltask_regexp__ = re.compile(r"deltask\s+(.+)")
@@ -34,7 +33,6 @@ __infunc__ = []
__inpython__ = False
__body__ = []
__classname__ = ""
__residue__ = []

cached_statements = {}

@@ -42,10 +40,8 @@ def supports(fn, d):
"""Return True if fn has a supported extension"""
return os.path.splitext(fn)[-1] in [".bb", ".bbclass", ".inc"]

def inherit(files, fn, lineno, d, deferred=False):
def inherit(files, fn, lineno, d):
__inherit_cache = d.getVar('__inherit_cache', False) or []
#if "${" in files and not deferred:
# bb.warn("%s:%s has non deferred conditional inherit" % (fn, lineno))
files = d.expand(files).split()
for file in files:
classtype = d.getVar("__bbclasstype", False)
@@ -81,7 +77,7 @@ def inherit(files, fn, lineno, d, deferred=False):
__inherit_cache = d.getVar('__inherit_cache', False) or []

def get_statements(filename, absolute_filename, base_name):
global cached_statements, __residue__, __body__
global cached_statements

try:
return cached_statements[absolute_filename]
@@ -101,11 +97,6 @@ def get_statements(filename, absolute_filename, base_name):
# add a blank line to close out any python definition
feeder(lineno, "", filename, base_name, statements, eof=True)

if __residue__:
raise ParseError("Unparsed lines %s: %s" % (filename, str(__residue__)), filename, lineno)
if __body__:
raise ParseError("Unparsed lines from unclosed function %s: %s" % (filename, str(__body__)), filename, lineno)

if filename.endswith(".bbclass") or filename.endswith(".inc"):
cached_statements[absolute_filename] = statements
return statements
@@ -274,11 +265,6 @@ def feeder(lineno, s, fn, root, statements, eof=False):
ast.handleInherit(statements, fn, lineno, m)
return

m = __inherit_def_regexp__.match(s)
if m:
ast.handleInheritDeferred(statements, fn, lineno, m)
return

return ConfHandler.feeder(lineno, s, fn, statements, conffile=False)

# Add us to the handlers list

@@ -154,7 +154,6 @@ class SQLTable(collections.abc.MutableMapping):

def __exit__(self, *excinfo):
self.connection.__exit__(*excinfo)
self.connection.close()

@_Decorators.retry()
@_Decorators.transaction

@@ -14,7 +14,6 @@ import os
import sys
import stat
import errno
import itertools
import logging
import re
import bb
@@ -158,7 +157,7 @@ class RunQueueScheduler(object):
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
self.stamps[tid] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False)
if tid in self.rq.runq_buildable:
self.buildable.add(tid)
self.buildable.append(tid)

self.rev_prio_map = None
self.is_pressure_usable()
@@ -201,36 +200,19 @@ class RunQueueScheduler(object):
curr_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1]
now = time.time()
tdiff = now - self.prev_pressure_time
psi_accumulation_interval = 1.0
cpu_pressure = (float(curr_cpu_pressure) - float(self.prev_cpu_pressure)) / tdiff
io_pressure = (float(curr_io_pressure) - float(self.prev_io_pressure)) / tdiff
memory_pressure = (float(curr_memory_pressure) - float(self.prev_memory_pressure)) / tdiff
exceeds_cpu_pressure = self.rq.max_cpu_pressure and cpu_pressure > self.rq.max_cpu_pressure
exceeds_io_pressure = self.rq.max_io_pressure and io_pressure > self.rq.max_io_pressure
exceeds_memory_pressure = self.rq.max_memory_pressure and memory_pressure > self.rq.max_memory_pressure

if tdiff > psi_accumulation_interval:
if tdiff > 1.0:
exceeds_cpu_pressure = self.rq.max_cpu_pressure and (float(curr_cpu_pressure) - float(self.prev_cpu_pressure)) / tdiff > self.rq.max_cpu_pressure
exceeds_io_pressure = self.rq.max_io_pressure and (float(curr_io_pressure) - float(self.prev_io_pressure)) / tdiff > self.rq.max_io_pressure
exceeds_memory_pressure = self.rq.max_memory_pressure and (float(curr_memory_pressure) - float(self.prev_memory_pressure)) / tdiff > self.rq.max_memory_pressure
self.prev_cpu_pressure = curr_cpu_pressure
self.prev_io_pressure = curr_io_pressure
self.prev_memory_pressure = curr_memory_pressure
self.prev_pressure_time = now

pressure_state = (exceeds_cpu_pressure, exceeds_io_pressure, exceeds_memory_pressure)
pressure_values = (round(cpu_pressure,1), self.rq.max_cpu_pressure, round(io_pressure,1), self.rq.max_io_pressure, round(memory_pressure,1), self.rq.max_memory_pressure)
if hasattr(self, "pressure_state") and pressure_state != self.pressure_state:
bb.note("Pressure status changed to CPU: %s, IO: %s, Mem: %s (CPU: %s/%s, IO: %s/%s, Mem: %s/%s) - using %s/%s bitbake threads" % (pressure_state + pressure_values + (len(self.rq.runq_running.difference(self.rq.runq_complete)), self.rq.number_tasks)))
self.pressure_state = pressure_state
else:
exceeds_cpu_pressure = self.rq.max_cpu_pressure and (float(curr_cpu_pressure) - float(self.prev_cpu_pressure)) > self.rq.max_cpu_pressure
exceeds_io_pressure = self.rq.max_io_pressure and (float(curr_io_pressure) - float(self.prev_io_pressure)) > self.rq.max_io_pressure
exceeds_memory_pressure = self.rq.max_memory_pressure and (float(curr_memory_pressure) - float(self.prev_memory_pressure)) > self.rq.max_memory_pressure
return (exceeds_cpu_pressure or exceeds_io_pressure or exceeds_memory_pressure)
elif self.rq.max_loadfactor:
limit = False
loadfactor = float(os.getloadavg()[0]) / os.cpu_count()
# bb.warn("Comparing %s to %s" % (loadfactor, self.rq.max_loadfactor))
if loadfactor > self.rq.max_loadfactor:
limit = True
if hasattr(self, "loadfactor_limit") and limit != self.loadfactor_limit:
bb.note("Load average limiting set to %s as load average: %s - using %s/%s bitbake threads" % (limit, loadfactor, len(self.rq.runq_running.difference(self.rq.runq_complete)), self.rq.number_tasks))
self.loadfactor_limit = limit
return limit
return False

def next_buildable_task(self):
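The hunk above changes how the scheduler turns the kernel PSI counters into a throttling decision: the newer side always divides the growth of the stall counter by the elapsed sampling interval, so short intervals no longer overstate pressure. Below is a minimal, illustrative sketch of that rate calculation on its own, outside BitBake. The file format matches /proc/pressure/*, but the function name, the return shape and the one-second accumulation interval are assumptions made for the example, not code from this tree.

```python
import time

def cpu_pressure_rate(prev_total, prev_time, psi_path="/proc/pressure/cpu"):
    """Return (rate, new_total, new_time) from the 'some' line of a PSI file.

    The rate is the growth of the 'total=' stall counter (microseconds of
    stall) divided by the elapsed wall-clock time, which is the quantity the
    scheduler compares against BB_PRESSURE_MAX_CPU in the diff above.
    """
    with open(psi_path) as f:
        # First line looks like: some avg10=0.00 avg60=0.00 avg300=0.00 total=12345
        total = int(f.readline().split()[4].split("=")[1])
    now = time.time()
    tdiff = now - prev_time
    rate = (total - prev_total) / tdiff if tdiff > 0 else 0.0
    return rate, total, now
```

A caller would keep the returned total and timestamp and feed them back into the next sample, only refreshing them once more than the accumulation interval has elapsed, mirroring the `tdiff > psi_accumulation_interval` guard in the hunk.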
@@ -281,11 +263,11 @@ class RunQueueScheduler(object):
best = None
bestprio = None
for tid in buildable:
taskname = taskname_from_tid(tid)
if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
continue
prio = self.rev_prio_map[tid]
if bestprio is None or bestprio > prio:
taskname = taskname_from_tid(tid)
if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
continue
stamp = self.stamps[tid]
if stamp in self.rq.build_stamps.values():
continue
@@ -1015,32 +997,25 @@ class RunQueueData:
# Handle --runall
if self.cooker.configuration.runall:
# re-run the mark_active and then drop unused tasks from new list
reduced_tasklist = set(self.runtaskentries.keys())
for tid in list(self.runtaskentries.keys()):
if tid not in runq_build:
reduced_tasklist.remove(tid)
runq_build = {}

runall_tids = set()
added = True
while added:
reduced_tasklist = set(self.runtaskentries.keys())
for tid in list(self.runtaskentries.keys()):
if tid not in runq_build:
reduced_tasklist.remove(tid)
runq_build = {}

orig = runall_tids
for task in self.cooker.configuration.runall:
if not task.startswith("do_"):
task = "do_{0}".format(task)
runall_tids = set()
for task in self.cooker.configuration.runall:
if not task.startswith("do_"):
task = "do_{0}".format(task)
for tid in reduced_tasklist:
wanttid = "{0}:{1}".format(fn_from_tid(tid), task)
if wanttid in self.runtaskentries:
runall_tids.add(wanttid)
for tid in reduced_tasklist:
wanttid = "{0}:{1}".format(fn_from_tid(tid), task)
if wanttid in self.runtaskentries:
runall_tids.add(wanttid)

for tid in list(runall_tids):
mark_active(tid, 1)
self.target_tids.append(tid)
if self.cooker.configuration.force:
invalidate_task(tid, False)
added = runall_tids - orig
for tid in list(runall_tids):
mark_active(tid, 1)
if self.cooker.configuration.force:
invalidate_task(tid, False)

delcount = set()
for tid in list(self.runtaskentries.keys()):
@@ -1274,41 +1249,27 @@ class RunQueueData:

bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids)

starttime = time.time()
lasttime = starttime

# Iterate over the task list and call into the siggen code
dealtwith = set()
todeal = set(self.runtaskentries)
while todeal:
ready = set()
for tid in todeal.copy():
if not (self.runtaskentries[tid].depends - dealtwith):
self.runtaskentries[tid].taskhash_deps = bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches)
# get_taskhash for a given tid *must* be called before get_unihash* below
self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches)
ready.add(tid)
unihashes = bb.parse.siggen.get_unihashes(ready)
for tid in ready:
dealtwith.add(tid)
todeal.remove(tid)
self.runtaskentries[tid].unihash = unihashes[tid]

bb.event.check_for_interrupts(self.cooker.data)

if time.time() > (lasttime + 30):
lasttime = time.time()
hashequiv_logger.verbose("Initial setup loop progress: %s of %s in %s" % (len(todeal), len(self.runtaskentries), lasttime - starttime))

endtime = time.time()
if (endtime-starttime > 60):
hashequiv_logger.verbose("Initial setup loop took: %s" % (endtime-starttime))
dealtwith.add(tid)
todeal.remove(tid)
self.prepare_task_hash(tid)
bb.event.check_for_interrupts(self.cooker.data)

bb.parse.siggen.writeout_file_checksum_cache()

#self.dump_data()
return len(self.runtaskentries)

def prepare_task_hash(self, tid):
bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches)
self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches)
self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)

def dump_data(self):
"""
Dump some debug information on the internal data structures
@@ -1350,36 +1311,24 @@ class RunQueue:
self.worker = {}
self.fakeworker = {}

@staticmethod
def send_pickled_data(worker, data, name):
msg = bytearray()
msg.extend(b"<" + name.encode() + b">")
pickled_data = pickle.dumps(data)
msg.extend(len(pickled_data).to_bytes(4, 'big'))
msg.extend(pickled_data)
msg.extend(b"</" + name.encode() + b">")
worker.stdin.write(msg)

def _start_worker(self, mc, fakeroot = False, rqexec = None):
logger.debug("Starting bitbake-worker")
magic = "decafbad"
if self.cooker.configuration.profile:
magic = "decafbadbad"
fakerootlogs = None

workerscript = os.path.realpath(os.path.dirname(__file__) + "/../../bin/bitbake-worker")
if fakeroot:
magic = magic + "beef"
mcdata = self.cooker.databuilder.mcdata[mc]
fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD"))
fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
env = os.environ.copy()
for key, value in (var.split('=',1) for var in fakerootenv):
for key, value in (var.split('=') for var in fakerootenv):
env[key] = value
worker = subprocess.Popen(fakerootcmd + [sys.executable, workerscript, magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
fakerootlogs = self.rqdata.dataCaches[mc].fakerootlogs
else:
worker = subprocess.Popen([sys.executable, workerscript, magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
bb.utils.nonblockingfd(worker.stdout)
workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec, fakerootlogs=fakerootlogs)

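The send_pickled_data() helper in the hunk above frames every message to the worker as an opening tag, a 4-byte big-endian length, the pickled payload, then a closing tag. As an illustration only, the sketch below shows how a receiving end could parse one such frame from a byte buffer; the real bitbake-worker has its own parsing loop, and this reader function is an assumption made for the example.

```python
import pickle

def read_framed_message(buf):
    """Parse one '<name>' + 4-byte length + pickle + '</name>' frame as
    produced by RunQueue.send_pickled_data() in the hunk above.

    Returns (name, obj, remaining_bytes), or None if the frame is incomplete.
    """
    if not buf.startswith(b"<"):
        return None
    end_tag = buf.find(b">")
    if end_tag == -1:
        return None
    name = buf[1:end_tag].decode()
    header_end = end_tag + 1
    if len(buf) < header_end + 4:
        return None
    # Length prefix matches len(pickled_data).to_bytes(4, 'big') on the sender.
    length = int.from_bytes(buf[header_end:header_end + 4], "big")
    start = header_end + 4
    closing = b"</" + name.encode() + b">"
    frame_end = start + length
    if len(buf) < frame_end + len(closing):
        return None
    obj = pickle.loads(buf[start:frame_end])
    return name, obj, buf[frame_end + len(closing):]
```

The explicit length prefix is what lets the reader recover message boundaries without scanning the pickled payload for the closing tag.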
@@ -1397,9 +1346,9 @@ class RunQueue:
"umask" : self.cfgData.getVar("BB_DEFAULT_UMASK"),
}

RunQueue.send_pickled_data(worker, self.cooker.configuration, "cookerconfig")
RunQueue.send_pickled_data(worker, self.cooker.extraconfigdata, "extraconfigdata")
RunQueue.send_pickled_data(worker, workerdata, "workerdata")
worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
worker.stdin.flush()

return RunQueueWorker(worker, workerpipe)
@@ -1409,7 +1358,7 @@ class RunQueue:
return
logger.debug("Teardown for bitbake-worker")
try:
RunQueue.send_pickled_data(worker.process, b"", "quit")
worker.process.stdin.write(b"<quit></quit>")
worker.process.stdin.flush()
worker.process.stdin.close()
except IOError:
@@ -1421,12 +1370,12 @@ class RunQueue:
continue
worker.pipe.close()

def start_worker(self, rqexec):
def start_worker(self):
if self.worker:
self.teardown_workers()
self.teardown = False
for mc in self.rqdata.dataCaches:
self.worker[mc] = self._start_worker(mc, False, rqexec)
self.worker[mc] = self._start_worker(mc)

def start_fakeworker(self, rqexec, mc):
if not mc in self.fakeworker:
@@ -1586,9 +1535,6 @@ class RunQueue:
('bb.event.HeartbeatEvent',), data=self.cfgData)
self.dm_event_handler_registered = True

self.rqdata.init_progress_reporter.next_stage()
self.rqexe = RunQueueExecute(self)

dump = self.cooker.configuration.dump_signatures
if dump:
self.rqdata.init_progress_reporter.finish()
@@ -1600,8 +1546,10 @@ class RunQueue:
self.state = runQueueComplete

if self.state is runQueueSceneInit:
self.start_worker(self.rqexe)
self.rqdata.init_progress_reporter.finish()
self.rqdata.init_progress_reporter.next_stage()
self.start_worker()
self.rqdata.init_progress_reporter.next_stage()
self.rqexe = RunQueueExecute(self)

# If we don't have any setscene functions, skip execution
if not self.rqdata.runq_setscene_tids:
@@ -1716,17 +1664,6 @@ class RunQueue:
return

def print_diffscenetasks(self):
def get_root_invalid_tasks(task, taskdepends, valid, noexec, visited_invalid):
invalidtasks = []
for t in taskdepends[task].depends:
if t not in valid and t not in visited_invalid:
invalidtasks.extend(get_root_invalid_tasks(t, taskdepends, valid, noexec, visited_invalid))
visited_invalid.add(t)

direct_invalid = [t for t in taskdepends[task].depends if t not in valid]
if not direct_invalid and task not in noexec:
invalidtasks = [task]
return invalidtasks

noexec = []
tocheck = set()
@@ -1760,49 +1697,46 @@ class RunQueue:
valid_new.add(dep)

invalidtasks = set()
for tid in self.rqdata.runtaskentries:
if tid not in valid_new and tid not in noexec:
invalidtasks.add(tid)

toptasks = set(["{}:{}".format(t[3], t[2]) for t in self.rqdata.targets])
for tid in toptasks:
found = set()
processed = set()
for tid in invalidtasks:
toprocess = set([tid])
while toprocess:
next = set()
visited_invalid = set()
for t in toprocess:
if t not in valid_new and t not in noexec:
invalidtasks.update(get_root_invalid_tasks(t, self.rqdata.runtaskentries, valid_new, noexec, visited_invalid))
continue
if t in self.rqdata.runq_setscene_tids:
for dep in self.rqexe.sqdata.sq_deps[t]:
next.add(dep)
continue

for dep in self.rqdata.runtaskentries[t].depends:
next.add(dep)

if dep in invalidtasks:
found.add(tid)
if dep not in processed:
processed.add(dep)
next.add(dep)
toprocess = next
if tid in found:
toprocess = set()

tasklist = []
for tid in invalidtasks:
for tid in invalidtasks.difference(found):
tasklist.append(tid)

if tasklist:
bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))

return invalidtasks
return invalidtasks.difference(found)

def write_diffscenetasks(self, invalidtasks):
bb.siggen.check_siggen_version(bb.siggen)

# Define recursion callback
def recursecb(key, hash1, hash2):
hashes = [hash1, hash2]
bb.debug(1, "Recursively looking for recipe {} hashes {}".format(key, hashes))
hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)
bb.debug(1, "Found hashfiles:\n{}".format(hashfiles))

recout = []
if len(hashfiles) == 2:
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1]['path'], hashfiles[hash2]['path'], recursecb)
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
recout.extend(list(' ' + l for l in out2))
else:
recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
@@ -1813,25 +1747,20 @@ class RunQueue:
for tid in invalidtasks:
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
h = self.rqdata.runtaskentries[tid].unihash
bb.debug(1, "Looking for recipe {} task {}".format(pn, taskname))
h = self.rqdata.runtaskentries[tid].hash
matches = bb.siggen.find_siginfo(pn, taskname, [], self.cooker.databuilder.mcdata[mc])
bb.debug(1, "Found hashfiles:\n{}".format(matches))
match = None
for m in matches.values():
if h in m['path']:
match = m['path']
for m in matches:
if h in m:
match = m
if match is None:
bb.fatal("Can't find a task we're supposed to have written out? (hash: %s tid: %s)?" % (h, tid))
bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
matches = {k : v for k, v in iter(matches.items()) if h not in k}
matches_local = {k : v for k, v in iter(matches.items()) if h not in k and not v['sstate']}
if matches_local:
matches = matches_local
if matches:
latestmatch = matches[sorted(matches.keys(), key=lambda h: matches[h]['time'])[-1]]['path']
latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
prevh = __find_sha256__.search(latestmatch).group(0)
output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, most recent matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))
bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))


class RunQueueExecute:
@@ -1847,7 +1776,6 @@ class RunQueueExecute:
self.max_cpu_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_CPU")
self.max_io_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_IO")
self.max_memory_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_MEMORY")
self.max_loadfactor = self.cfgData.getVar("BB_LOADFACTOR_MAX")

self.sq_buildable = set()
self.sq_running = set()
@@ -1865,8 +1793,6 @@ class RunQueueExecute:
self.build_stamps2 = []
self.failed_tids = []
self.sq_deferred = {}
self.sq_needed_harddeps = set()
self.sq_harddep_deferred = set()

self.stampcache = {}

@@ -1876,6 +1802,11 @@ class RunQueueExecute:

self.stats = RunQueueStats(len(self.rqdata.runtaskentries), len(self.rqdata.runq_setscene_tids))

for mc in rq.worker:
rq.worker[mc].pipe.setrunqueueexec(self)
for mc in rq.fakeworker:
rq.fakeworker[mc].pipe.setrunqueueexec(self)

if self.number_tasks <= 0:
bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)

@@ -1901,11 +1832,6 @@ class RunQueueExecute:
bb.fatal("Invalid BB_PRESSURE_MAX_MEMORY %s, minimum value is %s." % (self.max_memory_pressure, lower_limit))
if self.max_memory_pressure > upper_limit:
bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_MEMORY is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure))

if self.max_loadfactor:
self.max_loadfactor = float(self.max_loadfactor)
if self.max_loadfactor <= 0:
bb.fatal("Invalid BB_LOADFACTOR_MAX %s, needs to be greater than zero." % (self.max_loadfactor))

# List of setscene tasks which we've covered
self.scenequeue_covered = set()
@@ -1916,6 +1842,11 @@ class RunQueueExecute:
self.tasks_notcovered = set()
self.scenequeue_notneeded = set()

# We can't skip specified target tasks which aren't setscene tasks
self.cantskip = set(self.rqdata.target_tids)
self.cantskip.difference_update(self.rqdata.runq_setscene_tids)
self.cantskip.intersection_update(self.rqdata.runtaskentries)

schedulers = self.get_schedulers()
for scheduler in schedulers:
if self.scheduler == scheduler.name:
@@ -1928,25 +1859,7 @@ class RunQueueExecute:

#if self.rqdata.runq_setscene_tids:
self.sqdata = SQData()
build_scenequeue_data(self.sqdata, self.rqdata, self)

update_scenequeue_data(self.sqdata.sq_revdeps, self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=True)

# Compute a list of 'stale' sstate tasks where the current hash does not match the one
# in any stamp files. Pass the list out to metadata as an event.
found = {}
for tid in self.rqdata.runq_setscene_tids:
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
stamps = bb.build.find_stale_stamps(taskname, taskfn)
if stamps:
if mc not in found:
found[mc] = {}
found[mc][tid] = stamps
for mc in found:
event = bb.event.StaleSetSceneTasks(found[mc])
bb.event.fire(event, self.cooker.databuilder.mcdata[mc])

self.build_taskdepdata_cache()
build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)

def runqueue_process_waitpid(self, task, status, fakerootlog=None):

@@ -1972,14 +1885,14 @@ class RunQueueExecute:
def finish_now(self):
for mc in self.rq.worker:
try:
RunQueue.send_pickled_data(self.rq.worker[mc].process, b"", "finishnow")
self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
self.rq.worker[mc].process.stdin.flush()
except IOError:
# worker must have died?
pass
for mc in self.rq.fakeworker:
try:
RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, b"", "finishnow")
self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
self.rq.fakeworker[mc].process.stdin.flush()
except IOError:
# worker must have died?
@@ -2078,19 +1991,11 @@ class RunQueueExecute:
self.setbuildable(revdep)
logger.debug("Marking task %s as buildable", revdep)

found = None
for t in sorted(self.sq_deferred.copy()):
for t in self.sq_deferred.copy():
if self.sq_deferred[t] == task:
# Allow the next deferred task to run. Any other deferred tasks should be deferred after that task.
# We shouldn't allow all to run at once as it is prone to races.
if not found:
bb.debug(1, "Deferred task %s now buildable" % t)
del self.sq_deferred[t]
update_scenequeue_data([t], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
found = t
else:
bb.debug(1, "Deferring %s after %s" % (t, found))
self.sq_deferred[t] = found
logger.debug2("Deferred task %s now buildable" % t)
del self.sq_deferred[t]
update_scenequeue_data([t], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)

def task_complete(self, task):
self.stats.taskCompleted()
@@ -2190,24 +2095,13 @@ class RunQueueExecute:
if not hasattr(self, "sorted_setscene_tids"):
# Don't want to sort this set every execution
self.sorted_setscene_tids = sorted(self.rqdata.runq_setscene_tids)
# Resume looping where we left off when we returned to feed the mainloop
self.setscene_tids_generator = itertools.cycle(self.rqdata.runq_setscene_tids)

task = None
if not self.sqdone and self.can_start_task():
loopcount = 0
# Find the next setscene to run, exit the loop when we've processed all tids or found something to execute
while loopcount < len(self.rqdata.runq_setscene_tids):
loopcount += 1
nexttask = next(self.setscene_tids_generator)
if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values() and nexttask not in self.sq_harddep_deferred:
if nexttask in self.sq_deferred and self.sq_deferred[nexttask] not in self.runq_complete:
# Skip deferred tasks quickly before the 'expensive' tests below - this is key to performant multiconfig builds
continue
if nexttask not in self.sqdata.unskippable and self.sqdata.sq_revdeps[nexttask] and \
nexttask not in self.sq_needed_harddeps and \
self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and \
self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
# Find the next setscene to run
for nexttask in self.sorted_setscene_tids:
if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
if nexttask not in self.sqdata.unskippable and self.sqdata.sq_revdeps[nexttask] and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
if nexttask not in self.rqdata.target_tids:
logger.debug2("Skipping setscene for task %s" % nexttask)
self.sq_task_skip(nexttask)
@@ -2215,25 +2109,13 @@ class RunQueueExecute:
if nexttask in self.sq_deferred:
del self.sq_deferred[nexttask]
return True
if nexttask in self.sqdata.sq_harddeps_rev and not self.sqdata.sq_harddeps_rev[nexttask].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
logger.debug2("Deferring %s due to hard dependencies" % nexttask)
updated = False
for dep in self.sqdata.sq_harddeps_rev[nexttask]:
if dep not in self.sq_needed_harddeps:
logger.debug2("Enabling task %s as it is a hard dependency" % dep)
self.sq_buildable.add(dep)
self.sq_needed_harddeps.add(dep)
updated = True
self.sq_harddep_deferred.add(nexttask)
if updated:
return True
continue
# If covered tasks are running, need to wait for them to complete
for t in self.sqdata.sq_covered_tasks[nexttask]:
if t in self.runq_running and t not in self.runq_complete:
continue
if nexttask in self.sq_deferred:
# Deferred tasks that were still deferred were skipped above so we now need to process
if self.sq_deferred[nexttask] not in self.runq_complete:
continue
logger.debug("Task %s no longer deferred" % nexttask)
del self.sq_deferred[nexttask]
valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False, summary=False)
@@ -2276,7 +2158,6 @@ class RunQueueExecute:
bb.event.fire(startevent, self.cfgData)

taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
realfn = bb.cache.virtualfn2realfn(taskfn)[0]
runtask = {
'fn' : taskfn,
'task' : task,
@@ -2285,7 +2166,6 @@ class RunQueueExecute:
'unihash' : self.rqdata.get_task_unihash(task),
'quieterrors' : True,
'appends' : self.cooker.collections[mc].get_file_appends(taskfn),
'layername' : self.cooker.collections[mc].calc_bbfile_priority(realfn)[2],
'taskdepdata' : self.sq_build_taskdepdata(task),
'dry_run' : False,
'taskdep': taskdep,
@@ -2297,10 +2177,10 @@ class RunQueueExecute:
if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
if not mc in self.rq.fakeworker:
self.rq.start_fakeworker(self, mc)
RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, runtask, "runtask")
self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps(runtask) + b"</runtask>")
self.rq.fakeworker[mc].process.stdin.flush()
else:
RunQueue.send_pickled_data(self.rq.worker[mc].process, runtask, "runtask")
self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps(runtask) + b"</runtask>")
self.rq.worker[mc].process.stdin.flush()

self.build_stamps[task] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False)
@@ -2371,7 +2251,6 @@ class RunQueueExecute:
bb.event.fire(startevent, self.cfgData)

taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
realfn = bb.cache.virtualfn2realfn(taskfn)[0]
runtask = {
'fn' : taskfn,
'task' : task,
@@ -2380,7 +2259,6 @@ class RunQueueExecute:
'unihash' : self.rqdata.get_task_unihash(task),
'quieterrors' : False,
'appends' : self.cooker.collections[mc].get_file_appends(taskfn),
'layername' : self.cooker.collections[mc].calc_bbfile_priority(realfn)[2],
'taskdepdata' : self.build_taskdepdata(task),
'dry_run' : self.rqdata.setscene_enforce,
'taskdep': taskdep,
@@ -2398,10 +2276,10 @@ class RunQueueExecute:
self.rq.state = runQueueFailed
self.stats.taskFailed()
return True
RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, runtask, "runtask")
self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps(runtask) + b"</runtask>")
self.rq.fakeworker[mc].process.stdin.flush()
else:
RunQueue.send_pickled_data(self.rq.worker[mc].process, runtask, "runtask")
self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps(runtask) + b"</runtask>")
self.rq.worker[mc].process.stdin.flush()

self.build_stamps[task] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False)
@@ -2455,25 +2333,6 @@ class RunQueueExecute:
ret.add(dep)
return ret

# Build the individual cache entries in advance once to save time
def build_taskdepdata_cache(self):
taskdepdata_cache = {}
for task in self.rqdata.runtaskentries:
(mc, fn, taskname, taskfn) = split_tid_mcfn(task)
taskdepdata_cache[task] = bb.TaskData(
pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn],
taskname = taskname,
fn = fn,
deps = self.filtermcdeps(task, mc, self.rqdata.runtaskentries[task].depends),
provides = self.rqdata.dataCaches[mc].fn_provides[taskfn],
taskhash = self.rqdata.runtaskentries[task].hash,
unihash = self.rqdata.runtaskentries[task].unihash,
hashfn = self.rqdata.dataCaches[mc].hashfn[taskfn],
taskhash_deps = self.rqdata.runtaskentries[task].taskhash_deps,
)

self.taskdepdata_cache = taskdepdata_cache

# We filter out multiconfig dependencies from taskdepdata we pass to the tasks
# as most code can't handle them
def build_taskdepdata(self, task):
@@ -2485,11 +2344,15 @@ class RunQueueExecute:
while next:
additional = []
for revdep in next:
self.taskdepdata_cache[revdep] = self.taskdepdata_cache[revdep]._replace(
unihash=self.rqdata.runtaskentries[revdep].unihash
)
taskdepdata[revdep] = self.taskdepdata_cache[revdep]
for revdep2 in self.taskdepdata_cache[revdep].deps:
(mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
deps = self.rqdata.runtaskentries[revdep].depends
provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
taskhash = self.rqdata.runtaskentries[revdep].hash
unihash = self.rqdata.runtaskentries[revdep].unihash
deps = self.filtermcdeps(task, mc, deps)
taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
for revdep2 in deps:
if revdep2 not in taskdepdata:
additional.append(revdep2)
next = additional
@@ -2503,7 +2366,7 @@ class RunQueueExecute:
return

notcovered = set(self.scenequeue_notcovered)
notcovered |= self.sqdata.cantskip
notcovered |= self.cantskip
for tid in self.scenequeue_notcovered:
notcovered |= self.sqdata.sq_covered_tasks[tid]
notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids)
@@ -2583,28 +2446,17 @@ class RunQueueExecute:
elif self.rqdata.runtaskentries[p].depends.isdisjoint(total):
next.add(p)

starttime = time.time()
lasttime = starttime

# When an item doesn't have dependencies in total, we can process it. Drop items from total when handled
while next:
current = next.copy()
next = set()
ready = {}
for tid in current:
if self.rqdata.runtaskentries[p].depends and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
continue
# get_taskhash for a given tid *must* be called before get_unihash* below
ready[tid] = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, self.rqdata.dataCaches)

unihashes = bb.parse.siggen.get_unihashes(ready.keys())

for tid in ready:
orighash = self.rqdata.runtaskentries[tid].hash
newhash = ready[tid]
newhash = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, self.rqdata.dataCaches)
origuni = self.rqdata.runtaskentries[tid].unihash
newuni = unihashes[tid]

newuni = bb.parse.siggen.get_unihash(tid)
# FIXME, need to check it can come from sstate at all for determinism?
remapped = False
if newuni == origuni:
@@ -2625,21 +2477,12 @@ class RunQueueExecute:
next |= self.rqdata.runtaskentries[tid].revdeps
total.remove(tid)
next.intersection_update(total)
bb.event.check_for_interrupts(self.cooker.data)

if time.time() > (lasttime + 30):
lasttime = time.time()
hashequiv_logger.verbose("Rehash loop slow progress: %s in %s" % (len(total), lasttime - starttime))

endtime = time.time()
if (endtime-starttime > 60):
hashequiv_logger.verbose("Rehash loop took more than 60s: %s" % (endtime-starttime))

if changed:
for mc in self.rq.worker:
RunQueue.send_pickled_data(self.rq.worker[mc].process, bb.parse.siggen.get_taskhashes(), "newtaskhashes")
self.rq.worker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
for mc in self.rq.fakeworker:
RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, bb.parse.siggen.get_taskhashes(), "newtaskhashes")
self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")

hashequiv_logger.debug(pprint.pformat("Tasks changed:\n%s" % (changed)))

@@ -2709,8 +2552,8 @@ class RunQueueExecute:
update_tasks2 = []
for tid in update_tasks:
harddepfail = False
for t in self.sqdata.sq_harddeps_rev[tid]:
if t in self.scenequeue_notcovered:
for t in self.sqdata.sq_harddeps:
if tid in self.sqdata.sq_harddeps[t] and t in self.scenequeue_notcovered:
harddepfail = True
break
if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
@@ -2742,14 +2585,12 @@ class RunQueueExecute:

if changed:
self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered))
self.sq_needed_harddeps = set()
self.sq_harddep_deferred = set()
self.holdoff_need_update = True

def scenequeue_updatecounters(self, task, fail=False):

if fail and task in self.sqdata.sq_harddeps:
for dep in sorted(self.sqdata.sq_harddeps[task]):
for dep in sorted(self.sqdata.sq_deps[task]):
if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]:
if dep in self.scenequeue_covered or dep in self.scenequeue_notcovered:
# dependency could be already processed, e.g. noexec setscene task
continue
@@ -2759,12 +2600,7 @@ class RunQueueExecute:
logger.debug2("%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
self.sq_task_failoutright(dep)
continue

# For performance, only compute allcovered once if needed
if self.sqdata.sq_deps[task]:
allcovered = self.scenequeue_covered | self.scenequeue_notcovered
for dep in sorted(self.sqdata.sq_deps[task]):
if self.sqdata.sq_revdeps[dep].issubset(allcovered):
if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
if dep not in self.sq_buildable:
self.sq_buildable.add(dep)

@@ -2782,13 +2618,6 @@ class RunQueueExecute:
new.add(dep)
next = new

# If this task was one which other setscene tasks have a hard dependency upon, we need
# to walk through the hard dependencies and allow execution of those which have completed dependencies.
if task in self.sqdata.sq_harddeps:
for dep in self.sq_harddep_deferred.copy():
if self.sqdata.sq_harddeps_rev[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
self.sq_harddep_deferred.remove(dep)

self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered))
self.holdoff_need_update = True

@@ -2857,19 +2686,12 @@ class RunQueueExecute:
additional = []
for revdep in next:
(mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
deps = getsetscenedeps(revdep)

taskdepdata[revdep] = bb.TaskData(
pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn],
taskname = taskname,
fn = fn,
deps = deps,
provides = self.rqdata.dataCaches[mc].fn_provides[taskfn],
taskhash = self.rqdata.runtaskentries[revdep].hash,
unihash = self.rqdata.runtaskentries[revdep].unihash,
hashfn = self.rqdata.dataCaches[mc].hashfn[taskfn],
taskhash_deps = self.rqdata.runtaskentries[revdep].taskhash_deps,
)
provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
taskhash = self.rqdata.runtaskentries[revdep].hash
unihash = self.rqdata.runtaskentries[revdep].unihash
taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
for revdep2 in deps:
if revdep2 not in taskdepdata:
additional.append(revdep2)
@@ -2913,7 +2735,6 @@ class SQData(object):
self.sq_revdeps = {}
# Injected inter-setscene task dependencies
self.sq_harddeps = {}
self.sq_harddeps_rev = {}
# Cache of stamp files so duplicates can't run in parallel
self.stamps = {}
# Setscene tasks directly depended upon by the build
@@ -2923,17 +2744,12 @@ class SQData(object):
# A list of normal tasks a setscene task covers
self.sq_covered_tasks = {}

def build_scenequeue_data(sqdata, rqdata, sqrq):
def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):

sq_revdeps = {}
sq_revdeps_squash = {}
sq_collated_deps = {}

# We can't skip specified target tasks which aren't setscene tasks
sqdata.cantskip = set(rqdata.target_tids)
sqdata.cantskip.difference_update(rqdata.runq_setscene_tids)
sqdata.cantskip.intersection_update(rqdata.runtaskentries)

# We need to construct a dependency graph for the setscene functions. Intermediate
# dependencies between the setscene tasks only complicate the code. This code
# therefore aims to collapse the huge runqueue dependency tree into a smaller one
@@ -3002,7 +2818,7 @@ def build_scenequeue_data(sqdata, rqdata, sqrq):
for tid in rqdata.runtaskentries:
if not rqdata.runtaskentries[tid].revdeps:
sqdata.unskippable.add(tid)
sqdata.unskippable |= sqdata.cantskip
sqdata.unskippable |= sqrq.cantskip
while new:
new = False
orig = sqdata.unskippable.copy()
@@ -3041,7 +2857,6 @@ def build_scenequeue_data(sqdata, rqdata, sqrq):
idepends = rqdata.taskData[mc].taskentries[realtid].idepends
sqdata.stamps[tid] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False)

sqdata.sq_harddeps_rev[tid] = set()
for (depname, idependtask) in idepends:

if depname not in rqdata.taskData[mc].build_targets:
@@ -3054,15 +2869,20 @@ def build_scenequeue_data(sqdata, rqdata, sqrq):
if deptid not in rqdata.runtaskentries:
bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))

logger.debug2("Adding hard setscene dependency %s for %s" % (deptid, tid))

if not deptid in sqdata.sq_harddeps:
sqdata.sq_harddeps[deptid] = set()
sqdata.sq_harddeps[deptid].add(tid)
sqdata.sq_harddeps_rev[tid].add(deptid)

sq_revdeps_squash[tid].add(deptid)
# Have to zero this to avoid circular dependencies
sq_revdeps_squash[deptid] = set()

rqdata.init_progress_reporter.next_stage()

for task in sqdata.sq_harddeps:
for dep in sqdata.sq_harddeps[task]:
sq_revdeps_squash[dep].add(task)

rqdata.init_progress_reporter.next_stage()

#for tid in sq_revdeps_squash:
@@ -3089,7 +2909,7 @@ def build_scenequeue_data(sqdata, rqdata, sqrq):
if not sqdata.sq_revdeps[tid]:
sqrq.sq_buildable.add(tid)

rqdata.init_progress_reporter.next_stage()
rqdata.init_progress_reporter.finish()

sqdata.noexec = set()
sqdata.stamppresent = set()
@@ -3106,7 +2926,23 @@ def build_scenequeue_data(sqdata, rqdata, sqrq):
sqdata.hashes[h] = tid
else:
sqrq.sq_deferred[tid] = sqdata.hashes[h]
bb.debug(1, "Deferring %s after %s" % (tid, sqdata.hashes[h]))
bb.note("Deferring %s after %s" % (tid, sqdata.hashes[h]))

update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True)

# Compute a list of 'stale' sstate tasks where the current hash does not match the one
# in any stamp files. Pass the list out to metadata as an event.
found = {}
for tid in rqdata.runq_setscene_tids:
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
stamps = bb.build.find_stale_stamps(taskname, taskfn)
if stamps:
if mc not in found:
found[mc] = {}
found[mc][tid] = stamps
for mc in found:
event = bb.event.StaleSetSceneTasks(found[mc])
bb.event.fire(event, cooker.databuilder.mcdata[mc])

def check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=False):

@@ -3302,12 +3138,15 @@ class runQueuePipe():
if pipeout:
pipeout.close()
bb.utils.nonblockingfd(self.input)
self.queue = bytearray()
self.queue = b""
self.d = d
self.rq = rq
self.rqexec = rqexec
self.fakerootlogs = fakerootlogs

def setrunqueueexec(self, rqexec):
self.rqexec = rqexec

def read(self):
for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
for worker in workers.values():
@@ -3318,7 +3157,7 @@ class runQueuePipe():

start = len(self.queue)
try:
self.queue.extend(self.input.read(102400) or b"")
self.queue = self.queue + (self.input.read(102400) or b"")
except (OSError, IOError) as e:
if e.errno != errno.EAGAIN:
raise

@@ -38,13 +38,9 @@ logger = logging.getLogger('BitBake')
class ProcessTimeout(SystemExit):
pass

def currenttime():
return datetime.datetime.now().strftime('%H:%M:%S.%f')

def serverlog(msg):
print(str(os.getpid()) + " " + currenttime() + " " + msg)
#Seems a flush here triggers filesytem sync like behaviour and long hangs in the server
#sys.stdout.flush()
print(str(os.getpid()) + " " + datetime.datetime.now().strftime('%H:%M:%S.%f') + " " + msg)
sys.stdout.flush()

#
# When we have lockfile issues, try and find infomation about which process is
@@ -293,9 +289,7 @@ class ProcessServer():
continue
try:
serverlog("Running command %s" % command)
reply = self.cooker.command.runCommand(command, self)
serverlog("Sending reply %s" % repr(reply))
self.command_channel_reply.send(reply)
self.command_channel_reply.send(self.cooker.command.runCommand(command, self))
serverlog("Command Completed (socket: %s)" % os.path.exists(self.sockname))
except Exception as e:
stack = traceback.format_exc()
@@ -381,7 +375,7 @@ class ProcessServer():
lock = bb.utils.lockfile(lockfile, shared=False, retry=False, block=False)
if not lock:
newlockcontents = get_lock_contents(lockfile)
if not newlockcontents[0].startswith([f"{os.getpid()}\n", f"{os.getpid()} "]):
if not newlockcontents[0].startswith([os.getpid() + "\n", os.getpid() + " "]):
# A new server was started, the lockfile contents changed, we can exit
serverlog("Lockfile now contains different contents, exiting: " + str(newlockcontents))
return
@@ -402,22 +396,6 @@ class ProcessServer():
serverlog("".join(msg))

def idle_thread(self):
if self.cooker.configuration.profile:
try:
import cProfile as profile
except:
import profile
prof = profile.Profile()

ret = profile.Profile.runcall(prof, self.idle_thread_internal)

prof.dump_stats("profile-mainloop.log")
bb.utils.process_profilelog("profile-mainloop.log")
serverlog("Raw profiling information saved to profile-mainloop.log and processed statistics to profile-mainloop.log.processed")
else:
self.idle_thread_internal()

def idle_thread_internal(self):
def remove_idle_func(function):
with bb.utils.lock_timeout(self._idlefuncsLock):
del self._idlefuns[function]
@@ -427,6 +405,12 @@ class ProcessServer():
nextsleep = 0.1
fds = []

try:
self.cooker.process_inotify_updates()
except Exception as exc:
serverlog("Exception %s in inofify updates broke the idle_thread, exiting" % traceback.format_exc())
self.quit = True

with bb.utils.lock_timeout(self._idlefuncsLock):
items = list(self._idlefuns.items())

@@ -516,18 +500,12 @@ class ServerCommunicator():
self.recv = recv

def runCommand(self, command):
try:
self.connection.send(command)
except BrokenPipeError as e:
raise BrokenPipeError("bitbake-server might have died or been forcibly stopped, ie. OOM killed") from e
self.connection.send(command)
if not self.recv.poll(30):
logger.info("No reply from server in 30s (for command %s at %s)" % (command[0], currenttime()))
logger.info("No reply from server in 30s")
if not self.recv.poll(30):
raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server (60s at %s)" % currenttime())
try:
ret, exc = self.recv.get()
except EOFError as e:
raise EOFError("bitbake-server might have died or been forcibly stopped, ie. OOM killed") from e
raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server (60s)")
ret, exc = self.recv.get()
# Should probably turn all exceptions in exc back into exceptions?
# For now, at least handle BBHandledException
if exc and ("BBHandledException" in exc or "SystemExit" in exc):
@@ -642,7 +620,7 @@ class BitBakeServer(object):
os.set_inheritable(self.bitbake_lock.fileno(), True)
os.set_inheritable(self.readypipein, True)
serverscript = os.path.realpath(os.path.dirname(__file__) + "/../../../bin/bitbake-server")
os.execl(sys.executable, sys.executable, serverscript, "decafbad", str(self.bitbake_lock.fileno()), str(self.readypipein), self.logfile, self.bitbake_lock.name, self.sockname, str(self.server_timeout or 0), str(int(self.profile)), str(self.xmlrpcinterface[0]), str(self.xmlrpcinterface[1]))
os.execl(sys.executable, "bitbake-server", serverscript, "decafbad", str(self.bitbake_lock.fileno()), str(self.readypipein), self.logfile, self.bitbake_lock.name, self.sockname, str(self.server_timeout or 0), str(int(self.profile)), str(self.xmlrpcinterface[0]), str(self.xmlrpcinterface[1]))

def execServer(lockfd, readypipeinfd, lockname, sockname, server_timeout, xmlrpcinterface, profile):

@@ -882,10 +860,11 @@ class ConnectionWriter(object):
process.queue_signals = True
self._send(obj)
process.queue_signals = False

while len(process.signal_received) > 0:
sig = process.signal_received.pop()
process.handle_sig(sig, None)
try:
for sig in process.signal_received.pop():
process.handle_sig(sig, None)
except IndexError:
pass
else:
self._send(obj)


@@ -15,7 +15,6 @@ import difflib
import simplediff
import json
import types
from contextlib import contextmanager
import bb.compress.zstd
from bb.checksum import FileChecksumCache
from bb import runqueue
@@ -25,24 +24,6 @@ import hashserv.client
logger = logging.getLogger('BitBake.SigGen')
hashequiv_logger = logging.getLogger('BitBake.SigGen.HashEquiv')

#find_siginfo and find_siginfo_version are set by the metadata siggen
# The minimum version of the find_siginfo function we need
find_siginfo_minversion = 2

HASHSERV_ENVVARS = [
"SSL_CERT_DIR",
"SSL_CERT_FILE",
"NO_PROXY",
"HTTPS_PROXY",
"HTTP_PROXY"
]

def check_siggen_version(siggen):
if not hasattr(siggen, "find_siginfo_version"):
bb.fatal("Siggen from metadata (OE-Core?) is too old, please update it (no version found)")
if siggen.find_siginfo_version < siggen.find_siginfo_minversion:
bb.fatal("Siggen from metadata (OE-Core?) is too old, please update it (%s vs %s)" % (siggen.find_siginfo_version, siggen.find_siginfo_minversion))

class SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set) or isinstance(obj, frozenset):
@@ -111,18 +92,9 @@ class SignatureGenerator(object):
if flag:
self.datacaches[mc].stamp_extrainfo[mcfn][t] = flag

def get_cached_unihash(self, tid):
return None

def get_unihash(self, tid):
unihash = self.get_cached_unihash(tid)
if unihash:
return unihash
return self.taskhash[tid]

def get_unihashes(self, tids):
return {tid: self.get_unihash(tid) for tid in tids}

def prep_taskhash(self, tid, deps, dataCaches):
return

@@ -210,11 +182,6 @@ class SignatureGenerator(object):
def exit(self):
return

def build_pnid(mc, pn, taskname):
if mc:
return "mc:" + mc + ":" + pn + ":" + taskname
return pn + ":" + taskname

class SignatureGeneratorBasic(SignatureGenerator):
"""
"""
@@ -289,6 +256,10 @@ class SignatureGeneratorBasic(SignatureGenerator):
bb.warn("Error during finalise of %s" % mcfn)
raise

#Slow but can be useful for debugging mismatched basehashes
#for task in self.taskdeps[mcfn]:
# self.dump_sigtask(mcfn, task, d.getVar("STAMP"), False)

basehashes = {}
for task in taskdeps:
basehashes[task] = self.basehash[mcfn + ":" + task]
@@ -298,11 +269,6 @@ class SignatureGeneratorBasic(SignatureGenerator):
d.setVar("__siggen_varvals", lookupcache)
d.setVar("__siggen_taskdeps", taskdeps)

#Slow but can be useful for debugging mismatched basehashes
#self.setup_datacache_from_datastore(mcfn, d)
#for task in taskdeps:
# self.dump_sigtask(mcfn, task, d.getVar("STAMP"), False)

def setup_datacache_from_datastore(self, mcfn, d):
super().setup_datacache_from_datastore(mcfn, d)

@@ -343,19 +309,15 @@ class SignatureGeneratorBasic(SignatureGenerator):
recipename = dataCaches[mc].pkg_fn[mcfn]

self.tidtopn[tid] = recipename
# save hashfn for deps into siginfo?
for dep in deps:
(depmc, _, deptask, depmcfn) = bb.runqueue.split_tid_mcfn(dep)
dep_pn = dataCaches[depmc].pkg_fn[depmcfn]

if not self.rundep_check(mcfn, recipename, task, dep, dep_pn, dataCaches):
for dep in sorted(deps, key=clean_basepath):
(depmc, _, _, depmcfn) = bb.runqueue.split_tid_mcfn(dep)
depname = dataCaches[depmc].pkg_fn[depmcfn]
if not self.rundep_check(mcfn, recipename, task, dep, depname, dataCaches):
continue

if dep not in self.taskhash:
bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?" % dep)

dep_pnid = build_pnid(depmc, dep_pn, deptask)
self.runtaskdeps[tid].append((dep_pnid, dep))
self.runtaskdeps[tid].append(dep)

if task in dataCaches[mc].file_checksums[mcfn]:
if self.checksum_cache:
@@ -381,15 +343,15 @@ class SignatureGeneratorBasic(SignatureGenerator):
self.taints[tid] = taint
logger.warning("%s is tainted from a forced run" % tid)

return set(dep for _, dep in self.runtaskdeps[tid])
return

def get_taskhash(self, tid, deps, dataCaches):

data = self.basehash[tid]
for dep in sorted(self.runtaskdeps[tid]):
data += self.get_unihash(dep[1])
for dep in self.runtaskdeps[tid]:
data += self.get_unihash(dep)

for (f, cs) in sorted(self.file_checksum_values[tid], key=clean_checksum_file_path):
for (f, cs) in self.file_checksum_values[tid]:
if cs:
if "/./" in f:
data += "./" + f.split("/./")[1]
@@ -447,21 +409,21 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
data['varvals'][task] = self.datacaches[mc].siggen_varvals[mcfn][task]
|
||||
for dep in self.datacaches[mc].siggen_taskdeps[mcfn][task]:
|
||||
if dep in self.basehash_ignore_vars:
|
||||
continue
|
||||
continue
|
||||
data['gendeps'][dep] = self.datacaches[mc].siggen_gendeps[mcfn][dep]
|
||||
data['varvals'][dep] = self.datacaches[mc].siggen_varvals[mcfn][dep]
|
||||
|
||||
if runtime and tid in self.taskhash:
|
||||
data['runtaskdeps'] = [dep[0] for dep in sorted(self.runtaskdeps[tid])]
|
||||
data['runtaskdeps'] = self.runtaskdeps[tid]
|
||||
data['file_checksum_values'] = []
|
||||
for f,cs in sorted(self.file_checksum_values[tid], key=clean_checksum_file_path):
|
||||
for f,cs in self.file_checksum_values[tid]:
|
||||
if "/./" in f:
|
||||
data['file_checksum_values'].append(("./" + f.split("/./")[1], cs))
|
||||
else:
|
||||
data['file_checksum_values'].append((os.path.basename(f), cs))
|
||||
data['runtaskhashes'] = {}
|
||||
for dep in self.runtaskdeps[tid]:
|
||||
data['runtaskhashes'][dep[0]] = self.get_unihash(dep[1])
|
||||
for dep in data['runtaskdeps']:
|
||||
data['runtaskhashes'][dep] = self.get_unihash(dep)
|
||||
data['taskhash'] = self.taskhash[tid]
|
||||
data['unihash'] = self.get_unihash(tid)
|
||||
|
||||
@@ -539,86 +501,32 @@ class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
|
||||
class SignatureGeneratorUniHashMixIn(object):
|
||||
def __init__(self, data):
|
||||
self.extramethod = {}
|
||||
# NOTE: The cache only tracks hashes that exist. Hashes that don't
|
||||
# exist are always queries from the server since it is possible for
|
||||
# hashes to appear over time, but much less likely for them to
|
||||
# disappear
|
||||
self.unihash_exists_cache = set()
|
||||
self.username = None
|
||||
self.password = None
|
||||
self.env = {}
|
||||
|
||||
origenv = data.getVar("BB_ORIGENV")
|
||||
for e in HASHSERV_ENVVARS:
|
||||
value = data.getVar(e)
|
||||
if not value and origenv:
|
||||
value = origenv.getVar(e)
|
||||
if value:
|
||||
self.env[e] = value
|
||||
super().__init__(data)
|
||||
|
||||
def get_taskdata(self):
|
||||
return (self.server, self.method, self.extramethod, self.max_parallel, self.username, self.password, self.env) + super().get_taskdata()
|
||||
return (self.server, self.method, self.extramethod) + super().get_taskdata()
|
||||
|
||||
def set_taskdata(self, data):
|
||||
self.server, self.method, self.extramethod, self.max_parallel, self.username, self.password, self.env = data[:7]
|
||||
super().set_taskdata(data[7:])
|
||||
self.server, self.method, self.extramethod = data[:3]
|
||||
super().set_taskdata(data[3:])
|
||||
|
||||
def get_hashserv_creds(self):
|
||||
if self.username and self.password:
|
||||
return {
|
||||
"username": self.username,
|
||||
"password": self.password,
|
||||
}
|
||||
|
||||
return {}
|
||||
|
||||
@contextmanager
|
||||
def _client_env(self):
|
||||
orig_env = os.environ.copy()
|
||||
try:
|
||||
for k, v in self.env.items():
|
||||
os.environ[k] = v
|
||||
|
||||
yield
|
||||
finally:
|
||||
for k, v in self.env.items():
|
||||
if k in orig_env:
|
||||
os.environ[k] = orig_env[k]
|
||||
else:
|
||||
del os.environ[k]
|
||||
|
||||
@contextmanager
|
||||
def client(self):
|
||||
with self._client_env():
|
||||
if getattr(self, '_client', None) is None:
|
||||
self._client = hashserv.create_client(self.server, **self.get_hashserv_creds())
|
||||
yield self._client
|
||||
|
||||
@contextmanager
|
||||
def client_pool(self):
|
||||
with self._client_env():
|
||||
if getattr(self, '_client_pool', None) is None:
|
||||
self._client_pool = hashserv.client.ClientPool(self.server, self.max_parallel, **self.get_hashserv_creds())
|
||||
yield self._client_pool
|
||||
if getattr(self, '_client', None) is None:
|
||||
self._client = hashserv.create_client(self.server)
|
||||
return self._client
|
||||
|
||||
def reset(self, data):
|
||||
self.__close_clients()
|
||||
if getattr(self, '_client', None) is not None:
|
||||
self._client.close()
|
||||
self._client = None
|
||||
return super().reset(data)
|
||||
|
||||
def exit(self):
|
||||
self.__close_clients()
|
||||
if getattr(self, '_client', None) is not None:
|
||||
self._client.close()
|
||||
self._client = None
|
||||
return super().exit()
|
||||
|
||||
def __close_clients(self):
|
||||
with self._client_env():
|
||||
if getattr(self, '_client', None) is not None:
|
||||
self._client.close()
|
||||
self._client = None
|
||||
if getattr(self, '_client_pool', None) is not None:
|
||||
self._client_pool.close()
|
||||
self._client_pool = None
|
||||
|
||||
def get_stampfile_hash(self, tid):
|
||||
if tid in self.taskhash:
|
||||
# If a unique hash is reported, use it as the stampfile hash. This
|
||||
@@ -650,7 +558,7 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
return None
|
||||
return unihash
|
||||
|
||||
def get_cached_unihash(self, tid):
|
||||
def get_unihash(self, tid):
|
||||
taskhash = self.taskhash[tid]
|
||||
|
||||
# If its not a setscene task we can return
|
||||
@@ -665,108 +573,40 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
self.unihash[tid] = unihash
|
||||
return unihash
|
||||
|
||||
return None
|
||||
# In the absence of being able to discover a unique hash from the
|
||||
# server, make it be equivalent to the taskhash. The unique "hash" only
|
||||
# really needs to be a unique string (not even necessarily a hash), but
|
||||
# making it match the taskhash has a few advantages:
|
||||
#
|
||||
# 1) All of the sstate code that assumes hashes can be the same
|
||||
# 2) It provides maximal compatibility with builders that don't use
|
||||
# an equivalency server
|
||||
# 3) The value is easy for multiple independent builders to derive the
|
||||
# same unique hash from the same input. This means that if the
|
||||
# independent builders find the same taskhash, but it isn't reported
|
||||
# to the server, there is a better chance that they will agree on
|
||||
# the unique hash.
|
||||
unihash = taskhash
|
||||
|
||||
def _get_method(self, tid):
|
||||
method = self.method
|
||||
if tid in self.extramethod:
|
||||
method = method + self.extramethod[tid]
|
||||
|
||||
return method
|
||||
|
||||
def unihashes_exist(self, query):
|
||||
if len(query) == 0:
|
||||
return {}
|
||||
|
||||
uncached_query = {}
|
||||
result = {}
|
||||
for key, unihash in query.items():
|
||||
if unihash in self.unihash_exists_cache:
|
||||
result[key] = True
|
||||
else:
|
||||
uncached_query[key] = unihash
|
||||
|
||||
if self.max_parallel <= 1 or len(uncached_query) <= 1:
|
||||
# No parallelism required. Make the query serially with the single client
|
||||
with self.client() as client:
|
||||
uncached_result = {
|
||||
key: client.unihash_exists(value) for key, value in uncached_query.items()
|
||||
}
|
||||
else:
|
||||
with self.client_pool() as client_pool:
|
||||
uncached_result = client_pool.unihashes_exist(uncached_query)
|
||||
|
||||
for key, exists in uncached_result.items():
|
||||
if exists:
|
||||
self.unihash_exists_cache.add(query[key])
|
||||
result[key] = exists
|
||||
|
||||
return result
|
||||
|
||||
def get_unihash(self, tid):
|
||||
return self.get_unihashes([tid])[tid]
|
||||
|
||||
def get_unihashes(self, tids):
|
||||
"""
|
||||
For a iterable of tids, returns a dictionary that maps each tid to a
|
||||
unihash
|
||||
"""
|
||||
result = {}
|
||||
queries = {}
|
||||
query_result = {}
|
||||
|
||||
for tid in tids:
|
||||
unihash = self.get_cached_unihash(tid)
|
||||
if unihash:
|
||||
result[tid] = unihash
|
||||
else:
|
||||
queries[tid] = (self._get_method(tid), self.taskhash[tid])
|
||||
|
||||
if len(queries) == 0:
|
||||
return result
|
||||
|
||||
if self.max_parallel <= 1 or len(queries) <= 1:
|
||||
# No parallelism required. Make the query using a single client
|
||||
with self.client() as client:
|
||||
keys = list(queries.keys())
|
||||
unihashes = client.get_unihash_batch(queries[k] for k in keys)
|
||||
|
||||
for idx, k in enumerate(keys):
|
||||
query_result[k] = unihashes[idx]
|
||||
else:
|
||||
with self.client_pool() as client_pool:
|
||||
query_result = client_pool.get_unihashes(queries)
|
||||
|
||||
for tid, unihash in query_result.items():
|
||||
# In the absence of being able to discover a unique hash from the
|
||||
# server, make it be equivalent to the taskhash. The unique "hash" only
|
||||
# really needs to be a unique string (not even necessarily a hash), but
|
||||
# making it match the taskhash has a few advantages:
|
||||
#
|
||||
# 1) All of the sstate code that assumes hashes can be the same
|
||||
# 2) It provides maximal compatibility with builders that don't use
|
||||
# an equivalency server
|
||||
# 3) The value is easy for multiple independent builders to derive the
|
||||
# same unique hash from the same input. This means that if the
|
||||
# independent builders find the same taskhash, but it isn't reported
|
||||
# to the server, there is a better chance that they will agree on
|
||||
# the unique hash.
|
||||
taskhash = self.taskhash[tid]
|
||||
if unihash:
|
||||
try:
|
||||
method = self.method
|
||||
if tid in self.extramethod:
|
||||
method = method + self.extramethod[tid]
|
||||
data = self.client().get_unihash(method, self.taskhash[tid])
|
||||
if data:
|
||||
unihash = data
|
||||
# A unique hash equal to the taskhash is not very interesting,
|
||||
# so it is reported it at debug level 2. If they differ, that
|
||||
# is much more interesting, so it is reported at debug level 1
|
||||
hashequiv_logger.bbdebug((1, 2)[unihash == taskhash], 'Found unihash %s in place of %s for %s from %s' % (unihash, taskhash, tid, self.server))
|
||||
else:
|
||||
hashequiv_logger.debug2('No reported unihash for %s:%s from %s' % (tid, taskhash, self.server))
|
||||
unihash = taskhash
|
||||
except ConnectionError as e:
|
||||
bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
|
||||
|
||||
|
||||
self.set_unihash(tid, unihash)
|
||||
self.unihash[tid] = unihash
|
||||
result[tid] = unihash
return result
self.set_unihash(tid, unihash)
self.unihash[tid] = unihash
return unihash
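Illustrative sketch (not part of the diff above) of the fallback described in the comments in this hunk: when the hash equivalence server reports no equivalent hash, the taskhash itself is used as the unique hash. The `client` and `method` names below are stand-ins for the objects used in this class, not a fixed API.

```python
# Minimal sketch, assuming a client object with a get_unihash(method, taskhash)
# call as used above; returns the server-reported unihash if there is one,
# otherwise falls back to the taskhash itself.
def resolve_unihash(client, method, taskhash):
    reported = client.get_unihash(method, taskhash)  # may be None or empty
    return reported if reported else taskhash
```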
def report_unihash(self, path, task, d):
|
||||
import importlib
|
||||
@@ -830,9 +670,7 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
if tid in self.extramethod:
|
||||
method = method + self.extramethod[tid]
|
||||
|
||||
with self.client() as client:
|
||||
data = client.report_unihash(taskhash, method, outhash, unihash, extra_data)
|
||||
|
||||
data = self.client().report_unihash(taskhash, method, outhash, unihash, extra_data)
|
||||
new_unihash = data['unihash']
|
||||
|
||||
if new_unihash != unihash:
|
||||
@@ -863,9 +701,7 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
if tid in self.extramethod:
|
||||
method = method + self.extramethod[tid]
|
||||
|
||||
with self.client() as client:
|
||||
data = client.report_unihash_equiv(taskhash, method, wanted_unihash, extra_data)
|
||||
|
||||
data = self.client().report_unihash_equiv(taskhash, method, wanted_unihash, extra_data)
|
||||
hashequiv_logger.verbose('Reported task %s as unihash %s to %s (%s)' % (tid, wanted_unihash, self.server, str(data)))
|
||||
|
||||
if data is None:
|
||||
@@ -898,13 +734,6 @@ class SignatureGeneratorTestEquivHash(SignatureGeneratorUniHashMixIn, SignatureG
|
||||
super().init_rundepcheck(data)
|
||||
self.server = data.getVar('BB_HASHSERVE')
|
||||
self.method = "sstate_output_hash"
|
||||
self.max_parallel = 1
|
||||
|
||||
def clean_checksum_file_path(file_checksum_tuple):
f, cs = file_checksum_tuple
if "/./" in f:
return "./" + f.split("/./")[1]
return f
def dump_this_task(outfile, d):
import bb.parse
@@ -964,6 +793,39 @@ def list_inline_diff(oldlist, newlist, colors=None):
ret.append(item)
return '[%s]' % (', '.join(ret))
def clean_basepath(basepath):
basepath, dir, recipe_task = basepath.rsplit("/", 2)
cleaned = dir + '/' + recipe_task
if basepath[0] == '/':
return cleaned
if basepath.startswith("mc:") and basepath.count(':') >= 2:
mc, mc_name, basepath = basepath.split(":", 2)
mc_suffix = ':mc:' + mc_name
else:
mc_suffix = ''
# mc stuff now removed from basepath. Whatever was next, if present will be the first
# suffix. ':/', recipe path start, marks the end of this. Something like
# 'virtual:a[:b[:c]]:/path...' (b and c being optional)
if basepath[0] != '/':
cleaned += ':' + basepath.split(':/', 1)[0]
return cleaned + mc_suffix
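Illustrative mapping (not from the diff above) for clean_basepath() as defined in this hunk: it reduces a full "recipe file:task" path to "directory/recipe:task" and re-appends any "virtual:..." class prefix or "mc:<name>:" multiconfig prefix as a suffix, which keeps signature dumps readable. The paths below are made up for illustration.

```python
# Hypothetical inputs and the outputs clean_basepath() (defined above) would produce.
clean_basepath("/work/meta/recipes-core/busybox/busybox_1.36.bb:do_fetch")
# -> "busybox/busybox_1.36.bb:do_fetch"
clean_basepath("virtual:native:/work/meta/recipes-core/busybox/busybox_1.36.bb:do_fetch")
# -> "busybox/busybox_1.36.bb:do_fetch:virtual:native"
clean_basepath("mc:mymc:/work/meta/recipes-core/busybox/busybox_1.36.bb:do_fetch")
# -> "busybox/busybox_1.36.bb:do_fetch:mc:mymc"
```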
def clean_basepaths(a):
b = {}
for x in a:
b[clean_basepath(x)] = a[x]
return b
def clean_basepaths_list(a):
b = []
for x in a:
b.append(clean_basepath(x))
return b
# Handled renamed fields
def handle_renames(data):
if 'basewhitelist' in data:
@@ -994,18 +856,10 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
formatparams.update(values)
|
||||
return formatstr.format(**formatparams)
|
||||
|
||||
try:
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
except (TypeError, OSError) as err:
|
||||
bb.error("Failed to open sigdata file '%s': %s" % (a, str(err)))
|
||||
raise err
|
||||
try:
|
||||
with bb.compress.zstd.open(b, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
b_data = json.load(f, object_hook=SetDecoder)
|
||||
except (TypeError, OSError) as err:
|
||||
bb.error("Failed to open sigdata file '%s': %s" % (b, str(err)))
|
||||
raise err
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
with bb.compress.zstd.open(b, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
b_data = json.load(f, object_hook=SetDecoder)
|
||||
|
||||
for data in [a_data, b_data]:
|
||||
handle_renames(data)
|
||||
@@ -1140,11 +994,11 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
a = a_data['runtaskdeps'][idx]
|
||||
b = b_data['runtaskdeps'][idx]
|
||||
if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b] and not collapsed:
|
||||
changed.append("%s with hash %s\n changed to\n%s with hash %s" % (a, a_data['runtaskhashes'][a], b, b_data['runtaskhashes'][b]))
|
||||
changed.append("%s with hash %s\n changed to\n%s with hash %s" % (clean_basepath(a), a_data['runtaskhashes'][a], clean_basepath(b), b_data['runtaskhashes'][b]))
|
||||
|
||||
if changed:
|
||||
clean_a = a_data['runtaskdeps']
|
||||
clean_b = b_data['runtaskdeps']
|
||||
clean_a = clean_basepaths_list(a_data['runtaskdeps'])
|
||||
clean_b = clean_basepaths_list(b_data['runtaskdeps'])
|
||||
if clean_a != clean_b:
|
||||
output.append(color_format("{color_title}runtaskdeps changed:{color_default}\n%s") % list_inline_diff(clean_a, clean_b, colors))
|
||||
else:
|
||||
@@ -1153,8 +1007,8 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
|
||||
|
||||
if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data:
|
||||
a = a_data['runtaskhashes']
|
||||
b = b_data['runtaskhashes']
|
||||
a = clean_basepaths(a_data['runtaskhashes'])
|
||||
b = clean_basepaths(b_data['runtaskhashes'])
|
||||
changed, added, removed = dict_diff(a, b)
|
||||
if added:
|
||||
for dep in sorted(added):
|
||||
@@ -1243,12 +1097,8 @@ def calc_taskhash(sigdata):
|
||||
def dump_sigfile(a):
|
||||
output = []
|
||||
|
||||
try:
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
except (TypeError, OSError) as err:
|
||||
bb.error("Failed to open sigdata file '%s': %s" % (a, str(err)))
|
||||
raise err
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
|
||||
handle_renames(a_data)
|
||||
|
||||
|
||||
@@ -44,7 +44,6 @@ class VariableReferenceTest(ReferenceTest):
|
||||
def parseExpression(self, exp):
|
||||
parsedvar = self.d.expandWithRefs(exp, None)
|
||||
self.references = parsedvar.references
|
||||
self.execs = parsedvar.execs
|
||||
|
||||
def test_simple_reference(self):
|
||||
self.setEmptyVars(["FOO"])
|
||||
@@ -62,11 +61,6 @@ class VariableReferenceTest(ReferenceTest):
|
||||
self.parseExpression("${@d.getVar('BAR') + 'foo'}")
|
||||
self.assertReferences(set(["BAR"]))
|
||||
|
||||
def test_python_exec_reference(self):
|
||||
self.parseExpression("${@eval('3 * 5')}")
|
||||
self.assertReferences(set())
|
||||
self.assertExecs(set(["eval"]))
|
||||
|
||||
class ShellReferenceTest(ReferenceTest):
|
||||
|
||||
def parseExpression(self, exp):
|
||||
@@ -436,37 +430,11 @@ esac
|
||||
self.assertEqual(deps, set(["TESTVAR2"]))
|
||||
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval3', 'anothervalue'])
|
||||
|
||||
def test_contains_vardeps_override_operators(self):
|
||||
# Check override operators handle dependencies correctly with the contains functionality
|
||||
expr_plain = 'testval'
|
||||
expr_prepend = '${@bb.utils.filter("TESTVAR1", "testval1", d)} '
|
||||
expr_append = ' ${@bb.utils.filter("TESTVAR2", "testval2", d)}'
|
||||
expr_remove = '${@bb.utils.contains("TESTVAR3", "no-testval", "testval", "", d)}'
|
||||
# Check dependencies
|
||||
self.d.setVar('ANOTHERVAR', expr_plain)
|
||||
self.d.prependVar('ANOTHERVAR', expr_prepend)
|
||||
self.d.appendVar('ANOTHERVAR', expr_append)
|
||||
self.d.setVar('ANOTHERVAR:remove', expr_remove)
|
||||
self.d.setVar('TESTVAR1', 'blah')
|
||||
self.d.setVar('TESTVAR2', 'testval2')
|
||||
self.d.setVar('TESTVAR3', 'no-testval')
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
|
||||
self.assertEqual(sorted(values.splitlines()),
|
||||
sorted([
|
||||
expr_prepend + expr_plain + expr_append,
|
||||
'_remove of ' + expr_remove,
|
||||
'TESTVAR1{testval1} = Unset',
|
||||
'TESTVAR2{testval2} = Set',
|
||||
'TESTVAR3{no-testval} = Set',
|
||||
]))
|
||||
# Check final value
|
||||
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval2'])
|
||||
|
||||
#Currently no wildcard support
|
||||
#def test_vardeps_wildcards(self):
|
||||
# self.d.setVar("oe_libinstall", "echo test")
|
||||
# self.d.setVar("FOO", "foo=oe_libinstall; eval $foo")
|
||||
# self.d.setVarFlag("FOO", "vardeps", "oe_*")
|
||||
# self.assertEqual(deps, set(["oe_libinstall"]))
|
||||
# self.assertEquals(deps, set(["oe_libinstall"]))
|
||||
|
||||
|
||||
|
||||
@@ -77,18 +77,6 @@ class DataExpansions(unittest.TestCase):
|
||||
val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
|
||||
self.assertEqual(str(val), "value_of_foo value_of_bar")
|
||||
|
||||
def test_python_snippet_function_reference(self):
|
||||
self.d.setVar("TESTVAL", "testvalue")
|
||||
self.d.setVar("testfunc", 'd.getVar("TESTVAL")')
|
||||
context = bb.utils.get_context()
|
||||
context["testfunc"] = lambda d: d.getVar("TESTVAL")
|
||||
val = self.d.expand("${@testfunc(d)}")
|
||||
self.assertEqual(str(val), "testvalue")
|
||||
|
||||
def test_python_snippet_builtin_metadata(self):
|
||||
self.d.setVar("eval", "INVALID")
|
||||
self.d.expand("${@eval('3')}")
|
||||
|
||||
def test_python_unexpanded(self):
|
||||
self.d.setVar("bar", "${unsetvar}")
|
||||
val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
|
||||
@@ -395,16 +383,6 @@ class TestOverrides(unittest.TestCase):
|
||||
self.d.setVar("OVERRIDES", "foo:bar:some_val")
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue3")
|
||||
|
||||
# Test an override with _<numeric> in it based on a real world OE issue
|
||||
def test_underscore_override_2(self):
|
||||
self.d.setVar("TARGET_ARCH", "x86_64")
|
||||
self.d.setVar("PN", "test-${TARGET_ARCH}")
|
||||
self.d.setVar("VERSION", "1")
|
||||
self.d.setVar("VERSION:pn-test-${TARGET_ARCH}", "2")
|
||||
self.d.setVar("OVERRIDES", "pn-${PN}")
|
||||
bb.data.expandKeys(self.d)
|
||||
self.assertEqual(self.d.getVar("VERSION"), "2")
|
||||
|
||||
def test_remove_with_override(self):
|
||||
self.d.setVar("TEST:bar", "testvalue2")
|
||||
self.d.setVar("TEST:some_val", "testvalue3 testvalue5")
|
||||
@@ -426,6 +404,16 @@ class TestOverrides(unittest.TestCase):
|
||||
self.d.setVar("TEST:bar:append", "testvalue2")
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue2")
|
||||
|
||||
# Test an override with _<numeric> in it based on a real world OE issue
|
||||
def test_underscore_override(self):
|
||||
self.d.setVar("TARGET_ARCH", "x86_64")
|
||||
self.d.setVar("PN", "test-${TARGET_ARCH}")
|
||||
self.d.setVar("VERSION", "1")
|
||||
self.d.setVar("VERSION:pn-test-${TARGET_ARCH}", "2")
|
||||
self.d.setVar("OVERRIDES", "pn-${PN}")
|
||||
bb.data.expandKeys(self.d)
|
||||
self.assertEqual(self.d.getVar("VERSION"), "2")
|
||||
|
||||
def test_append_and_unused_override(self):
|
||||
# Had a bug where an unused override append could return "" instead of None
|
||||
self.d.setVar("BAR:append:unusedoverride", "testvalue2")
|
||||
|
||||
@@ -13,7 +13,6 @@ import pickle
|
||||
import threading
|
||||
import time
|
||||
import unittest
|
||||
import tempfile
|
||||
from unittest.mock import Mock
|
||||
from unittest.mock import call
|
||||
|
||||
@@ -469,8 +468,6 @@ class EventClassesTest(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
bb.event.worker_pid = EventClassesTest._worker_pid
|
||||
self.d = bb.data.init()
|
||||
bb.parse.siggen = bb.siggen.init(self.d)
|
||||
|
||||
def test_Event(self):
|
||||
""" Test the Event base class """
|
||||
@@ -953,24 +950,3 @@ class EventClassesTest(unittest.TestCase):
|
||||
event = bb.event.FindSigInfoResult(result)
|
||||
self.assertEqual(event.result, result)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_lineno_in_eventhandler(self):
|
||||
# The error lineno is 5, not 4 since the first line is '\n'
|
||||
error_line = """
|
||||
# Comment line1
|
||||
# Comment line2
|
||||
python test_lineno_in_eventhandler() {
|
||||
This is an error line
|
||||
}
|
||||
addhandler test_lineno_in_eventhandler
|
||||
test_lineno_in_eventhandler[eventmask] = "bb.event.ConfigParsed"
|
||||
"""
|
||||
|
||||
with self.assertLogs() as logs:
|
||||
f = tempfile.NamedTemporaryFile(suffix = '.bb')
|
||||
f.write(bytes(error_line, "utf-8"))
|
||||
f.flush()
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
output = "".join(logs.output)
|
||||
self.assertTrue(" line 5\n" in output)
|
||||
|
||||
@@ -6,7 +6,6 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import contextlib
|
||||
import unittest
|
||||
import hashlib
|
||||
import tempfile
|
||||
@@ -25,8 +24,7 @@ def skipIfNoNetwork():
|
||||
return lambda f: f
|
||||
|
||||
class TestTimeout(Exception):
|
||||
# Indicate to pytest that this is not a test suite
|
||||
__test__ = False
|
||||
pass
|
||||
|
||||
class Timeout():
|
||||
|
||||
@@ -308,21 +306,6 @@ class URITest(unittest.TestCase):
|
||||
'params': {"someparam" : "1"},
|
||||
'query': {},
|
||||
'relative': True
|
||||
},
|
||||
"https://www.innodisk.com/Download_file?9BE0BF6657;downloadfilename=EGPL-T101.zip": {
|
||||
'uri': 'https://www.innodisk.com/Download_file?9BE0BF6657;downloadfilename=EGPL-T101.zip',
|
||||
'scheme': 'https',
|
||||
'hostname': 'www.innodisk.com',
|
||||
'port': None,
|
||||
'hostport': 'www.innodisk.com',
|
||||
'path': '/Download_file',
|
||||
'userinfo': '',
|
||||
'userinfo': '',
|
||||
'username': '',
|
||||
'password': '',
|
||||
'params': {"downloadfilename" : "EGPL-T101.zip"},
|
||||
'query': {"9BE0BF6657": None},
|
||||
'relative': False
|
||||
}
|
||||
|
||||
}
|
||||
@@ -432,28 +415,18 @@ class FetcherTest(unittest.TestCase):
|
||||
|
||||
def git(self, cmd, cwd=None):
|
||||
if isinstance(cmd, str):
|
||||
cmd = 'git -c safe.bareRepository=all ' + cmd
|
||||
cmd = 'git ' + cmd
|
||||
else:
|
||||
cmd = ['git', '-c', 'safe.bareRepository=all'] + cmd
|
||||
cmd = ['git'] + cmd
|
||||
if cwd is None:
|
||||
cwd = self.gitdir
|
||||
return bb.process.run(cmd, cwd=cwd)[0]
|
||||
|
||||
def git_init(self, cwd=None):
|
||||
self.git('init', cwd=cwd)
|
||||
# Explicitly set initial branch to master as
|
||||
# a common setup is to use other default
|
||||
# branch than master.
|
||||
self.git(['checkout', '-b', 'master'], cwd=cwd)
|
||||
|
||||
try:
|
||||
self.git(['config', 'user.email'], cwd=cwd)
|
||||
except bb.process.ExecutionError:
|
||||
if not self.git(['config', 'user.email'], cwd=cwd):
|
||||
self.git(['config', 'user.email', 'you@example.com'], cwd=cwd)
|
||||
|
||||
try:
|
||||
self.git(['config', 'user.name'], cwd=cwd)
|
||||
except bb.process.ExecutionError:
|
||||
if not self.git(['config', 'user.name'], cwd=cwd):
|
||||
self.git(['config', 'user.name', 'Your Name'], cwd=cwd)
|
||||
|
||||
class MirrorUriTest(FetcherTest):
|
||||
@@ -562,7 +535,7 @@ class MirrorUriTest(FetcherTest):
|
||||
class GitDownloadDirectoryNamingTest(FetcherTest):
|
||||
def setUp(self):
|
||||
super(GitDownloadDirectoryNamingTest, self).setUp()
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake;branch=master;protocol=https"
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake;branch=master"
|
||||
self.recipe_dir = "git.openembedded.org.bitbake"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git;protocol=https;branch=master"
|
||||
self.mirror_dir = "github.com.openembedded.bitbake.git"
|
||||
@@ -610,7 +583,7 @@ class GitDownloadDirectoryNamingTest(FetcherTest):
|
||||
class TarballNamingTest(FetcherTest):
|
||||
def setUp(self):
|
||||
super(TarballNamingTest, self).setUp()
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake;branch=master;protocol=https"
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake;branch=master"
|
||||
self.recipe_tarball = "git2_git.openembedded.org.bitbake.tar.gz"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git;protocol=https;branch=master"
|
||||
self.mirror_tarball = "git2_github.com.openembedded.bitbake.git.tar.gz"
|
||||
@@ -644,7 +617,7 @@ class TarballNamingTest(FetcherTest):
|
||||
class GitShallowTarballNamingTest(FetcherTest):
|
||||
def setUp(self):
|
||||
super(GitShallowTarballNamingTest, self).setUp()
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake;branch=master;protocol=https"
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake;branch=master"
|
||||
self.recipe_tarball = "gitshallow_git.openembedded.org.bitbake_82ea737-1_master.tar.gz"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git;protocol=https;branch=master"
|
||||
self.mirror_tarball = "gitshallow_github.com.openembedded.bitbake.git_82ea737-1_master.tar.gz"
|
||||
@@ -679,7 +652,7 @@ class GitShallowTarballNamingTest(FetcherTest):
|
||||
class CleanTarballTest(FetcherTest):
|
||||
def setUp(self):
|
||||
super(CleanTarballTest, self).setUp()
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake;protocol=https"
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake"
|
||||
self.recipe_tarball = "git2_git.openembedded.org.bitbake.tar.gz"
|
||||
|
||||
self.d.setVar('BB_GENERATE_MIRROR_TARBALLS', '1')
|
||||
@@ -700,13 +673,11 @@ class CleanTarballTest(FetcherTest):
|
||||
archive = tarfile.open(os.path.join(self.dldir, self.recipe_tarball))
|
||||
self.assertNotEqual(len(archive.members), 0)
|
||||
for member in archive.members:
|
||||
if member.name == ".":
|
||||
continue
|
||||
self.assertEqual(member.uname, 'oe', "user name for %s differs" % member.name)
|
||||
self.assertEqual(member.uid, 0, "uid for %s differs" % member.name)
|
||||
self.assertEqual(member.gname, 'oe', "group name for %s differs" % member.name)
|
||||
self.assertEqual(member.gid, 0, "gid for %s differs" % member.name)
|
||||
self.assertEqual(member.mtime, mtime, "mtime for %s differs" % member.name)
|
||||
self.assertEqual(member.uname, 'oe')
|
||||
self.assertEqual(member.uid, 0)
|
||||
self.assertEqual(member.gname, 'oe')
|
||||
self.assertEqual(member.gid, 0)
|
||||
self.assertEqual(member.mtime, mtime)
|
||||
|
||||
|
||||
class FetcherLocalTest(FetcherTest):
|
||||
@@ -1040,25 +1011,25 @@ class FetcherNetworkTest(FetcherTest):
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch(self):
|
||||
url1 = url2 = "git://git.openembedded.org/bitbake;branch=master;protocol=https"
|
||||
url1 = url2 = "git://git.openembedded.org/bitbake;branch=master"
|
||||
self.gitfetcher(url1, url2)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_goodsrcrev(self):
|
||||
# SRCREV is set but matches rev= parameter
|
||||
url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5;branch=master;protocol=https"
|
||||
url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5;branch=master"
|
||||
self.gitfetcher(url1, url2)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_badsrcrev(self):
|
||||
# SRCREV is set but does not match rev= parameter
|
||||
url1 = url2 = "git://git.openembedded.org/bitbake;rev=dead05b0b4ba0959fe0624d2a4885d7b70426da5;branch=master;protocol=https"
|
||||
url1 = url2 = "git://git.openembedded.org/bitbake;rev=dead05b0b4ba0959fe0624d2a4885d7b70426da5;branch=master"
|
||||
self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_tagandrev(self):
|
||||
# SRCREV is set but does not match rev= parameter
|
||||
url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5;tag=270a05b0b4ba0959fe0624d2a4885d7b70426da5;protocol=https"
|
||||
url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5;tag=270a05b0b4ba0959fe0624d2a4885d7b70426da5"
|
||||
self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
@@ -1067,7 +1038,7 @@ class FetcherNetworkTest(FetcherTest):
|
||||
# `usehead=1' and instead fetch the specified SRCREV. See
|
||||
# test_local_gitfetch_usehead() for a positive use of the usehead
|
||||
# feature.
|
||||
url = "git://git.openembedded.org/bitbake;usehead=1;branch=master;protocol=https"
|
||||
url = "git://git.openembedded.org/bitbake;usehead=1;branch=master"
|
||||
self.assertRaises(bb.fetch.ParameterError, self.gitfetcher, url, url)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
@@ -1076,26 +1047,26 @@ class FetcherNetworkTest(FetcherTest):
|
||||
# `usehead=1' and instead fetch the specified SRCREV. See
|
||||
# test_local_gitfetch_usehead() for a positive use of the usehead
|
||||
# feature.
|
||||
url = "git://git.openembedded.org/bitbake;usehead=1;name=newName;branch=master;protocol=https"
|
||||
url = "git://git.openembedded.org/bitbake;usehead=1;name=newName;branch=master"
|
||||
self.assertRaises(bb.fetch.ParameterError, self.gitfetcher, url, url)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_finds_local_tarball_for_mirrored_url_when_previous_downloaded_by_the_recipe_url(self):
|
||||
recipeurl = "git://git.openembedded.org/bitbake;branch=master;protocol=https"
|
||||
mirrorurl = "git://someserver.org/bitbake;branch=master;protocol=https"
|
||||
recipeurl = "git://git.openembedded.org/bitbake;branch=master"
|
||||
mirrorurl = "git://someserver.org/bitbake;branch=master"
|
||||
self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake")
|
||||
self.gitfetcher(recipeurl, mirrorurl)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_finds_local_tarball_when_previous_downloaded_from_a_premirror(self):
|
||||
recipeurl = "git://someserver.org/bitbake;branch=master;protocol=https"
|
||||
recipeurl = "git://someserver.org/bitbake;branch=master"
|
||||
self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake")
|
||||
self.gitfetcher(recipeurl, recipeurl)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_finds_local_repository_when_premirror_rewrites_the_recipe_url(self):
|
||||
realurl = "https://git.openembedded.org/bitbake"
|
||||
recipeurl = "git://someserver.org/bitbake;protocol=https"
|
||||
realurl = "git://git.openembedded.org/bitbake"
|
||||
recipeurl = "git://someserver.org/bitbake"
|
||||
self.sourcedir = self.unpackdir.replace("unpacked", "sourcemirror.git")
|
||||
os.chdir(self.tempdir)
|
||||
self.git(['clone', realurl, self.sourcedir], cwd=self.tempdir)
|
||||
@@ -1105,9 +1076,9 @@ class FetcherNetworkTest(FetcherTest):
|
||||
@skipIfNoNetwork()
|
||||
def test_git_submodule(self):
|
||||
# URL with ssh submodules
|
||||
url = "gitsm://git.yoctoproject.org/git-submodule-test;branch=ssh-gitsm-tests;rev=049da4a6cb198d7c0302e9e8b243a1443cb809a7;branch=master;protocol=https"
|
||||
url = "gitsm://git.yoctoproject.org/git-submodule-test;branch=ssh-gitsm-tests;rev=049da4a6cb198d7c0302e9e8b243a1443cb809a7;branch=master"
|
||||
# Original URL (comment this if you have ssh access to git.yoctoproject.org)
|
||||
url = "gitsm://git.yoctoproject.org/git-submodule-test;branch=master;rev=a2885dd7d25380d23627e7544b7bbb55014b16ee;branch=master;protocol=https"
|
||||
url = "gitsm://git.yoctoproject.org/git-submodule-test;branch=master;rev=a2885dd7d25380d23627e7544b7bbb55014b16ee;branch=master"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# Previous cwd has been deleted
|
||||
@@ -1123,25 +1094,6 @@ class FetcherNetworkTest(FetcherTest):
|
||||
if os.path.exists(os.path.join(repo_path, 'bitbake-gitsm-test1')):
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'bitbake-gitsm-test1', 'bitbake')), msg='submodule of submodule missing')
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_git_submodule_restricted_network_premirrors(self):
|
||||
# this test is to ensure that premirrors will be tried in restricted network
|
||||
# that is, BB_ALLOWED_NETWORKS does not contain the domain the url uses
|
||||
url = "gitsm://github.com/grpc/grpc.git;protocol=https;name=grpc;branch=v1.60.x;rev=0ef13a7555dbaadd4633399242524129eef5e231"
|
||||
# create a download directory to be used as premirror later
|
||||
tempdir = tempfile.mkdtemp(prefix="bitbake-fetch-")
|
||||
dl_premirror = os.path.join(tempdir, "download-premirror")
|
||||
os.mkdir(dl_premirror)
|
||||
self.d.setVar("DL_DIR", dl_premirror)
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# now use the premirror in restricted network
|
||||
self.d.setVar("DL_DIR", self.dldir)
|
||||
self.d.setVar("PREMIRRORS", "gitsm://.*/.* gitsm://%s/git2/MIRRORNAME;protocol=file" % dl_premirror)
|
||||
self.d.setVar("BB_ALLOWED_NETWORKS", "*.some.domain")
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_git_submodule_dbus_broker(self):
|
||||
# The following external repositories have show failures in fetch and unpack operations
|
||||
@@ -1282,9 +1234,8 @@ class SVNTest(FetcherTest):
|
||||
cwd=repo_dir)
|
||||
|
||||
bb.process.run("svn co %s svnfetch_co" % self.repo_url, cwd=self.tempdir)
|
||||
# Github won't emulate SVN anymore (see https://github.blog/2023-01-20-sunsetting-subversion-support/)
|
||||
# Use still accessible svn repo (only trunk to avoid longer downloads)
|
||||
bb.process.run("svn propset svn:externals 'bitbake https://svn.apache.org/repos/asf/serf/trunk' .",
|
||||
# Github will emulate SVN. Use this to check if we're downloding...
|
||||
bb.process.run("svn propset svn:externals 'bitbake https://github.com/PhilipHazel/pcre2.git' .",
|
||||
cwd=os.path.join(self.tempdir, 'svnfetch_co', 'trunk'))
|
||||
bb.process.run("svn commit --non-interactive -m 'Add external'",
|
||||
cwd=os.path.join(self.tempdir, 'svnfetch_co', 'trunk'))
|
||||
@@ -1312,8 +1263,8 @@ class SVNTest(FetcherTest):
|
||||
|
||||
self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk')), msg="Missing trunk")
|
||||
self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk', 'README.md')), msg="Missing contents")
|
||||
self.assertFalse(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/protocols')), msg="External dir should NOT exist")
|
||||
self.assertFalse(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/protocols', 'fcgi_buckets.h')), msg="External fcgi_buckets.h should NOT exit")
|
||||
self.assertFalse(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/trunk')), msg="External dir should NOT exist")
|
||||
self.assertFalse(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/trunk', 'README')), msg="External README should NOT exit")
|
||||
|
||||
@skipIfNoSvn()
|
||||
def test_external_svn(self):
|
||||
@@ -1326,8 +1277,8 @@ class SVNTest(FetcherTest):
|
||||
|
||||
self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk')), msg="Missing trunk")
|
||||
self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk', 'README.md')), msg="Missing contents")
|
||||
self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/protocols')), msg="External dir should exist")
|
||||
self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/protocols', 'fcgi_buckets.h')), msg="External fcgi_buckets.h should exit")
|
||||
self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/trunk')), msg="External dir should exist")
|
||||
self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/trunk', 'README')), msg="External README should exit")
|
||||
|
||||
class TrustedNetworksTest(FetcherTest):
|
||||
def test_trusted_network(self):
|
||||
@@ -1378,10 +1329,9 @@ class URLHandle(unittest.TestCase):
|
||||
"http://www.google.com/index.html" : ('http', 'www.google.com', '/index.html', '', '', {}),
|
||||
"cvs://anoncvs@cvs.handhelds.org/cvs;module=familiar/dist/ipkg" : ('cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', '', {'module': 'familiar/dist/ipkg'}),
|
||||
"cvs://anoncvs:anonymous@cvs.handhelds.org/cvs;tag=V0-99-81;module=familiar/dist/ipkg" : ('cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', 'anonymous', collections.OrderedDict([('tag', 'V0-99-81'), ('module', 'familiar/dist/ipkg')])),
|
||||
"git://git.openembedded.org/bitbake;branch=@foo;protocol=https" : ('git', 'git.openembedded.org', '/bitbake', '', '', {'branch': '@foo', 'protocol' : 'https'}),
|
||||
"git://git.openembedded.org/bitbake;branch=@foo" : ('git', 'git.openembedded.org', '/bitbake', '', '', {'branch': '@foo'}),
|
||||
"file://somelocation;someparam=1": ('file', '', 'somelocation', '', '', {'someparam': '1'}),
|
||||
"https://somesite.com/somerepo.git;user=anyUser:idtoken=1234" : ('https', 'somesite.com', '/somerepo.git', '', '', {'user': 'anyUser:idtoken=1234'}),
|
||||
r'git://s.o-me_ONE:!#$%^&*()-_={}[]\|:?,.<>~`@git.openembedded.org/bitbake;branch=main;protocol=https': ('git', 'git.openembedded.org', '/bitbake', 's.o-me_ONE', r'!#$%^&*()-_={}[]\|:?,.<>~`', {'branch': 'main', 'protocol' : 'https'}),
|
||||
r'git://s.o-me_ONE:!#$%^&*()-_={}[]\|:?,.<>~`@git.openembedded.org/bitbake;branch=main': ('git', 'git.openembedded.org', '/bitbake', 's.o-me_ONE', r'!#$%^&*()-_={}[]\|:?,.<>~`', {'branch': 'main'}),
|
||||
}
|
||||
# we require a pathname to encodeurl but users can still pass such urls to
|
||||
# decodeurl and we need to handle them
|
||||
@@ -1405,39 +1355,37 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
|
||||
test_git_uris = {
|
||||
# version pattern "X.Y.Z"
|
||||
("mx-1.0", "git://github.com/clutter-project/mx.git;branch=mx-1.4;protocol=https", "9b1db6b8060bd00b121a692f942404a24ae2960f", "", "")
|
||||
("mx-1.0", "git://github.com/clutter-project/mx.git;branch=mx-1.4;protocol=https", "9b1db6b8060bd00b121a692f942404a24ae2960f", "")
|
||||
: "1.99.4",
|
||||
# version pattern "vX.Y"
|
||||
# mirror of git.infradead.org since network issues interfered with testing
|
||||
("mtd-utils", "git://git.yoctoproject.org/mtd-utils.git;branch=master;protocol=https", "ca39eb1d98e736109c64ff9c1aa2a6ecca222d8f", "", "")
|
||||
("mtd-utils", "git://git.yoctoproject.org/mtd-utils.git;branch=master", "ca39eb1d98e736109c64ff9c1aa2a6ecca222d8f", "")
|
||||
: "1.5.0",
|
||||
# version pattern "pkg_name-X.Y"
|
||||
# mirror of git://anongit.freedesktop.org/git/xorg/proto/presentproto since network issues interfered with testing
|
||||
("presentproto", "git://git.yoctoproject.org/bbfetchtests-presentproto;branch=master;protocol=https", "24f3a56e541b0a9e6c6ee76081f441221a120ef9", "", "")
|
||||
("presentproto", "git://git.yoctoproject.org/bbfetchtests-presentproto;branch=master", "24f3a56e541b0a9e6c6ee76081f441221a120ef9", "")
|
||||
: "1.0",
|
||||
# version pattern "pkg_name-vX.Y.Z"
|
||||
("dtc", "git://git.yoctoproject.org/bbfetchtests-dtc.git;branch=master;protocol=https", "65cc4d2748a2c2e6f27f1cf39e07a5dbabd80ebf", "", "")
|
||||
("dtc", "git://git.yoctoproject.org/bbfetchtests-dtc.git;branch=master", "65cc4d2748a2c2e6f27f1cf39e07a5dbabd80ebf", "")
|
||||
: "1.4.0",
|
||||
# combination version pattern
|
||||
("sysprof", "git://git.yoctoproject.org/sysprof.git;protocol=https;branch=master", "cd44ee6644c3641507fb53b8a2a69137f2971219", "", "")
|
||||
("sysprof", "git://gitlab.gnome.org/GNOME/sysprof.git;protocol=https;branch=master", "cd44ee6644c3641507fb53b8a2a69137f2971219", "")
|
||||
: "1.2.0",
|
||||
("u-boot-mkimage", "git://source.denx.de/u-boot/u-boot.git;branch=master;protocol=https", "62c175fbb8a0f9a926c88294ea9f7e88eb898f6c", "", "")
|
||||
("u-boot-mkimage", "git://git.denx.de/u-boot.git;branch=master;protocol=git", "62c175fbb8a0f9a926c88294ea9f7e88eb898f6c", "")
|
||||
: "2014.01",
|
||||
# version pattern "yyyymmdd"
|
||||
("mobile-broadband-provider-info", "git://git.yoctoproject.org/mobile-broadband-provider-info.git;protocol=https;branch=master", "4ed19e11c2975105b71b956440acdb25d46a347d", "", "")
|
||||
("mobile-broadband-provider-info", "git://gitlab.gnome.org/GNOME/mobile-broadband-provider-info.git;protocol=https;branch=master", "4ed19e11c2975105b71b956440acdb25d46a347d", "")
|
||||
: "20120614",
|
||||
# packages with a valid UPSTREAM_CHECK_GITTAGREGEX
|
||||
# mirror of git://anongit.freedesktop.org/xorg/driver/xf86-video-omap since network issues interfered with testing
|
||||
("xf86-video-omap", "git://git.yoctoproject.org/bbfetchtests-xf86-video-omap;branch=master;protocol=https", "ae0394e687f1a77e966cf72f895da91840dffb8f", r"(?P<pver>(\d+\.(\d\.?)*))", "")
|
||||
("xf86-video-omap", "git://git.yoctoproject.org/bbfetchtests-xf86-video-omap;branch=master", "ae0394e687f1a77e966cf72f895da91840dffb8f", r"(?P<pver>(\d+\.(\d\.?)*))")
|
||||
: "0.4.3",
|
||||
("build-appliance-image", "git://git.yoctoproject.org/poky;branch=master;protocol=https", "b37dd451a52622d5b570183a81583cc34c2ff555", r"(?P<pver>(([0-9][\.|_]?)+[0-9]))", "")
|
||||
("build-appliance-image", "git://git.yoctoproject.org/poky;branch=master", "b37dd451a52622d5b570183a81583cc34c2ff555", r"(?P<pver>(([0-9][\.|_]?)+[0-9]))")
|
||||
: "11.0.0",
|
||||
("chkconfig-alternatives-native", "git://github.com/kergoth/chkconfig;branch=sysroot;protocol=https", "cd437ecbd8986c894442f8fce1e0061e20f04dee", r"chkconfig\-(?P<pver>((\d+[\.\-_]*)+))", "")
|
||||
("chkconfig-alternatives-native", "git://github.com/kergoth/chkconfig;branch=sysroot;protocol=https", "cd437ecbd8986c894442f8fce1e0061e20f04dee", r"chkconfig\-(?P<pver>((\d+[\.\-_]*)+))")
|
||||
: "1.3.59",
|
||||
("remake", "git://github.com/rocky/remake.git;protocol=https;branch=master", "f05508e521987c8494c92d9c2871aec46307d51d", r"(?P<pver>(\d+\.(\d+\.)*\d*(\+dbg\d+(\.\d+)*)*))", "")
|
||||
("remake", "git://github.com/rocky/remake.git;protocol=https;branch=master", "f05508e521987c8494c92d9c2871aec46307d51d", r"(?P<pver>(\d+\.(\d+\.)*\d*(\+dbg\d+(\.\d+)*)*))")
|
||||
: "3.82+dbg0.9",
|
||||
("sysdig", "git://github.com/draios/sysdig.git;branch=dev;protocol=https", "4fb6288275f567f63515df0ff0a6518043ecfa9b", r"^(?P<pver>\d+(\.\d+)+)", "10.0.0")
|
||||
: "0.28.0",
|
||||
}
|
||||
|
||||
test_wget_uris = {
|
||||
@@ -1505,13 +1453,10 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
self.assertTrue(verstring, msg="Could not find upstream version for %s" % k[0])
|
||||
r = bb.utils.vercmp_string(v, verstring)
|
||||
self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring))
|
||||
if k[4]:
|
||||
r = bb.utils.vercmp_string(verstring, k[4])
|
||||
self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], verstring, k[4]))
|
||||
|
||||
def test_wget_latest_versionstring(self):
|
||||
testdata = os.path.dirname(os.path.abspath(__file__)) + "/fetch-testdata"
|
||||
server = HTTPService(testdata, host="127.0.0.1")
|
||||
server = HTTPService(testdata)
|
||||
server.start()
|
||||
port = server.port
|
||||
try:
|
||||
@@ -1519,10 +1464,10 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
self.d.setVar("PN", k[0])
|
||||
checkuri = ""
|
||||
if k[2]:
|
||||
checkuri = "http://127.0.0.1:%s/" % port + k[2]
|
||||
checkuri = "http://localhost:%s/" % port + k[2]
|
||||
self.d.setVar("UPSTREAM_CHECK_URI", checkuri)
|
||||
self.d.setVar("UPSTREAM_CHECK_REGEX", k[3])
|
||||
url = "http://127.0.0.1:%s/" % port + k[1]
|
||||
url = "http://localhost:%s/" % port + k[1]
|
||||
ud = bb.fetch2.FetchData(url, self.d)
|
||||
pupver = ud.method.latest_versionstring(ud, self.d)
|
||||
verstring = pupver[0]
|
||||
@@ -1715,8 +1660,6 @@ class GitShallowTest(FetcherTest):
|
||||
if cwd is None:
|
||||
cwd = self.gitdir
|
||||
actual_refs = self.git(['for-each-ref', '--format=%(refname)'], cwd=cwd).splitlines()
|
||||
# Resolve references into the same format as the comparision (needed by git 2.48 onwards)
|
||||
actual_refs = self.git(['rev-parse', '--symbolic-full-name'] + actual_refs, cwd=cwd).splitlines()
|
||||
full_expected = self.git(['rev-parse', '--symbolic-full-name'] + expected_refs, cwd=cwd).splitlines()
|
||||
self.assertEqual(sorted(set(full_expected)), sorted(set(actual_refs)))
|
||||
|
||||
@@ -2246,7 +2189,7 @@ class GitShallowTest(FetcherTest):
|
||||
self.d.setVar('SRCREV', 'e5939ff608b95cdd4d0ab0e1935781ab9a276ac0')
|
||||
self.d.setVar('BB_GIT_SHALLOW', '1')
|
||||
self.d.setVar('BB_GENERATE_SHALLOW_TARBALLS', '1')
|
||||
fetcher = bb.fetch.Fetch(["git://git.yoctoproject.org/fstests;branch=master;protocol=https"], self.d)
|
||||
fetcher = bb.fetch.Fetch(["git://git.yoctoproject.org/fstests;branch=master"], self.d)
|
||||
fetcher.download()
|
||||
|
||||
bb.utils.remove(self.dldir + "/*.tar.gz")
|
||||
@@ -2279,14 +2222,10 @@ class GitLfsTest(FetcherTest):
|
||||
|
||||
bb.utils.mkdirhier(self.srcdir)
|
||||
self.git_init(cwd=self.srcdir)
|
||||
self.commit_file('.gitattributes', '*.mp3 filter=lfs -text')
|
||||
|
||||
def commit_file(self, filename, content):
|
||||
with open(os.path.join(self.srcdir, filename), "w") as f:
|
||||
f.write(content)
|
||||
self.git(["add", filename], cwd=self.srcdir)
|
||||
self.git(["commit", "-m", "Change"], cwd=self.srcdir)
|
||||
return self.git(["rev-parse", "HEAD"], cwd=self.srcdir).strip()
|
||||
with open(os.path.join(self.srcdir, '.gitattributes'), 'wt') as attrs:
|
||||
attrs.write('*.mp3 filter=lfs -text')
|
||||
self.git(['add', '.gitattributes'], cwd=self.srcdir)
|
||||
self.git(['commit', '-m', "attributes", '.gitattributes'], cwd=self.srcdir)
|
||||
|
||||
def fetch(self, uri=None, download=True):
|
||||
uris = self.d.getVar('SRC_URI').split()
|
||||
@@ -2306,44 +2245,6 @@ class GitLfsTest(FetcherTest):
|
||||
unpacked_lfs_file = os.path.join(self.d.getVar('WORKDIR'), 'git', "Cat_poster_1.jpg")
|
||||
return unpacked_lfs_file
|
||||
|
||||
@skipIfNoGitLFS()
|
||||
def test_fetch_lfs_on_srcrev_change(self):
|
||||
"""Test if fetch downloads missing LFS objects when a different revision within an existing repository is requested"""
|
||||
self.git(["lfs", "install", "--local"], cwd=self.srcdir)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def hide_upstream_repository():
|
||||
"""Hide the upstream repository to make sure that git lfs cannot pull from it"""
|
||||
temp_name = self.srcdir + ".bak"
|
||||
os.rename(self.srcdir, temp_name)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
os.rename(temp_name, self.srcdir)
|
||||
|
||||
def fetch_and_verify(revision, filename, content):
|
||||
self.d.setVar('SRCREV', revision)
|
||||
fetcher, ud = self.fetch()
|
||||
|
||||
with hide_upstream_repository():
|
||||
workdir = self.d.getVar('WORKDIR')
|
||||
fetcher.unpack(workdir)
|
||||
|
||||
with open(os.path.join(workdir, "git", filename)) as f:
|
||||
self.assertEqual(f.read(), content)
|
||||
|
||||
commit_1 = self.commit_file("a.mp3", "version 1")
|
||||
commit_2 = self.commit_file("a.mp3", "version 2")
|
||||
|
||||
self.d.setVar('SRC_URI', "git://%s;protocol=file;lfs=1;branch=master" % self.srcdir)
|
||||
|
||||
# Seed the local download folder by fetching the latest commit and verifying that the LFS contents are
|
||||
# available even when the upstream repository disappears.
|
||||
fetch_and_verify(commit_2, "a.mp3", "version 2")
|
||||
# Verify that even when an older revision is fetched, the needed LFS objects are fetched into the download
|
||||
# folder.
|
||||
fetch_and_verify(commit_1, "a.mp3", "version 1")
|
||||
|
||||
@skipIfNoGitLFS()
|
||||
@skipIfNoNetwork()
|
||||
def test_real_git_lfs_repo_succeeds_without_lfs_param(self):
|
||||
@@ -2362,7 +2263,7 @@ class GitLfsTest(FetcherTest):
|
||||
|
||||
@skipIfNoGitLFS()
|
||||
@skipIfNoNetwork()
|
||||
def test_real_git_lfs_repo_skips(self):
|
||||
def test_real_git_lfs_repo_succeeds(self):
|
||||
self.d.setVar('SRC_URI', "git://gitlab.com/gitlab-examples/lfs.git;protocol=https;branch=master;lfs=0")
|
||||
f = self.get_real_git_lfs_file()
|
||||
# This is the actual non-smudged placeholder file on the repo if git-lfs does not run
|
||||
@@ -2375,41 +2276,24 @@ class GitLfsTest(FetcherTest):
|
||||
with open(f) as fh:
|
||||
self.assertEqual(lfs_file, fh.read())
|
||||
|
||||
@skipIfNoGitLFS()
|
||||
def test_lfs_enabled(self):
|
||||
import shutil
|
||||
|
||||
uri = 'git://%s;protocol=file;lfs=1;branch=master' % self.srcdir
|
||||
self.d.setVar('SRC_URI', uri)
|
||||
|
||||
# With git-lfs installed, test that we can fetch and unpack
|
||||
fetcher, ud = self.fetch()
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
|
||||
@skipIfNoGitLFS()
|
||||
def test_lfs_disabled(self):
|
||||
import shutil
|
||||
|
||||
uri = 'git://%s;protocol=file;lfs=0;branch=master' % self.srcdir
|
||||
self.d.setVar('SRC_URI', uri)
|
||||
|
||||
# Verify that the fetcher can survive even if the source
|
||||
# repository has Git LFS usage configured.
|
||||
fetcher, ud = self.fetch()
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
|
||||
def test_lfs_enabled_not_installed(self):
|
||||
import shutil
|
||||
|
||||
uri = 'git://%s;protocol=file;lfs=1;branch=master' % self.srcdir
|
||||
self.d.setVar('SRC_URI', uri)
|
||||
|
||||
# Careful: suppress initial attempt at downloading
|
||||
# Careful: suppress initial attempt at downloading until
|
||||
# we know whether git-lfs is installed.
|
||||
fetcher, ud = self.fetch(uri=None, download=False)
|
||||
self.assertIsNotNone(ud.method._find_git_lfs)
|
||||
|
||||
# If git-lfs can be found, the unpack should be successful. Only
|
||||
# attempt this with the real live copy of git-lfs installed.
|
||||
if ud.method._find_git_lfs(self.d):
|
||||
fetcher.download()
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
|
||||
# Artificially assert that git-lfs is not installed, so
|
||||
# we can verify a failure to unpack in it's absence.
|
||||
old_find_git_lfs = ud.method._find_git_lfs
|
||||
try:
|
||||
# If git-lfs cannot be found, the unpack should throw an error
|
||||
@@ -2421,21 +2305,29 @@ class GitLfsTest(FetcherTest):
|
||||
finally:
|
||||
ud.method._find_git_lfs = old_find_git_lfs
|
||||
|
||||
def test_lfs_disabled_not_installed(self):
|
||||
def test_lfs_disabled(self):
|
||||
import shutil
|
||||
|
||||
uri = 'git://%s;protocol=file;lfs=0;branch=master' % self.srcdir
|
||||
self.d.setVar('SRC_URI', uri)
|
||||
|
||||
# Careful: suppress initial attempt at downloading
|
||||
fetcher, ud = self.fetch(uri=None, download=False)
|
||||
# In contrast to test_lfs_enabled(), allow the implicit download
|
||||
# done by self.fetch() to occur here. The point of this test case
|
||||
# is to verify that the fetcher can survive even if the source
|
||||
# repository has Git LFS usage configured.
|
||||
fetcher, ud = self.fetch()
|
||||
self.assertIsNotNone(ud.method._find_git_lfs)
|
||||
|
||||
# Artificially assert that git-lfs is not installed, so
|
||||
# we can verify a failure to unpack in it's absence.
|
||||
old_find_git_lfs = ud.method._find_git_lfs
|
||||
try:
|
||||
# Even if git-lfs cannot be found, the unpack should be successful
|
||||
fetcher.download()
|
||||
# If git-lfs can be found, the unpack should be successful. A
|
||||
# live copy of git-lfs is not required for this case, so
|
||||
# unconditionally forge its presence.
|
||||
ud.method._find_git_lfs = lambda d: True
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
# If git-lfs cannot be found, the unpack should be successful
|
||||
|
||||
ud.method._find_git_lfs = lambda d: False
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
@@ -2499,31 +2391,6 @@ class CrateTest(FetcherTest):
|
||||
self.assertTrue(os.path.exists(self.tempdir + "/cargo_home/bitbake/glob-0.2.11/.cargo-checksum.json"))
|
||||
self.assertTrue(os.path.exists(self.tempdir + "/cargo_home/bitbake/glob-0.2.11/src/lib.rs"))
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_crate_url_matching_recipe(self):
|
||||
|
||||
self.d.setVar('BP', 'glob-0.2.11')
|
||||
|
||||
uri = "crate://crates.io/glob/0.2.11"
|
||||
self.d.setVar('SRC_URI', uri)
|
||||
|
||||
uris = self.d.getVar('SRC_URI').split()
|
||||
d = self.d
|
||||
|
||||
fetcher = bb.fetch2.Fetch(uris, self.d)
|
||||
ud = fetcher.ud[fetcher.urls[0]]
|
||||
|
||||
self.assertIn("name", ud.parm)
|
||||
self.assertEqual(ud.parm["name"], "glob-0.2.11")
|
||||
self.assertIn("downloadfilename", ud.parm)
|
||||
self.assertEqual(ud.parm["downloadfilename"], "glob-0.2.11.crate")
|
||||
|
||||
fetcher.download()
|
||||
fetcher.unpack(self.tempdir)
|
||||
self.assertEqual(sorted(os.listdir(self.tempdir)), ['download', 'glob-0.2.11', 'unpacked'])
|
||||
self.assertEqual(sorted(os.listdir(self.tempdir + "/download")), ['glob-0.2.11.crate', 'glob-0.2.11.crate.done'])
|
||||
self.assertTrue(os.path.exists(self.tempdir + "/glob-0.2.11/src/lib.rs"))
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_crate_url_params(self):
|
||||
|
||||
@@ -2589,7 +2456,7 @@ class CrateTest(FetcherTest):
|
||||
uris = self.d.getVar('SRC_URI').split()
|
||||
|
||||
fetcher = bb.fetch2.Fetch(uris, self.d)
|
||||
with self.assertRaisesRegex(bb.fetch2.FetchError, "Fetcher failure for URL"):
|
||||
with self.assertRaisesRegexp(bb.fetch2.FetchError, "Fetcher failure for URL"):
|
||||
fetcher.download()
|
||||
|
||||
class NPMTest(FetcherTest):
|
||||
@@ -3099,7 +2966,7 @@ class NPMTest(FetcherTest):
|
||||
class GitSharedTest(FetcherTest):
|
||||
def setUp(self):
|
||||
super(GitSharedTest, self).setUp()
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake;branch=master;protocol=https"
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake;branch=master"
|
||||
self.d.setVar('SRCREV', '82ea737a0b42a8b53e11c9cde141e9e9c0bd8c40')
|
||||
self.d.setVar("__BBSRCREV_SEEN", "1")
|
||||
|
||||
@@ -3132,59 +2999,31 @@ class FetchPremirroronlyLocalTest(FetcherTest):
|
||||
os.mkdir(self.mirrordir)
|
||||
self.reponame = "bitbake"
|
||||
self.gitdir = os.path.join(self.tempdir, "git", self.reponame)
|
||||
self.recipe_url = "git://git.fake.repo/bitbake;branch=master;protocol=https"
|
||||
self.recipe_url = "git://git.fake.repo/bitbake;branch=master"
|
||||
self.d.setVar("BB_FETCH_PREMIRRORONLY", "1")
|
||||
self.d.setVar("BB_NO_NETWORK", "1")
|
||||
self.d.setVar("PREMIRRORS", self.recipe_url + " " + "file://{}".format(self.mirrordir) + " \n")
|
||||
self.mirrorname = "git2_git.fake.repo.bitbake.tar.gz"
|
||||
self.mirrorfile = os.path.join(self.mirrordir, self.mirrorname)
|
||||
self.testfilename = "bitbake-fetch.test"
|
||||
|
||||
def make_git_repo(self):
|
||||
self.mirrorname = "git2_git.fake.repo.bitbake.tar.gz"
|
||||
recipeurl = "git:/git.fake.repo/bitbake"
|
||||
os.makedirs(self.gitdir)
|
||||
self.git_init(cwd=self.gitdir)
|
||||
self.git("init", self.gitdir)
|
||||
for i in range(0):
|
||||
self.git_new_commit()
|
||||
bb.process.run('tar -czvf {} .'.format(os.path.join(self.mirrordir, self.mirrorname)), cwd = self.gitdir)
|
||||
|
||||
def git_new_commit(self):
|
||||
import random
|
||||
testfilename = "bibake-fetch.test"
|
||||
os.unlink(os.path.join(self.mirrordir, self.mirrorname))
|
||||
branch = self.git("branch --show-current", self.gitdir).split()
|
||||
with open(os.path.join(self.gitdir, self.testfilename), "w") as testfile:
|
||||
testfile.write("File {} from branch {}; Useless random data {}".format(self.testfilename, branch, random.random()))
|
||||
self.git("add {}".format(self.testfilename), self.gitdir)
|
||||
self.git("commit -a -m \"This random commit {} in branch {}. I'm useless.\"".format(random.random(), branch), self.gitdir)
|
||||
with open(os.path.join(self.gitdir, testfilename), "w") as testfile:
|
||||
testfile.write("Useless random data {}".format(random.random()))
|
||||
self.git("add {}".format(testfilename), self.gitdir)
|
||||
self.git("commit -a -m \"This random commit {}. I'm useless.\"".format(random.random()), self.gitdir)
|
||||
bb.process.run('tar -czvf {} .'.format(os.path.join(self.mirrordir, self.mirrorname)), cwd = self.gitdir)
|
||||
return self.git("rev-parse HEAD", self.gitdir).strip()
|
||||
|
||||
def git_new_branch(self, name):
|
||||
self.git_new_commit()
|
||||
head = self.git("rev-parse HEAD", self.gitdir).strip()
|
||||
self.git("checkout -b {}".format(name), self.gitdir)
|
||||
newrev = self.git_new_commit()
|
||||
self.git("checkout {}".format(head), self.gitdir)
|
||||
return newrev
|
||||
|
||||
def test_mirror_multiple_fetches(self):
|
||||
self.make_git_repo()
|
||||
self.d.setVar("SRCREV", self.git_new_commit())
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
fetcher.download()
|
||||
fetcher.unpack(self.unpackdir)
|
||||
## New commit in premirror. it's not in the download_dir
|
||||
self.d.setVar("SRCREV", self.git_new_commit())
|
||||
fetcher2 = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
fetcher2.download()
|
||||
fetcher2.unpack(self.unpackdir)
|
||||
## New commit in premirror. it's not in the download_dir
|
||||
self.d.setVar("SRCREV", self.git_new_commit())
|
||||
fetcher3 = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
fetcher3.download()
|
||||
fetcher3.unpack(self.unpackdir)
|
||||
|
||||
|
||||
def test_mirror_commit_nonexistent(self):
|
||||
self.make_git_repo()
|
||||
self.d.setVar("SRCREV", "0"*40)
|
||||
@@ -3205,59 +3044,6 @@ class FetchPremirroronlyLocalTest(FetcherTest):
|
||||
with self.assertRaises(bb.fetch2.NetworkAccess):
|
||||
fetcher.download()
|
||||
|
||||
def test_mirror_tarball_multiple_branches(self):
"""
test if PREMIRRORS can handle multiple name/branches correctly
both branches have required revisions
"""
self.make_git_repo()
branch1rev = self.git_new_branch("testbranch1")
branch2rev = self.git_new_branch("testbranch2")
self.recipe_url = "git://git.fake.repo/bitbake;branch=testbranch1,testbranch2;protocol=https;name=branch1,branch2"
self.d.setVar("SRCREV_branch1", branch1rev)
self.d.setVar("SRCREV_branch2", branch2rev)
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
self.assertTrue(os.path.exists(self.mirrorfile), "Mirror file doesn't exist")
fetcher.download()
fetcher.unpack(os.path.join(self.tempdir, "unpacked"))
unpacked = os.path.join(self.tempdir, "unpacked", "git", self.testfilename)
self.assertTrue(os.path.exists(unpacked), "Repo has not been unpackaged properly!")
with open(unpacked, 'r') as f:
content = f.read()
## We expect to see testbranch1 in the file, not master, not testbranch2
self.assertTrue(content.find("testbranch1") != -1, "Wrong branch has been checked out!")

def test_mirror_tarball_multiple_branches_nobranch(self):
|
||||
"""
|
||||
test if PREMIRRORS can handle multiple name/branches correctly
|
||||
Unbalanced name/branches raises ParameterError
|
||||
"""
|
||||
self.make_git_repo()
|
||||
branch1rev = self.git_new_branch("testbranch1")
|
||||
branch2rev = self.git_new_branch("testbranch2")
|
||||
self.recipe_url = "git://git.fake.repo/bitbake;branch=testbranch1;protocol=https;name=branch1,branch2"
|
||||
self.d.setVar("SRCREV_branch1", branch1rev)
|
||||
self.d.setVar("SRCREV_branch2", branch2rev)
|
||||
with self.assertRaises(bb.fetch2.ParameterError):
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
|
||||
def test_mirror_tarball_multiple_branches_norev(self):
|
||||
"""
|
||||
test if PREMIRRORS can handle multiple name/branches correctly
|
||||
one of the branches specifies non existing SRCREV
|
||||
"""
|
||||
self.make_git_repo()
|
||||
branch1rev = self.git_new_branch("testbranch1")
|
||||
branch2rev = self.git_new_branch("testbranch2")
|
||||
self.recipe_url = "git://git.fake.repo/bitbake;branch=testbranch1,testbranch2;protocol=https;name=branch1,branch2"
|
||||
self.d.setVar("SRCREV_branch1", branch1rev)
|
||||
self.d.setVar("SRCREV_branch2", "0"*40)
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
self.assertTrue(os.path.exists(self.mirrorfile), "Mirror file doesn't exist")
|
||||
with self.assertRaises(bb.fetch2.NetworkAccess):
|
||||
fetcher.download()
|
||||
|
||||
|
||||
class FetchPremirroronlyNetworkTest(FetcherTest):
|
||||
|
||||
def setUp(self):
|
||||
@@ -3267,7 +3053,7 @@ class FetchPremirroronlyNetworkTest(FetcherTest):
|
||||
self.reponame = "fstests"
|
||||
self.clonedir = os.path.join(self.tempdir, "git")
|
||||
self.gitdir = os.path.join(self.tempdir, "git", "{}.git".format(self.reponame))
|
||||
self.recipe_url = "git://git.yoctoproject.org/fstests;protocol=https"
|
||||
self.recipe_url = "git://git.yoctoproject.org/fstests"
|
||||
self.d.setVar("BB_FETCH_PREMIRRORONLY", "1")
|
||||
self.d.setVar("BB_NO_NETWORK", "0")
|
||||
self.d.setVar("PREMIRRORS", self.recipe_url + " " + "file://{}".format(self.mirrordir) + " \n")
|
||||
@@ -3349,7 +3135,7 @@ class FetchPremirroronlyBrokenTarball(FetcherTest):
|
||||
os.mkdir(self.mirrordir)
|
||||
self.reponame = "bitbake"
|
||||
self.gitdir = os.path.join(self.tempdir, "git", self.reponame)
|
||||
self.recipe_url = "git://git.fake.repo/bitbake;protocol=https"
|
||||
self.recipe_url = "git://git.fake.repo/bitbake"
|
||||
self.d.setVar("BB_FETCH_PREMIRRORONLY", "1")
|
||||
self.d.setVar("BB_NO_NETWORK", "1")
|
||||
self.d.setVar("PREMIRRORS", self.recipe_url + " " + "file://{}".format(self.mirrordir) + " \n")
|
||||
@@ -3361,7 +3147,7 @@ class FetchPremirroronlyBrokenTarball(FetcherTest):
|
||||
import sys
|
||||
self.d.setVar("SRCREV", "0"*40)
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
with self.assertRaises(bb.fetch2.FetchError), self.assertLogs() as logs:
|
||||
with self.assertRaises(bb.fetch2.FetchError):
|
||||
fetcher.download()
|
||||
output = "".join(logs.output)
|
||||
self.assertFalse(" not a git repository (or any parent up to mount point /)" in output)
|
||||
stdout = sys.stdout.getvalue()
|
||||
self.assertFalse(" not a git repository (or any parent up to mount point /)" in stdout)
|
||||
|
||||
@@ -186,16 +186,14 @@ deltask ${EMPTYVAR}
|
||||
"""
|
||||
def test_parse_addtask_deltask(self):
|
||||
import sys
|
||||
f = self.parsehelper(self.addtask_deltask)
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
with self.assertLogs() as logs:
|
||||
f = self.parsehelper(self.addtask_deltask)
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
output = "".join(logs.output)
|
||||
self.assertTrue("addtask contained multiple 'before' keywords" in output)
|
||||
self.assertTrue("addtask contained multiple 'after' keywords" in output)
|
||||
self.assertTrue('addtask ignored: " do_patch"' in output)
|
||||
#self.assertTrue('dependent task do_foo for do_patch does not exist' in output)
|
||||
stdout = sys.stdout.getvalue()
|
||||
self.assertTrue("addtask contained multiple 'before' keywords" in stdout)
|
||||
self.assertTrue("addtask contained multiple 'after' keywords" in stdout)
|
||||
self.assertTrue('addtask ignored: " do_patch"' in stdout)
|
||||
#self.assertTrue('dependent task do_foo for do_patch does not exist' in stdout)
|
||||
|
||||
broken_multiline_comment = """
|
||||
# First line of comment \\
|
||||
@@ -243,101 +241,3 @@ unset A[flag@.service]
|
||||
with self.assertRaises(bb.parse.ParseError):
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
export_function_recipe = """
|
||||
inherit someclass
|
||||
"""
|
||||
|
||||
export_function_recipe2 = """
|
||||
inherit someclass
|
||||
|
||||
do_compile () {
|
||||
false
|
||||
}
|
||||
|
||||
python do_compilepython () {
|
||||
bb.note("Something else")
|
||||
}
|
||||
|
||||
"""
|
||||
export_function_class = """
|
||||
someclass_do_compile() {
|
||||
true
|
||||
}
|
||||
|
||||
python someclass_do_compilepython () {
|
||||
bb.note("Something")
|
||||
}
|
||||
|
||||
EXPORT_FUNCTIONS do_compile do_compilepython
|
||||
"""
|
||||
|
||||
export_function_class2 = """
|
||||
secondclass_do_compile() {
|
||||
true
|
||||
}
|
||||
|
||||
python secondclass_do_compilepython () {
|
||||
bb.note("Something")
|
||||
}
|
||||
|
||||
EXPORT_FUNCTIONS do_compile do_compilepython
|
||||
"""
|
||||
|
||||
def test_parse_export_functions(self):
|
||||
def check_function_flags(d):
|
||||
self.assertEqual(d.getVarFlag("do_compile", "func"), 1)
|
||||
self.assertEqual(d.getVarFlag("do_compilepython", "func"), 1)
|
||||
self.assertEqual(d.getVarFlag("do_compile", "python"), None)
|
||||
self.assertEqual(d.getVarFlag("do_compilepython", "python"), "1")
|
||||
|
||||
with tempfile.TemporaryDirectory() as tempdir:
|
||||
self.d.setVar("__bbclasstype", "recipe")
|
||||
recipename = tempdir + "/recipe.bb"
|
||||
os.makedirs(tempdir + "/classes")
|
||||
with open(tempdir + "/classes/someclass.bbclass", "w") as f:
|
||||
f.write(self.export_function_class)
|
||||
f.flush()
|
||||
with open(tempdir + "/classes/secondclass.bbclass", "w") as f:
|
||||
f.write(self.export_function_class2)
|
||||
f.flush()
|
||||
|
||||
with open(recipename, "w") as f:
|
||||
f.write(self.export_function_recipe)
|
||||
f.flush()
|
||||
os.chdir(tempdir)
|
||||
d = bb.parse.handle(recipename, bb.data.createCopy(self.d))['']
|
||||
self.assertIn("someclass_do_compile", d.getVar("do_compile"))
|
||||
self.assertIn("someclass_do_compilepython", d.getVar("do_compilepython"))
|
||||
check_function_flags(d)
|
||||
|
||||
recipename2 = tempdir + "/recipe2.bb"
|
||||
with open(recipename2, "w") as f:
|
||||
f.write(self.export_function_recipe2)
|
||||
f.flush()
|
||||
|
||||
d = bb.parse.handle(recipename2, bb.data.createCopy(self.d))['']
|
||||
self.assertNotIn("someclass_do_compile", d.getVar("do_compile"))
|
||||
self.assertNotIn("someclass_do_compilepython", d.getVar("do_compilepython"))
|
||||
self.assertIn("false", d.getVar("do_compile"))
|
||||
self.assertIn("else", d.getVar("do_compilepython"))
|
||||
check_function_flags(d)
|
||||
|
||||
with open(recipename, "a+") as f:
|
||||
f.write("\ninherit secondclass\n")
|
||||
f.flush()
|
||||
with open(recipename2, "a+") as f:
|
||||
f.write("\ninherit secondclass\n")
|
||||
f.flush()
|
||||
|
||||
d = bb.parse.handle(recipename, bb.data.createCopy(self.d))['']
|
||||
self.assertIn("secondclass_do_compile", d.getVar("do_compile"))
|
||||
self.assertIn("secondclass_do_compilepython", d.getVar("do_compilepython"))
|
||||
check_function_flags(d)
|
||||
|
||||
d = bb.parse.handle(recipename2, bb.data.createCopy(self.d))['']
|
||||
self.assertNotIn("someclass_do_compile", d.getVar("do_compile"))
|
||||
self.assertNotIn("someclass_do_compilepython", d.getVar("do_compilepython"))
|
||||
self.assertIn("false", d.getVar("do_compile"))
|
||||
self.assertIn("else", d.getVar("do_compilepython"))
|
||||
check_function_flags(d)
|
||||
|
||||
|
||||
@@ -17,12 +17,75 @@ import bb.siggen
|
||||
|
||||
class SiggenTest(unittest.TestCase):
|
||||
|
||||
def test_build_pnid(self):
|
||||
tests = {
|
||||
('', 'helloworld', 'do_sometask') : 'helloworld:do_sometask',
|
||||
('XX', 'helloworld', 'do_sometask') : 'mc:XX:helloworld:do_sometask',
|
||||
}
|
||||
def test_clean_basepath_simple_target_basepath(self):
|
||||
basepath = '/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask'
|
||||
expected_cleaned = 'helloworld/helloworld_1.2.3.bb:do_sometask'
|
||||
|
||||
for t in tests:
|
||||
self.assertEqual(bb.siggen.build_pnid(*t), tests[t])
|
||||
actual_cleaned = bb.siggen.clean_basepath(basepath)
|
||||
|
||||
self.assertEqual(actual_cleaned, expected_cleaned)
|
||||
|
||||
def test_clean_basepath_basic_virtual_basepath(self):
|
||||
basepath = 'virtual:something:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask'
|
||||
expected_cleaned = 'helloworld/helloworld_1.2.3.bb:do_sometask:virtual:something'
|
||||
|
||||
actual_cleaned = bb.siggen.clean_basepath(basepath)
|
||||
|
||||
self.assertEqual(actual_cleaned, expected_cleaned)
|
||||
|
||||
def test_clean_basepath_mc_basepath(self):
|
||||
basepath = 'mc:somemachine:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask'
|
||||
expected_cleaned = 'helloworld/helloworld_1.2.3.bb:do_sometask:mc:somemachine'
|
||||
|
||||
actual_cleaned = bb.siggen.clean_basepath(basepath)
|
||||
|
||||
self.assertEqual(actual_cleaned, expected_cleaned)
|
||||
|
||||
def test_clean_basepath_virtual_long_prefix_basepath(self):
|
||||
basepath = 'virtual:something:A:B:C:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask'
|
||||
expected_cleaned = 'helloworld/helloworld_1.2.3.bb:do_sometask:virtual:something:A:B:C'
|
||||
|
||||
actual_cleaned = bb.siggen.clean_basepath(basepath)
|
||||
|
||||
self.assertEqual(actual_cleaned, expected_cleaned)
|
||||
|
||||
def test_clean_basepath_mc_virtual_basepath(self):
|
||||
basepath = 'mc:somemachine:virtual:something:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask'
|
||||
expected_cleaned = 'helloworld/helloworld_1.2.3.bb:do_sometask:virtual:something:mc:somemachine'
|
||||
|
||||
actual_cleaned = bb.siggen.clean_basepath(basepath)
|
||||
|
||||
self.assertEqual(actual_cleaned, expected_cleaned)
|
||||
|
||||
def test_clean_basepath_mc_virtual_long_prefix_basepath(self):
|
||||
basepath = 'mc:X:virtual:something:C:B:A:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask'
|
||||
expected_cleaned = 'helloworld/helloworld_1.2.3.bb:do_sometask:virtual:something:C:B:A:mc:X'
|
||||
|
||||
actual_cleaned = bb.siggen.clean_basepath(basepath)
|
||||
|
||||
self.assertEqual(actual_cleaned, expected_cleaned)
|
||||
|
||||
|
||||
# def test_clean_basepath_performance(self):
|
||||
# input_basepaths = [
|
||||
# 'mc:X:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask',
|
||||
# 'mc:X:virtual:something:C:B:A:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask',
|
||||
# 'virtual:something:C:B:A:/different/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask',
|
||||
# 'virtual:something:A:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask',
|
||||
# '/this/is/most/common/input/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask',
|
||||
# '/and/should/be/tested/with/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask',
|
||||
# '/more/weight/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask',
|
||||
# ]
|
||||
|
||||
# time_start = time.time()
|
||||
|
||||
# i = 2000000
|
||||
# while i >= 0:
|
||||
# for basepath in input_basepaths:
|
||||
# bb.siggen.clean_basepath(basepath)
|
||||
# i -= 1
|
||||
|
||||
# elapsed = time.time() - time_start
|
||||
# print('{} ({}s)'.format(self.id(), round(elapsed, 3)))
|
||||
|
||||
# self.assertTrue(False)
|
||||
|
||||
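The siggen tests in this hunk pin down the observable behaviour of bb.siggen.clean_basepath: the recipe path collapses to its last two components plus the task name, and any leading mc:/virtual: prefix is moved to the end. The following is a minimal illustrative sketch of that mapping, written for this document only; it is not the actual BitBake implementation.

```python
def clean_basepath_sketch(basepath):
    # Illustrative re-implementation of the behaviour exercised by the
    # tests above; the real bb.siggen.clean_basepath may differ.
    prefix = ""
    if basepath.startswith("mc:"):
        # "mc:<name>:" prefix, possibly followed by a virtual prefix
        _, machine, basepath = basepath.split(":", 2)
        prefix = ":mc:" + machine
    if basepath.startswith("virtual:"):
        # Everything up to the absolute recipe path is the virtual prefix
        virt, _, rest = basepath.rpartition(":/")
        prefix = ":" + virt + prefix
        basepath = "/" + rest
    recipe, _, task = basepath.rpartition(":")
    cleaned = "/".join(recipe.split("/")[-2:]) + ":" + task
    return cleaned + prefix

# e.g. 'mc:X:virtual:something:C:B:A:/full/path/to/poky/meta/recipes-whatever/helloworld/helloworld_1.2.3.bb:do_sometask'
# maps to 'helloworld/helloworld_1.2.3.bb:do_sometask:virtual:something:C:B:A:mc:X'
```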
@@ -188,19 +188,11 @@ class TinfoilCookerAdapter:
|
||||
self._cache[name] = attrvalue
|
||||
return attrvalue
|
||||
|
||||
class TinfoilSkiplistByMcAdapter:
|
||||
def __init__(self, tinfoil):
|
||||
self.tinfoil = tinfoil
|
||||
|
||||
def __getitem__(self, mc):
|
||||
return self.tinfoil.get_skipped_recipes(mc)
|
||||
|
||||
def __init__(self, tinfoil):
|
||||
self.tinfoil = tinfoil
|
||||
self.multiconfigs = [''] + (tinfoil.config_data.getVar('BBMULTICONFIG') or '').split()
|
||||
self.collections = {}
|
||||
self.recipecaches = {}
|
||||
self.skiplist_by_mc = self.TinfoilSkiplistByMcAdapter(tinfoil)
|
||||
for mc in self.multiconfigs:
|
||||
self.collections[mc] = self.TinfoilCookerCollectionAdapter(tinfoil, mc)
|
||||
self.recipecaches[mc] = self.TinfoilRecipeCacheAdapter(tinfoil, mc)
|
||||
@@ -209,6 +201,8 @@ class TinfoilCookerAdapter:
|
||||
# Grab these only when they are requested since they aren't always used
|
||||
if name in self._cache:
|
||||
return self._cache[name]
|
||||
elif name == 'skiplist':
|
||||
attrvalue = self.tinfoil.get_skipped_recipes()
|
||||
elif name == 'bbfile_config_priorities':
|
||||
ret = self.tinfoil.run_command('getLayerPriorities')
|
||||
bbfile_config_priorities = []
|
||||
@@ -331,11 +325,11 @@ class Tinfoil:
|
||||
self.recipes_parsed = False
|
||||
self.quiet = 0
|
||||
self.oldhandlers = self.logger.handlers[:]
|
||||
self.localhandlers = []
|
||||
if setup_logging:
|
||||
# This is the *client-side* logger, nothing to do with
|
||||
# logging messages from the server
|
||||
bb.msg.logger_create('BitBake', output)
|
||||
self.localhandlers = []
|
||||
for handler in self.logger.handlers:
|
||||
if handler not in self.oldhandlers:
|
||||
self.localhandlers.append(handler)
|
||||
@@ -455,12 +449,6 @@ class Tinfoil:
|
||||
self.run_actions(config_params)
|
||||
self.recipes_parsed = True
|
||||
|
||||
def modified_files(self):
|
||||
"""
|
||||
Notify the server it needs to revalidate its caches since the client has modified files
|
||||
"""
|
||||
self.run_command("revalidateCaches")
|
||||
|
||||
def run_command(self, command, *params, handle_events=True):
|
||||
"""
|
||||
Run a command on the server (as implemented in bb.command).
|
||||
@@ -520,12 +508,12 @@ class Tinfoil:
|
||||
"""
|
||||
return defaultdict(list, self.run_command('getOverlayedRecipes', mc))
|
||||
|
||||
def get_skipped_recipes(self, mc=''):
|
||||
def get_skipped_recipes(self):
|
||||
"""
|
||||
Find recipes which were skipped (i.e. SkipRecipe was raised
|
||||
during parsing).
|
||||
"""
|
||||
return OrderedDict(self.run_command('getSkippedRecipes', mc))
|
||||
return OrderedDict(self.run_command('getSkippedRecipes'))
|
||||
|
||||
def get_all_providers(self, mc=''):
|
||||
return defaultdict(list, self.run_command('allProviders', mc))
|
||||
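The Tinfoil methods touched in these hunks are normally driven through the client-side context-manager API. A hedged sketch of typical usage follows; the prepare() call and the skipreason attribute reflect common BitBake patterns and are not taken from this diff.

```python
import bb.tinfoil

# Minimal sketch: list a few skipped recipes through the tinfoil client.
with bb.tinfoil.Tinfoil() as tinfoil:
    tinfoil.prepare(config_only=False)  # a full recipe parse is needed here
    skipped = tinfoil.get_skipped_recipes()
    for fn, info in list(skipped.items())[:10]:
        print(fn, getattr(info, "skipreason", ""))
```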
@@ -539,7 +527,6 @@ class Tinfoil:
|
||||
def get_runtime_providers(self, rdep):
|
||||
return self.run_command('getRuntimeProviders', rdep)
|
||||
|
||||
# TODO: teach this method about mc
|
||||
def get_recipe_file(self, pn):
|
||||
"""
|
||||
Get the file name for the specified recipe/target. Raises
|
||||
@@ -548,7 +535,6 @@ class Tinfoil:
|
||||
"""
|
||||
best = self.find_best_provider(pn)
|
||||
if not best or (len(best) > 3 and not best[3]):
|
||||
# TODO: pass down mc
|
||||
skiplist = self.get_skipped_recipes()
|
||||
taskdata = bb.taskdata.TaskData(None, skiplist=skiplist)
|
||||
skipreasons = taskdata.get_reasons(pn)
|
||||
|
||||
@@ -1746,6 +1746,7 @@ class BuildInfoHelper(object):
|
||||
|
||||
buildname = self.server.runCommand(['getVariable', 'BUILDNAME'])[0]
|
||||
machine = self.server.runCommand(['getVariable', 'MACHINE'])[0]
|
||||
image_name = self.server.runCommand(['getVariable', 'IMAGE_NAME'])[0]
|
||||
|
||||
# location of the manifest files for this build;
|
||||
# note that this file is only produced if an image is produced
|
||||
@@ -1766,18 +1767,6 @@ class BuildInfoHelper(object):
|
||||
# filter out anything which isn't an image target
|
||||
image_targets = [target for target in targets if target.is_image]
|
||||
|
||||
if len(image_targets) > 0:
|
||||
#if there are image targets retrieve image_name
|
||||
image_name = self.server.runCommand(['getVariable', 'IMAGE_NAME'])[0]
|
||||
if not image_name:
|
||||
#When build target is an image and image_name is not found as an environment variable
|
||||
logger.info("IMAGE_NAME not found, extracting from bitbake command")
|
||||
cmd = self.server.runCommand(['getVariable','BB_CMDLINE'])[0]
|
||||
#filter out tokens that are command line options
|
||||
cmd = [token for token in cmd if not token.startswith('-')]
|
||||
image_name = cmd[1].split(':', 1)[0] # remove everything after : in image name
|
||||
logger.info("IMAGE_NAME found as : %s " % image_name)
|
||||
|
||||
for image_target in image_targets:
|
||||
# this is set to True if we find at least one file relating to
|
||||
# this target; if this remains False after the scan, we copy the
|
||||
|
||||
@@ -1,86 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
# This file re-uses code spread throughout other Bitbake source files.
|
||||
# As such, all other copyrights belong to their own right holders.
|
||||
#
|
||||
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import pickle
|
||||
import codecs
|
||||
|
||||
|
||||
class EventPlayer:
|
||||
"""Emulate a connection to a bitbake server."""
|
||||
|
||||
def __init__(self, eventfile, variables):
|
||||
self.eventfile = eventfile
|
||||
self.variables = variables
|
||||
self.eventmask = []
|
||||
|
||||
def waitEvent(self, _timeout):
|
||||
"""Read event from the file."""
|
||||
line = self.eventfile.readline().strip()
|
||||
if not line:
|
||||
return
|
||||
try:
|
||||
decodedline = json.loads(line)
|
||||
if 'allvariables' in decodedline:
|
||||
self.variables = decodedline['allvariables']
|
||||
return
|
||||
if not 'vars' in decodedline:
|
||||
raise ValueError
|
||||
event_str = decodedline['vars'].encode('utf-8')
|
||||
event = pickle.loads(codecs.decode(event_str, 'base64'))
|
||||
event_name = "%s.%s" % (event.__module__, event.__class__.__name__)
|
||||
if event_name not in self.eventmask:
|
||||
return
|
||||
return event
|
||||
except ValueError as err:
|
||||
print("Failed loading ", line)
|
||||
raise err
|
||||
|
||||
def runCommand(self, command_line):
|
||||
"""Emulate running a command on the server."""
|
||||
name = command_line[0]
|
||||
|
||||
if name == "getVariable":
|
||||
var_name = command_line[1]
|
||||
variable = self.variables.get(var_name)
|
||||
if variable:
|
||||
return variable['v'], None
|
||||
return None, "Missing variable %s" % var_name
|
||||
|
||||
elif name == "getAllKeysWithFlags":
|
||||
dump = {}
|
||||
flaglist = command_line[1]
|
||||
for key, val in self.variables.items():
|
||||
try:
|
||||
if not key.startswith("__"):
|
||||
dump[key] = {
|
||||
'v': val['v'],
|
||||
'history' : val['history'],
|
||||
}
|
||||
for flag in flaglist:
|
||||
dump[key][flag] = val[flag]
|
||||
except Exception as err:
|
||||
print(err)
|
||||
return (dump, None)
|
||||
|
||||
elif name == 'setEventMask':
|
||||
self.eventmask = command_line[-1]
|
||||
return True, None
|
||||
|
||||
else:
|
||||
raise Exception("Command %s not implemented" % command_line[0])
|
||||
|
||||
def getEventHandle(self):
|
||||
"""
|
||||
This method is called by toasterui.
|
||||
The return value is passed to self.runCommand but not used there.
|
||||
"""
|
||||
pass
|
||||
@@ -179,7 +179,7 @@ class TerminalFilter(object):
|
||||
new[3] = new[3] & ~termios.ECHO
|
||||
termios.tcsetattr(fd, termios.TCSADRAIN, new)
|
||||
curses.setupterm()
|
||||
if curses.tigetnum("colors") > 2 and os.environ.get('NO_COLOR', '') == '':
|
||||
if curses.tigetnum("colors") > 2:
|
||||
for h in handlers:
|
||||
try:
|
||||
h.formatter.enable_color()
|
||||
@@ -420,11 +420,6 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
except bb.BBHandledException:
|
||||
drain_events_errorhandling(eventHandler)
|
||||
return 1
|
||||
except Exception as e:
|
||||
# bitbake-server comms failure
|
||||
early_logger = bb.msg.logger_create('bitbake', sys.stdout)
|
||||
early_logger.fatal("Attempting to set server environment: %s", e)
|
||||
return 1
|
||||
|
||||
if params.options.quiet == 0:
|
||||
console_loglevel = loglevel
|
||||
@@ -577,8 +572,6 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
else:
|
||||
log_exec_tty = False
|
||||
|
||||
should_print_hyperlinks = sys.stdout.isatty() and os.environ.get('NO_COLOR', '') == ''
|
||||
|
||||
helper = uihelper.BBUIHelper()
|
||||
|
||||
# Look for the specially designated handlers which need to be passed to the
|
||||
@@ -592,12 +585,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
return
|
||||
|
||||
llevel, debug_domains = bb.msg.constructLogOptions()
|
||||
try:
|
||||
server.runCommand(["setEventMask", server.getEventHandle(), llevel, debug_domains, _evt_list])
|
||||
except (BrokenPipeError, EOFError) as e:
|
||||
# bitbake-server comms failure
|
||||
logger.fatal("Attempting to set event mask: %s", e)
|
||||
return 1
|
||||
server.runCommand(["setEventMask", server.getEventHandle(), llevel, debug_domains, _evt_list])
|
||||
|
||||
# The logging_tree module is *extremely* helpful in debugging logging
|
||||
# domains. Uncomment here to dump the logging tree when bitbake starts
|
||||
@@ -606,11 +594,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
|
||||
universe = False
|
||||
if not params.observe_only:
|
||||
try:
|
||||
params.updateFromServer(server)
|
||||
except Exception as e:
|
||||
logger.fatal("Fetching command line: %s", e)
|
||||
return 1
|
||||
params.updateFromServer(server)
|
||||
cmdline = params.parseActions()
|
||||
if not cmdline:
|
||||
print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
|
||||
@@ -621,12 +605,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
if cmdline['action'][0] == "buildTargets" and "universe" in cmdline['action'][1]:
|
||||
universe = True
|
||||
|
||||
try:
|
||||
ret, error = server.runCommand(cmdline['action'])
|
||||
except (BrokenPipeError, EOFError) as e:
|
||||
# bitbake-server comms failure
|
||||
logger.fatal("Command '{}' failed: %s".format(cmdline), e)
|
||||
return 1
|
||||
ret, error = server.runCommand(cmdline['action'])
|
||||
if error:
|
||||
logger.error("Command '%s' failed: %s" % (cmdline, error))
|
||||
return 1
|
||||
@@ -642,7 +621,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
return_value = 0
|
||||
errors = 0
|
||||
warnings = 0
|
||||
taskfailures = {}
|
||||
taskfailures = []
|
||||
|
||||
printintervaldelta = 10 * 60 # 10 minutes
|
||||
printinterval = printintervaldelta
|
||||
@@ -728,8 +707,6 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
if isinstance(event, bb.build.TaskFailed):
|
||||
return_value = 1
|
||||
print_event_log(event, includelogs, loglines, termfilter)
|
||||
k = "{}:{}".format(event._fn, event._task)
|
||||
taskfailures[k] = event.logfile
|
||||
if isinstance(event, bb.build.TaskBase):
|
||||
logger.info(event._message)
|
||||
continue
|
||||
@@ -825,7 +802,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
|
||||
if isinstance(event, bb.runqueue.runQueueTaskFailed):
|
||||
return_value = 1
|
||||
taskfailures.setdefault(event.taskstring)
|
||||
taskfailures.append(event.taskstring)
|
||||
logger.error(str(event))
|
||||
continue
|
||||
|
||||
@@ -877,26 +854,15 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
|
||||
logger.error("Unknown event: %s", event)
|
||||
|
||||
except (BrokenPipeError, EOFError) as e:
|
||||
# bitbake-server comms failure, don't attempt further comms and exit
|
||||
logger.fatal("Executing event: %s", e)
|
||||
return_value = 1
|
||||
errors = errors + 1
|
||||
main.shutdown = 3
|
||||
except EnvironmentError as ioerror:
|
||||
termfilter.clearFooter()
|
||||
# ignore interrupted io
|
||||
if ioerror.args[0] == 4:
|
||||
continue
|
||||
sys.stderr.write(str(ioerror))
|
||||
main.shutdown = 2
|
||||
if not params.observe_only:
|
||||
try:
|
||||
_, error = server.runCommand(["stateForceShutdown"])
|
||||
except (BrokenPipeError, EOFError) as e:
|
||||
# bitbake-server comms failure, don't attempt further comms and exit
|
||||
logger.fatal("Unable to force shutdown: %s", e)
|
||||
main.shutdown = 3
|
||||
_, error = server.runCommand(["stateForceShutdown"])
|
||||
main.shutdown = 2
|
||||
except KeyboardInterrupt:
|
||||
termfilter.clearFooter()
|
||||
if params.observe_only:
|
||||
@@ -905,13 +871,9 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
|
||||
def state_force_shutdown():
|
||||
print("\nSecond Keyboard Interrupt, stopping...\n")
|
||||
try:
|
||||
_, error = server.runCommand(["stateForceShutdown"])
|
||||
if error:
|
||||
logger.error("Unable to cleanly stop: %s" % error)
|
||||
except (BrokenPipeError, EOFError) as e:
|
||||
# bitbake-server comms failure
|
||||
logger.fatal("Unable to cleanly stop: %s", e)
|
||||
_, error = server.runCommand(["stateForceShutdown"])
|
||||
if error:
|
||||
logger.error("Unable to cleanly stop: %s" % error)
|
||||
|
||||
if not params.observe_only and main.shutdown == 1:
|
||||
state_force_shutdown()
|
||||
@@ -924,9 +886,6 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
_, error = server.runCommand(["stateShutdown"])
|
||||
if error:
|
||||
logger.error("Unable to cleanly shutdown: %s" % error)
|
||||
except (BrokenPipeError, EOFError) as e:
|
||||
# bitbake-server comms failure
|
||||
logger.fatal("Unable to cleanly shutdown: %s", e)
|
||||
except KeyboardInterrupt:
|
||||
state_force_shutdown()
|
||||
|
||||
@@ -934,33 +893,18 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
except Exception as e:
|
||||
import traceback
|
||||
sys.stderr.write(traceback.format_exc())
|
||||
main.shutdown = 2
|
||||
if not params.observe_only:
|
||||
try:
|
||||
_, error = server.runCommand(["stateForceShutdown"])
|
||||
except (BrokenPipeError, EOFError) as e:
|
||||
# bitbake-server comms failure, don't attempt further comms and exit
|
||||
logger.fatal("Unable to force shutdown: %s", e)
|
||||
main.shudown = 3
|
||||
_, error = server.runCommand(["stateForceShutdown"])
|
||||
main.shutdown = 2
|
||||
return_value = 1
|
||||
try:
|
||||
termfilter.clearFooter()
|
||||
summary = ""
|
||||
def format_hyperlink(url, link_text):
|
||||
if should_print_hyperlinks:
|
||||
start = f'\033]8;;{url}\033\\'
|
||||
end = '\033]8;;\033\\'
|
||||
return f'{start}{link_text}{end}'
|
||||
return link_text
|
||||
|
||||
if taskfailures:
|
||||
summary += pluralise("\nSummary: %s task failed:",
|
||||
"\nSummary: %s tasks failed:", len(taskfailures))
|
||||
for (failure, log_file) in taskfailures.items():
|
||||
for failure in taskfailures:
|
||||
summary += "\n %s" % failure
|
||||
if log_file:
|
||||
hyperlink = format_hyperlink(f"file://{log_file}", log_file)
|
||||
summary += "\n log: {}".format(hyperlink)
|
||||
if warnings:
|
||||
summary += pluralise("\nSummary: There was %s WARNING message.",
|
||||
"\nSummary: There were %s WARNING messages.", warnings)
|
||||
|
||||
@@ -227,9 +227,6 @@ class NCursesUI:
|
||||
shutdown = 0
|
||||
|
||||
try:
|
||||
if not params.observe_only:
|
||||
params.updateToServer(server, os.environ.copy())
|
||||
|
||||
params.updateFromServer(server)
|
||||
cmdline = params.parseActions()
|
||||
if not cmdline:
|
||||
|
||||
File diff suppressed because it is too large
@@ -30,6 +30,7 @@ import bb.build
|
||||
import bb.command
|
||||
import bb.cooker
|
||||
import bb.event
|
||||
import bb.exceptions
|
||||
import bb.runqueue
|
||||
from bb.ui import uihelper
|
||||
|
||||
@@ -101,6 +102,10 @@ class TeamcityLogFormatter(logging.Formatter):
|
||||
details = ""
|
||||
if hasattr(record, 'bb_exc_formatted'):
|
||||
details = ''.join(record.bb_exc_formatted)
|
||||
elif hasattr(record, 'bb_exc_info'):
|
||||
etype, value, tb = record.bb_exc_info
|
||||
formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
|
||||
details = ''.join(formatted)
|
||||
|
||||
if record.levelno in [bb.msg.BBLogFormatter.ERROR, bb.msg.BBLogFormatter.CRITICAL]:
|
||||
# ERROR gets a separate errorDetails field
|
||||
|
||||
@@ -385,7 +385,7 @@ def main(server, eventHandler, params):
|
||||
main.shutdown = 1
|
||||
|
||||
logger.info("ToasterUI build done, brbe: %s", brbe)
|
||||
break
|
||||
continue
|
||||
|
||||
if isinstance(event, (bb.command.CommandCompleted,
|
||||
bb.command.CommandFailed,
|
||||
|
||||
@@ -50,7 +50,7 @@ def clean_context():
|
||||
|
||||
def get_context():
|
||||
return _context
|
||||
|
||||
|
||||
|
||||
def set_context(ctx):
|
||||
_context = ctx
|
||||
@@ -212,8 +212,8 @@ def explode_dep_versions2(s, *, sort=True):
inversion = True
# This list is based on behavior and supported comparisons from deb, opkg and rpm.
#
# Even though =<, <<, ==, !=, =>, and >> may not be supported,
# we list each possibly valid item.
# Even though =<, <<, ==, !=, =>, and >> may not be supported,
# we list each possibly valid item.
# The build system is responsible for validation of what it supports.
if i.startswith(('<=', '=<', '<<', '==', '!=', '>=', '=>', '>>')):
lastcmp = i[0:2]
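The comment block above concerns how bb.utils.explode_dep_versions2() treats version comparison operators in dependency strings. A hedged usage sketch follows; the exact return shape is an assumption based on common BitBake usage, not something stated in this diff.

```python
import bb.utils

# Parse a dependency string of the kind the comment above describes.
# Assumption: each entry maps a dependency name to the list of version
# constraints attached to it (empty when no constraint is given).
deps = bb.utils.explode_dep_versions2("foo (>= 1.2), bar, baz (<< 2.0)")
for name, constraints in deps.items():
    print(name, constraints)
# Expected along the lines of: foo ['>= 1.2'] / bar [] / baz ['<< 2.0']
```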
@@ -347,7 +347,7 @@ def _print_exception(t, value, tb, realfile, text, context):
exception = traceback.format_exception_only(t, value)
error.append('Error executing a python function in %s:\n' % realfile)

# Strip 'us' from the stack (better_exec call) unless that was where the
# Strip 'us' from the stack (better_exec call) unless that was where the
# error came from
if tb.tb_next is not None:
tb = tb.tb_next
@@ -604,6 +604,7 @@ def preserved_envvars():
v = [
'BBPATH',
'BB_PRESERVE_ENV',
'BB_ENV_PASSTHROUGH',
'BB_ENV_PASSTHROUGH_ADDITIONS',
]
return v + preserved_envvars_exported()
@@ -745,9 +746,9 @@ def prunedir(topdir, ionice=False):
# but that's possibly insane and suffixes is probably going to be small
#
def prune_suffix(var, suffixes, d):
"""
"""
See if var ends with any of the suffixes listed and
remove it if found
remove it if found
"""
for suffix in suffixes:
if suffix and var.endswith(suffix):
@@ -758,8 +759,7 @@ def mkdirhier(directory):
"""Create a directory like 'mkdir -p', but does not complain if
directory already exists like os.makedirs
"""
if '${' in str(directory):
bb.fatal("Directory name {} contains unexpanded bitbake variable. This may cause build failures and WORKDIR polution.".format(directory))

try:
os.makedirs(directory)
except OSError as e:
@@ -1001,9 +1001,9 @@ def umask(new_mask):
os.umask(current_mask)

def to_boolean(string, default=None):
"""
"""
Check input string and return boolean value True/False/None
depending upon the checks
depending upon the checks
"""
if not string:
return default
@@ -1142,10 +1142,7 @@ def get_referenced_vars(start_expr, d):


def cpu_count():
try:
return len(os.sched_getaffinity(0))
except OSError:
return multiprocessing.cpu_count()
return multiprocessing.cpu_count()

def nonblockingfd(fd):
fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
@@ -1831,29 +1828,6 @@ def mkstemp(suffix=None, prefix=None, dir=None, text=False):
prefix = tempfile.gettempprefix() + entropy
return tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir, text=text)

def path_is_descendant(descendant, ancestor):
"""
Returns True if the path `descendant` is a descendant of `ancestor`
(including being equivalent to `ancestor` itself). Otherwise returns False.
Correctly accounts for symlinks, bind mounts, etc. by using
os.path.samestat() to compare paths

May raise any exception that os.stat() raises
"""

ancestor_stat = os.stat(ancestor)

# Recurse up each directory component of the descendant to see if it is
# equivalent to the ancestor
check_dir = os.path.abspath(descendant).rstrip("/")
while check_dir:
check_stat = os.stat(check_dir)
if os.path.samestat(check_stat, ancestor_stat):
return True
check_dir = os.path.dirname(check_dir).rstrip("/")

return False

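bb.utils.path_is_descendant(), shown in full in the hunk above, compares paths with os.stat()/os.path.samestat(), so symlinked aliases of a directory still count as descendants. A small usage sketch, assuming a scratch directory:

```python
import os
import tempfile

import bb.utils

# Demonstrate the samestat-based comparison: a path reached through a
# symlink inside the ancestor is still reported as a descendant.
with tempfile.TemporaryDirectory() as top:
    inner = os.path.join(top, "a", "b")
    os.makedirs(inner)
    os.symlink(os.path.join(top, "a"), os.path.join(top, "alias"))

    print(bb.utils.path_is_descendant(inner, top))                            # True
    print(bb.utils.path_is_descendant(os.path.join(top, "alias", "b"), top))  # True
    print(bb.utils.path_is_descendant("/", top))                              # False
```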
# If we don't have a timeout of some kind and a process/thread exits badly (for example
|
||||
# OOM killed) and held a lock, we'd just hang in the lock futex forever. It is better
|
||||
# we exit at some point than hang. 5 minutes with no progress means we're probably deadlocked.
|
||||
|
||||
@@ -1,126 +0,0 @@
|
||||
#! /usr/bin/env python3
|
||||
#
|
||||
# Copyright 2023 by Garmin Ltd. or its subsidiaries
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
import sys
|
||||
import ctypes
|
||||
import os
|
||||
import errno
|
||||
|
||||
libc = ctypes.CDLL("libc.so.6", use_errno=True)
|
||||
fsencoding = sys.getfilesystemencoding()
|
||||
|
||||
|
||||
libc.listxattr.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_size_t]
|
||||
libc.llistxattr.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_size_t]
|
||||
|
||||
|
||||
def listxattr(path, follow=True):
|
||||
func = libc.listxattr if follow else libc.llistxattr
|
||||
|
||||
os_path = os.fsencode(path)
|
||||
|
||||
while True:
|
||||
length = func(os_path, None, 0)
|
||||
|
||||
if length < 0:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err), str(path))
|
||||
|
||||
if length == 0:
|
||||
return []
|
||||
|
||||
arr = ctypes.create_string_buffer(length)
|
||||
|
||||
read_length = func(os_path, arr, length)
|
||||
if read_length != length:
|
||||
# Race!
|
||||
continue
|
||||
|
||||
return [a.decode(fsencoding) for a in arr.raw.split(b"\x00") if a]
|
||||
|
||||
|
||||
libc.getxattr.argtypes = [
|
||||
ctypes.c_char_p,
|
||||
ctypes.c_char_p,
|
||||
ctypes.c_char_p,
|
||||
ctypes.c_size_t,
|
||||
]
|
||||
libc.lgetxattr.argtypes = [
|
||||
ctypes.c_char_p,
|
||||
ctypes.c_char_p,
|
||||
ctypes.c_char_p,
|
||||
ctypes.c_size_t,
|
||||
]
|
||||
|
||||
|
||||
def getxattr(path, name, follow=True):
|
||||
func = libc.getxattr if follow else libc.lgetxattr
|
||||
|
||||
os_path = os.fsencode(path)
|
||||
os_name = os.fsencode(name)
|
||||
|
||||
while True:
|
||||
length = func(os_path, os_name, None, 0)
|
||||
|
||||
if length < 0:
|
||||
err = ctypes.get_errno()
|
||||
if err == errno.ENODATA:
|
||||
return None
|
||||
raise OSError(err, os.strerror(err), str(path))
|
||||
|
||||
if length == 0:
|
||||
return ""
|
||||
|
||||
arr = ctypes.create_string_buffer(length)
|
||||
|
||||
read_length = func(os_path, os_name, arr, length)
|
||||
if read_length != length:
|
||||
# Race!
|
||||
continue
|
||||
|
||||
return arr.raw
|
||||
|
||||
|
||||
def get_all_xattr(path, follow=True):
|
||||
attrs = {}
|
||||
|
||||
names = listxattr(path, follow)
|
||||
|
||||
for name in names:
|
||||
value = getxattr(path, name, follow)
|
||||
if value is None:
|
||||
# This can happen if a value is erased after listxattr is called,
|
||||
# so ignore it
|
||||
continue
|
||||
attrs[name] = value
|
||||
|
||||
return attrs
|
||||
|
||||
|
||||
def main():
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("path", help="File Path", type=Path)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
attrs = get_all_xattr(args.path)
|
||||
|
||||
for name, value in attrs.items():
|
||||
try:
|
||||
value = value.decode(fsencoding)
|
||||
except UnicodeDecodeError:
|
||||
pass
|
||||
|
||||
print(f"{name} = {value}")
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
@@ -50,14 +50,12 @@ class ActionPlugin(LayerPlugin):
|
||||
|
||||
try:
|
||||
notadded, _ = bb.utils.edit_bblayers_conf(bblayers_conf, layerdirs, None)
|
||||
self.tinfoil.modified_files()
|
||||
if not (args.force or notadded):
|
||||
try:
|
||||
self.tinfoil.run_command('parseConfiguration')
|
||||
except (bb.tinfoil.TinfoilUIException, bb.BBHandledException):
|
||||
# Restore the back up copy of bblayers.conf
|
||||
shutil.copy2(backup, bblayers_conf)
|
||||
self.tinfoil.modified_files()
|
||||
bb.fatal("Parse failure with the specified layer added, exiting.")
|
||||
else:
|
||||
for item in notadded:
|
||||
@@ -83,7 +81,6 @@ class ActionPlugin(LayerPlugin):
|
||||
layerdir = os.path.abspath(item)
|
||||
layerdirs.append(layerdir)
|
||||
(_, notremoved) = bb.utils.edit_bblayers_conf(bblayers_conf, None, layerdirs)
|
||||
self.tinfoil.modified_files()
|
||||
if notremoved:
|
||||
for item in notremoved:
|
||||
sys.stderr.write("No layers matching %s found in BBLAYERS\n" % item)
|
||||
@@ -243,9 +240,6 @@ build results (as the layer priority order has effectively changed).
|
||||
if not entry_found:
|
||||
logger.warning("File %s does not match the flattened layer's BBFILES setting, you may need to edit conf/layer.conf or move the file elsewhere" % f1full)
|
||||
|
||||
self.tinfoil.modified_files()
|
||||
|
||||
|
||||
def get_file_layer(self, filename):
|
||||
layerdir = self.get_file_layerdir(filename)
|
||||
if layerdir:
|
||||
|
||||
@@ -142,11 +142,10 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
|
||||
# Ensure we list skipped recipes
|
||||
# We are largely guessing about PN, PV and the preferred version here,
|
||||
# but we have no choice since skipped recipes are not fully parsed
|
||||
skiplist = list(self.tinfoil.cooker.skiplist_by_mc[mc].keys())
|
||||
|
||||
skiplist = list(self.tinfoil.cooker.skiplist.keys())
|
||||
mcspec = 'mc:%s:' % mc
|
||||
if mc:
|
||||
mcspec = f'mc:{mc}:'
|
||||
skiplist = [s[len(mcspec):] if s.startswith(mcspec) else s for s in skiplist]
|
||||
skiplist = [s[len(mcspec):] for s in skiplist if s.startswith(mcspec)]
|
||||
|
||||
for fn in skiplist:
|
||||
recipe_parts = os.path.splitext(os.path.basename(fn))[0].split('_')
|
||||
@@ -163,7 +162,7 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
|
||||
def print_item(f, pn, ver, layer, ispref):
|
||||
if not selected_layer or layer == selected_layer:
|
||||
if not bare and f in skiplist:
|
||||
skipped = ' (skipped: %s)' % self.tinfoil.cooker.skiplist_by_mc[mc][f].skipreason
|
||||
skipped = ' (skipped: %s)' % self.tinfoil.cooker.skiplist[f].skipreason
|
||||
else:
|
||||
skipped = ''
|
||||
if show_filenames:
|
||||
@@ -283,10 +282,7 @@ Lists recipes with the bbappends that apply to them as subitems.
|
||||
else:
|
||||
logger.plain('=== Appended recipes ===')
|
||||
|
||||
|
||||
cooker_data = self.tinfoil.cooker.recipecaches[args.mc]
|
||||
|
||||
pnlist = list(cooker_data.pkg_pn.keys())
|
||||
pnlist = list(self.tinfoil.cooker_data.pkg_pn.keys())
|
||||
pnlist.sort()
|
||||
appends = False
|
||||
for pn in pnlist:
|
||||
@@ -299,28 +295,26 @@ Lists recipes with the bbappends that apply to them as subitems.
|
||||
if not found:
|
||||
continue
|
||||
|
||||
if self.show_appends_for_pn(pn, cooker_data, args.mc):
|
||||
if self.show_appends_for_pn(pn):
|
||||
appends = True
|
||||
|
||||
if not args.pnspec and self.show_appends_for_skipped(args.mc):
|
||||
if not args.pnspec and self.show_appends_for_skipped():
|
||||
appends = True
|
||||
|
||||
if not appends:
|
||||
logger.plain('No append files found')
|
||||
|
||||
def show_appends_for_pn(self, pn, cooker_data, mc):
|
||||
filenames = cooker_data.pkg_pn[pn]
|
||||
if mc:
|
||||
pn = "mc:%s:%s" % (mc, pn)
|
||||
def show_appends_for_pn(self, pn):
|
||||
filenames = self.tinfoil.cooker_data.pkg_pn[pn]
|
||||
|
||||
best = self.tinfoil.find_best_provider(pn)
|
||||
best_filename = os.path.basename(best[3])
|
||||
|
||||
return self.show_appends_output(filenames, best_filename)
|
||||
|
||||
def show_appends_for_skipped(self, mc):
|
||||
def show_appends_for_skipped(self):
|
||||
filenames = [os.path.basename(f)
|
||||
for f in self.tinfoil.cooker.skiplist_by_mc[mc].keys()]
|
||||
for f in self.tinfoil.cooker.skiplist.keys()]
|
||||
return self.show_appends_output(filenames, None, " (skipped)")
|
||||
|
||||
def show_appends_output(self, filenames, best_filename, name_suffix = ''):
|
||||
@@ -536,7 +530,6 @@ NOTE: .bbappend files can impact the dependencies.
|
||||
|
||||
parser_show_appends = self.add_command(sp, 'show-appends', self.do_show_appends)
|
||||
parser_show_appends.add_argument('pnspec', nargs='*', help='optional recipe name specification (wildcards allowed, enclose in quotes to avoid shell expansion)')
|
||||
parser_show_appends.add_argument('--mc', help='use specified multiconfig', default='')
|
||||
|
||||
parser_show_cross_depends = self.add_command(sp, 'show-cross-depends', self.do_show_cross_depends)
|
||||
parser_show_cross_depends.add_argument('-f', '--filenames', help='show full file path', action='store_true')
|
||||
|
||||
@@ -585,7 +585,7 @@ class SiblingTest(TreeTest):
|
||||
</html>'''
|
||||
# All that whitespace looks good but makes the tests more
|
||||
# difficult. Get rid of it.
|
||||
markup = re.compile(r"\n\s*").sub("", markup)
|
||||
markup = re.compile("\n\s*").sub("", markup)
|
||||
self.tree = self.soup(markup)
|
||||
|
||||
|
||||
|
||||
@@ -392,7 +392,19 @@ class SourceGenerator(NodeVisitor):
|
||||
def visit_Name(self, node):
|
||||
self.write(node.id)
|
||||
|
||||
def visit_Str(self, node):
|
||||
self.write(repr(node.s))
|
||||
|
||||
def visit_Bytes(self, node):
|
||||
self.write(repr(node.s))
|
||||
|
||||
def visit_Num(self, node):
|
||||
self.write(repr(node.n))
|
||||
|
||||
def visit_Constant(self, node):
|
||||
# Python 3.8 deprecated visit_Num(), visit_Str(), visit_Bytes(),
|
||||
# visit_NameConstant() and visit_Ellipsis(). They can be removed once we
|
||||
# require 3.8+.
|
||||
self.write(repr(node.value))
|
||||
|
||||
def visit_Tuple(self, node):
|
||||
|
||||
@@ -5,102 +5,151 @@
|
||||
|
||||
import asyncio
|
||||
from contextlib import closing
|
||||
import re
|
||||
import sqlite3
|
||||
import itertools
|
||||
import json
|
||||
from collections import namedtuple
|
||||
from urllib.parse import urlparse
|
||||
from bb.asyncrpc.client import parse_address, ADDR_TYPE_UNIX, ADDR_TYPE_WS
|
||||
|
||||
User = namedtuple("User", ("username", "permissions"))
|
||||
UNIX_PREFIX = "unix://"
|
||||
|
||||
def create_server(
|
||||
addr,
|
||||
dbname,
|
||||
*,
|
||||
sync=True,
|
||||
upstream=None,
|
||||
read_only=False,
|
||||
db_username=None,
|
||||
db_password=None,
|
||||
anon_perms=None,
|
||||
admin_username=None,
|
||||
admin_password=None,
|
||||
):
|
||||
def sqlite_engine():
|
||||
from .sqlite import DatabaseEngine
|
||||
ADDR_TYPE_UNIX = 0
|
||||
ADDR_TYPE_TCP = 1
|
||||
|
||||
return DatabaseEngine(dbname, sync)
|
||||
# The Python async server defaults to a 64K receive buffer, so we hardcode our
|
||||
# maximum chunk size. It would be better if the client and server reported to
|
||||
# each other what the maximum chunk sizes were, but that will slow down the
|
||||
# connection setup with a round trip delay so I'd rather not do that unless it
|
||||
# is necessary
|
||||
DEFAULT_MAX_CHUNK = 32 * 1024
|
||||
|
||||
def sqlalchemy_engine():
|
||||
from .sqlalchemy import DatabaseEngine
|
||||
UNIHASH_TABLE_DEFINITION = (
|
||||
("method", "TEXT NOT NULL", "UNIQUE"),
|
||||
("taskhash", "TEXT NOT NULL", "UNIQUE"),
|
||||
("unihash", "TEXT NOT NULL", ""),
|
||||
)
|
||||
|
||||
return DatabaseEngine(dbname, db_username, db_password)
|
||||
UNIHASH_TABLE_COLUMNS = tuple(name for name, _, _ in UNIHASH_TABLE_DEFINITION)
|
||||
|
||||
from . import server
|
||||
OUTHASH_TABLE_DEFINITION = (
|
||||
("method", "TEXT NOT NULL", "UNIQUE"),
|
||||
("taskhash", "TEXT NOT NULL", "UNIQUE"),
|
||||
("outhash", "TEXT NOT NULL", "UNIQUE"),
|
||||
("created", "DATETIME", ""),
|
||||
|
||||
if "://" in dbname:
|
||||
db_engine = sqlalchemy_engine()
|
||||
# Optional fields
|
||||
("owner", "TEXT", ""),
|
||||
("PN", "TEXT", ""),
|
||||
("PV", "TEXT", ""),
|
||||
("PR", "TEXT", ""),
|
||||
("task", "TEXT", ""),
|
||||
("outhash_siginfo", "TEXT", ""),
|
||||
)
|
||||
|
||||
OUTHASH_TABLE_COLUMNS = tuple(name for name, _, _ in OUTHASH_TABLE_DEFINITION)
|
||||
|
||||
def _make_table(cursor, name, definition):
|
||||
cursor.execute('''
|
||||
CREATE TABLE IF NOT EXISTS {name} (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
{fields}
|
||||
UNIQUE({unique})
|
||||
)
|
||||
'''.format(
|
||||
name=name,
|
||||
fields=" ".join("%s %s," % (name, typ) for name, typ, _ in definition),
|
||||
unique=", ".join(name for name, _, flags in definition if "UNIQUE" in flags)
|
||||
))
|
||||
|
||||
|
||||
def setup_database(database, sync=True):
|
||||
db = sqlite3.connect(database)
|
||||
db.row_factory = sqlite3.Row
|
||||
|
||||
with closing(db.cursor()) as cursor:
|
||||
_make_table(cursor, "unihashes_v2", UNIHASH_TABLE_DEFINITION)
|
||||
_make_table(cursor, "outhashes_v2", OUTHASH_TABLE_DEFINITION)
|
||||
|
||||
cursor.execute('PRAGMA journal_mode = WAL')
|
||||
cursor.execute('PRAGMA synchronous = %s' % ('NORMAL' if sync else 'OFF'))
|
||||
|
||||
# Drop old indexes
|
||||
cursor.execute('DROP INDEX IF EXISTS taskhash_lookup')
|
||||
cursor.execute('DROP INDEX IF EXISTS outhash_lookup')
|
||||
cursor.execute('DROP INDEX IF EXISTS taskhash_lookup_v2')
|
||||
cursor.execute('DROP INDEX IF EXISTS outhash_lookup_v2')
|
||||
|
||||
# TODO: Upgrade from tasks_v2?
|
||||
cursor.execute('DROP TABLE IF EXISTS tasks_v2')
|
||||
|
||||
# Create new indexes
|
||||
cursor.execute('CREATE INDEX IF NOT EXISTS taskhash_lookup_v3 ON unihashes_v2 (method, taskhash)')
|
||||
cursor.execute('CREATE INDEX IF NOT EXISTS outhash_lookup_v3 ON outhashes_v2 (method, outhash)')
|
||||
|
||||
return db
|
||||
|
||||
|
||||
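For illustration, the CREATE TABLE template in _make_table() above renders, for UNIHASH_TABLE_DEFINITION, to roughly the statement below (whitespace normalized). This is a sketch for sanity-checking the schema against an in-memory SQLite database, not part of the hashserv code itself.

```python
import sqlite3

# Rendered form of _make_table(cursor, "unihashes_v2", UNIHASH_TABLE_DEFINITION);
# the trailing comma after the last column is kept, as in the template above.
sql = """
CREATE TABLE IF NOT EXISTS unihashes_v2 (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    method TEXT NOT NULL, taskhash TEXT NOT NULL, unihash TEXT NOT NULL,
    UNIQUE(method, taskhash)
)
"""

db = sqlite3.connect(":memory:")
db.execute(sql)
db.execute("CREATE INDEX IF NOT EXISTS taskhash_lookup_v3 ON unihashes_v2 (method, taskhash)")
db.close()
```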
def parse_address(addr):
|
||||
if addr.startswith(UNIX_PREFIX):
|
||||
return (ADDR_TYPE_UNIX, (addr[len(UNIX_PREFIX):],))
|
||||
else:
|
||||
db_engine = sqlite_engine()
|
||||
m = re.match(r'\[(?P<host>[^\]]*)\]:(?P<port>\d+)$', addr)
|
||||
if m is not None:
|
||||
host = m.group('host')
|
||||
port = m.group('port')
|
||||
else:
|
||||
host, port = addr.split(':')
|
||||
|
||||
if anon_perms is None:
|
||||
anon_perms = server.DEFAULT_ANON_PERMS
|
||||
return (ADDR_TYPE_TCP, (host, int(port)))
|
||||
|
||||
s = server.Server(
|
||||
db_engine,
|
||||
upstream=upstream,
|
||||
read_only=read_only,
|
||||
anon_perms=anon_perms,
|
||||
admin_username=admin_username,
|
||||
admin_password=admin_password,
|
||||
)
|
||||
|
||||
def chunkify(msg, max_chunk):
if len(msg) < max_chunk - 1:
yield ''.join((msg, "\n"))
else:
yield ''.join((json.dumps({
'chunk-stream': None
}), "\n"))

args = [iter(msg)] * (max_chunk - 1)
for m in map(''.join, itertools.zip_longest(*args, fillvalue='')):
yield ''.join(itertools.chain(m, "\n"))
yield "\n"

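The chunkify() helper shown in the hunk above announces a chunk stream and then emits fixed-size newline-terminated slices followed by a bare newline terminator. A self-contained sketch (chunkify() copied from the hunk above, with placeholder message contents) of what a consumer receives for a message longer than max_chunk:

```python
import itertools
import json

def chunkify(msg, max_chunk):
    # Copied from the hashserv helper shown above.
    if len(msg) < max_chunk - 1:
        yield ''.join((msg, "\n"))
    else:
        yield ''.join((json.dumps({'chunk-stream': None}), "\n"))
        args = [iter(msg)] * (max_chunk - 1)
        for m in map(''.join, itertools.zip_longest(*args, fillvalue='')):
            yield ''.join(itertools.chain(m, "\n"))
        yield "\n"

msg = json.dumps({"get": {"taskhash": "abc123", "method": "example.method"}})
for piece in chunkify(msg, max_chunk=16):
    print(repr(piece))
# Prints the '{"chunk-stream": null}' marker, then the message in
# 15-character newline-terminated slices, then a bare '\n' terminator.
```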
def create_server(addr, dbname, *, sync=True, upstream=None, read_only=False):
|
||||
from . import server
|
||||
db = setup_database(dbname, sync=sync)
|
||||
s = server.Server(db, upstream=upstream, read_only=read_only)
|
||||
|
||||
(typ, a) = parse_address(addr)
|
||||
if typ == ADDR_TYPE_UNIX:
|
||||
s.start_unix_server(*a)
|
||||
elif typ == ADDR_TYPE_WS:
|
||||
url = urlparse(a[0])
|
||||
s.start_websocket_server(url.hostname, url.port)
|
||||
else:
|
||||
s.start_tcp_server(*a)
|
||||
|
||||
return s
|
||||
|
||||
|
||||
def create_client(addr, username=None, password=None):
|
||||
def create_client(addr):
|
||||
from . import client
|
||||
c = client.Client()
|
||||
|
||||
c = client.Client(username, password)
|
||||
(typ, a) = parse_address(addr)
|
||||
if typ == ADDR_TYPE_UNIX:
|
||||
c.connect_unix(*a)
|
||||
else:
|
||||
c.connect_tcp(*a)
|
||||
|
||||
try:
|
||||
(typ, a) = parse_address(addr)
|
||||
if typ == ADDR_TYPE_UNIX:
|
||||
c.connect_unix(*a)
|
||||
elif typ == ADDR_TYPE_WS:
|
||||
c.connect_websocket(*a)
|
||||
else:
|
||||
c.connect_tcp(*a)
|
||||
return c
|
||||
except Exception as e:
|
||||
c.close()
|
||||
raise e
|
||||
return c
|
||||
|
||||
|
||||
async def create_async_client(addr, username=None, password=None):
|
||||
async def create_async_client(addr):
|
||||
from . import client
|
||||
c = client.AsyncClient()
|
||||
|
||||
c = client.AsyncClient(username, password)
|
||||
(typ, a) = parse_address(addr)
|
||||
if typ == ADDR_TYPE_UNIX:
|
||||
await c.connect_unix(*a)
|
||||
else:
|
||||
await c.connect_tcp(*a)
|
||||
|
||||
try:
|
||||
(typ, a) = parse_address(addr)
|
||||
if typ == ADDR_TYPE_UNIX:
|
||||
await c.connect_unix(*a)
|
||||
elif typ == ADDR_TYPE_WS:
|
||||
await c.connect_websocket(*a)
|
||||
else:
|
||||
await c.connect_tcp(*a)
|
||||
|
||||
return c
|
||||
except Exception as e:
|
||||
await c.close()
|
||||
raise e
|
||||
return c
|
||||
|
||||
@@ -5,430 +5,117 @@
|
||||
|
||||
import logging
|
||||
import socket
|
||||
import asyncio
|
||||
import bb.asyncrpc
|
||||
import json
|
||||
from . import create_async_client
|
||||
|
||||
|
||||
logger = logging.getLogger("hashserv.client")
|
||||
|
||||
|
||||
class Batch(object):
|
||||
def __init__(self):
|
||||
self.done = False
|
||||
self.cond = asyncio.Condition()
|
||||
self.pending = []
|
||||
self.results = []
|
||||
self.sent_count = 0
|
||||
|
||||
async def recv(self, socket):
|
||||
while True:
|
||||
async with self.cond:
|
||||
await self.cond.wait_for(lambda: self.pending or self.done)
|
||||
|
||||
if not self.pending:
|
||||
if self.done:
|
||||
return
|
||||
continue
|
||||
|
||||
r = await socket.recv()
|
||||
self.results.append(r)
|
||||
|
||||
async with self.cond:
|
||||
self.pending.pop(0)
|
||||
|
||||
async def send(self, socket, msgs):
|
||||
try:
|
||||
# In the event of a restart due to a reconnect, all in-flight
|
||||
# messages need to be resent first to keep to result count in sync
|
||||
for m in self.pending:
|
||||
await socket.send(m)
|
||||
|
||||
for m in msgs:
|
||||
# Add the message to the pending list before attempting to send
|
||||
# it so that if the send fails it will be retried
|
||||
async with self.cond:
|
||||
self.pending.append(m)
|
||||
self.cond.notify()
|
||||
self.sent_count += 1
|
||||
|
||||
await socket.send(m)
|
||||
|
||||
finally:
|
||||
async with self.cond:
|
||||
self.done = True
|
||||
self.cond.notify()
|
||||
|
||||
async def process(self, socket, msgs):
|
||||
await asyncio.gather(
|
||||
self.recv(socket),
|
||||
self.send(socket, msgs),
|
||||
)
|
||||
|
||||
if len(self.results) != self.sent_count:
|
||||
raise ValueError(
|
||||
f"Expected result count {len(self.results)}. Expected {self.sent_count}"
|
||||
)
|
||||
|
||||
return self.results
|
||||
|
||||
|
||||
class AsyncClient(bb.asyncrpc.AsyncClient):
|
||||
MODE_NORMAL = 0
|
||||
MODE_GET_STREAM = 1
|
||||
MODE_EXIST_STREAM = 2
|
||||
|
||||
def __init__(self, username=None, password=None):
|
||||
super().__init__("OEHASHEQUIV", "1.1", logger)
|
||||
def __init__(self):
|
||||
super().__init__('OEHASHEQUIV', '1.1', logger)
|
||||
self.mode = self.MODE_NORMAL
|
||||
self.username = username
|
||||
self.password = password
|
||||
self.saved_become_user = None
|
||||
|
||||
async def setup_connection(self):
|
||||
await super().setup_connection()
|
||||
cur_mode = self.mode
|
||||
self.mode = self.MODE_NORMAL
|
||||
if self.username:
|
||||
# Save off become user temporarily because auth() resets it
|
||||
become = self.saved_become_user
|
||||
await self.auth(self.username, self.password)
|
||||
|
||||
if become:
|
||||
await self.become_user(become)
|
||||
|
||||
async def send_stream_batch(self, mode, msgs):
|
||||
"""
|
||||
Does a "batch" process of stream messages. This sends the query
|
||||
messages as fast as possible, and simultaneously attempts to read the
|
||||
messages back. This helps to mitigate the effects of latency to the
|
||||
hash equivalence server by allowing multiple queries to be "in-flight"
|
||||
at once
|
||||
|
||||
The implementation does more complicated tracking using a count of sent
|
||||
messages so that `msgs` can be a generator function (i.e. its length is
|
||||
unknown)
|
||||
|
||||
"""
|
||||
|
||||
b = Batch()
|
||||
await self._set_mode(cur_mode)
|
||||
|
||||
async def send_stream(self, msg):
|
||||
async def proc():
|
||||
nonlocal b
|
||||
|
||||
await self._set_mode(mode)
|
||||
return await b.process(self.socket, msgs)
|
||||
self.writer.write(("%s\n" % msg).encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
l = await self.reader.readline()
|
||||
if not l:
|
||||
raise ConnectionError("Connection closed")
|
||||
return l.decode("utf-8").rstrip()
|
||||
|
||||
return await self._send_wrapper(proc)
|
||||
|
||||
async def invoke(self, *args, **kwargs):
|
||||
# It's OK if connection errors cause a failure here, because the mode
|
||||
# is also reset to normal on a new connection
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return await super().invoke(*args, **kwargs)
|
||||
|
||||
async def _set_mode(self, new_mode):
|
||||
async def stream_to_normal():
|
||||
await self.socket.send("END")
|
||||
return await self.socket.recv()
|
||||
|
||||
async def normal_to_stream(command):
|
||||
r = await self.invoke({command: None})
|
||||
if new_mode == self.MODE_NORMAL and self.mode == self.MODE_GET_STREAM:
|
||||
r = await self.send_stream("END")
|
||||
if r != "ok":
|
||||
raise ConnectionError(
|
||||
f"Unable to transition to stream mode: Bad response from server {r!r}"
|
||||
)
|
||||
|
||||
self.logger.debug("Mode is now %s", command)
|
||||
|
||||
if new_mode == self.mode:
|
||||
return
|
||||
|
||||
self.logger.debug("Transitioning mode %s -> %s", self.mode, new_mode)
|
||||
|
||||
# Always transition to normal mode before switching to any other mode
|
||||
if self.mode != self.MODE_NORMAL:
|
||||
r = await self._send_wrapper(stream_to_normal)
|
||||
raise ConnectionError("Bad response from server %r" % r)
|
||||
elif new_mode == self.MODE_GET_STREAM and self.mode == self.MODE_NORMAL:
|
||||
r = await self.send_message({"get-stream": None})
|
||||
if r != "ok":
|
||||
self.check_invoke_error(r)
|
||||
raise ConnectionError(
|
||||
f"Unable to transition to normal mode: Bad response from server {r!r}"
|
||||
)
|
||||
self.logger.debug("Mode is now normal")
|
||||
|
||||
if new_mode == self.MODE_GET_STREAM:
|
||||
await normal_to_stream("get-stream")
|
||||
elif new_mode == self.MODE_EXIST_STREAM:
|
||||
await normal_to_stream("exists-stream")
|
||||
elif new_mode != self.MODE_NORMAL:
raise Exception(f"Undefined mode transition {self.mode!r} -> {new_mode!r}")
|
||||
raise ConnectionError("Bad response from server %r" % r)
|
||||
elif new_mode != self.mode:
|
||||
raise Exception(
|
||||
"Undefined mode transition %r -> %r" % (self.mode, new_mode)
|
||||
)
|
||||
|
||||
self.mode = new_mode
|
||||
|
||||
async def get_unihash(self, method, taskhash):
|
||||
r = await self.get_unihash_batch([(method, taskhash)])
|
||||
return r[0]
|
||||
|
||||
async def get_unihash_batch(self, args):
|
||||
result = await self.send_stream_batch(
|
||||
self.MODE_GET_STREAM,
|
||||
(f"{method} {taskhash}" for method, taskhash in args),
|
||||
)
|
||||
return [r if r else None for r in result]
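For reference, a minimal usage sketch of the batched query API added above; the socket path, method name and taskhashes are hypothetical placeholders, and a reachable hash equivalence server is assumed.

# Hedged usage sketch (all values hypothetical); assumes a running
# hash equivalence server and the synchronous wrapper from hashserv.
from hashserv import create_client

client = create_client("unix:///tmp/hashserve.sock")  # hypothetical address

queries = [
    ("TestMethod", "aa11" * 16),  # hypothetical (method, taskhash) pairs
    ("TestMethod", "bb22" * 16),
]

# All queries are streamed "in-flight" at once; results come back in order.
unihashes = client.get_unihash_batch(queries)
for (method, taskhash), unihash in zip(queries, unihashes):
    print(method, taskhash, "->", unihash or "not known")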
|
||||
await self._set_mode(self.MODE_GET_STREAM)
|
||||
r = await self.send_stream("%s %s" % (method, taskhash))
|
||||
if not r:
|
||||
return None
|
||||
return r
|
||||
|
||||
async def report_unihash(self, taskhash, method, outhash, unihash, extra={}):
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
m = extra.copy()
|
||||
m["taskhash"] = taskhash
|
||||
m["method"] = method
|
||||
m["outhash"] = outhash
|
||||
m["unihash"] = unihash
|
||||
return await self.invoke({"report": m})
|
||||
return await self.send_message({"report": m})
|
||||
|
||||
async def report_unihash_equiv(self, taskhash, method, unihash, extra={}):
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
m = extra.copy()
|
||||
m["taskhash"] = taskhash
|
||||
m["method"] = method
|
||||
m["unihash"] = unihash
|
||||
return await self.invoke({"report-equiv": m})
|
||||
return await self.send_message({"report-equiv": m})
|
||||
|
||||
async def get_taskhash(self, method, taskhash, all_properties=False):
|
||||
return await self.invoke(
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return await self.send_message(
|
||||
{"get": {"taskhash": taskhash, "method": method, "all": all_properties}}
|
||||
)
|
||||
|
||||
async def unihash_exists(self, unihash):
|
||||
r = await self.unihash_exists_batch([unihash])
|
||||
return r[0]
|
||||
|
||||
async def unihash_exists_batch(self, unihashes):
|
||||
result = await self.send_stream_batch(self.MODE_EXIST_STREAM, unihashes)
|
||||
return [r == "true" for r in result]
|
||||
|
||||
async def get_outhash(self, method, outhash, taskhash, with_unihash=True):
|
||||
return await self.invoke(
|
||||
{
|
||||
"get-outhash": {
|
||||
"outhash": outhash,
|
||||
"taskhash": taskhash,
|
||||
"method": method,
|
||||
"with_unihash": with_unihash,
|
||||
}
|
||||
}
|
||||
async def get_outhash(self, method, outhash, taskhash):
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return await self.send_message(
|
||||
{"get-outhash": {"outhash": outhash, "taskhash": taskhash, "method": method}}
|
||||
)
|
||||
|
||||
async def get_stats(self):
|
||||
return await self.invoke({"get-stats": None})
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return await self.send_message({"get-stats": None})
|
||||
|
||||
async def reset_stats(self):
|
||||
return await self.invoke({"reset-stats": None})
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return await self.send_message({"reset-stats": None})
|
||||
|
||||
async def backfill_wait(self):
|
||||
return (await self.invoke({"backfill-wait": None}))["tasks"]
|
||||
|
||||
async def remove(self, where):
|
||||
return await self.invoke({"remove": {"where": where}})
|
||||
|
||||
async def clean_unused(self, max_age):
|
||||
return await self.invoke({"clean-unused": {"max_age_seconds": max_age}})
|
||||
|
||||
async def auth(self, username, token):
|
||||
result = await self.invoke({"auth": {"username": username, "token": token}})
|
||||
self.username = username
|
||||
self.password = token
|
||||
self.saved_become_user = None
|
||||
return result
|
||||
|
||||
async def refresh_token(self, username=None):
|
||||
m = {}
|
||||
if username:
|
||||
m["username"] = username
|
||||
result = await self.invoke({"refresh-token": m})
|
||||
if (
|
||||
self.username
|
||||
and not self.saved_become_user
|
||||
and result["username"] == self.username
|
||||
):
|
||||
self.password = result["token"]
|
||||
return result
|
||||
|
||||
async def set_user_perms(self, username, permissions):
|
||||
return await self.invoke(
|
||||
{"set-user-perms": {"username": username, "permissions": permissions}}
|
||||
)
|
||||
|
||||
async def get_user(self, username=None):
|
||||
m = {}
|
||||
if username:
|
||||
m["username"] = username
|
||||
return await self.invoke({"get-user": m})
|
||||
|
||||
async def get_all_users(self):
|
||||
return (await self.invoke({"get-all-users": {}}))["users"]
|
||||
|
||||
async def new_user(self, username, permissions):
|
||||
return await self.invoke(
|
||||
{"new-user": {"username": username, "permissions": permissions}}
|
||||
)
|
||||
|
||||
async def delete_user(self, username):
|
||||
return await self.invoke({"delete-user": {"username": username}})
|
||||
|
||||
async def become_user(self, username):
|
||||
result = await self.invoke({"become-user": {"username": username}})
|
||||
if username == self.username:
|
||||
self.saved_become_user = None
|
||||
else:
|
||||
self.saved_become_user = username
|
||||
return result
|
||||
|
||||
async def get_db_usage(self):
|
||||
return (await self.invoke({"get-db-usage": {}}))["usage"]
|
||||
|
||||
async def get_db_query_columns(self):
|
||||
return (await self.invoke({"get-db-query-columns": {}}))["columns"]
|
||||
|
||||
async def gc_status(self):
|
||||
return await self.invoke({"gc-status": {}})
|
||||
|
||||
async def gc_mark(self, mark, where):
|
||||
"""
|
||||
Starts a new garbage collection operation identified by "mark". If
|
||||
garbage collection is already in progress with "mark", the collection
|
||||
is continued.
|
||||
|
||||
All unihash entries that match the "where" clause are marked to be
|
||||
kept. In addition, any new entries added to the database after this
|
||||
command will be automatically marked with "mark"
|
||||
"""
|
||||
return await self.invoke({"gc-mark": {"mark": mark, "where": where}})
|
||||
|
||||
async def gc_sweep(self, mark):
|
||||
"""
|
||||
Finishes garbage collection for "mark". All unihash entries that have
|
||||
not been marked will be deleted.
|
||||
|
||||
It is recommended to clean unused outhash entries after running this to
clean up any dangling outhashes
|
||||
"""
|
||||
return await self.invoke({"gc-sweep": {"mark": mark}})
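To make the mark/sweep protocol above concrete, here is a hedged sketch of a maintenance pass; it assumes an already connected client with sufficient permissions, and the mark label and "where" clause are hypothetical.

# Hedged sketch of a garbage collection pass (names are hypothetical).
# Assumes `client` is a connected hashserv client with admin permissions.
mark = "gc-2024-01"  # hypothetical mark label

# Mark the entries to keep; anything added after this point is marked
# automatically by the server.
client.gc_mark(mark, {"method": "TestMethod"})  # hypothetical where clause

# Delete every unihash entry that was not marked.
client.gc_sweep(mark)

# As recommended above, drop outhash rows that no longer have a matching
# unihash (here: anything older than 30 days).
client.clean_unused(30 * 24 * 60 * 60)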
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return (await self.send_message({"backfill-wait": None}))["tasks"]
|
||||
|
||||
|
||||
class Client(bb.asyncrpc.Client):
|
||||
def __init__(self, username=None, password=None):
|
||||
self.username = username
|
||||
self.password = password
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self._add_methods(
|
||||
"connect_tcp",
|
||||
"connect_websocket",
|
||||
"get_unihash",
|
||||
"get_unihash_batch",
|
||||
"report_unihash",
|
||||
"report_unihash_equiv",
|
||||
"get_taskhash",
|
||||
"unihash_exists",
|
||||
"unihash_exists_batch",
|
||||
"get_outhash",
|
||||
"get_stats",
|
||||
"reset_stats",
|
||||
"backfill_wait",
|
||||
"remove",
|
||||
"clean_unused",
|
||||
"auth",
|
||||
"refresh_token",
|
||||
"set_user_perms",
|
||||
"get_user",
|
||||
"get_all_users",
|
||||
"new_user",
|
||||
"delete_user",
|
||||
"become_user",
|
||||
"get_db_usage",
|
||||
"get_db_query_columns",
|
||||
"gc_status",
|
||||
"gc_mark",
|
||||
"gc_sweep",
|
||||
)
|
||||
|
||||
def _get_async_client(self):
|
||||
return AsyncClient(self.username, self.password)
|
||||
|
||||
|
||||
class ClientPool(bb.asyncrpc.ClientPool):
|
||||
def __init__(
|
||||
self,
|
||||
address,
|
||||
max_clients,
|
||||
*,
|
||||
username=None,
|
||||
password=None,
|
||||
become=None,
|
||||
):
|
||||
super().__init__(max_clients)
|
||||
self.address = address
|
||||
self.username = username
|
||||
self.password = password
|
||||
self.become = become
|
||||
|
||||
async def _new_client(self):
|
||||
client = await create_async_client(
|
||||
self.address,
|
||||
username=self.username,
|
||||
password=self.password,
|
||||
)
|
||||
if self.become:
|
||||
await client.become_user(self.become)
|
||||
return client
|
||||
|
||||
def _run_key_tasks(self, queries, call):
|
||||
results = {key: None for key in queries.keys()}
|
||||
|
||||
def make_task(key, args):
|
||||
async def task(client):
|
||||
nonlocal results
|
||||
unihash = await call(client, args)
|
||||
results[key] = unihash
|
||||
|
||||
return task
|
||||
|
||||
def gen_tasks():
|
||||
for key, args in queries.items():
|
||||
yield make_task(key, args)
|
||||
|
||||
self.run_tasks(gen_tasks())
|
||||
return results
|
||||
|
||||
def get_unihashes(self, queries):
|
||||
"""
|
||||
Query multiple unihashes in parallel.
|
||||
|
||||
The queries argument is a dictionary with arbitrary keys. The values
must be a tuple of (method, taskhash).

Returns a dictionary with a corresponding key for each input key, and
the value is the queried unihash (which might be None if the query
failed)
|
||||
"""
|
||||
|
||||
async def call(client, args):
|
||||
method, taskhash = args
|
||||
return await client.get_unihash(method, taskhash)
|
||||
|
||||
return self._run_key_tasks(queries, call)
|
||||
|
||||
def unihashes_exist(self, queries):
|
||||
"""
|
||||
Query multiple unihash existence checks in parallel.
|
||||
|
||||
The queries argument is a dictionary with arbitrary keys. The values
must be a unihash.

Returns a dictionary with a corresponding key for each input key, and
the value is True or False depending on whether the unihash is known by
the server (or None if there was a failure)
|
||||
"""
|
||||
|
||||
async def call(client, unihash):
|
||||
return await client.unihash_exists(unihash)
|
||||
|
||||
return self._run_key_tasks(queries, call)
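A brief, hedged sketch of how the pool is intended to be driven from synchronous code; the address, keys and hashes below are placeholders.

# Hedged usage sketch for ClientPool (all values hypothetical).
from hashserv.client import ClientPool

pool = ClientPool("unix:///tmp/hashserve.sock", max_clients=4)

# Keys are arbitrary; here they identify tasks on the caller's side.
queries = {
    ("recipe-a", "do_compile"): ("TestMethod", "aa11" * 16),
    ("recipe-b", "do_install"): ("TestMethod", "bb22" * 16),
}

results = pool.get_unihashes(queries)
for key, unihash in results.items():
    print(key, "->", unihash)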
|
||||
return AsyncClient()
|
||||
|
||||
@@ -1,598 +0,0 @@
|
||||
#! /usr/bin/env python3
|
||||
#
|
||||
# Copyright (C) 2023 Garmin Ltd.
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from . import User
|
||||
|
||||
from sqlalchemy.ext.asyncio import create_async_engine
|
||||
from sqlalchemy.pool import NullPool
|
||||
from sqlalchemy import (
|
||||
MetaData,
|
||||
Column,
|
||||
Table,
|
||||
Text,
|
||||
Integer,
|
||||
UniqueConstraint,
|
||||
DateTime,
|
||||
Index,
|
||||
select,
|
||||
insert,
|
||||
exists,
|
||||
literal,
|
||||
and_,
|
||||
delete,
|
||||
update,
|
||||
func,
|
||||
inspect,
|
||||
)
|
||||
import sqlalchemy.engine
|
||||
from sqlalchemy.orm import declarative_base
|
||||
from sqlalchemy.exc import IntegrityError
|
||||
from sqlalchemy.dialects.postgresql import insert as postgres_insert
|
||||
|
||||
Base = declarative_base()
|
||||
|
||||
|
||||
class UnihashesV3(Base):
|
||||
__tablename__ = "unihashes_v3"
|
||||
id = Column(Integer, primary_key=True, autoincrement=True)
|
||||
method = Column(Text, nullable=False)
|
||||
taskhash = Column(Text, nullable=False)
|
||||
unihash = Column(Text, nullable=False)
|
||||
gc_mark = Column(Text, nullable=False)
|
||||
|
||||
__table_args__ = (
|
||||
UniqueConstraint("method", "taskhash"),
|
||||
Index("taskhash_lookup_v4", "method", "taskhash"),
|
||||
Index("unihash_lookup_v1", "unihash"),
|
||||
)
|
||||
|
||||
|
||||
class OuthashesV2(Base):
|
||||
__tablename__ = "outhashes_v2"
|
||||
id = Column(Integer, primary_key=True, autoincrement=True)
|
||||
method = Column(Text, nullable=False)
|
||||
taskhash = Column(Text, nullable=False)
|
||||
outhash = Column(Text, nullable=False)
|
||||
created = Column(DateTime)
|
||||
owner = Column(Text)
|
||||
PN = Column(Text)
|
||||
PV = Column(Text)
|
||||
PR = Column(Text)
|
||||
task = Column(Text)
|
||||
outhash_siginfo = Column(Text)
|
||||
|
||||
__table_args__ = (
|
||||
UniqueConstraint("method", "taskhash", "outhash"),
|
||||
Index("outhash_lookup_v3", "method", "outhash"),
|
||||
)
|
||||
|
||||
|
||||
class Users(Base):
|
||||
__tablename__ = "users"
|
||||
id = Column(Integer, primary_key=True, autoincrement=True)
|
||||
username = Column(Text, nullable=False)
|
||||
token = Column(Text, nullable=False)
|
||||
permissions = Column(Text)
|
||||
|
||||
__table_args__ = (UniqueConstraint("username"),)
|
||||
|
||||
|
||||
class Config(Base):
|
||||
__tablename__ = "config"
|
||||
id = Column(Integer, primary_key=True, autoincrement=True)
|
||||
name = Column(Text, nullable=False)
|
||||
value = Column(Text)
|
||||
__table_args__ = (
|
||||
UniqueConstraint("name"),
|
||||
Index("config_lookup", "name"),
|
||||
)
|
||||
|
||||
|
||||
#
|
||||
# Old table versions
|
||||
#
|
||||
DeprecatedBase = declarative_base()
|
||||
|
||||
|
||||
class UnihashesV2(DeprecatedBase):
|
||||
__tablename__ = "unihashes_v2"
|
||||
id = Column(Integer, primary_key=True, autoincrement=True)
|
||||
method = Column(Text, nullable=False)
|
||||
taskhash = Column(Text, nullable=False)
|
||||
unihash = Column(Text, nullable=False)
|
||||
|
||||
__table_args__ = (
|
||||
UniqueConstraint("method", "taskhash"),
|
||||
Index("taskhash_lookup_v3", "method", "taskhash"),
|
||||
)
|
||||
|
||||
|
||||
class DatabaseEngine(object):
|
||||
def __init__(self, url, username=None, password=None):
|
||||
self.logger = logging.getLogger("hashserv.sqlalchemy")
|
||||
self.url = sqlalchemy.engine.make_url(url)
|
||||
|
||||
if username is not None:
|
||||
self.url = self.url.set(username=username)
|
||||
|
||||
if password is not None:
|
||||
self.url = self.url.set(password=password)
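For orientation, a short hedged sketch of constructing the engine; the database URL and credentials are hypothetical.

# Hedged sketch (hypothetical URL and credentials): the username and
# password, when given, are folded into the SQLAlchemy URL.
engine = DatabaseEngine(
    "postgresql+psycopg://db.example.com/hashserv",
    username="hashserv",
    password="secret",
)
# await engine.create()   # creates tables and runs the V2 -> V3 upgrade
# db = engine.connect(logger)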
|
||||
|
||||
async def create(self):
|
||||
def check_table_exists(conn, name):
|
||||
return inspect(conn).has_table(name)
|
||||
|
||||
self.logger.info("Using database %s", self.url)
|
||||
if self.url.drivername == 'postgresql+psycopg':
|
||||
# Psycopg 3 (psycopg) driver can handle async connection pooling
|
||||
self.engine = create_async_engine(self.url, max_overflow=-1)
|
||||
else:
|
||||
self.engine = create_async_engine(self.url, poolclass=NullPool)
|
||||
|
||||
async with self.engine.begin() as conn:
|
||||
# Create tables
|
||||
self.logger.info("Creating tables...")
|
||||
await conn.run_sync(Base.metadata.create_all)
|
||||
|
||||
if await conn.run_sync(check_table_exists, UnihashesV2.__tablename__):
|
||||
self.logger.info("Upgrading Unihashes V2 -> V3...")
|
||||
statement = insert(UnihashesV3).from_select(
|
||||
["id", "method", "unihash", "taskhash", "gc_mark"],
|
||||
select(
|
||||
UnihashesV2.id,
|
||||
UnihashesV2.method,
|
||||
UnihashesV2.unihash,
|
||||
UnihashesV2.taskhash,
|
||||
literal("").label("gc_mark"),
|
||||
),
|
||||
)
|
||||
self.logger.debug("%s", statement)
|
||||
await conn.execute(statement)
|
||||
|
||||
await conn.run_sync(Base.metadata.drop_all, [UnihashesV2.__table__])
|
||||
self.logger.info("Upgrade complete")
|
||||
|
||||
def connect(self, logger):
|
||||
return Database(self.engine, logger)
|
||||
|
||||
|
||||
def map_row(row):
|
||||
if row is None:
|
||||
return None
|
||||
return dict(**row._mapping)
|
||||
|
||||
|
||||
def map_user(row):
|
||||
if row is None:
|
||||
return None
|
||||
return User(
|
||||
username=row.username,
|
||||
permissions=set(row.permissions.split()),
|
||||
)
|
||||
|
||||
|
||||
def _make_condition_statement(table, condition):
|
||||
where = {}
|
||||
for c in table.__table__.columns:
|
||||
if c.key in condition and condition[c.key] is not None:
|
||||
where[c] = condition[c.key]
|
||||
|
||||
return [(k == v) for k, v in where.items()]
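To make the mapping concrete, a small illustrative example of what this helper produces; the column values are hypothetical.

# Illustrative only: a condition dict is reduced to equality expressions
# for the columns that exist on the table and are not None.
condition = {"method": "TestMethod", "taskhash": None, "unrelated": "x"}
filters = _make_condition_statement(UnihashesV3, condition)
# -> [UnihashesV3.method == "TestMethod"]
#    ("taskhash" is skipped because it is None, "unrelated" because it
#     is not a column of unihashes_v3)
statement = delete(UnihashesV3).where(*filters)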
|
||||
|
||||
|
||||
class Database(object):
|
||||
def __init__(self, engine, logger):
|
||||
self.engine = engine
|
||||
self.db = None
|
||||
self.logger = logger
|
||||
|
||||
async def __aenter__(self):
|
||||
self.db = await self.engine.connect()
|
||||
return self
|
||||
|
||||
async def __aexit__(self, exc_type, exc_value, traceback):
|
||||
await self.close()
|
||||
|
||||
async def close(self):
|
||||
await self.db.close()
|
||||
self.db = None
|
||||
|
||||
async def _execute(self, statement):
|
||||
self.logger.debug("%s", statement)
|
||||
return await self.db.execute(statement)
|
||||
|
||||
async def _set_config(self, name, value):
|
||||
while True:
|
||||
result = await self._execute(
|
||||
update(Config).where(Config.name == name).values(value=value)
|
||||
)
|
||||
|
||||
if result.rowcount == 0:
|
||||
self.logger.debug("Config '%s' not found. Adding it", name)
|
||||
try:
|
||||
await self._execute(insert(Config).values(name=name, value=value))
|
||||
except IntegrityError:
|
||||
# Race. Try again
|
||||
continue
|
||||
|
||||
break
|
||||
|
||||
def _get_config_subquery(self, name, default=None):
|
||||
if default is not None:
|
||||
return func.coalesce(
|
||||
select(Config.value).where(Config.name == name).scalar_subquery(),
|
||||
default,
|
||||
)
|
||||
return select(Config.value).where(Config.name == name).scalar_subquery()
|
||||
|
||||
async def _get_config(self, name):
|
||||
result = await self._execute(select(Config.value).where(Config.name == name))
|
||||
row = result.first()
|
||||
if row is None:
|
||||
return None
|
||||
return row.value
|
||||
|
||||
async def get_unihash_by_taskhash_full(self, method, taskhash):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
select(
|
||||
OuthashesV2,
|
||||
UnihashesV3.unihash.label("unihash"),
|
||||
)
|
||||
.join(
|
||||
UnihashesV3,
|
||||
and_(
|
||||
UnihashesV3.method == OuthashesV2.method,
|
||||
UnihashesV3.taskhash == OuthashesV2.taskhash,
|
||||
),
|
||||
)
|
||||
.where(
|
||||
OuthashesV2.method == method,
|
||||
OuthashesV2.taskhash == taskhash,
|
||||
)
|
||||
.order_by(
|
||||
OuthashesV2.created.asc(),
|
||||
)
|
||||
.limit(1)
|
||||
)
|
||||
return map_row(result.first())
|
||||
|
||||
async def get_unihash_by_outhash(self, method, outhash):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
select(OuthashesV2, UnihashesV3.unihash.label("unihash"))
|
||||
.join(
|
||||
UnihashesV3,
|
||||
and_(
|
||||
UnihashesV3.method == OuthashesV2.method,
|
||||
UnihashesV3.taskhash == OuthashesV2.taskhash,
|
||||
),
|
||||
)
|
||||
.where(
|
||||
OuthashesV2.method == method,
|
||||
OuthashesV2.outhash == outhash,
|
||||
)
|
||||
.order_by(
|
||||
OuthashesV2.created.asc(),
|
||||
)
|
||||
.limit(1)
|
||||
)
|
||||
return map_row(result.first())
|
||||
|
||||
async def unihash_exists(self, unihash):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
select(UnihashesV3).where(UnihashesV3.unihash == unihash).limit(1)
|
||||
)
|
||||
|
||||
return result.first() is not None
|
||||
|
||||
async def get_outhash(self, method, outhash):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
select(OuthashesV2)
|
||||
.where(
|
||||
OuthashesV2.method == method,
|
||||
OuthashesV2.outhash == outhash,
|
||||
)
|
||||
.order_by(
|
||||
OuthashesV2.created.asc(),
|
||||
)
|
||||
.limit(1)
|
||||
)
|
||||
return map_row(result.first())
|
||||
|
||||
async def get_equivalent_for_outhash(self, method, outhash, taskhash):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
select(
|
||||
OuthashesV2.taskhash.label("taskhash"),
|
||||
UnihashesV3.unihash.label("unihash"),
|
||||
)
|
||||
.join(
|
||||
UnihashesV3,
|
||||
and_(
|
||||
UnihashesV3.method == OuthashesV2.method,
|
||||
UnihashesV3.taskhash == OuthashesV2.taskhash,
|
||||
),
|
||||
)
|
||||
.where(
|
||||
OuthashesV2.method == method,
|
||||
OuthashesV2.outhash == outhash,
|
||||
OuthashesV2.taskhash != taskhash,
|
||||
)
|
||||
.order_by(
|
||||
OuthashesV2.created.asc(),
|
||||
)
|
||||
.limit(1)
|
||||
)
|
||||
return map_row(result.first())
|
||||
|
||||
async def get_equivalent(self, method, taskhash):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
select(
|
||||
UnihashesV3.unihash,
|
||||
UnihashesV3.method,
|
||||
UnihashesV3.taskhash,
|
||||
).where(
|
||||
UnihashesV3.method == method,
|
||||
UnihashesV3.taskhash == taskhash,
|
||||
)
|
||||
)
|
||||
return map_row(result.first())
|
||||
|
||||
async def remove(self, condition):
|
||||
async def do_remove(table):
|
||||
where = _make_condition_statement(table, condition)
|
||||
if where:
|
||||
async with self.db.begin():
|
||||
result = await self._execute(delete(table).where(*where))
|
||||
return result.rowcount
|
||||
|
||||
return 0
|
||||
|
||||
count = 0
|
||||
count += await do_remove(UnihashesV3)
|
||||
count += await do_remove(OuthashesV2)
|
||||
|
||||
return count
|
||||
|
||||
async def get_current_gc_mark(self):
|
||||
async with self.db.begin():
|
||||
return await self._get_config("gc-mark")
|
||||
|
||||
async def gc_status(self):
|
||||
async with self.db.begin():
|
||||
gc_mark_subquery = self._get_config_subquery("gc-mark", "")
|
||||
|
||||
result = await self._execute(
|
||||
select(func.count())
|
||||
.select_from(UnihashesV3)
|
||||
.where(UnihashesV3.gc_mark == gc_mark_subquery)
|
||||
)
|
||||
keep_rows = result.scalar()
|
||||
|
||||
result = await self._execute(
|
||||
select(func.count())
|
||||
.select_from(UnihashesV3)
|
||||
.where(UnihashesV3.gc_mark != gc_mark_subquery)
|
||||
)
|
||||
remove_rows = result.scalar()
|
||||
|
||||
return (keep_rows, remove_rows, await self._get_config("gc-mark"))
|
||||
|
||||
async def gc_mark(self, mark, condition):
|
||||
async with self.db.begin():
|
||||
await self._set_config("gc-mark", mark)
|
||||
|
||||
where = _make_condition_statement(UnihashesV3, condition)
|
||||
if not where:
|
||||
return 0
|
||||
|
||||
result = await self._execute(
|
||||
update(UnihashesV3)
|
||||
.values(gc_mark=self._get_config_subquery("gc-mark", ""))
|
||||
.where(*where)
|
||||
)
|
||||
return result.rowcount
|
||||
|
||||
async def gc_sweep(self):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
delete(UnihashesV3).where(
|
||||
# A sneaky conditional that provides some protection against
# errant use: if the config mark is NULL, this will not match
# any rows because no default is specified in the select
# statement
|
||||
UnihashesV3.gc_mark
|
||||
!= self._get_config_subquery("gc-mark")
|
||||
)
|
||||
)
|
||||
await self._set_config("gc-mark", None)
|
||||
|
||||
return result.rowcount
|
||||
|
||||
async def clean_unused(self, oldest):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
delete(OuthashesV2).where(
|
||||
OuthashesV2.created < oldest,
|
||||
~(
|
||||
select(UnihashesV3.id)
|
||||
.where(
|
||||
UnihashesV3.method == OuthashesV2.method,
|
||||
UnihashesV3.taskhash == OuthashesV2.taskhash,
|
||||
)
|
||||
.limit(1)
|
||||
.exists()
|
||||
),
|
||||
)
|
||||
)
|
||||
return result.rowcount
|
||||
|
||||
async def insert_unihash(self, method, taskhash, unihash):
|
||||
# Postgres specific ignore on insert duplicate
|
||||
if self.engine.name == "postgresql":
|
||||
statement = (
|
||||
postgres_insert(UnihashesV3)
|
||||
.values(
|
||||
method=method,
|
||||
taskhash=taskhash,
|
||||
unihash=unihash,
|
||||
gc_mark=self._get_config_subquery("gc-mark", ""),
|
||||
)
|
||||
.on_conflict_do_nothing(index_elements=("method", "taskhash"))
|
||||
)
|
||||
else:
|
||||
statement = insert(UnihashesV3).values(
|
||||
method=method,
|
||||
taskhash=taskhash,
|
||||
unihash=unihash,
|
||||
gc_mark=self._get_config_subquery("gc-mark", ""),
|
||||
)
|
||||
|
||||
try:
|
||||
async with self.db.begin():
|
||||
result = await self._execute(statement)
|
||||
return result.rowcount != 0
|
||||
except IntegrityError:
|
||||
self.logger.debug(
|
||||
"%s, %s, %s already in unihash database", method, taskhash, unihash
|
||||
)
|
||||
return False
|
||||
|
||||
async def insert_outhash(self, data):
|
||||
outhash_columns = set(c.key for c in OuthashesV2.__table__.columns)
|
||||
|
||||
data = {k: v for k, v in data.items() if k in outhash_columns}
|
||||
|
||||
if "created" in data and not isinstance(data["created"], datetime):
|
||||
data["created"] = datetime.fromisoformat(data["created"])
|
||||
|
||||
# Postgres specific ignore on insert duplicate
|
||||
if self.engine.name == "postgresql":
|
||||
statement = (
|
||||
postgres_insert(OuthashesV2)
|
||||
.values(**data)
|
||||
.on_conflict_do_nothing(
|
||||
index_elements=("method", "taskhash", "outhash")
|
||||
)
|
||||
)
|
||||
else:
|
||||
statement = insert(OuthashesV2).values(**data)
|
||||
|
||||
try:
|
||||
async with self.db.begin():
|
||||
result = await self._execute(statement)
|
||||
return result.rowcount != 0
|
||||
except IntegrityError:
|
||||
self.logger.debug(
|
||||
"%s, %s already in outhash database", data["method"], data["outhash"]
|
||||
)
|
||||
return False
|
||||
|
||||
async def _get_user(self, username):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
select(
|
||||
Users.username,
|
||||
Users.permissions,
|
||||
Users.token,
|
||||
).where(
|
||||
Users.username == username,
|
||||
)
|
||||
)
|
||||
return result.first()
|
||||
|
||||
async def lookup_user_token(self, username):
|
||||
row = await self._get_user(username)
|
||||
if not row:
|
||||
return None, None
|
||||
return map_user(row), row.token
|
||||
|
||||
async def lookup_user(self, username):
|
||||
return map_user(await self._get_user(username))
|
||||
|
||||
async def set_user_token(self, username, token):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
update(Users)
|
||||
.where(
|
||||
Users.username == username,
|
||||
)
|
||||
.values(
|
||||
token=token,
|
||||
)
|
||||
)
|
||||
return result.rowcount != 0
|
||||
|
||||
async def set_user_perms(self, username, permissions):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
update(Users)
|
||||
.where(Users.username == username)
|
||||
.values(permissions=" ".join(permissions))
|
||||
)
|
||||
return result.rowcount != 0
|
||||
|
||||
async def get_all_users(self):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
select(
|
||||
Users.username,
|
||||
Users.permissions,
|
||||
)
|
||||
)
|
||||
return [map_user(row) for row in result]
|
||||
|
||||
async def new_user(self, username, permissions, token):
|
||||
try:
|
||||
async with self.db.begin():
|
||||
await self._execute(
|
||||
insert(Users).values(
|
||||
username=username,
|
||||
permissions=" ".join(permissions),
|
||||
token=token,
|
||||
)
|
||||
)
|
||||
return True
|
||||
except IntegrityError as e:
|
||||
self.logger.debug("Cannot create new user %s: %s", username, e)
|
||||
return False
|
||||
|
||||
async def delete_user(self, username):
|
||||
async with self.db.begin():
|
||||
result = await self._execute(
|
||||
delete(Users).where(Users.username == username)
|
||||
)
|
||||
return result.rowcount != 0
|
||||
|
||||
async def get_usage(self):
|
||||
usage = {}
|
||||
async with self.db.begin() as session:
|
||||
for name, table in Base.metadata.tables.items():
|
||||
result = await self._execute(
|
||||
statement=select(func.count()).select_from(table)
|
||||
)
|
||||
usage[name] = {
|
||||
"rows": result.scalar(),
|
||||
}
|
||||
|
||||
return usage
|
||||
|
||||
async def get_query_columns(self):
|
||||
columns = set()
|
||||
for table in (UnihashesV3, OuthashesV2):
|
||||
for c in table.__table__.columns:
|
||||
if not isinstance(c.type, Text):
|
||||
continue
|
||||
columns.add(c.key)
|
||||
|
||||
return list(columns)
|
||||
@@ -1,562 +0,0 @@
|
||||
#! /usr/bin/env python3
|
||||
#
|
||||
# Copyright (C) 2023 Garmin Ltd.
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
import sqlite3
|
||||
import logging
|
||||
from contextlib import closing
|
||||
from . import User
|
||||
|
||||
logger = logging.getLogger("hashserv.sqlite")
|
||||
|
||||
UNIHASH_TABLE_DEFINITION = (
|
||||
("method", "TEXT NOT NULL", "UNIQUE"),
|
||||
("taskhash", "TEXT NOT NULL", "UNIQUE"),
|
||||
("unihash", "TEXT NOT NULL", ""),
|
||||
("gc_mark", "TEXT NOT NULL", ""),
|
||||
)
|
||||
|
||||
UNIHASH_TABLE_COLUMNS = tuple(name for name, _, _ in UNIHASH_TABLE_DEFINITION)
|
||||
|
||||
OUTHASH_TABLE_DEFINITION = (
|
||||
("method", "TEXT NOT NULL", "UNIQUE"),
|
||||
("taskhash", "TEXT NOT NULL", "UNIQUE"),
|
||||
("outhash", "TEXT NOT NULL", "UNIQUE"),
|
||||
("created", "DATETIME", ""),
|
||||
# Optional fields
|
||||
("owner", "TEXT", ""),
|
||||
("PN", "TEXT", ""),
|
||||
("PV", "TEXT", ""),
|
||||
("PR", "TEXT", ""),
|
||||
("task", "TEXT", ""),
|
||||
("outhash_siginfo", "TEXT", ""),
|
||||
)
|
||||
|
||||
OUTHASH_TABLE_COLUMNS = tuple(name for name, _, _ in OUTHASH_TABLE_DEFINITION)
|
||||
|
||||
USERS_TABLE_DEFINITION = (
|
||||
("username", "TEXT NOT NULL", "UNIQUE"),
|
||||
("token", "TEXT NOT NULL", ""),
|
||||
("permissions", "TEXT NOT NULL", ""),
|
||||
)
|
||||
|
||||
USERS_TABLE_COLUMNS = tuple(name for name, _, _ in USERS_TABLE_DEFINITION)
|
||||
|
||||
|
||||
CONFIG_TABLE_DEFINITION = (
|
||||
("name", "TEXT NOT NULL", "UNIQUE"),
|
||||
("value", "TEXT", ""),
|
||||
)
|
||||
|
||||
CONFIG_TABLE_COLUMNS = tuple(name for name, _, _ in CONFIG_TABLE_DEFINITION)
|
||||
|
||||
|
||||
def _make_table(cursor, name, definition):
|
||||
cursor.execute(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS {name} (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
{fields}
|
||||
UNIQUE({unique})
|
||||
)
|
||||
""".format(
|
||||
name=name,
|
||||
fields=" ".join("%s %s," % (name, typ) for name, typ, _ in definition),
|
||||
unique=", ".join(
|
||||
name for name, _, flags in definition if "UNIQUE" in flags
|
||||
),
|
||||
)
|
||||
)
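As an illustration of the generated DDL, a call such as _make_table(cursor, "config", CONFIG_TABLE_DEFINITION) executes roughly the statement shown in the comment below (whitespace aside).

# Illustrative: the DDL generated for the config table definition above.
# _make_table(cursor, "config", CONFIG_TABLE_DEFINITION) executes roughly:
#
#   CREATE TABLE IF NOT EXISTS config (
#       id INTEGER PRIMARY KEY AUTOINCREMENT,
#       name TEXT NOT NULL, value TEXT,
#       UNIQUE(name)
#   )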
|
||||
|
||||
|
||||
def map_user(row):
|
||||
if row is None:
|
||||
return None
|
||||
return User(
|
||||
username=row["username"],
|
||||
permissions=set(row["permissions"].split()),
|
||||
)
|
||||
|
||||
|
||||
def _make_condition_statement(columns, condition):
|
||||
where = {}
|
||||
for c in columns:
|
||||
if c in condition and condition[c] is not None:
|
||||
where[c] = condition[c]
|
||||
|
||||
return where, " AND ".join("%s=:%s" % (k, k) for k in where.keys())
|
||||
|
||||
|
||||
def _get_sqlite_version(cursor):
|
||||
cursor.execute("SELECT sqlite_version()")
|
||||
|
||||
version = []
|
||||
for v in cursor.fetchone()[0].split("."):
|
||||
try:
|
||||
version.append(int(v))
|
||||
except ValueError:
|
||||
version.append(v)
|
||||
|
||||
return tuple(version)
|
||||
|
||||
|
||||
def _schema_table_name(version):
|
||||
if version >= (3, 33):
|
||||
return "sqlite_schema"
|
||||
|
||||
return "sqlite_master"
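A short note on the comparison above: _get_sqlite_version returns a tuple such as (3, 42, 0), so plain tuple ordering selects the schema table name introduced in SQLite 3.33. Illustrative checks with hypothetical version tuples:

# Illustrative checks (hypothetical version tuples).
assert _schema_table_name((3, 42, 0)) == "sqlite_schema"
assert _schema_table_name((3, 22, 0)) == "sqlite_master"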
|
||||
|
||||
|
||||
class DatabaseEngine(object):
|
||||
def __init__(self, dbname, sync):
|
||||
self.dbname = dbname
|
||||
self.logger = logger
|
||||
self.sync = sync
|
||||
|
||||
async def create(self):
|
||||
db = sqlite3.connect(self.dbname)
|
||||
db.row_factory = sqlite3.Row
|
||||
|
||||
with closing(db.cursor()) as cursor:
|
||||
_make_table(cursor, "unihashes_v3", UNIHASH_TABLE_DEFINITION)
|
||||
_make_table(cursor, "outhashes_v2", OUTHASH_TABLE_DEFINITION)
|
||||
_make_table(cursor, "users", USERS_TABLE_DEFINITION)
|
||||
_make_table(cursor, "config", CONFIG_TABLE_DEFINITION)
|
||||
|
||||
cursor.execute("PRAGMA journal_mode = WAL")
|
||||
cursor.execute(
|
||||
"PRAGMA synchronous = %s" % ("NORMAL" if self.sync else "OFF")
|
||||
)
|
||||
|
||||
# Drop old indexes
|
||||
cursor.execute("DROP INDEX IF EXISTS taskhash_lookup")
|
||||
cursor.execute("DROP INDEX IF EXISTS outhash_lookup")
|
||||
cursor.execute("DROP INDEX IF EXISTS taskhash_lookup_v2")
|
||||
cursor.execute("DROP INDEX IF EXISTS outhash_lookup_v2")
|
||||
cursor.execute("DROP INDEX IF EXISTS taskhash_lookup_v3")
|
||||
|
||||
# TODO: Upgrade from tasks_v2?
|
||||
cursor.execute("DROP TABLE IF EXISTS tasks_v2")
|
||||
|
||||
# Create new indexes
|
||||
cursor.execute(
|
||||
"CREATE INDEX IF NOT EXISTS taskhash_lookup_v4 ON unihashes_v3 (method, taskhash)"
|
||||
)
|
||||
cursor.execute(
|
||||
"CREATE INDEX IF NOT EXISTS unihash_lookup_v1 ON unihashes_v3 (unihash)"
|
||||
)
|
||||
cursor.execute(
|
||||
"CREATE INDEX IF NOT EXISTS outhash_lookup_v3 ON outhashes_v2 (method, outhash)"
|
||||
)
|
||||
cursor.execute("CREATE INDEX IF NOT EXISTS config_lookup ON config (name)")
|
||||
|
||||
sqlite_version = _get_sqlite_version(cursor)
|
||||
|
||||
cursor.execute(
|
||||
f"""
|
||||
SELECT name FROM {_schema_table_name(sqlite_version)} WHERE type = 'table' AND name = 'unihashes_v2'
|
||||
"""
|
||||
)
|
||||
if cursor.fetchone():
|
||||
self.logger.info("Upgrading Unihashes V2 -> V3...")
|
||||
cursor.execute(
|
||||
"""
|
||||
INSERT INTO unihashes_v3 (id, method, unihash, taskhash, gc_mark)
|
||||
SELECT id, method, unihash, taskhash, '' FROM unihashes_v2
|
||||
"""
|
||||
)
|
||||
cursor.execute("DROP TABLE unihashes_v2")
|
||||
db.commit()
|
||||
self.logger.info("Upgrade complete")
|
||||
|
||||
def connect(self, logger):
|
||||
return Database(logger, self.dbname, self.sync)
|
||||
|
||||
|
||||
class Database(object):
|
||||
def __init__(self, logger, dbname, sync):
|
||||
self.dbname = dbname
|
||||
self.logger = logger
|
||||
|
||||
self.db = sqlite3.connect(self.dbname)
|
||||
self.db.row_factory = sqlite3.Row
|
||||
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute("PRAGMA journal_mode = WAL")
|
||||
cursor.execute(
|
||||
"PRAGMA synchronous = %s" % ("NORMAL" if sync else "OFF")
|
||||
)
|
||||
|
||||
self.sqlite_version = _get_sqlite_version(cursor)
|
||||
|
||||
async def __aenter__(self):
|
||||
return self
|
||||
|
||||
async def __aexit__(self, exc_type, exc_value, traceback):
|
||||
await self.close()
|
||||
|
||||
async def _set_config(self, cursor, name, value):
|
||||
cursor.execute(
|
||||
"""
|
||||
INSERT OR REPLACE INTO config (id, name, value) VALUES
|
||||
((SELECT id FROM config WHERE name=:name), :name, :value)
|
||||
""",
|
||||
{
|
||||
"name": name,
|
||||
"value": value,
|
||||
},
|
||||
)
|
||||
|
||||
async def _get_config(self, cursor, name):
|
||||
cursor.execute(
|
||||
"SELECT value FROM config WHERE name=:name",
|
||||
{
|
||||
"name": name,
|
||||
},
|
||||
)
|
||||
row = cursor.fetchone()
|
||||
if row is None:
|
||||
return None
|
||||
return row["value"]
|
||||
|
||||
async def close(self):
|
||||
self.db.close()
|
||||
|
||||
async def get_unihash_by_taskhash_full(self, method, taskhash):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT *, unihashes_v3.unihash AS unihash FROM outhashes_v2
|
||||
INNER JOIN unihashes_v3 ON unihashes_v3.method=outhashes_v2.method AND unihashes_v3.taskhash=outhashes_v2.taskhash
|
||||
WHERE outhashes_v2.method=:method AND outhashes_v2.taskhash=:taskhash
|
||||
ORDER BY outhashes_v2.created ASC
|
||||
LIMIT 1
|
||||
""",
|
||||
{
|
||||
"method": method,
|
||||
"taskhash": taskhash,
|
||||
},
|
||||
)
|
||||
return cursor.fetchone()
|
||||
|
||||
async def get_unihash_by_outhash(self, method, outhash):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT *, unihashes_v3.unihash AS unihash FROM outhashes_v2
|
||||
INNER JOIN unihashes_v3 ON unihashes_v3.method=outhashes_v2.method AND unihashes_v3.taskhash=outhashes_v2.taskhash
|
||||
WHERE outhashes_v2.method=:method AND outhashes_v2.outhash=:outhash
|
||||
ORDER BY outhashes_v2.created ASC
|
||||
LIMIT 1
|
||||
""",
|
||||
{
|
||||
"method": method,
|
||||
"outhash": outhash,
|
||||
},
|
||||
)
|
||||
return cursor.fetchone()
|
||||
|
||||
async def unihash_exists(self, unihash):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT * FROM unihashes_v3 WHERE unihash=:unihash
|
||||
LIMIT 1
|
||||
""",
|
||||
{
|
||||
"unihash": unihash,
|
||||
},
|
||||
)
|
||||
return cursor.fetchone() is not None
|
||||
|
||||
async def get_outhash(self, method, outhash):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT * FROM outhashes_v2
|
||||
WHERE outhashes_v2.method=:method AND outhashes_v2.outhash=:outhash
|
||||
ORDER BY outhashes_v2.created ASC
|
||||
LIMIT 1
|
||||
""",
|
||||
{
|
||||
"method": method,
|
||||
"outhash": outhash,
|
||||
},
|
||||
)
|
||||
return cursor.fetchone()
|
||||
|
||||
async def get_equivalent_for_outhash(self, method, outhash, taskhash):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT outhashes_v2.taskhash AS taskhash, unihashes_v3.unihash AS unihash FROM outhashes_v2
|
||||
INNER JOIN unihashes_v3 ON unihashes_v3.method=outhashes_v2.method AND unihashes_v3.taskhash=outhashes_v2.taskhash
|
||||
-- Select any matching output hash except the one we just inserted
|
||||
WHERE outhashes_v2.method=:method AND outhashes_v2.outhash=:outhash AND outhashes_v2.taskhash!=:taskhash
|
||||
-- Pick the oldest hash
|
||||
ORDER BY outhashes_v2.created ASC
|
||||
LIMIT 1
|
||||
""",
|
||||
{
|
||||
"method": method,
|
||||
"outhash": outhash,
|
||||
"taskhash": taskhash,
|
||||
},
|
||||
)
|
||||
return cursor.fetchone()
|
||||
|
||||
async def get_equivalent(self, method, taskhash):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"SELECT taskhash, method, unihash FROM unihashes_v3 WHERE method=:method AND taskhash=:taskhash",
|
||||
{
|
||||
"method": method,
|
||||
"taskhash": taskhash,
|
||||
},
|
||||
)
|
||||
return cursor.fetchone()
|
||||
|
||||
async def remove(self, condition):
|
||||
def do_remove(columns, table_name, cursor):
|
||||
where, clause = _make_condition_statement(columns, condition)
|
||||
if where:
|
||||
query = f"DELETE FROM {table_name} WHERE {clause}"
|
||||
cursor.execute(query, where)
|
||||
return cursor.rowcount
|
||||
|
||||
return 0
|
||||
|
||||
count = 0
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
count += do_remove(OUTHASH_TABLE_COLUMNS, "outhashes_v2", cursor)
|
||||
count += do_remove(UNIHASH_TABLE_COLUMNS, "unihashes_v3", cursor)
|
||||
self.db.commit()
|
||||
|
||||
return count
|
||||
|
||||
async def get_current_gc_mark(self):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
return await self._get_config(cursor, "gc-mark")
|
||||
|
||||
async def gc_status(self):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT COUNT() FROM unihashes_v3 WHERE
|
||||
gc_mark=COALESCE((SELECT value FROM config WHERE name='gc-mark'), '')
|
||||
"""
|
||||
)
|
||||
keep_rows = cursor.fetchone()[0]
|
||||
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT COUNT() FROM unihashes_v3 WHERE
|
||||
gc_mark!=COALESCE((SELECT value FROM config WHERE name='gc-mark'), '')
|
||||
"""
|
||||
)
|
||||
remove_rows = cursor.fetchone()[0]
|
||||
|
||||
current_mark = await self._get_config(cursor, "gc-mark")
|
||||
|
||||
return (keep_rows, remove_rows, current_mark)
|
||||
|
||||
async def gc_mark(self, mark, condition):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
await self._set_config(cursor, "gc-mark", mark)
|
||||
|
||||
where, clause = _make_condition_statement(UNIHASH_TABLE_COLUMNS, condition)
|
||||
|
||||
new_rows = 0
|
||||
if where:
|
||||
cursor.execute(
|
||||
f"""
|
||||
UPDATE unihashes_v3 SET
|
||||
gc_mark=COALESCE((SELECT value FROM config WHERE name='gc-mark'), '')
|
||||
WHERE {clause}
|
||||
""",
|
||||
where,
|
||||
)
|
||||
new_rows = cursor.rowcount
|
||||
|
||||
self.db.commit()
|
||||
return new_rows
|
||||
|
||||
async def gc_sweep(self):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
# NOTE: COALESCE is not used in this query so that if the current
|
||||
# mark is NULL, nothing will happen
|
||||
cursor.execute(
|
||||
"""
|
||||
DELETE FROM unihashes_v3 WHERE
|
||||
gc_mark!=(SELECT value FROM config WHERE name='gc-mark')
|
||||
"""
|
||||
)
|
||||
count = cursor.rowcount
|
||||
await self._set_config(cursor, "gc-mark", None)
|
||||
|
||||
self.db.commit()
|
||||
return count
|
||||
|
||||
async def clean_unused(self, oldest):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
DELETE FROM outhashes_v2 WHERE created<:oldest AND NOT EXISTS (
|
||||
SELECT unihashes_v3.id FROM unihashes_v3 WHERE unihashes_v3.method=outhashes_v2.method AND unihashes_v3.taskhash=outhashes_v2.taskhash LIMIT 1
|
||||
)
|
||||
""",
|
||||
{
|
||||
"oldest": oldest,
|
||||
},
|
||||
)
|
||||
self.db.commit()
|
||||
return cursor.rowcount
|
||||
|
||||
async def insert_unihash(self, method, taskhash, unihash):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
prevrowid = cursor.lastrowid
|
||||
cursor.execute(
|
||||
"""
|
||||
INSERT OR IGNORE INTO unihashes_v3 (method, taskhash, unihash, gc_mark) VALUES
|
||||
(
|
||||
:method,
|
||||
:taskhash,
|
||||
:unihash,
|
||||
COALESCE((SELECT value FROM config WHERE name='gc-mark'), '')
|
||||
)
|
||||
""",
|
||||
{
|
||||
"method": method,
|
||||
"taskhash": taskhash,
|
||||
"unihash": unihash,
|
||||
},
|
||||
)
|
||||
self.db.commit()
|
||||
return cursor.lastrowid != prevrowid
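The lastrowid comparison above is how the code detects whether INSERT OR IGNORE actually inserted a row; below is a small standalone illustration of the same pattern, independent of the hash server schema.

# Standalone illustration of the INSERT OR IGNORE / lastrowid pattern.
import sqlite3

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE t (k TEXT UNIQUE)")

cur = db.cursor()
prev = cur.lastrowid
cur.execute("INSERT OR IGNORE INTO t VALUES ('a')")
print(cur.lastrowid != prev)   # True: a new row was inserted

prev = cur.lastrowid
cur.execute("INSERT OR IGNORE INTO t VALUES ('a')")
print(cur.lastrowid != prev)   # False: the duplicate was ignored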
|
||||
|
||||
async def insert_outhash(self, data):
|
||||
data = {k: v for k, v in data.items() if k in OUTHASH_TABLE_COLUMNS}
|
||||
keys = sorted(data.keys())
|
||||
query = "INSERT OR IGNORE INTO outhashes_v2 ({fields}) VALUES({values})".format(
|
||||
fields=", ".join(keys),
|
||||
values=", ".join(":" + k for k in keys),
|
||||
)
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
prevrowid = cursor.lastrowid
|
||||
cursor.execute(query, data)
|
||||
self.db.commit()
|
||||
return cursor.lastrowid != prevrowid
|
||||
|
||||
def _get_user(self, username):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT username, permissions, token FROM users WHERE username=:username
|
||||
""",
|
||||
{
|
||||
"username": username,
|
||||
},
|
||||
)
|
||||
return cursor.fetchone()
|
||||
|
||||
async def lookup_user_token(self, username):
|
||||
row = self._get_user(username)
|
||||
if row is None:
|
||||
return None, None
|
||||
return map_user(row), row["token"]
|
||||
|
||||
async def lookup_user(self, username):
|
||||
return map_user(self._get_user(username))
|
||||
|
||||
async def set_user_token(self, username, token):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
UPDATE users SET token=:token WHERE username=:username
|
||||
""",
|
||||
{
|
||||
"username": username,
|
||||
"token": token,
|
||||
},
|
||||
)
|
||||
self.db.commit()
|
||||
return cursor.rowcount != 0
|
||||
|
||||
async def set_user_perms(self, username, permissions):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
UPDATE users SET permissions=:permissions WHERE username=:username
|
||||
""",
|
||||
{
|
||||
"username": username,
|
||||
"permissions": " ".join(permissions),
|
||||
},
|
||||
)
|
||||
self.db.commit()
|
||||
return cursor.rowcount != 0
|
||||
|
||||
async def get_all_users(self):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute("SELECT username, permissions FROM users")
|
||||
return [map_user(r) for r in cursor.fetchall()]
|
||||
|
||||
async def new_user(self, username, permissions, token):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
try:
|
||||
cursor.execute(
|
||||
"""
|
||||
INSERT INTO users (username, token, permissions) VALUES (:username, :token, :permissions)
|
||||
""",
|
||||
{
|
||||
"username": username,
|
||||
"token": token,
|
||||
"permissions": " ".join(permissions),
|
||||
},
|
||||
)
|
||||
self.db.commit()
|
||||
return True
|
||||
except sqlite3.IntegrityError:
|
||||
return False
|
||||
|
||||
async def delete_user(self, username):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
DELETE FROM users WHERE username=:username
|
||||
""",
|
||||
{
|
||||
"username": username,
|
||||
},
|
||||
)
|
||||
self.db.commit()
|
||||
return cursor.rowcount != 0
|
||||
|
||||
async def get_usage(self):
|
||||
usage = {}
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
f"""
|
||||
SELECT name FROM {_schema_table_name(self.sqlite_version)} WHERE type = 'table' AND name NOT LIKE 'sqlite_%'
|
||||
"""
|
||||
)
|
||||
for row in cursor.fetchall():
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT COUNT() FROM %s
|
||||
"""
|
||||
% row["name"],
|
||||
)
|
||||
usage[row["name"]] = {
|
||||
"rows": cursor.fetchone()[0],
|
||||
}
|
||||
return usage
|
||||
|
||||
async def get_query_columns(self):
|
||||
columns = set()
|
||||
for name, typ, _ in UNIHASH_TABLE_DEFINITION + OUTHASH_TABLE_DEFINITION:
|
||||
if typ.startswith("TEXT"):
|
||||
columns.add(name)
|
||||
return list(columns)
|
||||
@@ -178,9 +178,9 @@ class LayerIndex():
|
||||
'''Load the layerindex.
|
||||
|
||||
indexURI - An index to load. (Use multiple calls to load multiple indexes)
|
||||
|
||||
|
||||
reload - If reload is True, then any previously loaded indexes will be forgotten.
|
||||
|
||||
|
||||
load - List of elements to load. Default loads all items.
Note: plugins may ignore this.
|
||||
|
||||
@@ -383,14 +383,7 @@ layerBranches set. If not, they are effectively blank.'''
|
||||
|
||||
# Get a list of dependencies and then recursively process them
|
||||
for layerdependency in layerbranch.index.layerDependencies_layerBranchId[layerbranch.id]:
|
||||
try:
|
||||
deplayerbranch = layerdependency.dependency_layerBranch
|
||||
except AttributeError as e:
|
||||
logger.error('LayerBranch does not exist for dependent layer {}:{}\n' \
|
||||
' Cannot continue successfully.\n' \
|
||||
' You might be able to resolve this by checking out the layer locally.\n' \
|
||||
' Consider reaching out to the layer maintainers or the layerindex admins' \
|
||||
.format(layerdependency.dependency.name, layerbranch.branch.name))
|
||||
deplayerbranch = layerdependency.dependency_layerBranch
|
||||
|
||||
if ignores and deplayerbranch.layer.name in ignores:
|
||||
continue
|
||||
@@ -853,7 +846,7 @@ class LayerIndexObj():
|
||||
continue
|
||||
|
||||
for layerdependency in layerbranch.index.layerDependencies_layerBranchId[layerbranch.id]:
|
||||
deplayerbranch = layerdependency.dependency_layerBranch or None
|
||||
deplayerbranch = layerdependency.dependency_layerBranch
|
||||
|
||||
if ignores and deplayerbranch.layer.name in ignores:
|
||||
continue
|
||||
|
||||
@@ -253,7 +253,7 @@ class ProgressBar(object):
|
||||
if (self.maxval is not UnknownLength
|
||||
and not 0 <= value <= self.maxval):
|
||||
|
||||
self.maxval = value
|
||||
raise ValueError('Value out of range')
|
||||
|
||||
self.currval = value
|
||||
|
||||
|
||||
@@ -7,13 +7,13 @@
|
||||
__version__ = "1.0.0"
|
||||
|
||||
import os, time
|
||||
import sys, logging
|
||||
import sys,logging
|
||||
|
||||
def init_logger(logfile, loglevel):
|
||||
numeric_level = getattr(logging, loglevel.upper(), None)
|
||||
if not isinstance(numeric_level, int):
|
||||
raise ValueError("Invalid log level: %s" % loglevel)
|
||||
FORMAT = "%(asctime)-15s %(message)s"
|
||||
raise ValueError('Invalid log level: %s' % loglevel)
|
||||
FORMAT = '%(asctime)-15s %(message)s'
|
||||
logging.basicConfig(level=numeric_level, filename=logfile, format=FORMAT)
|
||||
|
||||
class NotFoundError(Exception):
|
||||
|
||||
@@ -11,61 +11,40 @@ logger = logging.getLogger("BitBake.PRserv")
|
||||
|
||||
class PRAsyncClient(bb.asyncrpc.AsyncClient):
|
||||
def __init__(self):
|
||||
super().__init__("PRSERVICE", "1.0", logger)
|
||||
super().__init__('PRSERVICE', '1.0', logger)
|
||||
|
||||
async def getPR(self, version, pkgarch, checksum):
|
||||
response = await self.invoke(
|
||||
{"get-pr": {"version": version, "pkgarch": pkgarch, "checksum": checksum}}
|
||||
response = await self.send_message(
|
||||
{'get-pr': {'version': version, 'pkgarch': pkgarch, 'checksum': checksum}}
|
||||
)
|
||||
if response:
|
||||
return response["value"]
|
||||
|
||||
async def test_pr(self, version, pkgarch, checksum):
|
||||
response = await self.invoke(
|
||||
{"test-pr": {"version": version, "pkgarch": pkgarch, "checksum": checksum}}
|
||||
)
|
||||
if response:
|
||||
return response["value"]
|
||||
|
||||
async def test_package(self, version, pkgarch):
|
||||
response = await self.invoke(
|
||||
{"test-package": {"version": version, "pkgarch": pkgarch}}
|
||||
)
|
||||
if response:
|
||||
return response["value"]
|
||||
|
||||
async def max_package_pr(self, version, pkgarch):
|
||||
response = await self.invoke(
|
||||
{"max-package-pr": {"version": version, "pkgarch": pkgarch}}
|
||||
)
|
||||
if response:
|
||||
return response["value"]
|
||||
return response['value']
|
||||
|
||||
async def importone(self, version, pkgarch, checksum, value):
|
||||
response = await self.invoke(
|
||||
{"import-one": {"version": version, "pkgarch": pkgarch, "checksum": checksum, "value": value}}
|
||||
response = await self.send_message(
|
||||
{'import-one': {'version': version, 'pkgarch': pkgarch, 'checksum': checksum, 'value': value}}
|
||||
)
|
||||
if response:
|
||||
return response["value"]
|
||||
return response['value']
|
||||
|
||||
async def export(self, version, pkgarch, checksum, colinfo):
|
||||
response = await self.invoke(
|
||||
{"export": {"version": version, "pkgarch": pkgarch, "checksum": checksum, "colinfo": colinfo}}
|
||||
response = await self.send_message(
|
||||
{'export': {'version': version, 'pkgarch': pkgarch, 'checksum': checksum, 'colinfo': colinfo}}
|
||||
)
|
||||
if response:
|
||||
return (response["metainfo"], response["datainfo"])
|
||||
return (response['metainfo'], response['datainfo'])
|
||||
|
||||
async def is_readonly(self):
|
||||
response = await self.invoke(
|
||||
{"is-readonly": {}}
|
||||
response = await self.send_message(
|
||||
{'is-readonly': {}}
|
||||
)
|
||||
if response:
|
||||
return response["readonly"]
|
||||
return response['readonly']
|
||||
|
||||
class PRClient(bb.asyncrpc.Client):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self._add_methods("getPR", "test_pr", "test_package", "importone", "export", "is_readonly")
|
||||
self._add_methods('getPR', 'importone', 'export', 'is_readonly')
|
||||
|
||||
def _get_async_client(self):
|
||||
return PRAsyncClient()
|
||||
|
||||
@@ -38,9 +38,9 @@ class PRTable(object):
|
||||
self.read_only = read_only
|
||||
self.dirty = False
|
||||
if nohist:
|
||||
self.table = "%s_nohist" % table
|
||||
self.table = "%s_nohist" % table
|
||||
else:
|
||||
self.table = "%s_hist" % table
|
||||
self.table = "%s_hist" % table
|
||||
|
||||
if self.read_only:
|
||||
table_exists = self._execute(
|
||||
@@ -64,7 +64,7 @@ class PRTable(object):
|
||||
try:
|
||||
return self.conn.execute(*query)
|
||||
except sqlite3.OperationalError as exc:
|
||||
if "is locked" in str(exc) and end > time.time():
|
||||
if 'is locked' in str(exc) and end > time.time():
|
||||
continue
|
||||
raise exc
|
||||
|
||||
@@ -78,53 +78,7 @@ class PRTable(object):
|
||||
self.sync()
|
||||
self.dirty = False
|
||||
|
||||
def test_package(self, version, pkgarch):
|
||||
"""Returns whether the specified package version is found in the database for the specified architecture"""
|
||||
|
||||
# The query returns a row only if the package is known; convert that to a boolean
|
||||
data=self._execute("SELECT value FROM %s WHERE version=? AND pkgarch=?;" % self.table,
|
||||
(version, pkgarch))
|
||||
row=data.fetchone()
|
||||
if row is not None:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def test_value(self, version, pkgarch, value):
|
||||
"""Returns whether the specified value is found in the database for the specified package and architecture"""
|
||||
|
||||
# The query returns a row only if the value is present; convert that to a boolean
|
||||
data=self._execute("SELECT value FROM %s WHERE version=? AND pkgarch=? and value=?;" % self.table,
|
||||
(version, pkgarch, value))
|
||||
row=data.fetchone()
|
||||
if row is not None:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def find_value(self, version, pkgarch, checksum):
|
||||
"""Returns the value for the specified checksum if found or None otherwise."""
|
||||
|
||||
data=self._execute("SELECT value FROM %s WHERE version=? AND pkgarch=? AND checksum=?;" % self.table,
|
||||
(version, pkgarch, checksum))
|
||||
row=data.fetchone()
|
||||
if row is not None:
|
||||
return row[0]
|
||||
else:
|
||||
return None
|
||||
|
||||
def find_max_value(self, version, pkgarch):
|
||||
"""Returns the greatest value for (version, pkgarch), or None if not found. Doesn't create a new value"""
|
||||
|
||||
data = self._execute("SELECT max(value) FROM %s where version=? AND pkgarch=?;" % (self.table),
|
||||
(version, pkgarch))
|
||||
row = data.fetchone()
|
||||
if row is not None:
|
||||
return row[0]
|
||||
else:
|
||||
return None
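Taken together, these helpers back the PR service lookups; here is a hedged sketch of the intended call pattern, where the package details and checksum are placeholders and `table` is assumed to be an open PRTable.

# Hedged sketch of querying a PRTable (all values hypothetical).
version, pkgarch = "1.0-r0", "cortexa57"
checksum = "d41d8cd98f00b204e9800998ecf8427e"

if table.test_package(version, pkgarch):
    # Package already known: inspect existing values without inserting.
    print("max PR so far:", table.find_max_value(version, pkgarch))
    print("PR for this checksum:", table.find_value(version, pkgarch, checksum))
else:
    # First time this package is seen; get_value() allocates a new value.
    print("allocated PR:", table.get_value(version, pkgarch, checksum))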
|
||||
|
||||
def _get_value_hist(self, version, pkgarch, checksum):
|
||||
def _getValueHist(self, version, pkgarch, checksum):
|
||||
data=self._execute("SELECT value FROM %s WHERE version=? AND pkgarch=? AND checksum=?;" % self.table,
|
||||
(version, pkgarch, checksum))
|
||||
row=data.fetchone()
|
||||
@@ -133,7 +87,7 @@ class PRTable(object):
|
||||
else:
|
||||
#no value found, try to insert
|
||||
if self.read_only:
|
||||
data = self._execute("SELECT ifnull(max(value)+1, 0) FROM %s where version=? AND pkgarch=?;" % (self.table),
|
||||
data = self._execute("SELECT ifnull(max(value)+1,0) FROM %s where version=? AND pkgarch=?;" % (self.table),
|
||||
(version, pkgarch))
|
||||
row = data.fetchone()
|
||||
if row is not None:
|
||||
@@ -142,9 +96,9 @@ class PRTable(object):
|
||||
return 0
|
||||
|
||||
try:
|
||||
self._execute("INSERT INTO %s VALUES (?, ?, ?, (select ifnull(max(value)+1, 0) from %s where version=? AND pkgarch=?));"
|
||||
% (self.table, self.table),
|
||||
(version, pkgarch, checksum, version, pkgarch))
|
||||
self._execute("INSERT INTO %s VALUES (?, ?, ?, (select ifnull(max(value)+1,0) from %s where version=? AND pkgarch=?));"
|
||||
% (self.table,self.table),
|
||||
(version,pkgarch, checksum,version, pkgarch))
|
||||
except sqlite3.IntegrityError as exc:
|
||||
logger.error(str(exc))
|
||||
|
||||
@@ -158,10 +112,10 @@ class PRTable(object):
|
||||
else:
|
||||
raise prserv.NotFoundError
|
||||
|
||||
def _get_value_no_hist(self, version, pkgarch, checksum):
|
||||
def _getValueNohist(self, version, pkgarch, checksum):
|
||||
data=self._execute("SELECT value FROM %s \
|
||||
WHERE version=? AND pkgarch=? AND checksum=? AND \
|
||||
value >= (select max(value) from %s where version=? AND pkgarch=?);"
|
||||
value >= (select max(value) from %s where version=? AND pkgarch=?);"
|
||||
% (self.table, self.table),
|
||||
(version, pkgarch, checksum, version, pkgarch))
|
||||
row=data.fetchone()
|
||||
@@ -170,13 +124,17 @@ class PRTable(object):
|
||||
else:
|
||||
#no value found, try to insert
|
||||
if self.read_only:
|
||||
data = self._execute("SELECT ifnull(max(value)+1, 0) FROM %s where version=? AND pkgarch=?;" % (self.table),
|
||||
data = self._execute("SELECT ifnull(max(value)+1,0) FROM %s where version=? AND pkgarch=?;" % (self.table),
|
||||
(version, pkgarch))
|
||||
return data.fetchone()[0]
|
||||
row = data.fetchone()
|
||||
if row is not None:
|
||||
return row[0]
|
||||
else:
|
||||
return 0
|
||||
|
||||
try:
|
||||
self._execute("INSERT OR REPLACE INTO %s VALUES (?, ?, ?, (select ifnull(max(value)+1, 0) from %s where version=? AND pkgarch=?));"
|
||||
% (self.table, self.table),
|
||||
self._execute("INSERT OR REPLACE INTO %s VALUES (?, ?, ?, (select ifnull(max(value)+1,0) from %s where version=? AND pkgarch=?));"
|
||||
% (self.table,self.table),
|
||||
(version, pkgarch, checksum, version, pkgarch))
|
||||
except sqlite3.IntegrityError as exc:
|
||||
logger.error(str(exc))
|
||||
@@ -192,17 +150,17 @@ class PRTable(object):
|
||||
else:
|
||||
raise prserv.NotFoundError
|
||||
|
||||
def get_value(self, version, pkgarch, checksum):
|
||||
def getValue(self, version, pkgarch, checksum):
|
||||
if self.nohist:
|
||||
return self._get_value_no_hist(version, pkgarch, checksum)
|
||||
return self._getValueNohist(version, pkgarch, checksum)
|
||||
else:
|
||||
return self._get_value_hist(version, pkgarch, checksum)
|
||||
return self._getValueHist(version, pkgarch, checksum)
|
||||
|
||||
def _import_hist(self, version, pkgarch, checksum, value):
|
||||
def _importHist(self, version, pkgarch, checksum, value):
|
||||
if self.read_only:
|
||||
return None
|
||||
|
||||
val = None
|
||||
val = None
|
||||
data = self._execute("SELECT value FROM %s WHERE version=? AND pkgarch=? AND checksum=?;" % self.table,
|
||||
(version, pkgarch, checksum))
|
||||
row = data.fetchone()
|
||||
@@ -225,27 +183,27 @@ class PRTable(object):
|
||||
val = row[0]
|
||||
return val
|
||||
|
||||
def _import_no_hist(self, version, pkgarch, checksum, value):
|
||||
def _importNohist(self, version, pkgarch, checksum, value):
|
||||
if self.read_only:
|
||||
return None
|
||||
|
||||
try:
|
||||
#try to insert
|
||||
self._execute("INSERT INTO %s VALUES (?, ?, ?, ?);" % (self.table),
|
||||
(version, pkgarch, checksum, value))
|
||||
(version, pkgarch, checksum,value))
|
||||
except sqlite3.IntegrityError as exc:
|
||||
#already have the record, try to update
|
||||
try:
|
||||
self._execute("UPDATE %s SET value=? WHERE version=? AND pkgarch=? AND checksum=? AND value<?"
|
||||
self._execute("UPDATE %s SET value=? WHERE version=? AND pkgarch=? AND checksum=? AND value<?"
|
||||
% (self.table),
|
||||
(value, version, pkgarch, checksum, value))
|
||||
(value,version,pkgarch,checksum,value))
|
||||
except sqlite3.IntegrityError as exc:
|
||||
logger.error(str(exc))
|
||||
|
||||
self.dirty = True
|
||||
|
||||
data = self._execute("SELECT value FROM %s WHERE version=? AND pkgarch=? AND checksum=? AND value>=?;" % self.table,
|
||||
(version, pkgarch, checksum, value))
|
||||
(version,pkgarch,checksum,value))
|
||||
row=data.fetchone()
|
||||
if row is not None:
|
||||
return row[0]
|
||||
@@ -254,33 +212,33 @@ class PRTable(object):
|
||||
|
||||
def importone(self, version, pkgarch, checksum, value):
|
||||
if self.nohist:
|
||||
return self._import_no_hist(version, pkgarch, checksum, value)
|
||||
return self._importNohist(version, pkgarch, checksum, value)
|
||||
else:
|
||||
return self._import_hist(version, pkgarch, checksum, value)
|
||||
return self._importHist(version, pkgarch, checksum, value)
|
||||
|
||||
def export(self, version, pkgarch, checksum, colinfo):
|
||||
metainfo = {}
|
||||
#column info
|
||||
#column info
|
||||
if colinfo:
|
||||
metainfo["tbl_name"] = self.table
|
||||
metainfo["core_ver"] = prserv.__version__
|
||||
metainfo["col_info"] = []
|
||||
metainfo['tbl_name'] = self.table
|
||||
metainfo['core_ver'] = prserv.__version__
|
||||
metainfo['col_info'] = []
|
||||
data = self._execute("PRAGMA table_info(%s);" % self.table)
|
||||
for row in data:
|
||||
col = {}
|
||||
col["name"] = row["name"]
|
||||
col["type"] = row["type"]
|
||||
col["notnull"] = row["notnull"]
|
||||
col["dflt_value"] = row["dflt_value"]
|
||||
col["pk"] = row["pk"]
|
||||
metainfo["col_info"].append(col)
|
||||
col['name'] = row['name']
|
||||
col['type'] = row['type']
|
||||
col['notnull'] = row['notnull']
|
||||
col['dflt_value'] = row['dflt_value']
|
||||
col['pk'] = row['pk']
|
||||
metainfo['col_info'].append(col)
|
||||
|
||||
#data info
|
||||
datainfo = []
|
||||
|
||||
if self.nohist:
|
||||
sqlstmt = "SELECT T1.version, T1.pkgarch, T1.checksum, T1.value FROM %s as T1, \
|
||||
(SELECT version, pkgarch, max(value) as maxvalue FROM %s GROUP BY version, pkgarch) as T2 \
|
||||
(SELECT version,pkgarch,max(value) as maxvalue FROM %s GROUP BY version,pkgarch) as T2 \
|
||||
WHERE T1.version=T2.version AND T1.pkgarch=T2.pkgarch AND T1.value=T2.maxvalue " % (self.table, self.table)
|
||||
else:
|
||||
sqlstmt = "SELECT * FROM %s as T1 WHERE 1=1 " % self.table
|
||||
@@ -303,12 +261,12 @@ class PRTable(object):
|
||||
else:
|
||||
data = self._execute(sqlstmt)
|
||||
for row in data:
|
||||
if row["version"]:
|
||||
if row['version']:
|
||||
col = {}
|
||||
col["version"] = row["version"]
|
||||
col["pkgarch"] = row["pkgarch"]
|
||||
col["checksum"] = row["checksum"]
|
||||
col["value"] = row["value"]
|
||||
col['version'] = row['version']
|
||||
col['pkgarch'] = row['pkgarch']
|
||||
col['checksum'] = row['checksum']
|
||||
col['value'] = row['value']
|
||||
datainfo.append(col)
|
||||
return (metainfo, datainfo)
|
||||
|
||||
@@ -317,7 +275,7 @@ class PRTable(object):
|
||||
for line in self.conn.iterdump():
|
||||
writeCount = writeCount + len(line) + 1
|
||||
fd.write(line)
|
||||
fd.write("\n")
|
||||
fd.write('\n')
|
||||
return writeCount
|
||||
|
||||
class PRData(object):
|
||||
@@ -344,7 +302,7 @@ class PRData(object):
|
||||
def disconnect(self):
|
||||
self.connection.close()
|
||||
|
||||
def __getitem__(self, tblname):
|
||||
def __getitem__(self,tblname):
|
||||
if not isinstance(tblname, str):
|
||||
raise TypeError("tblname argument must be a string, not '%s'" %
|
||||
type(tblname))
|
||||
@@ -358,4 +316,4 @@ class PRData(object):
|
||||
if tblname in self._tables:
|
||||
del self._tables[tblname]
|
||||
logger.info("drop table %s" % (tblname))
|
||||
self.connection.execute("DROP TABLE IF EXISTS %s;" % tblname)
|
||||
self.connection.execute("DROP TABLE IF EXISTS %s;" % tblname)
|
||||
|
||||
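Both variants of the insert statements above lean on SQLite's ifnull(max(value)+1, 0) subselect to hand out the next PR value per (version, pkgarch), starting from 0 when no row exists yet. A minimal standalone sketch of that allocation pattern follows; it uses an in-memory database and a hypothetical table layout, not the real prserv schema or connection setup.

import sqlite3

# Sketch only: the table/column names mirror the diff above, but this is not
# the actual prserv database code.
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE PRMAIN_nohist (version TEXT, pkgarch TEXT, "
             "checksum TEXT, value INTEGER, "
             "PRIMARY KEY (version, pkgarch, checksum))")

def next_pr(version, pkgarch, checksum):
    # Same idea as the INSERT statements above: the new row's value is
    # max(value)+1 for this (version, pkgarch), or 0 if none exists yet.
    conn.execute(
        "INSERT OR REPLACE INTO PRMAIN_nohist VALUES (?, ?, ?, "
        "(SELECT ifnull(max(value)+1, 0) FROM PRMAIN_nohist "
        " WHERE version=? AND pkgarch=?))",
        (version, pkgarch, checksum, version, pkgarch))
    row = conn.execute(
        "SELECT value FROM PRMAIN_nohist WHERE version=? AND pkgarch=? AND checksum=?",
        (version, pkgarch, checksum)).fetchone()
    return row[0]

print(next_pr("1.0-r0", "qemux86", "aaaa"))  # prints 0
print(next_pr("1.0-r0", "qemux86", "bbbb"))  # prints 1
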
@@ -20,101 +20,77 @@ PIDPREFIX = "/tmp/PRServer_%s_%s.pid"
singleton = None

class PRServerClient(bb.asyncrpc.AsyncServerConnection):
    def __init__(self, socket, server):
        super().__init__(socket, "PRSERVICE", server.logger)
        self.server = server

    def __init__(self, reader, writer, table, read_only):
        super().__init__(reader, writer, 'PRSERVICE', logger)
        self.handlers.update({
            "get-pr": self.handle_get_pr,
            "test-pr": self.handle_test_pr,
            "test-package": self.handle_test_package,
            "max-package-pr": self.handle_max_package_pr,
            "import-one": self.handle_import_one,
            "export": self.handle_export,
            "is-readonly": self.handle_is_readonly,
            'get-pr': self.handle_get_pr,
            'import-one': self.handle_import_one,
            'export': self.handle_export,
            'is-readonly': self.handle_is_readonly,
        })
        self.table = table
        self.read_only = read_only

    def validate_proto_version(self):
        return (self.proto_version == (1, 0))

    async def dispatch_message(self, msg):
        try:
            return await super().dispatch_message(msg)
            await super().dispatch_message(msg)
        except:
            self.server.table.sync()
            self.table.sync()
            raise
        else:
            self.server.table.sync_if_dirty()

    async def handle_test_pr(self, request):
        '''Finds the PR value corresponding to the request. If not found, returns None and doesn't insert a new value'''
        version = request["version"]
        pkgarch = request["pkgarch"]
        checksum = request["checksum"]

        value = self.server.table.find_value(version, pkgarch, checksum)
        return {"value": value}

    async def handle_test_package(self, request):
        '''Tells whether there are entries for (version, pkgarch) in the db. Returns True or False'''
        version = request["version"]
        pkgarch = request["pkgarch"]

        value = self.server.table.test_package(version, pkgarch)
        return {"value": value}

    async def handle_max_package_pr(self, request):
        '''Finds the greatest PR value for (version, pkgarch) in the db. Returns None if no entry was found'''
        version = request["version"]
        pkgarch = request["pkgarch"]

        value = self.server.table.find_max_value(version, pkgarch)
        return {"value": value}
            self.table.sync_if_dirty()

    async def handle_get_pr(self, request):
        version = request["version"]
        pkgarch = request["pkgarch"]
        checksum = request["checksum"]
        version = request['version']
        pkgarch = request['pkgarch']
        checksum = request['checksum']

        response = None
        try:
            value = self.server.table.get_value(version, pkgarch, checksum)
            response = {"value": value}
            value = self.table.getValue(version, pkgarch, checksum)
            response = {'value': value}
        except prserv.NotFoundError:
            self.logger.error("failure storing value in database for (%s, %s)",version, checksum)
            logger.error("can not find value for (%s, %s)",version, checksum)
        except sqlite3.Error as exc:
            logger.error(str(exc))

        return response
        self.write_message(response)

    async def handle_import_one(self, request):
        response = None
        if not self.server.read_only:
            version = request["version"]
            pkgarch = request["pkgarch"]
            checksum = request["checksum"]
            value = request["value"]
        if not self.read_only:
            version = request['version']
            pkgarch = request['pkgarch']
            checksum = request['checksum']
            value = request['value']

            value = self.server.table.importone(version, pkgarch, checksum, value)
            value = self.table.importone(version, pkgarch, checksum, value)
            if value is not None:
                response = {"value": value}
                response = {'value': value}

        return response
        self.write_message(response)

    async def handle_export(self, request):
        version = request["version"]
        pkgarch = request["pkgarch"]
        checksum = request["checksum"]
        colinfo = request["colinfo"]
        version = request['version']
        pkgarch = request['pkgarch']
        checksum = request['checksum']
        colinfo = request['colinfo']

        try:
            (metainfo, datainfo) = self.server.table.export(version, pkgarch, checksum, colinfo)
            (metainfo, datainfo) = self.table.export(version, pkgarch, checksum, colinfo)
        except sqlite3.Error as exc:
            self.logger.error(str(exc))
            logger.error(str(exc))
            metainfo = datainfo = None

        return {"metainfo": metainfo, "datainfo": datainfo}
        response = {'metainfo': metainfo, 'datainfo': datainfo}
        self.write_message(response)

    async def handle_is_readonly(self, request):
        return {"readonly": self.server.read_only}
        response = {'readonly': self.read_only}
        self.write_message(response)

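All of the handlers above follow the same shape: the client sends a dict carrying the parameters for one of the registered commands ('get-pr', 'import-one', 'export', 'is-readonly', plus the newer 'test-pr', 'test-package' and 'max-package-pr'), and the server answers with a small dict; only the delivery differs between the two sides of the diff (returning the dict versus calling write_message()). Roughly, a 'get-pr' exchange carries payloads like the sketch below; the field names come from the handlers, the values are made up, and the actual framing is handled by bb.asyncrpc.

# Illustrative payloads only -- not the real wire format.
request = {
    "version": "PN:example-package-1.0-r0",   # hypothetical version key
    "pkgarch": "qemux86_64",
    "checksum": "0123456789abcdef",
}
# handle_get_pr() looks the checksum up (or allocates a new value) and replies:
response = {"value": 6}
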
class PRServer(bb.asyncrpc.AsyncServer):
    def __init__(self, dbfile, read_only=False):
@@ -123,23 +99,20 @@ class PRServer(bb.asyncrpc.AsyncServer):
        self.table = None
        self.read_only = read_only

    def accept_client(self, socket):
        return PRServerClient(socket, self)
    def accept_client(self, reader, writer):
        return PRServerClient(reader, writer, self.table, self.read_only)

    def start(self):
        tasks = super().start()
    def _serve_forever(self):
        self.db = prserv.db.PRData(self.dbfile, read_only=self.read_only)
        self.table = self.db["PRMAIN"]

        self.logger.info("Started PRServer with DBfile: %s, Address: %s, PID: %s" %
        logger.info("Started PRServer with DBfile: %s, Address: %s, PID: %s" %
                    (self.dbfile, self.address, str(os.getpid())))

        return tasks
        super()._serve_forever()

    async def stop(self):
        self.table.sync_if_dirty()
        self.db.disconnect()
        await super().stop()

    def signal_handler(self):
        super().signal_handler()
@@ -156,12 +129,12 @@ class PRServSingleton(object):
    def start(self):
        self.prserv = PRServer(self.dbfile)
        self.prserv.start_tcp_server(socket.gethostbyname(self.host), self.port)
        self.process = self.prserv.serve_as_process(log_level=logging.WARNING)
        self.process = self.prserv.serve_as_process()

        if not self.prserv.address:
            raise PRServiceConfigError
        if not self.port:
            self.port = int(self.prserv.address.rsplit(":", 1)[1])
            self.port = int(self.prserv.address.rsplit(':', 1)[1])

def run_as_daemon(func, pidfile, logfile):
    """
@@ -197,18 +170,18 @@ def run_as_daemon(func, pidfile, logfile):
    # stdout/stderr or it could be 'real' unix fd forking where we need
    # to physically close the fds to prevent the program launching us from
    # potentially hanging on a pipe. Handle both cases.
    si = open("/dev/null", "r")
    si = open('/dev/null', 'r')
    try:
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(si.fileno(),sys.stdin.fileno())
    except (AttributeError, io.UnsupportedOperation):
        sys.stdin = si
    so = open(logfile, "a+")
    so = open(logfile, 'a+')
    try:
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(so.fileno(),sys.stdout.fileno())
    except (AttributeError, io.UnsupportedOperation):
        sys.stdout = so
    try:
        os.dup2(so.fileno(), sys.stderr.fileno())
        os.dup2(so.fileno(),sys.stderr.fileno())
    except (AttributeError, io.UnsupportedOperation):
        sys.stderr = so

@@ -226,7 +199,7 @@ def run_as_daemon(func, pidfile, logfile):

    # write pidfile
    pid = str(os.getpid())
    with open(pidfile, "w") as pf:
    with open(pidfile, 'w') as pf:
        pf.write("%s\n" % pid)

    func()
@@ -271,15 +244,15 @@ def stop_daemon(host, port):
        # so at least advise the user which ports the corresponding server is listening
        ports = []
        portstr = ""
        for pf in glob.glob(PIDPREFIX % (ip, "*")):
        for pf in glob.glob(PIDPREFIX % (ip,'*')):
            bn = os.path.basename(pf)
            root, _ = os.path.splitext(bn)
            ports.append(root.split("_")[-1])
            ports.append(root.split('_')[-1])
        if len(ports):
            portstr = "Wrong port? Other ports listening at %s: %s" % (host, " ".join(ports))
            portstr = "Wrong port? Other ports listening at %s: %s" % (host, ' '.join(ports))

        sys.stderr.write("pidfile %s does not exist. Daemon not running? %s\n"
                         % (pidfile, portstr))
                         % (pidfile,portstr))
        return 1

    try:
@@ -288,11 +261,8 @@ def stop_daemon(host, port):
        os.kill(pid, signal.SIGTERM)
        time.sleep(0.1)

        try:
        if os.path.exists(pidfile):
            os.remove(pidfile)
        except FileNotFoundError:
            # The PID file might have been removed by the exiting process
            pass

    except OSError as e:
        err = str(e)
@@ -310,7 +280,7 @@ def is_running(pid):
        return True

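run_as_daemon() records the daemon's PID in a pidfile, stop_daemon() later reads that file back and delivers SIGTERM, and is_running() is the usual "signal 0" liveness probe. The pidfile round trip condenses to something like the sketch below, with a made-up path rather than the real PIDPREFIX pattern.

import os, signal, errno

pidfile = "/tmp/example_prserver.pid"   # hypothetical path, not PIDPREFIX

def write_pidfile():
    # What run_as_daemon() does after forking: record the daemon's PID.
    with open(pidfile, "w") as pf:
        pf.write("%s\n" % os.getpid())

def is_running(pid):
    # Signal 0 checks liveness/permissions without delivering a signal;
    # ESRCH means the process no longer exists.
    try:
        os.kill(pid, 0)
    except OSError as err:
        return err.errno != errno.ESRCH
    return True

def stop():
    # What stop_daemon() boils down to: read the PID back and SIGTERM it.
    with open(pidfile) as pf:
        pid = int(pf.readline().strip())
    os.kill(pid, signal.SIGTERM)
    if os.path.exists(pidfile):
        os.remove(pidfile)
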
def is_local_special(host, port):
    if (host == "localhost" or host == "127.0.0.1") and not port:
    if (host == 'localhost' or host == '127.0.0.1') and not port:
        return True
    else:
        return False
@@ -321,7 +291,7 @@ class PRServiceConfigError(Exception):
def auto_start(d):
    global singleton

    host_params = list(filter(None, (d.getVar("PRSERV_HOST") or "").split(":")))
    host_params = list(filter(None, (d.getVar('PRSERV_HOST') or '').split(':')))
    if not host_params:
        # Shutdown any existing PR Server
        auto_shutdown()
@@ -330,7 +300,7 @@ def auto_start(d):
    if len(host_params) != 2:
        # Shutdown any existing PR Server
        auto_shutdown()
        logger.critical("\n".join(["PRSERV_HOST: incorrect format",
        logger.critical('\n'.join(['PRSERV_HOST: incorrect format',
                        'Usage: PRSERV_HOST = "<hostname>:<port>"']))
        raise PRServiceConfigError

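auto_start() expects PRSERV_HOST in "<hostname>:<port>" form, which is normally set in a build's local.conf. Two typical values, shown here purely as examples:

PRSERV_HOST = "localhost:0"               # let BitBake start a local PR server on a random port
#PRSERV_HOST = "prserv.example.com:8585"  # or point at an already running shared server (example host/port)
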
@@ -374,17 +344,17 @@ def auto_shutdown():
def ping(host, port):
    from . import client

    with client.PRClient() as conn:
        conn.connect_tcp(host, port)
        return conn.ping()
    conn = client.PRClient()
    conn.connect_tcp(host, port)
    return conn.ping()

def connect(host, port):
    from . import client

    global singleton

    if host.strip().lower() == "localhost" and not port:
        host = "localhost"
    if host.strip().lower() == 'localhost' and not port:
        host = 'localhost'
        port = singleton.port

    conn = client.PRClient()

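Both versions of ping() do the same work: create a PRClient, connect over TCP and issue a ping; the newer code simply uses the client as a context manager so the connection is closed on exit. A small usage sketch built only from the calls visible in this hunk, with placeholder host and port values and the assumption that bitbake/lib is on sys.path:

from prserv import client  # assumes bitbake/lib is importable

def check_pr_server(host="127.0.0.1", port=8585):
    # Context-manager form, as on the newer side of the diff above.
    with client.PRClient() as conn:
        conn.connect_tcp(host, port)
        return conn.ping()
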
@@ -6,7 +6,7 @@
# SPDX-License-Identifier: GPL-2.0-only
#

from django.urls import re_path as url
from django.conf.urls import url

import bldcollector.views


@@ -14,11 +14,8 @@ import subprocess
import toastermain
from django.views.decorators.csrf import csrf_exempt

from toastermain.logs import log_view_mixin


@csrf_exempt
@log_view_mixin
def eventfile(request):
    """ Receives a file by POST, and runs toaster-eventreply on this file """
    if request.method != "POST":

@@ -4,7 +4,7 @@

from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import force_str
from django.utils.encoding import force_text
from orm.models import Project, Build, Layer_Version

import logging
@@ -124,7 +124,7 @@ class BuildRequest(models.Model):
        return self.brvariable_set.get(name="MACHINE").value

    def __str__(self):
        return force_str('%s %s' % (self.project, self.get_state_display()))
        return force_text('%s %s' % (self.project, self.get_state_display()))

    # These tables specify the settings for running an actual build.
    # They MUST be kept in sync with the tables in orm.models.Project*

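The force_text/force_str swap in this hunk is the Django rename: force_text was the old Python 2-compatible alias, deprecated in Django 3.0 and removed in Django 4.0, so code targeting newer Django imports force_str instead. The replacement is one-for-one, for example (placeholder values):

from django.utils.encoding import force_str  # was: force_text

label = force_str("%s %s" % ("example-project", "queued"))
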
1
bitbake/lib/toaster/logs/.gitignore
vendored
1
bitbake/lib/toaster/logs/.gitignore
vendored
@@ -1 +0,0 @@
*.log*

@@ -12,7 +12,7 @@
</object>
<object model="orm.toastersetting" pk="4">
  <field type="CharField" name="name">DEFCONF_MACHINE</field>
  <field type="CharField" name="value">qemux86-64</field>
  <field type="CharField" name="value">qemux86</field>
</object>
<object model="orm.toastersetting" pk="5">
  <field type="CharField" name="name">DEFCONF_SSTATE_DIR</field>

Some files were not shown because too many files have changed in this diff