Mirror of https://git.yoctoproject.org/poky, synced 2026-02-15 21:23:04 +01:00

Compare commits: yocto-5.2. ... mickledore (131 commits)
Commits in this comparison (SHA1):

c5c69f78fc ad1f61d866 5d569941fa fa603fbd47 1805cb9232 96a7cf3331 649debbc4f 9fa1f1403f
6c64c50802 abe094e789 3dd115a78c 98952875bc 10f4547dff 465036fa0c 9c19a13f73 fb99682785
b735b2d11c 00dd38813b 70c3caa787 da27e5f570 2c49e1c152 5d50cbf525 d8c4df476e 626e16baaf
d30fb748fe 55237f38ce 5b75d8af0b d71b2e9385 baea4975da 9c93500e73 18359cd619 50c9991085
30d015c636 cd1e4106e6 adfba885be b5d97edb11 60f959a0b0 d14a46aef2 40355fd859 eb319d974c
b8e4efae7f 7131b9c210 755967f4d7 8319b7e6fd fcdf501ba0 9e144f49cb ea7d581b13 291424b7f4
9326c714c5 5d347d6ab1 a962829eb9 d22204fd6f 6974d4c6b5 f2892e2aad 93ab4e1fc2 e37179d270
63e0c05652 f8b02f941a 63defae6bd 12dcea0dde 0658aed579 6e73a100ba 3dd7ff0309 9281ddf7a9
db920a7f00 c7133ecad7 67f34911b8 e6eea3249e 98eb7954c6 ab60045c61 f95bbd3ef6 ddb298ce89
d68f8623be 7692dce85a 387df791fe 772fdfb114 7c3b868a28 2c2ab20c21 4a1c9c4eb1 833761c898
050c75f8a3 f65afa2ef0 7d464df227 3ac07c43a1 a9fa7412c7 55838bb626 edb884a7ed 878271e1a1
21790e71d5 b8007d3c22 bca7ec652f f73e712b6b 60012ae54a 45ccdcfcbc 8b3b075dd5 c3248e0da1
65cc65fa8d 410290c2f5 ea2feb23bc eb292619e7 b93e695de6 338bc72e4d 3c0b78802d 23d946b9ba
1b9bcc7b19 1d4d5371ec 4f833991c2 e55e243f84 20c58a6cb2 c3c439d62a bdf37e43b0 958d52f37b
42a6d47754 64111246ce b1b4ad9a80 e9af582acd 0a75b4afc8 d109d6452f 18d1bcefec 1000c4f2c0
801734bc6c a91fb4ff74 54f3339f38 50c5035dc8 c078df73b9 c570cf1733 39428da6b6 acf268757f
4bb775aecb 7ebcf1477a fe76a450eb
@@ -1,4 +0,0 @@
[b4]
prep-perpatch-check-cmd = ./scripts/b4-wrapper-poky.py prep-perpatch-check-cmd
send-auto-cc-cmd = ./scripts/b4-wrapper-poky.py send-auto-cc-cmd
send-auto-to-cmd = ./scripts/b4-wrapper-poky.py send-auto-to-cmd
1 .gitignore vendored
@@ -36,4 +36,3 @@ _toaster_clones/
downloads/
sstate-cache/
toaster.sqlite
.vscode/
@@ -41,7 +41,6 @@ Component/Subsystem Maintainers
* devtool: Saul Wold
* eSDK: Saul Wold
* overlayfs: Vyacheslav Yurkov
* Patchtest: Trevor Gamblin

Maintainers needed
------------------
@@ -53,6 +52,7 @@ Maintainers needed
* error reporting system/web UI
* wic
* Patchwork
* Patchtest
* Matchbox
* Sato
* Autobuilder
@@ -67,3 +67,5 @@ Shadow maintainers/development needed

* toaster
* bitbake
35 Makefile Normal file
@@ -0,0 +1,35 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = _build
DESTDIR = final

ifeq ($(shell if which $(SPHINXBUILD) >/dev/null 2>&1; then echo 1; else echo 0; fi),0)
$(error "The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed")
endif

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile.sphinx clean publish

publish: Makefile.sphinx html singlehtml
	rm -rf $(BUILDDIR)/$(DESTDIR)/
	mkdir -p $(BUILDDIR)/$(DESTDIR)/
	cp -r $(BUILDDIR)/html/* $(BUILDDIR)/$(DESTDIR)/
	cp $(BUILDDIR)/singlehtml/index.html $(BUILDDIR)/$(DESTDIR)/singleindex.html
	sed -i -e 's@index.html#@singleindex.html#@g' $(BUILDDIR)/$(DESTDIR)/singleindex.html

clean:
	@rm -rf $(BUILDDIR)

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile.sphinx
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
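The Makefile above drives Sphinx through its "make mode". As a rough illustration only (not part of the repository), the catch-all target amounts to invoking sphinx-build directly; the sketch below assumes sphinx-build is installed and reuses the same SOURCEDIR and BUILDDIR values.

```
# Sketch: run the same Sphinx "make mode" build the Makefile's catch-all target runs.
# Assumes sphinx-build is installed and on PATH; paths mirror SOURCEDIR/BUILDDIR above.
import subprocess

def sphinx_make(target, sourcedir=".", builddir="_build"):
    # Equivalent to: $(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)"
    subprocess.run(["sphinx-build", "-M", target, sourcedir, builddir], check=True)

if __name__ == "__main__":
    sphinx_make("html")        # like "make html"
    sphinx_make("singlehtml")  # like "make singlehtml", used by the publish target
```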
@@ -6,27 +6,24 @@ of OpenEmbedded. It is distro-less (can build a functional image with
DISTRO = "nodistro") and contains only emulated machine support.

For information about OpenEmbedded, see the OpenEmbedded website:
<https://www.openembedded.org/>
https://www.openembedded.org/

The Yocto Project has extensive documentation about OE including a reference manual
which can be found at:
<https://docs.yoctoproject.org/>
https://docs.yoctoproject.org/


Contributing
------------

Please refer to our contributor guide here: <https://docs.yoctoproject.org/dev/contributor-guide/>
for full details on how to submit changes.

As a quick guide, patches should be sent to openembedded-core@lists.openembedded.org
The git command to do that would be:

```
git send-email -M -1 --to openembedded-core@lists.openembedded.org
```
Please refer to
https://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
for guidelines on how to submit patches.

Mailing list:
<https://lists.openembedded.org/g/openembedded-core>

https://lists.openembedded.org/g/openembedded-core

Source code:
<https://git.openembedded.org/openembedded-core/>

https://git.openembedded.org/openembedded-core/
@@ -5,10 +5,10 @@ To simplify development, the build system supports building images to
work with the QEMU emulator in system emulation mode. Several architectures
are currently supported in 32 and 64 bit variants:

* ARM (qemuarm + qemuarm64)
* x86 (qemux86 + qemux86-64)
* PowerPC (qemuppc only)
* MIPS (qemumips + qemumips64)
* ARM (qemuarm + qemuarm64)
* x86 (qemux86 + qemux86-64)
* PowerPC (qemuppc only)
* MIPS (qemumips + qemumips64)

Use of the QEMU images is covered in the Yocto Project Reference Manual.
The appropriate MACHINE variable value corresponding to the target is given
22 SECURITY.md
@@ -1,22 +0,0 @@
How to Report a Potential Vulnerability
=======================================

If you would like to report a public issue (for example, one with a released
CVE number), please report it using the
[Security Bugzilla](https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security)

If you are dealing with a not-yet released or urgent issue, please send a
message to security AT yoctoproject DOT org, including as many details as
possible: the layer or software module affected, the recipe and its version,
and any example code, if available.

Branches maintained with security fixes
---------------------------------------

See [Stable release and LTS](https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS)
for detailed info regarding the policies and maintenance of Stable branches.

The [Release page](https://wiki.yoctoproject.org/wiki/Releases) contains
a list of all releases of the Yocto Project. Versions in grey are no longer
actively maintained with security patches, but well-tested patches may still
be accepted for them for significant issues.
@@ -1,4 +0,0 @@
[b4]
send-series-to = bitbake-devel@lists.openembedded.org
send-auto-cc-cmd = ./contrib/b4-wrapper-bitbake.py send-auto-cc-cmd
prep-pre-flight-checks = disable-needs-checking
@@ -18,19 +18,16 @@ Bitbake requires Python version 3.8 or newer.
Contributing
------------

Please refer to our contributor guide here: https://docs.yoctoproject.org/contributor-guide/
for full details on how to submit changes.

As a quick guide, patches should be sent to bitbake-devel@lists.openembedded.org
The git command to do that would be:
Please refer to
https://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
for guidelines on how to submit patches, just note that the latter documentation is intended
for OpenEmbedded (and its core) not bitbake patches (bitbake-devel@lists.openembedded.org)
but in general main guidelines apply. Once the commit(s) have been created, the way to send
the patch is through git-send-email. For example, to send the last commit (HEAD) on current
branch, type:

git send-email -M -1 --to bitbake-devel@lists.openembedded.org

If you're sending a patch related to the BitBake manual, make sure you copy
the Yocto Project documentation mailing list:

git send-email -M -1 --to bitbake-devel@lists.openembedded.org --cc docs@lists.yoctoproject.org

Mailing list:

https://lists.openembedded.org/g/bitbake-devel
@@ -48,7 +45,8 @@ it has so many corner cases. The datastore has many tests too. Testing with the
recommended before submitting patches, particularly to the fetcher and datastore. We also
appreciate new test cases and may require them for more obscure issues.

To run the tests "zstd" and "git" must be installed.
To run the tests "zstd" and "git" must be installed. Git must be correctly configured, in
particular the user.email and user.name values must be set.

The assumption is made that this testsuite is run from an initialized OpenEmbedded build
environment (i.e. `source oe-init-build-env` is used). If this is not the case, run the
@@ -56,8 +54,3 @@ testsuite as follows:

export PATH=$(pwd)/bin:$PATH
bin/bitbake-selftest

The testsuite can alternatively be executed using pytest, e.g. obtained from PyPI (in this
case, the PATH is configured automatically):

pytest
@@ -1,24 +0,0 @@
How to Report a Potential Vulnerability?
========================================

If you would like to report a public issue (for example, one with a released
CVE number), please report it using the
[https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security Security Bugzilla].
If you have a patch ready, submit it following the same procedure as any other
patch as described in README.md.

If you are dealing with a not-yet released or urgent issue, please send a
message to security AT yoctoproject DOT org, including as many details as
possible: the layer or software module affected, the recipe and its version,
and any example code, if available.

Branches maintained with security fixes
---------------------------------------

See [https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS Stable release and LTS]
for detailed info regarding the policies and maintenance of Stable branches.

The [https://wiki.yoctoproject.org/wiki/Releases Release page] contains a list of all
releases of the Yocto Project. Versions in grey are no longer actively maintained with
security patches, but well-tested patches may still be accepted for them for
significant issues.
@@ -27,7 +27,7 @@ from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException

bb.utils.check_system_locale()

__version__ = "2.12.0"
__version__ = "2.4.0"

if __name__ == "__main__":
if __version__ != bb.__version__:
@@ -1 +0,0 @@
bitbake-layers
@@ -72,17 +72,13 @@ def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
elif sig2 not in sigfiles:
logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig2))
sys.exit(1)

latestfiles = [sigfiles[sig1]['path'], sigfiles[sig2]['path']]
latestfiles = [sigfiles[sig1], sigfiles[sig2]]
else:
sigfiles = find_siginfo(bbhandler, pn, taskname)
latestsigs = sorted(sigfiles.keys(), key=lambda h: sigfiles[h]['time'])[-2:]
if not latestsigs:
filedates = find_siginfo(bbhandler, pn, taskname)
latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-2:]
if not latestfiles:
logger.error('No sigdata files found matching %s %s' % (pn, taskname))
sys.exit(1)
latestfiles = [sigfiles[latestsigs[0]]['path']]
if len(latestsigs) > 1:
latestfiles.append(sigfiles[latestsigs[1]]['path'])

return latestfiles

@@ -100,7 +96,7 @@ def recursecb(key, hash1, hash2):
elif hash2 not in hashfiles:
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash2))
else:
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1]['path'], hashfiles[hash2]['path'], recursecb, color=color)
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, color=color)
for change in out2:
for line in change.splitlines():
recout.append(' ' + line)
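In the newer bitbake-diffsigs code above, find_siginfo() returns a dictionary keyed by signature whose values carry a 'path' and a 'time' entry, and the two most recent entries are compared. The following standalone sketch, using made-up data, shows that selection step in isolation.

```
# Sketch: how the newer find_siginfo() result shape (a dict of
# {sig: {'path': ..., 'time': ...}}) is reduced to the two latest sigdata files,
# mirroring the selection logic in the hunk above. The data here is made up.
sigfiles = {
    "aa11": {"path": "stamps/foo.do_compile.sigdata.aa11", "time": 1700000100},
    "bb22": {"path": "stamps/foo.do_compile.sigdata.bb22", "time": 1700000300},
    "cc33": {"path": "stamps/foo.do_compile.sigdata.cc33", "time": 1700000200},
}

# Sort signatures by mtime and keep the last two, newest last.
latestsigs = sorted(sigfiles.keys(), key=lambda h: sigfiles[h]["time"])[-2:]
latestfiles = [sigfiles[s]["path"] for s in latestsigs]
print(latestfiles)  # the two paths that would go to bb.siggen.compare_sigfiles()
```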
@@ -16,7 +16,6 @@ bindir = os.path.dirname(__file__)
topdir = os.path.dirname(bindir)
sys.path[0:0] = [os.path.join(topdir, 'lib')]

import bb.providers
import bb.tinfoil

if __name__ == "__main__":
@@ -27,40 +26,26 @@ if __name__ == "__main__":
parser.add_argument('-f', '--flag', help='Specify a variable flag to query (with --value)', default=None)
parser.add_argument('--value', help='Only report the value, no history and no variable name', action="store_true")
parser.add_argument('-q', '--quiet', help='Silence bitbake server logging', action="store_true")
parser.add_argument('--ignore-undefined', help='Suppress any errors related to undefined variables', action="store_true")
args = parser.parse_args()

if not args.value:
if args.unexpand:
sys.exit("--unexpand only makes sense with --value")
if args.unexpand and not args.value:
print("--unexpand only makes sense with --value")
sys.exit(1)

if args.flag:
sys.exit("--flag only makes sense with --value")
if args.flag and not args.value:
print("--flag only makes sense with --value")
sys.exit(1)

quiet = args.quiet or args.value
with bb.tinfoil.Tinfoil(tracking=True, setup_logging=not quiet) as tinfoil:
with bb.tinfoil.Tinfoil(tracking=True, setup_logging=not args.quiet) as tinfoil:
if args.recipe:
tinfoil.prepare(quiet=3 if quiet else 2)
try:
d = tinfoil.parse_recipe(args.recipe)
except bb.providers.NoProvider as e:
sys.exit(str(e))
tinfoil.prepare(quiet=2)
d = tinfoil.parse_recipe(args.recipe)
else:
tinfoil.prepare(quiet=2, config_only=True)
# Expand keys and run anonymous functions to get identical result to
# "bitbake -e"
d = tinfoil.finalizeData()

value = None
d = tinfoil.config_data
if args.flag:
value = d.getVarFlag(args.variable, args.flag, expand=not args.unexpand)
if value is None and not args.ignore_undefined:
sys.exit(f"The flag '{args.flag}' is not defined for variable '{args.variable}'")
else:
value = d.getVar(args.variable, expand=not args.unexpand)
if value is None and not args.ignore_undefined:
sys.exit(f"The variable '{args.variable}' is not defined")
if args.value:
print(str(value if value is not None else ""))
print(str(d.getVarFlag(args.variable, args.flag, expand=(not args.unexpand))))
elif args.value:
print(str(d.getVar(args.variable, expand=(not args.unexpand))))
else:
bb.data.emit_var(args.variable, d=d, all=True)
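The updated bitbake-getvar path above queries variables through Tinfoil and finalizeData() so the result matches "bitbake -e". A minimal sketch of that flow, assuming it is run from an initialized build environment and using MACHINE purely as an example variable:

```
# Sketch of the query path bitbake-getvar takes, assuming an initialized
# build environment (oe-init-build-env). "MACHINE" is only an example variable.
import bb.tinfoil

with bb.tinfoil.Tinfoil(tracking=True, setup_logging=False) as tinfoil:
    tinfoil.prepare(quiet=2, config_only=True)
    # Expand keys and run anonymous functions, matching "bitbake -e" output
    d = tinfoil.finalizeData()
    value = d.getVar("MACHINE")
    if value is None:
        raise SystemExit("The variable 'MACHINE' is not defined")
    print(value)
```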
@@ -14,10 +14,6 @@ import sys
import threading
import time
import warnings
import netrc
import json
import statistics
import textwrap
warnings.simplefilter("default")

try:
@@ -40,42 +36,18 @@ except ImportError:
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib'))

import hashserv
import bb.asyncrpc

DEFAULT_ADDRESS = 'unix://./hashserve.sock'
METHOD = 'stress.test.method'

def print_user(u):
print(f"Username: {u['username']}")
if "permissions" in u:
print("Permissions: " + " ".join(u["permissions"]))
if "token" in u:
print(f"Token: {u['token']}")


def main():
def handle_get(args, client):
result = client.get_taskhash(args.method, args.taskhash, all_properties=True)
if not result:
return 0

print(json.dumps(result, sort_keys=True, indent=4))
return 0

def handle_get_outhash(args, client):
result = client.get_outhash(args.method, args.outhash, args.taskhash)
if not result:
return 0

print(json.dumps(result, sort_keys=True, indent=4))
return 0

def handle_stats(args, client):
if args.reset:
s = client.reset_stats()
else:
s = client.get_stats()
print(json.dumps(s, sort_keys=True, indent=4))
pprint.pprint(s)
return 0

def handle_stress(args, client):
@@ -83,59 +55,47 @@ def main():
nonlocal found_hashes
nonlocal missed_hashes
nonlocal max_time
nonlocal times

with hashserv.create_client(args.address) as client:
for i in range(args.requests):
taskhash = hashlib.sha256()
taskhash.update(args.taskhash_seed.encode('utf-8'))
taskhash.update(str(i).encode('utf-8'))
client = hashserv.create_client(args.address)

start_time = time.perf_counter()
l = client.get_unihash(METHOD, taskhash.hexdigest())
elapsed = time.perf_counter() - start_time
for i in range(args.requests):
taskhash = hashlib.sha256()
taskhash.update(args.taskhash_seed.encode('utf-8'))
taskhash.update(str(i).encode('utf-8'))

with lock:
if l:
found_hashes += 1
else:
missed_hashes += 1
start_time = time.perf_counter()
l = client.get_unihash(METHOD, taskhash.hexdigest())
elapsed = time.perf_counter() - start_time

times.append(elapsed)
pbar.update()
with lock:
if l:
found_hashes += 1
else:
missed_hashes += 1

max_time = max(elapsed, max_time)
pbar.update()

max_time = 0
found_hashes = 0
missed_hashes = 0
lock = threading.Lock()
times = []
total_requests = args.clients * args.requests
start_time = time.perf_counter()
with ProgressBar(total=args.clients * args.requests) as pbar:
with ProgressBar(total=total_requests) as pbar:
threads = [threading.Thread(target=thread_main, args=(pbar, lock), daemon=False) for _ in range(args.clients)]
for t in threads:
t.start()

for t in threads:
t.join()
total_elapsed = time.perf_counter() - start_time

elapsed = time.perf_counter() - start_time
with lock:
mean = statistics.mean(times)
median = statistics.median(times)
stddev = statistics.pstdev(times)

print(f"Number of clients: {args.clients}")
print(f"Requests per client: {args.requests}")
print(f"Number of requests: {len(times)}")
print(f"Total elapsed time: {total_elapsed:.3f}s")
print(f"Total request rate: {len(times)/total_elapsed:.3f} req/s")
print(f"Average request time: {mean:.3f}s")
print(f"Median request time: {median:.3f}s")
print(f"Request time std dev: {stddev:.3f}s")
print(f"Maximum request time: {max(times):.3f}s")
print(f"Minimum request time: {min(times):.3f}s")
print(f"Hashes found: {found_hashes}")
print(f"Hashes missed: {missed_hashes}")
print("%d requests in %.1fs. %.1f requests per second" % (total_requests, elapsed, total_requests / elapsed))
print("Average request time %.8fs" % (elapsed / total_requests))
print("Max request time was %.8fs" % max_time)
print("Found %d hashes, missed %d" % (found_hashes, missed_hashes))

if args.report:
with ProgressBar(total=args.requests) as pbar:
@@ -153,173 +113,12 @@ def main():
with lock:
pbar.update()

def handle_remove(args, client):
where = {k: v for k, v in args.where}
if where:
result = client.remove(where)
print("Removed %d row(s)" % (result["count"]))
else:
print("No query specified")

def handle_clean_unused(args, client):
result = client.clean_unused(args.max_age)
print("Removed %d rows" % (result["count"]))
return 0

def handle_refresh_token(args, client):
r = client.refresh_token(args.username)
print_user(r)

def handle_set_user_permissions(args, client):
r = client.set_user_perms(args.username, args.permissions)
print_user(r)

def handle_get_user(args, client):
r = client.get_user(args.username)
print_user(r)

def handle_get_all_users(args, client):
users = client.get_all_users()
print("{username:20}| {permissions}".format(username="Username", permissions="Permissions"))
print(("-" * 20) + "+" + ("-" * 20))
for u in users:
print("{username:20}| {permissions}".format(username=u["username"], permissions=" ".join(u["permissions"])))

def handle_new_user(args, client):
r = client.new_user(args.username, args.permissions)
print_user(r)

def handle_delete_user(args, client):
r = client.delete_user(args.username)
print_user(r)

def handle_get_db_usage(args, client):
usage = client.get_db_usage()
print(usage)
tables = sorted(usage.keys())
print("{name:20}| {rows:20}".format(name="Table name", rows="Rows"))
print(("-" * 20) + "+" + ("-" * 20))
for t in tables:
print("{name:20}| {rows:<20}".format(name=t, rows=usage[t]["rows"]))
print()

total_rows = sum(t["rows"] for t in usage.values())
print(f"Total rows: {total_rows}")

def handle_get_db_query_columns(args, client):
columns = client.get_db_query_columns()
print("\n".join(sorted(columns)))

def handle_gc_status(args, client):
result = client.gc_status()
if not result["mark"]:
print("No Garbage collection in progress")
return 0

print("Current Mark: %s" % result["mark"])
print("Total hashes to keep: %d" % result["keep"])
print("Total hashes to remove: %s" % result["remove"])
return 0

def handle_gc_mark(args, client):
where = {k: v for k, v in args.where}
result = client.gc_mark(args.mark, where)
print("New hashes marked: %d" % result["count"])
return 0

def handle_gc_mark_stream(args, client):
stdin = (l.strip() for l in sys.stdin)
marked_hashes = 0

try:
result = client.gc_mark_stream(args.mark, stdin)
marked_hashes = result["count"]
except ConnectionError:
logger.warning(
"Server doesn't seem to support `gc-mark-stream`. Sending "
"hashes sequentially using `gc-mark` API."
)
for line in stdin:
pairs = line.split()
condition = dict(zip(pairs[::2], pairs[1::2]))
result = client.gc_mark(args.mark, condition)
marked_hashes += result["count"]

print("New hashes marked: %d" % marked_hashes)
return 0

def handle_gc_sweep(args, client):
result = client.gc_sweep(args.mark)
print("Removed %d rows" % result["count"])
return 0

def handle_unihash_exists(args, client):
result = client.unihash_exists(args.unihash)
if args.quiet:
return 0 if result else 1

print("true" if result else "false")
return 0

def handle_ping(args, client):
times = []
for i in range(1, args.count + 1):
if not args.quiet:
print(f"Ping {i} of {args.count}... ", end="")
start_time = time.perf_counter()
client.ping()
elapsed = time.perf_counter() - start_time
times.append(elapsed)
if not args.quiet:
print(f"{elapsed:.3f}s")

mean = statistics.mean(times)
median = statistics.median(times)
std_dev = statistics.pstdev(times)

if not args.quiet:
print("------------------------")
print(f"Number of pings: {len(times)}")
print(f"Average round trip time: {mean:.3f}s")
print(f"Median round trip time: {median:.3f}s")
print(f"Round trip time std dev: {std_dev:.3f}s")
print(f"Min time is: {min(times):.3f}s")
print(f"Max time is: {max(times):.3f}s")
return 0

parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Hash Equivalence Client',
epilog=textwrap.dedent(
"""
Possible ADDRESS options are:
unix://PATH Connect to UNIX domain socket at PATH
ws://HOST[:PORT] Connect to websocket at HOST:PORT (default port is 80)
wss://HOST[:PORT] Connect to secure websocket at HOST:PORT (default port is 443)
HOST:PORT Connect to TCP server at HOST:PORT
"""
),
)
parser = argparse.ArgumentParser(description='Hash Equivalence Client')
parser.add_argument('--address', default=DEFAULT_ADDRESS, help='Server address (default "%(default)s")')
parser.add_argument('--log', default='WARNING', help='Set logging level')
parser.add_argument('--login', '-l', metavar="USERNAME", help="Authenticate as USERNAME")
parser.add_argument('--password', '-p', metavar="TOKEN", help="Authenticate using token TOKEN")
parser.add_argument('--become', '-b', metavar="USERNAME", help="Impersonate user USERNAME (if allowed) when performing actions")
parser.add_argument('--no-netrc', '-n', action="store_false", dest="netrc", help="Do not use .netrc")

subparsers = parser.add_subparsers()

get_parser = subparsers.add_parser('get', help="Get the unihash for a taskhash")
get_parser.add_argument("method", help="Method to query")
get_parser.add_argument("taskhash", help="Task hash to query")
get_parser.set_defaults(func=handle_get)

get_outhash_parser = subparsers.add_parser('get-outhash', help="Get output hash information")
get_outhash_parser.add_argument("method", help="Method to query")
get_outhash_parser.add_argument("outhash", help="Output hash to query")
get_outhash_parser.add_argument("taskhash", help="Task hash to query")
get_outhash_parser.set_defaults(func=handle_get_outhash)

stats_parser = subparsers.add_parser('stats', help='Show server stats')
stats_parser.add_argument('--reset', action='store_true',
help='Reset server stats')
@@ -338,79 +137,6 @@ def main():
help='Include string in outhash')
stress_parser.set_defaults(func=handle_stress)

remove_parser = subparsers.add_parser('remove', help="Remove hash entries")
remove_parser.add_argument("--where", "-w", metavar="KEY VALUE", nargs=2, action="append", default=[],
help="Remove entries from table where KEY == VALUE")
remove_parser.set_defaults(func=handle_remove)

clean_unused_parser = subparsers.add_parser('clean-unused', help="Remove unused database entries")
clean_unused_parser.add_argument("max_age", metavar="SECONDS", type=int, help="Remove unused entries older than SECONDS old")
clean_unused_parser.set_defaults(func=handle_clean_unused)

refresh_token_parser = subparsers.add_parser('refresh-token', help="Refresh auth token")
refresh_token_parser.add_argument("--username", "-u", help="Refresh the token for another user (if authorized)")
refresh_token_parser.set_defaults(func=handle_refresh_token)

set_user_perms_parser = subparsers.add_parser('set-user-perms', help="Set new permissions for user")
set_user_perms_parser.add_argument("--username", "-u", help="Username", required=True)
set_user_perms_parser.add_argument("permissions", metavar="PERM", nargs="*", default=[], help="New permissions")
set_user_perms_parser.set_defaults(func=handle_set_user_permissions)

get_user_parser = subparsers.add_parser('get-user', help="Get user")
get_user_parser.add_argument("--username", "-u", help="Username")
get_user_parser.set_defaults(func=handle_get_user)

get_all_users_parser = subparsers.add_parser('get-all-users', help="List all users")
get_all_users_parser.set_defaults(func=handle_get_all_users)

new_user_parser = subparsers.add_parser('new-user', help="Create new user")
new_user_parser.add_argument("--username", "-u", help="Username", required=True)
new_user_parser.add_argument("permissions", metavar="PERM", nargs="*", default=[], help="New permissions")
new_user_parser.set_defaults(func=handle_new_user)

delete_user_parser = subparsers.add_parser('delete-user', help="Delete user")
delete_user_parser.add_argument("--username", "-u", help="Username", required=True)
delete_user_parser.set_defaults(func=handle_delete_user)

db_usage_parser = subparsers.add_parser('get-db-usage', help="Database Usage")
db_usage_parser.set_defaults(func=handle_get_db_usage)

db_query_columns_parser = subparsers.add_parser('get-db-query-columns', help="Show columns that can be used in database queries")
db_query_columns_parser.set_defaults(func=handle_get_db_query_columns)

gc_status_parser = subparsers.add_parser("gc-status", help="Show garbage collection status")
gc_status_parser.set_defaults(func=handle_gc_status)

gc_mark_parser = subparsers.add_parser('gc-mark', help="Mark hashes to be kept for garbage collection")
gc_mark_parser.add_argument("mark", help="Mark for this garbage collection operation")
gc_mark_parser.add_argument("--where", "-w", metavar="KEY VALUE", nargs=2, action="append", default=[],
help="Keep entries in table where KEY == VALUE")
gc_mark_parser.set_defaults(func=handle_gc_mark)

gc_mark_parser_stream = subparsers.add_parser(
'gc-mark-stream',
help=(
"Mark multiple hashes to be retained for garbage collection. Input should be provided via stdin, "
"with each line formatted as key-value pairs separated by spaces, for example 'column1 foo column2 bar'."
)
)
gc_mark_parser_stream.add_argument("mark", help="Mark for this garbage collection operation")
gc_mark_parser_stream.set_defaults(func=handle_gc_mark_stream)

gc_sweep_parser = subparsers.add_parser('gc-sweep', help="Perform garbage collection and delete any entries that are not marked")
gc_sweep_parser.add_argument("mark", help="Mark for this garbage collection operation")
gc_sweep_parser.set_defaults(func=handle_gc_sweep)

unihash_exists_parser = subparsers.add_parser('unihash-exists', help="Check if a unihash is known to the server")
unihash_exists_parser.add_argument("--quiet", action="store_true", help="Don't print status. Instead, exit with 0 if unihash exists and 1 if it does not")
unihash_exists_parser.add_argument("unihash", help="Unihash to check")
unihash_exists_parser.set_defaults(func=handle_unihash_exists)

ping_parser = subparsers.add_parser('ping', help="Ping server")
ping_parser.add_argument("-n", "--count", type=int, help="Number of pings. Default is %(default)s", default=10)
ping_parser.add_argument("-q", "--quiet", action="store_true", help="Don't print each ping; only print results")
ping_parser.set_defaults(func=handle_ping)

args = parser.parse_args()

logger = logging.getLogger('hashserv')
@@ -424,30 +150,11 @@ def main():
console.setLevel(level)
logger.addHandler(console)

login = args.login
password = args.password

if login is None and args.netrc:
try:
n = netrc.netrc()
auth = n.authenticators(args.address)
if auth is not None:
login, _, password = auth
except FileNotFoundError:
pass
except netrc.NetrcParseError as e:
sys.stderr.write(f"Error parsing {e.filename}:{e.lineno}: {e.msg}\n")

func = getattr(args, 'func', None)
if func:
try:
with hashserv.create_client(args.address, login, password) as client:
if args.become:
client.become_user(args.become)
return func(args, client)
except bb.asyncrpc.InvokeError as e:
print(f"ERROR: {e}")
return 1
client = hashserv.create_client(args.address)

return func(args, client)

return 0
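The subcommands above are thin wrappers around the hashserv client library. A minimal sketch of driving that library directly, using only the calls already shown above (create_client(), ping() and get_unihash()); the server address, method name and taskhash are placeholders:

```
# Sketch: using the hashserv client library directly, with the same calls the
# command-line tool above wraps. The address, method and taskhash are placeholders.
import hashserv

ADDRESS = "ws://hashserv.example.invalid:8686"   # or "unix://./hashserve.sock"
METHOD = "stress.test.method"

with hashserv.create_client(ADDRESS) as client:
    client.ping()                                  # round-trip check, as in "ping"
    unihash = client.get_unihash(METHOD, "0" * 64) # as in the "get"/stress paths
    print(unihash or "no unihash recorded for that taskhash")
```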
@@ -11,169 +11,56 @@ import logging
import argparse
import sqlite3
import warnings

warnings.simplefilter("default")

sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), "lib"))
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib'))

import hashserv
from hashserv.server import DEFAULT_ANON_PERMS

VERSION = "1.0.0"

DEFAULT_BIND = "unix://./hashserve.sock"
DEFAULT_BIND = 'unix://./hashserve.sock'


def main():
parser = argparse.ArgumentParser(
description="Hash Equivalence Reference Server. Version=%s" % VERSION,
formatter_class=argparse.RawTextHelpFormatter,
epilog="""
The bind address may take one of the following formats:
unix://PATH - Bind to unix domain socket at PATH
ws://ADDRESS:PORT - Bind to websocket on ADDRESS:PORT
ADDRESS:PORT - Bind to raw TCP socket on ADDRESS:PORT
parser = argparse.ArgumentParser(description='Hash Equivalence Reference Server. Version=%s' % VERSION,
epilog='''The bind address is the path to a unix domain socket if it is
prefixed with "unix://". Otherwise, it is an IP address
and port in form ADDRESS:PORT. To bind to all addresses, leave
the ADDRESS empty, e.g. "--bind :8686". To bind to a specific
IPv6 address, enclose the address in "[]", e.g.
"--bind [::1]:8686"'''
)

To bind to all addresses, leave the ADDRESS empty, e.g. "--bind :8686" or
"--bind ws://:8686". To bind to a specific IPv6 address, enclose the address in
"[]", e.g. "--bind [::1]:8686" or "--bind ws://[::1]:8686"

Note that the default Anonymous permissions are designed to not break existing
server instances when upgrading, but are not particularly secure defaults. If
you want to use authentication, it is recommended that you use "--anon-perms
@read" to only give anonymous users read access, or "--anon-perms @none" to
give un-authenticated users no access at all.

Setting "--anon-perms @all" or "--anon-perms @user-admin" is not allowed, since
this would allow anonymous users to manage all users accounts, which is a bad
idea.

If you are using user authentication, you should run your server in websockets
mode with an SSL terminating load balancer in front of it (as this server does
not implement SSL). Otherwise all usernames and passwords will be transmitted
in the clear. When configured this way, clients can connect using a secure
websocket, as in "wss://SERVER:PORT"

The following permissions are supported by the server:

@none - No permissions
@read - The ability to read equivalent hashes from the server
@report - The ability to report equivalent hashes to the server
@db-admin - Manage the hash database(s). This includes cleaning the
database, removing hashes, etc.
@user-admin - The ability to manage user accounts. This includes, creating
users, deleting users, resetting login tokens, and assigning
permissions.
@all - All possible permissions, including any that may be added
in the future
""",
)

parser.add_argument(
"-b",
"--bind",
default=os.environ.get("HASHSERVER_BIND", DEFAULT_BIND),
help='Bind address (default $HASHSERVER_BIND, "%(default)s")',
)
parser.add_argument(
"-d",
"--database",
default=os.environ.get("HASHSERVER_DB", "./hashserv.db"),
help='Database file (default $HASHSERVER_DB, "%(default)s")',
)
parser.add_argument(
"-l",
"--log",
default=os.environ.get("HASHSERVER_LOG_LEVEL", "WARNING"),
help='Set logging level (default $HASHSERVER_LOG_LEVEL, "%(default)s")',
)
parser.add_argument(
"-u",
"--upstream",
default=os.environ.get("HASHSERVER_UPSTREAM", None),
help="Upstream hashserv to pull hashes from ($HASHSERVER_UPSTREAM)",
)
parser.add_argument(
"-r",
"--read-only",
action="store_true",
help="Disallow write operations from clients ($HASHSERVER_READ_ONLY)",
)
parser.add_argument(
"--db-username",
default=os.environ.get("HASHSERVER_DB_USERNAME", None),
help="Database username ($HASHSERVER_DB_USERNAME)",
)
parser.add_argument(
"--db-password",
default=os.environ.get("HASHSERVER_DB_PASSWORD", None),
help="Database password ($HASHSERVER_DB_PASSWORD)",
)
parser.add_argument(
"--anon-perms",
metavar="PERM[,PERM[,...]]",
default=os.environ.get("HASHSERVER_ANON_PERMS", ",".join(DEFAULT_ANON_PERMS)),
help='Permissions to give anonymous users (default $HASHSERVER_ANON_PERMS, "%(default)s")',
)
parser.add_argument(
"--admin-user",
default=os.environ.get("HASHSERVER_ADMIN_USER", None),
help="Create default admin user with name ADMIN_USER ($HASHSERVER_ADMIN_USER)",
)
parser.add_argument(
"--admin-password",
default=os.environ.get("HASHSERVER_ADMIN_PASSWORD", None),
help="Create default admin user with password ADMIN_PASSWORD ($HASHSERVER_ADMIN_PASSWORD)",
)
parser.add_argument(
"--reuseport",
action="store_true",
help="Enable SO_REUSEPORT, allowing multiple servers to bind to the same port for load balancing",
)
parser.add_argument('-b', '--bind', default=DEFAULT_BIND, help='Bind address (default "%(default)s")')
parser.add_argument('-d', '--database', default='./hashserv.db', help='Database file (default "%(default)s")')
parser.add_argument('-l', '--log', default='WARNING', help='Set logging level')
parser.add_argument('-u', '--upstream', help='Upstream hashserv to pull hashes from')
parser.add_argument('-r', '--read-only', action='store_true', help='Disallow write operations from clients')

args = parser.parse_args()

logger = logging.getLogger("hashserv")
logger = logging.getLogger('hashserv')

level = getattr(logging, args.log.upper(), None)
if not isinstance(level, int):
raise ValueError(
"Invalid log level: %s (Try ERROR/WARNING/INFO/DEBUG)" % args.log
)
raise ValueError('Invalid log level: %s' % args.log)

logger.setLevel(level)
console = logging.StreamHandler()
console.setLevel(level)
logger.addHandler(console)

read_only = (os.environ.get("HASHSERVER_READ_ONLY", "0") == "1") or args.read_only
if "," in args.anon_perms:
anon_perms = args.anon_perms.split(",")
else:
anon_perms = args.anon_perms.split()

server = hashserv.create_server(
args.bind,
args.database,
upstream=args.upstream,
read_only=read_only,
db_username=args.db_username,
db_password=args.db_password,
anon_perms=anon_perms,
admin_username=args.admin_user,
admin_password=args.admin_password,
reuseport=args.reuseport,
)
server = hashserv.create_server(args.bind, args.database, upstream=args.upstream, read_only=args.read_only)
server.serve_forever()
return 0


if __name__ == "__main__":
if __name__ == '__main__':
try:
ret = main()
except Exception:
ret = 1
import traceback

traceback.print_exc()
sys.exit(ret)
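The same create_server() call used by the script above can be made from Python directly. A minimal sketch with a placeholder bind address and database path, restricting anonymous users to read access as the help text recommends:

```
# Sketch: starting a hash equivalence server programmatically with the same
# create_server() keywords the script above passes. Paths/addresses are placeholders.
import hashserv

server = hashserv.create_server(
    "ws://:8686",            # bind: unix://PATH, ws://ADDRESS:PORT or ADDRESS:PORT
    "./hashserv.db",         # SQLite database file
    upstream=None,           # optional upstream hashserv to pull hashes from
    read_only=False,
    anon_perms=["@read"],    # restrict anonymous users to read access
)
server.serve_forever()
```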
@@ -18,14 +18,13 @@ import warnings
warnings.simplefilter("default")

bindir = os.path.dirname(__file__)
toolname = os.path.basename(__file__).split(".")[0]
topdir = os.path.dirname(bindir)
sys.path[0:0] = [os.path.join(topdir, 'lib')]

import bb.tinfoil
import bb.msg

logger = bb.msg.logger_create(toolname, sys.stdout)
logger = bb.msg.logger_create('bitbake-layers', sys.stdout)

def main():
parser = argparse.ArgumentParser(
@@ -34,7 +33,7 @@ def main():
add_help=False)
parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
parser.add_argument('-F', '--force', help='Forced execution: can be specified multiple times. -F will force add without recipe parse verification and -FF will additionally force the run without layer parsing.', action='count', default=0)
parser.add_argument('-F', '--force', help='Force add without recipe parse verification', action='store_true')
parser.add_argument('--color', choices=['auto', 'always', 'never'], default='auto', help='Colorize output (where %(metavar)s is %(choices)s)', metavar='COLOR')

global_args, unparsed_args = parser.parse_known_args()
@@ -58,23 +57,18 @@ def main():
level=logger.getEffectiveLevel())

plugins = []
with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
tinfoil.logger.setLevel(logger.getEffectiveLevel())

if global_args.force > 1:
bbpaths = []
else:
tinfoil.prepare(True)
bbpaths = tinfoil.config_data.getVar('BBPATH').split(':')

for path in ([topdir] + bbpaths):
pluginbasepath = {"bitbake-layers":'bblayers', 'bitbake-config-build':'bbconfigbuild'}[toolname]
pluginpath = os.path.join(path, 'lib', pluginbasepath)
tinfoil = bb.tinfoil.Tinfoil(tracking=True)
tinfoil.logger.setLevel(logger.getEffectiveLevel())
try:
tinfoil.prepare(True)
for path in ([topdir] +
tinfoil.config_data.getVar('BBPATH').split(':')):
pluginpath = os.path.join(path, 'lib', 'bblayers')
bb.utils.load_plugins(logger, plugins, pluginpath)

registered = False
for plugin in plugins:
if hasattr(plugin, 'tinfoil_init') and global_args.force <= 1:
if hasattr(plugin, 'tinfoil_init'):
plugin.tinfoil_init(tinfoil)
if hasattr(plugin, 'register_commands'):
registered = True
@@ -92,6 +86,8 @@ def main():
tinfoil.config_data.enableTracking()

return args.func(args)
finally:
tinfoil.shutdown()


if __name__ == "__main__":
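The updated entry point above serves both bitbake-layers and bitbake-config-build and picks the plugin namespace from the name it was invoked as. A small illustrative sketch of that dispatch (the invocation path below is hypothetical; only the mapping mirrors the script):

```
# Illustrative sketch of the tool-name based plugin dispatch used above.
import os

pluginbasepath = {"bitbake-layers": 'bblayers', 'bitbake-config-build': 'bbconfigbuild'}

toolname = os.path.basename("/usr/bin/bitbake-layers").split(".")[0]
# Directory name searched under each BBPATH entry for command plugins
print(os.path.join('lib', pluginbasepath[toolname]))
```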
@@ -7,97 +7,49 @@

import os
import sys,logging
import argparse
import optparse
import warnings
warnings.simplefilter("default")

sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), "lib"))
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),'lib'))

import prserv
import prserv.serv

VERSION = "2.0.0"
__version__="1.0.0"

PRHOST_DEFAULT="0.0.0.0"
PRHOST_DEFAULT='0.0.0.0'
PRPORT_DEFAULT=8585

def init_logger(logfile, loglevel):
numeric_level = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError("Invalid log level: %s" % loglevel)
FORMAT = "%(asctime)-15s %(message)s"
logging.basicConfig(level=numeric_level, filename=logfile, format=FORMAT)

def main():
parser = argparse.ArgumentParser(
description="BitBake PR Server. Version=%s" % VERSION,
formatter_class=argparse.RawTextHelpFormatter)
parser = optparse.OptionParser(
version="Bitbake PR Service Core version %s, %%prog version %s" % (prserv.__version__, __version__),
usage = "%prog < --start | --stop > [options]")

parser.add_argument(
"-f",
"--file",
default="prserv.sqlite3",
help="database filename (default: prserv.sqlite3)",
)
parser.add_argument(
"-l",
"--log",
default="prserv.log",
help="log filename(default: prserv.log)",
)
parser.add_argument(
"--loglevel",
default="INFO",
help="logging level, i.e. CRITICAL, ERROR, WARNING, INFO, DEBUG",
)
parser.add_argument(
"--start",
action="store_true",
help="start daemon",
)
parser.add_argument(
"--stop",
action="store_true",
help="stop daemon",
)
parser.add_argument(
"--host",
help="ip address to bind",
default=PRHOST_DEFAULT,
)
parser.add_argument(
"--port",
type=int,
default=PRPORT_DEFAULT,
help="port number (default: 8585)",
)
parser.add_argument(
"-r",
"--read-only",
action="store_true",
help="open database in read-only mode",
)
parser.add_argument(
"-u",
"--upstream",
default=os.environ.get("PRSERV_UPSTREAM", None),
help="Upstream PR service (host:port)",
)
parser.add_option("-f", "--file", help="database filename(default: prserv.sqlite3)", action="store",
dest="dbfile", type="string", default="prserv.sqlite3")
parser.add_option("-l", "--log", help="log filename(default: prserv.log)", action="store",
dest="logfile", type="string", default="prserv.log")
parser.add_option("--loglevel", help="logging level, i.e. CRITICAL, ERROR, WARNING, INFO, DEBUG",
action = "store", type="string", dest="loglevel", default = "INFO")
parser.add_option("--start", help="start daemon",
action="store_true", dest="start")
parser.add_option("--stop", help="stop daemon",
action="store_true", dest="stop")
parser.add_option("--host", help="ip address to bind", action="store",
dest="host", type="string", default=PRHOST_DEFAULT)
parser.add_option("--port", help="port number(default: 8585)", action="store",
dest="port", type="int", default=PRPORT_DEFAULT)
parser.add_option("-r", "--read-only", help="open database in read-only mode",
action="store_true")

args = parser.parse_args()
init_logger(os.path.abspath(args.log), args.loglevel)
options, args = parser.parse_args(sys.argv)
prserv.init_logger(os.path.abspath(options.logfile),options.loglevel)

if args.start:
ret=prserv.serv.start_daemon(
args.file,
args.host,
args.port,
os.path.abspath(args.log),
args.read_only,
args.upstream
)
elif args.stop:
ret=prserv.serv.stop_daemon(args.host, args.port)
if options.start:
ret=prserv.serv.start_daemon(options.dbfile, options.host, options.port,os.path.abspath(options.logfile), options.read_only)
elif options.stop:
ret=prserv.serv.stop_daemon(options.host, options.port)
else:
ret=parser.print_help()
return ret
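The argparse version above ends up calling prserv.serv.start_daemon() and stop_daemon() with the database file, host, port, log path, read-only flag and the new optional upstream service. A minimal sketch of that call sequence with placeholder values:

```
# Sketch: driving the PR server through the same API the argparse path above uses.
# Filenames, host and port are placeholders; the upstream argument is optional.
import os
import prserv.serv

dbfile, logfile = "prserv.sqlite3", "prserv.log"
host, port = "0.0.0.0", 8585

ret = prserv.serv.start_daemon(dbfile, host, port, os.path.abspath(logfile),
                               False,   # read_only
                               None)    # upstream PR service, e.g. "prs.example.invalid:8585"
# ... later ...
ret = prserv.serv.stop_daemon(host, port)
```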
@@ -15,7 +15,6 @@ import unittest
try:
import bb
import hashserv
import prserv
import layerindexlib
except RuntimeError as exc:
sys.exit(str(exc))
@@ -28,12 +27,12 @@ tests = ["bb.tests.codeparser",
"bb.tests.event",
"bb.tests.fetch",
"bb.tests.parse",
"bb.tests.persist_data",
"bb.tests.runqueue",
"bb.tests.siggen",
"bb.tests.utils",
"bb.tests.compression",
"hashserv.tests",
"prserv.tests",
"layerindexlib.tests.layerindexobj",
"layerindexlib.tests.restapi",
"layerindexlib.tests.cooker"]
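bitbake-selftest builds a unittest suite from the module names listed above. A small sketch of loading a subset of those names with the standard unittest loader, assuming bitbake's lib directory is importable:

```
# Sketch: load a subset of the test module names above as a unittest suite.
# Assumes bitbake's "lib" directory is on sys.path (as bitbake-selftest arranges).
import unittest

names = ["bb.tests.utils", "bb.tests.compression"]
suite = unittest.defaultTestLoader.loadTestsFromNames(names)
unittest.TextTestRunner(verbosity=2).run(suite)
```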
@@ -9,7 +9,6 @@ import os
import sys
import warnings
warnings.simplefilter("default")
warnings.filterwarnings("ignore", category=DeprecationWarning, message=".*use.of.fork.*may.lead.to.deadlocks.in.the.child.*")
import logging
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))

@@ -39,9 +38,9 @@ if xmlrpcinterface[0] == "None":
with open('/dev/null', 'r') as si:
os.dup2(si.fileno(), sys.stdin.fileno())

with open(logfile, 'a+') as so:
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(so.fileno(), sys.stderr.fileno())
so = open(logfile, 'a+')
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(so.fileno(), sys.stderr.fileno())

# Have stdout and stderr be the same so log output matches chronologically
# and there aren't two separate buffers
@@ -9,7 +9,6 @@ import os
import sys
import warnings
warnings.simplefilter("default")
warnings.filterwarnings("ignore", category=DeprecationWarning, message=".*use.of.fork.*may.lead.to.deadlocks.in.the.child.*")
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
from bb import fetch2
import logging
@@ -22,14 +21,9 @@ import traceback
import queue
import shlex
import subprocess
import fcntl
from multiprocessing import Lock
from threading import Thread

# Remove when we have a minimum of python 3.10
if not hasattr(fcntl, 'F_SETPIPE_SZ'):
fcntl.F_SETPIPE_SZ = 1031

bb.utils.check_system_locale()

# Users shouldn't be running this code directly
@@ -50,6 +44,7 @@ if sys.argv[1].startswith("decafbadbad"):
# updates to log files for use with tail
try:
if sys.stdout.name == '<stdout>':
import fcntl
fl = fcntl.fcntl(sys.stdout.fileno(), fcntl.F_GETFL)
fl |= os.O_SYNC
fcntl.fcntl(sys.stdout.fileno(), fcntl.F_SETFL, fl)
@@ -61,12 +56,6 @@ logger = logging.getLogger("BitBake")

worker_pipe = sys.stdout.fileno()
bb.utils.nonblockingfd(worker_pipe)
# Try to make the pipe buffers larger as it is much more efficient. If we can't
# e.g. out of buffer space (/proc/sys/fs/pipe-user-pages-soft) then just pass over.
try:
fcntl.fcntl(worker_pipe, fcntl.F_SETPIPE_SZ, 512 * 1024)
except:
pass
# Need to guard against multiprocessing being used in child processes
# and multiple processes trying to write to the parent at the same time
worker_pipe_lock = None
@@ -102,21 +91,21 @@ def worker_fire_prepickled(event):
worker_thread_exit = False

def worker_flush(worker_queue):
worker_queue_int = bytearray()
worker_queue_int = b""
global worker_pipe, worker_thread_exit

while True:
try:
worker_queue_int.extend(worker_queue.get(True, 1))
worker_queue_int = worker_queue_int + worker_queue.get(True, 1)
except queue.Empty:
pass
while (worker_queue_int or not worker_queue.empty()):
try:
(_, ready, _) = select.select([], [worker_pipe], [], 1)
if not worker_queue.empty():
worker_queue_int.extend(worker_queue.get())
worker_queue_int = worker_queue_int + worker_queue.get()
written = os.write(worker_pipe, worker_queue_int)
del worker_queue_int[0:written]
worker_queue_int = worker_queue_int[written:]
except (IOError, OSError) as e:
if e.errno != errno.EAGAIN and e.errno != errno.EPIPE:
raise
@@ -162,7 +151,6 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
taskhash = runtask['taskhash']
unihash = runtask['unihash']
appends = runtask['appends']
layername = runtask['layername']
taskdepdata = runtask['taskdepdata']
quieterrors = runtask['quieterrors']
# We need to setup the environment BEFORE the fork, since
@@ -194,7 +182,7 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not dry_run:
fakeroot = True
envvars = (runtask['fakerootenv'] or "").split()
for key, value in (var.split('=',1) for var in envvars):
for key, value in (var.split('=') for var in envvars):
envbackup[key] = os.environ.get(key)
os.environ[key] = value
fakeenv[key] = value
@@ -206,7 +194,7 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
(fn, taskname, ', '.join(fakedirs)))
else:
envvars = (runtask['fakerootnoenv'] or "").split()
for key, value in (var.split('=',1) for var in envvars):
for key, value in (var.split('=') for var in envvars):
envbackup[key] = os.environ.get(key)
os.environ[key] = value
fakeenv[key] = value
@@ -248,13 +236,11 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
# Let SIGHUP exit as SIGTERM
signal.signal(signal.SIGHUP, sigterm_handler)

# No stdin & stdout
# stdout is used as a status report channel and must not be used by child processes.
dumbio = os.open(os.devnull, os.O_RDWR)
os.dup2(dumbio, sys.stdin.fileno())
os.dup2(dumbio, sys.stdout.fileno())
# No stdin
newsi = os.open(os.devnull, os.O_RDWR)
os.dup2(newsi, sys.stdin.fileno())

if umask is not None:
if umask:
os.umask(umask)

try:
@@ -276,7 +262,7 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
bb.parse.siggen.set_taskhashes(workerdata["newhashes"])
ret = 0

the_data = databuilder.parseRecipe(fn, appends, layername)
the_data = databuilder.parseRecipe(fn, appends)
the_data.setVar('BB_TASKHASH', taskhash)
the_data.setVar('BB_UNIHASH', unihash)
bb.parse.siggen.setup_datacache_from_datastore(fn, the_data)
@@ -318,10 +304,6 @@ def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask):
if not quieterrors:
logger.critical(traceback.format_exc())
os._exit(1)

sys.stdout.flush()
sys.stderr.flush()

try:
if dry_run:
return 0
@@ -363,12 +345,12 @@ class runQueueWorkerPipe():
|
||||
if pipeout:
|
||||
pipeout.close()
|
||||
bb.utils.nonblockingfd(self.input)
|
||||
self.queue = bytearray()
|
||||
self.queue = b""
|
||||
|
||||
def read(self):
|
||||
start = len(self.queue)
|
||||
try:
|
||||
self.queue.extend(self.input.read(512*1024) or b"")
|
||||
self.queue = self.queue + (self.input.read(102400) or b"")
|
||||
except (OSError, IOError) as e:
|
||||
if e.errno != errno.EAGAIN:
|
||||
raise
|
||||
@@ -396,7 +378,7 @@ class BitbakeWorker(object):
|
||||
def __init__(self, din):
|
||||
self.input = din
|
||||
bb.utils.nonblockingfd(self.input)
|
||||
self.queue = bytearray()
|
||||
self.queue = b""
|
||||
self.cookercfg = None
|
||||
self.databuilder = None
|
||||
self.data = None
|
||||
@@ -430,7 +412,7 @@ class BitbakeWorker(object):
|
||||
if len(r) == 0:
|
||||
# EOF on pipe, server must have terminated
|
||||
self.sigterm_exception(signal.SIGTERM, None)
|
||||
self.queue.extend(r)
|
||||
self.queue = self.queue + r
|
||||
except (OSError, IOError):
|
||||
pass
|
||||
if len(self.queue):
|
||||
@@ -450,30 +432,18 @@ class BitbakeWorker(object):
|
||||
while self.process_waitpid():
|
||||
continue
|
||||
|
||||
|
||||
def handle_item(self, item, func):
|
||||
opening_tag = b"<" + item + b">"
|
||||
if not self.queue.startswith(opening_tag):
|
||||
return
|
||||
|
||||
tag_len = len(opening_tag)
|
||||
if len(self.queue) < tag_len + 4:
|
||||
# we need to receive more data
|
||||
return
|
||||
header = self.queue[tag_len:tag_len + 4]
|
||||
payload_len = int.from_bytes(header, 'big')
|
||||
# closing tag has length (tag_len + 1)
|
||||
if len(self.queue) < tag_len * 2 + 1 + payload_len:
|
||||
# we need to receive more data
|
||||
return
|
||||
|
||||
index = self.queue.find(b"</" + item + b">")
|
||||
if index != -1:
|
||||
try:
|
||||
func(self.queue[(tag_len + 4):index])
|
||||
except pickle.UnpicklingError:
|
||||
workerlog_write("Unable to unpickle data: %s\n" % ":".join("{:02x}".format(c) for c in self.queue))
|
||||
raise
|
||||
self.queue = self.queue[(index + len(b"</") + len(item) + len(b">")):]
|
||||
if self.queue.startswith(b"<" + item + b">"):
|
||||
index = self.queue.find(b"</" + item + b">")
|
||||
while index != -1:
|
||||
try:
|
||||
func(self.queue[(len(item) + 2):index])
|
||||
except pickle.UnpicklingError:
|
||||
workerlog_write("Unable to unpickle data: %s\n" % ":".join("{:02x}".format(c) for c in self.queue))
|
||||
raise
|
||||
self.queue = self.queue[(index + len(item) + 3):]
|
||||
index = self.queue.find(b"</" + item + b">")
|
||||
|
||||
def handle_cookercfg(self, data):
|
||||
self.cookercfg = pickle.loads(data)
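The reworked ``handle_item()`` expects a length-prefixed frame: the opening ``<item>`` tag, a 4-byte big-endian payload length, the payload, then the closing tag. Below is a self-contained sketch of the same framing idea; it is simplified relative to the worker code (the closing-tag position is computed from the length rather than searched for), and the encoder is an assumption written to match the parser, not code taken from the diff:

    import pickle

    def encode_frame(item: bytes, payload: bytes) -> bytes:
        # <item> + 4-byte big-endian length + payload + </item>
        return b"<" + item + b">" + len(payload).to_bytes(4, 'big') + payload + b"</" + item + b">"

    def decode_frame(item: bytes, queue: bytes):
        opening = b"<" + item + b">"
        if not queue.startswith(opening) or len(queue) < len(opening) + 4:
            return None, queue                       # need more data
        payload_len = int.from_bytes(queue[len(opening):len(opening) + 4], 'big')
        end = len(opening) + 4 + payload_len + len(opening) + 1   # closing tag is one byte longer
        if len(queue) < end:
            return None, queue                       # need more data
        payload = queue[len(opening) + 4:len(opening) + 4 + payload_len]
        return payload, queue[end:]

    frame = encode_frame(b"event", pickle.dumps({"msg": "hello"}))
    payload, rest = decode_frame(b"event", frame)
    assert pickle.loads(payload) == {"msg": "hello"} and rest == b""

The explicit length lets the reader know whether a complete message has arrived before it starts looking for the closing tag, avoiding repeated scans of a partially received payload.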
|
||||
|
||||
@@ -24,17 +24,15 @@ warnings.simplefilter("default")
|
||||
version = 1.0
|
||||
|
||||
|
||||
git_cmd = ['git', '-c', 'safe.bareRepository=all']
|
||||
|
||||
def main():
|
||||
if sys.version_info < (3, 4, 0):
|
||||
sys.exit('Python 3.4 or greater is required')
|
||||
|
||||
git_dir = check_output(git_cmd + ['rev-parse', '--git-dir']).rstrip()
|
||||
git_dir = check_output(['git', 'rev-parse', '--git-dir']).rstrip()
|
||||
shallow_file = os.path.join(git_dir, 'shallow')
|
||||
if os.path.exists(shallow_file):
|
||||
try:
|
||||
check_output(git_cmd + ['fetch', '--unshallow'])
|
||||
check_output(['git', 'fetch', '--unshallow'])
|
||||
except subprocess.CalledProcessError:
|
||||
try:
|
||||
os.unlink(shallow_file)
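Every git call in the script is now built from the shared ``git_cmd`` prefix so that ``-c safe.bareRepository=all`` is applied consistently. A small helper in the same spirit (a hypothetical refactor for illustration only; ``git_cmd`` is the list defined in the hunk above):

    import subprocess

    git_cmd = ['git', '-c', 'safe.bareRepository=all']

    def git(*args, **kwargs):
        """Run a git subcommand with the shared configuration prefix."""
        return subprocess.check_output(git_cmd + list(args), text=True, **kwargs)

    # e.g. git('rev-parse', '--git-dir') rather than repeating the prefix at each call site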
|
||||
@@ -43,21 +41,21 @@ def main():
|
||||
raise
|
||||
|
||||
args = process_args()
|
||||
revs = check_output(git_cmd + ['rev-list'] + args.revisions).splitlines()
|
||||
revs = check_output(['git', 'rev-list'] + args.revisions).splitlines()
|
||||
|
||||
make_shallow(shallow_file, args.revisions, args.refs)
|
||||
|
||||
ref_revs = check_output(git_cmd + ['rev-list'] + args.refs).splitlines()
|
||||
ref_revs = check_output(['git', 'rev-list'] + args.refs).splitlines()
|
||||
remaining_history = set(revs) & set(ref_revs)
|
||||
for rev in remaining_history:
|
||||
if check_output(git_cmd + ['rev-parse', '{}^@'.format(rev)]):
|
||||
if check_output(['git', 'rev-parse', '{}^@'.format(rev)]):
|
||||
sys.exit('Error: %s was not made shallow' % rev)
|
||||
|
||||
filter_refs(args.refs)
|
||||
|
||||
if args.shrink:
|
||||
shrink_repo(git_dir)
|
||||
subprocess.check_call(git_cmd + ['fsck', '--unreachable'])
|
||||
subprocess.check_call(['git', 'fsck', '--unreachable'])
|
||||
|
||||
|
||||
def process_args():
|
||||
@@ -74,12 +72,12 @@ def process_args():
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.refs:
|
||||
args.refs = check_output(git_cmd + ['rev-parse', '--symbolic-full-name'] + args.refs).splitlines()
|
||||
args.refs = check_output(['git', 'rev-parse', '--symbolic-full-name'] + args.refs).splitlines()
|
||||
else:
|
||||
args.refs = get_all_refs(lambda r, t, tt: t == 'commit' or tt == 'commit')
|
||||
|
||||
args.refs = list(filter(lambda r: not r.endswith('/HEAD'), args.refs))
|
||||
args.revisions = check_output(git_cmd + ['rev-parse'] + ['%s^{}' % i for i in args.revisions]).splitlines()
|
||||
args.revisions = check_output(['git', 'rev-parse'] + ['%s^{}' % i for i in args.revisions]).splitlines()
|
||||
return args
|
||||
|
||||
|
||||
@@ -97,7 +95,7 @@ def make_shallow(shallow_file, revisions, refs):
|
||||
|
||||
def get_all_refs(ref_filter=None):
|
||||
"""Return all the existing refs in this repository, optionally filtering the refs."""
|
||||
ref_output = check_output(git_cmd + ['for-each-ref', '--format=%(refname)\t%(objecttype)\t%(*objecttype)'])
|
||||
ref_output = check_output(['git', 'for-each-ref', '--format=%(refname)\t%(objecttype)\t%(*objecttype)'])
|
||||
ref_split = [tuple(iter_extend(l.rsplit('\t'), 3)) for l in ref_output.splitlines()]
|
||||
if ref_filter:
|
||||
ref_split = (e for e in ref_split if ref_filter(*e))
|
||||
@@ -115,8 +113,8 @@ def filter_refs(refs):
|
||||
all_refs = get_all_refs()
|
||||
to_remove = set(all_refs) - set(refs)
|
||||
if to_remove:
|
||||
check_output(git_cmd + ['update-ref', '--no-deref', '--stdin', '-z'],
|
||||
input=''.join('delete ' + l + '\0\0' for l in to_remove))
|
||||
check_output(['xargs', '-0', '-n', '1', 'git', 'update-ref', '-d', '--no-deref'],
|
||||
input=''.join(l + '\0' for l in to_remove))
|
||||
|
||||
|
||||
def follow_history_intersections(revisions, refs):
|
||||
@@ -128,7 +126,7 @@ def follow_history_intersections(revisions, refs):
|
||||
if rev in seen:
|
||||
continue
|
||||
|
||||
parents = check_output(git_cmd + ['rev-parse', '%s^@' % rev]).splitlines()
|
||||
parents = check_output(['git', 'rev-parse', '%s^@' % rev]).splitlines()
|
||||
|
||||
yield rev
|
||||
seen.add(rev)
|
||||
@@ -136,12 +134,12 @@ def follow_history_intersections(revisions, refs):
|
||||
if not parents:
|
||||
continue
|
||||
|
||||
check_refs = check_output(git_cmd + ['merge-base', '--independent'] + sorted(refs)).splitlines()
|
||||
check_refs = check_output(['git', 'merge-base', '--independent'] + sorted(refs)).splitlines()
|
||||
for parent in parents:
|
||||
for ref in check_refs:
|
||||
print("Checking %s vs %s" % (parent, ref))
|
||||
try:
|
||||
merge_base = check_output(git_cmd + ['merge-base', parent, ref]).rstrip()
|
||||
merge_base = check_output(['git', 'merge-base', parent, ref]).rstrip()
|
||||
except subprocess.CalledProcessError:
|
||||
continue
|
||||
else:
|
||||
@@ -161,14 +159,14 @@ def iter_except(func, exception, start=None):
|
||||
|
||||
def shrink_repo(git_dir):
|
||||
"""Shrink the newly shallow repository, removing the unreachable objects."""
|
||||
subprocess.check_call(git_cmd + ['reflog', 'expire', '--expire-unreachable=now', '--all'])
|
||||
subprocess.check_call(git_cmd + ['repack', '-ad'])
|
||||
subprocess.check_call(['git', 'reflog', 'expire', '--expire-unreachable=now', '--all'])
|
||||
subprocess.check_call(['git', 'repack', '-ad'])
|
||||
try:
|
||||
os.unlink(os.path.join(git_dir, 'objects', 'info', 'alternates'))
|
||||
except OSError as exc:
|
||||
if exc.errno != errno.ENOENT:
|
||||
raise
|
||||
subprocess.check_call(git_cmd + ['prune', '--expire', 'now'])
|
||||
subprocess.check_call(['git', 'prune', '--expire', 'now'])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -84,7 +84,7 @@ webserverStartAll()
|
||||
echo "Starting webserver..."
|
||||
|
||||
$MANAGE runserver --noreload "$ADDR_PORT" \
|
||||
</dev/null >>${TOASTER_LOGS_DIR}/web.log 2>&1 \
|
||||
</dev/null >>${BUILDDIR}/toaster_web.log 2>&1 \
|
||||
& echo $! >${BUILDDIR}/.toastermain.pid
|
||||
|
||||
sleep 1
|
||||
@@ -181,14 +181,6 @@ WEBSERVER=1
|
||||
export TOASTER_BUILDSERVER=1
|
||||
ADDR_PORT="localhost:8000"
|
||||
TOASTERDIR=`dirname $BUILDDIR`
|
||||
# ${BUILDDIR}/toaster_logs/ became the default location for toaster logs
|
||||
# This is needed for implemented django-log-viewer: https://pypi.org/project/django-log-viewer/
|
||||
# If the directory does not exist, create it.
|
||||
TOASTER_LOGS_DIR="${BUILDDIR}/toaster_logs/"
|
||||
if [ ! -d $TOASTER_LOGS_DIR ]
|
||||
then
|
||||
mkdir $TOASTER_LOGS_DIR
|
||||
fi
|
||||
unset CMD
|
||||
for param in $*; do
|
||||
case $param in
|
||||
@@ -307,7 +299,7 @@ case $CMD in
|
||||
export BITBAKE_UI='toasterui'
|
||||
if [ $TOASTER_BUILDSERVER -eq 1 ] ; then
|
||||
$MANAGE runbuilds \
|
||||
</dev/null >>${TOASTER_LOGS_DIR}/toaster_runbuilds.log 2>&1 \
|
||||
</dev/null >>${BUILDDIR}/toaster_runbuilds.log 2>&1 \
|
||||
& echo $! >${BUILDDIR}/.runbuilds.pid
|
||||
else
|
||||
echo "Toaster build server not started."
|
||||
|
||||
@@ -30,23 +30,79 @@ sys.path.insert(0, join(dirname(dirname(abspath(__file__))), 'lib'))
|
||||
|
||||
import bb.cooker
|
||||
from bb.ui import toasterui
|
||||
from bb.ui import eventreplay
|
||||
|
||||
class EventPlayer:
|
||||
"""Emulate a connection to a bitbake server."""
|
||||
|
||||
def __init__(self, eventfile, variables):
|
||||
self.eventfile = eventfile
|
||||
self.variables = variables
|
||||
self.eventmask = []
|
||||
|
||||
def waitEvent(self, _timeout):
|
||||
"""Read event from the file."""
|
||||
line = self.eventfile.readline().strip()
|
||||
if not line:
|
||||
return
|
||||
try:
|
||||
event_str = json.loads(line)['vars'].encode('utf-8')
|
||||
event = pickle.loads(codecs.decode(event_str, 'base64'))
|
||||
event_name = "%s.%s" % (event.__module__, event.__class__.__name__)
|
||||
if event_name not in self.eventmask:
|
||||
return
|
||||
return event
|
||||
except ValueError as err:
|
||||
print("Failed loading ", line)
|
||||
raise err
|
||||
|
||||
def runCommand(self, command_line):
|
||||
"""Emulate running a command on the server."""
|
||||
name = command_line[0]
|
||||
|
||||
if name == "getVariable":
|
||||
var_name = command_line[1]
|
||||
variable = self.variables.get(var_name)
|
||||
if variable:
|
||||
return variable['v'], None
|
||||
return None, "Missing variable %s" % var_name
|
||||
|
||||
elif name == "getAllKeysWithFlags":
|
||||
dump = {}
|
||||
flaglist = command_line[1]
|
||||
for key, val in self.variables.items():
|
||||
try:
|
||||
if not key.startswith("__"):
|
||||
dump[key] = {
|
||||
'v': val['v'],
|
||||
'history' : val['history'],
|
||||
}
|
||||
for flag in flaglist:
|
||||
dump[key][flag] = val[flag]
|
||||
except Exception as err:
|
||||
print(err)
|
||||
return (dump, None)
|
||||
|
||||
elif name == 'setEventMask':
|
||||
self.eventmask = command_line[-1]
|
||||
return True, None
|
||||
|
||||
else:
|
||||
raise Exception("Command %s not implemented" % command_line[0])
|
||||
|
||||
def getEventHandle(self):
|
||||
"""
|
||||
This method is called by toasterui.
|
||||
The return value is passed to self.runCommand but not used there.
|
||||
"""
|
||||
pass
|
||||
|
||||
def main(argv):
|
||||
with open(argv[-1]) as eventfile:
|
||||
# load variables from the first line
|
||||
variables = None
|
||||
while line := eventfile.readline().strip():
|
||||
try:
|
||||
variables = json.loads(line)['allvariables']
|
||||
break
|
||||
except (KeyError, json.JSONDecodeError):
|
||||
continue
|
||||
if not variables:
|
||||
sys.exit("Cannot find allvariables entry in event log file %s" % argv[-1])
|
||||
eventfile.seek(0)
|
||||
variables = json.loads(eventfile.readline().strip())['allvariables']
|
||||
|
||||
params = namedtuple('ConfigParams', ['observe_only'])(True)
|
||||
player = eventreplay.EventPlayer(eventfile, variables)
|
||||
player = EventPlayer(eventfile, variables)
|
||||
|
||||
return toasterui.main(player, player, params)
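The replay file format implied by ``EventPlayer.waitEvent()`` and the new ``main()`` is one JSON object per line: the variable dump stored under ``allvariables`` on a line near the top, and each event stored as a base64-encoded pickle in the ``vars`` field of its own line. A sketch of producing and consuming one such event line; the writer side is an assumption mirroring the reader shown above, and ``DummyEvent`` is purely illustrative:

    import codecs, json, pickle

    class DummyEvent:
        def __init__(self, msg):
            self.msg = msg

    # writer side (assumed symmetric to the reader in the diff)
    event = DummyEvent("build started")
    encoded = codecs.encode(pickle.dumps(event), 'base64').decode('ascii')
    line = json.dumps({'vars': encoded})

    # reader side, as in EventPlayer.waitEvent()
    event_str = json.loads(line)['vars'].encode('utf-8')
    restored = pickle.loads(codecs.decode(event_str, 'base64'))
    print(type(restored).__name__, restored.msg)   # DummyEvent build started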
|
||||
|
||||
|
||||
@@ -1,40 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
# This script is to be called by b4:
|
||||
# - through b4.send-auto-cc-cmd with "send-auto-cc-cmd" as first argument,
|
||||
#
|
||||
# When send-auto-cc-cmd is passed:
|
||||
#
|
||||
# This returns the list of Cc recipients for a patch.
|
||||
#
|
||||
# This script takes as stdin a patch.
|
||||
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
cmd = sys.argv[1]
|
||||
if cmd != "send-auto-cc-cmd":
|
||||
sys.exit(-1)
|
||||
|
||||
patch = sys.stdin.read()
|
||||
|
||||
if subprocess.call(["which", "lsdiff"], stdout=subprocess.DEVNULL) != 0:
|
||||
print("lsdiff missing from host, please install patchutils")
|
||||
sys.exit(-1)
|
||||
|
||||
files = subprocess.check_output(["lsdiff", "--strip-match=1", "--strip=1", "--include=doc/*"],
|
||||
input=patch, text=True)
|
||||
if len(files):
|
||||
print("docs@lists.yoctoproject.org")
|
||||
else:
|
||||
# Handle patches made with --no-prefix
|
||||
files = subprocess.check_output(["lsdiff", "--include=doc/*"],
|
||||
input=patch, text=True)
|
||||
if len(files):
|
||||
print("docs@lists.yoctoproject.org")
|
||||
|
||||
sys.exit(0)
|
||||
@@ -11,7 +11,7 @@

FROM alpine:3.13.1

RUN apk add --no-cache python3 libgcc
RUN apk add --no-cache python3

COPY bin/bitbake-hashserv /opt/bbhashserv/bin/
COPY lib/hashserv /opt/bbhashserv/lib/hashserv/

@@ -11,18 +11,10 @@ if &compatible || version < 600 || exists("b:loaded_bitbake_plugin")
|
||||
endif
|
||||
|
||||
" .bb, .bbappend and .bbclass
|
||||
au BufNewFile,BufRead *.{bb,bbappend,bbclass} setfiletype bitbake
|
||||
au BufNewFile,BufRead *.{bb,bbappend,bbclass} set filetype=bitbake
|
||||
|
||||
" .inc -- meanwhile included upstream
|
||||
if !has("patch-9.0.0055")
|
||||
au BufNewFile,BufRead *.inc call s:BBIncDetect()
|
||||
def s:BBIncDetect()
|
||||
l:lines = getline(1) .. getline(2) .. getline(3)
|
||||
if l:lines =~# '\<\%(require\|inherit\)\>' || lines =~# '[A-Z][A-Za-z0-9_:${}]*\s\+\%(??\|[?:+]\)\?= '
|
||||
set filetype bitbake
|
||||
endif
|
||||
enddef
|
||||
endif
|
||||
" .inc
|
||||
au BufNewFile,BufRead *.inc set filetype=bitbake
|
||||
|
||||
" .conf
|
||||
au BufNewFile,BufRead *.conf
|
||||
|
||||
@@ -40,7 +40,7 @@ set cpo&vim
|
||||
|
||||
let s:maxoff = 50 " maximum number of lines to look backwards for ()
|
||||
|
||||
function! GetBBPythonIndent(lnum)
|
||||
function GetPythonIndent(lnum)
|
||||
|
||||
" If this line is explicitly joined: If the previous line was also joined,
|
||||
" line it up with that one, otherwise add two 'shiftwidth'
|
||||
@@ -257,7 +257,7 @@ let b:did_indent = 1
|
||||
setlocal indentkeys+=0\"
|
||||
|
||||
|
||||
function! BitbakeIndent(lnum)
|
||||
function BitbakeIndent(lnum)
|
||||
if !has('syntax_items')
|
||||
return -1
|
||||
endif
|
||||
@@ -315,7 +315,7 @@ function! BitbakeIndent(lnum)
|
||||
endif
|
||||
|
||||
if index(["bbPyDefRegion", "bbPyFuncRegion"], name) != -1
|
||||
let ret = GetBBPythonIndent(a:lnum)
|
||||
let ret = GetPythonIndent(a:lnum)
|
||||
" Should normally always be indented by at least one shiftwidth; but allow
|
||||
" return of -1 (defer to autoindent) or -2 (force indent to 0)
|
||||
if ret == 0
|
||||
|
||||
@@ -63,14 +63,13 @@ syn region bbVarFlagFlag matchgroup=bbArrayBrackets start="\[" end="\]\s*
|
||||
|
||||
" Includes and requires
|
||||
syn keyword bbInclude inherit include require contained
|
||||
syn match bbIncludeRest ".*$" contained contains=bbString,bbVarDeref,bbVarPyValue
|
||||
syn match bbIncludeRest ".*$" contained contains=bbString,bbVarDeref
|
||||
syn match bbIncludeLine "^\(inherit\|include\|require\)\s\+" contains=bbInclude nextgroup=bbIncludeRest
|
||||
|
||||
" Add taks and similar
|
||||
syn keyword bbStatement addtask deltask addhandler after before EXPORT_FUNCTIONS contained
|
||||
syn match bbStatementRest /[^\\]*$/ skipwhite contained contains=bbStatement,bbVarDeref,bbVarPyValue
|
||||
syn region bbStatementRestCont start=/.*\\$/ end=/^[^\\]*$/ contained contains=bbStatement,bbVarDeref,bbVarPyValue,bbContinue keepend
|
||||
syn match bbStatementLine "^\(addtask\|deltask\|addhandler\|after\|before\|EXPORT_FUNCTIONS\)\s\+" contains=bbStatement nextgroup=bbStatementRest,bbStatementRestCont
|
||||
syn match bbStatementRest ".*$" skipwhite contained contains=bbStatement
|
||||
syn match bbStatementLine "^\(addtask\|deltask\|addhandler\|after\|before\|EXPORT_FUNCTIONS\)\s\+" contains=bbStatement nextgroup=bbStatementRest
|
||||
|
||||
" OE Important Functions
|
||||
syn keyword bbOEFunctions do_fetch do_unpack do_patch do_configure do_compile do_stage do_install do_package contained
|
||||
@@ -123,7 +122,6 @@ hi def link bbPyFlag Type
|
||||
hi def link bbPyDef Statement
|
||||
hi def link bbStatement Statement
|
||||
hi def link bbStatementRest Identifier
|
||||
hi def link bbStatementRestCont Identifier
|
||||
hi def link bbOEFunctions Special
|
||||
hi def link bbVarPyValue PreProc
|
||||
hi def link bbOverrideOperator Operator
|
||||
|
||||
@@ -47,8 +47,8 @@ To install all required packages run:

To build the documentation locally, run:

    $ cd doc
    $ make html
    $ cd documentation
    $ make -f Makefile.sphinx html

The resulting HTML index page will be _build/html/index.html, and you
can browse your own copy of the locally generated documentation with

@@ -586,11 +586,10 @@ or possibly those defined in the metadata/signature handler itself. The
simplest parameter to pass is "none", which causes a set of signature
information to be written out into ``STAMPS_DIR`` corresponding to the
targets specified. The other currently available parameter is
"printdiff", which causes BitBake to try to establish the most recent
"printdiff", which causes BitBake to try to establish the closest
signature match it can (e.g. in the sstate cache) and then run
compare the matched signatures to determine the stamps and delta
where these two stamp trees diverge. This can be used to determine why
tasks need to be re-run in situations where that is not expected.
``bitbake-diffsigs`` over the matches to determine the stamps and delta
where these two stamp trees diverge.

.. note::


@@ -476,14 +476,6 @@ Here are some example URLs::
easy to share metadata without removing passwords. SSH keys, ``~/.netrc``
and ``~/.ssh/config`` files can be used as alternatives.

Using tags with the git fetcher may cause surprising behaviour. Bitbake needs to
resolve the tag to a specific revision and to do that, it has to connect to and use
the upstream repository. This is because the revision the tags point at can change and
we've seen cases of this happening in well known public repositories. This can mean
many more network connections than expected and recipes may be reparsed at every build.
Source mirrors will also be bypassed as the upstream repository is the only source
of truth to resolve the revision accurately. For these reasons, whilst the fetcher
can support tags, we recommend being specific about revisions in recipes.

.. _gitsm-fetcher:

@@ -696,41 +688,6 @@ Here is an example URL::
|
||||
|
||||
It can also be used when setting mirrors definitions using the :term:`PREMIRRORS` variable.
|
||||
|
||||
.. _gcp-fetcher:
|
||||
|
||||
GCP Fetcher (``gs://``)
|
||||
--------------------------
|
||||
|
||||
This submodule fetches data from a
|
||||
`Google Cloud Storage Bucket <https://cloud.google.com/storage/docs/buckets>`__.
|
||||
It uses the `Google Cloud Storage Python Client <https://cloud.google.com/python/docs/reference/storage/latest>`__
|
||||
to check the status of objects in the bucket and download them.
|
||||
The use of the Python client makes it substantially faster than using command
|
||||
line tools such as gsutil.
|
||||
|
||||
The fetcher requires the Google Cloud Storage Python Client to be installed, along
|
||||
with the gsutil tool.
|
||||
|
||||
The fetcher requires that the machine has valid credentials for accessing the
|
||||
chosen bucket. Instructions for authentication can be found in the
|
||||
`Google Cloud documentation <https://cloud.google.com/docs/authentication/provide-credentials-adc#local-dev>`__.
|
||||
|
||||
If it is used from the OpenEmbedded build system, the fetcher can be used for
|
||||
fetching sstate artifacts from a GCS bucket by specifying the
|
||||
``SSTATE_MIRRORS`` variable as shown below::
|
||||
|
||||
SSTATE_MIRRORS ?= "\
|
||||
file://.* gs://<bucket name>/PATH \
|
||||
"
|
||||
|
||||
The fetcher can also be used in recipes::
|
||||
|
||||
SRC_URI = "gs://<bucket name>/<foo_container>/<bar_file>"
|
||||
|
||||
However, the checksum of the file should be also be provided::
|
||||
|
||||
SRC_URI[sha256sum] = "<sha256 string>"
|
||||
|
||||
.. _crate-fetcher:
|
||||
|
||||
Crate Fetcher (``crate://``)
|
||||
@@ -834,8 +791,6 @@ Fetch submodules also exist for the following:
|
||||
|
||||
- OSC (``osc://``)
|
||||
|
||||
- S3 (``s3://``)
|
||||
|
||||
- Secure FTP (``sftp://``)
|
||||
|
||||
- Secure Shell (``ssh://``)
|
||||
|
||||
@@ -209,12 +209,12 @@ Following is the complete "Hello World" example.
|
||||
|
||||
.. note::
|
||||
|
||||
Without a value for :term:`PN`, the variables :term:`STAMP`, :term:`T`, and :term:`B`, prevent more
|
||||
than one recipe from working. You can fix this by either setting :term:`PN` to
|
||||
Without a value for PN , the variables STAMP , T , and B , prevent more
|
||||
than one recipe from working. You can fix this by either setting PN to
|
||||
have a value similar to what OpenEmbedded and BitBake use in the default
|
||||
``bitbake.conf`` file (see previous example). Or, by manually updating each
|
||||
recipe to set :term:`PN`. You will also need to include :term:`PN` as part of the :term:`STAMP`,
|
||||
:term:`T`, and :term:`B` variable definitions in the ``local.conf`` file.
|
||||
bitbake.conf file (see previous example). Or, by manually updating each
|
||||
recipe to set PN . You will also need to include PN as part of the STAMP
|
||||
, T , and B variable definitions in the local.conf file.
|
||||
|
||||
The ``TMPDIR`` variable establishes a directory that BitBake uses
|
||||
for build output and intermediate files other than the cached
|
||||
@@ -319,9 +319,9 @@ Following is the complete "Hello World" example.
|
||||
|
||||
.. note::
|
||||
|
||||
We are setting both ``LAYERSERIES_CORENAMES`` and :term:`LAYERSERIES_COMPAT` in this particular case, because we
|
||||
We are setting both LAYERSERIES_CORENAMES and LAYERSERIES_COMPAT in this particular case, because we
|
||||
are using bitbake without OpenEmbedded.
|
||||
You should usually just use :term:`LAYERSERIES_COMPAT` to specify the OE-Core versions for which your layer
|
||||
You should usually just use LAYERSERIES_COMPAT to specify the OE-Core versions for which your layer
|
||||
is compatible, and add the meta-openembedded layer to your project.
|
||||
|
||||
You need to create the recipe file next. Inside your layer at the
|
||||
|
||||
@@ -349,84 +349,40 @@ Usage and syntax
|
||||
Following is the usage and syntax for BitBake::
|
||||
|
||||
$ bitbake -h
|
||||
usage: bitbake [-s] [-e] [-g] [-u UI] [--version] [-h] [-f] [-c CMD]
|
||||
[-C INVALIDATE_STAMP] [--runall RUNALL] [--runonly RUNONLY]
|
||||
[--no-setscene] [--skip-setscene] [--setscene-only] [-n] [-p]
|
||||
[-k] [-P] [-S SIGNATURE_HANDLER] [--revisions-changed]
|
||||
[-b BUILDFILE] [-D] [-l DEBUG_DOMAINS] [-v] [-q]
|
||||
[-w WRITEEVENTLOG] [-B BIND] [-T SERVER_TIMEOUT]
|
||||
[--remote-server REMOTE_SERVER] [-m] [--token XMLRPCTOKEN]
|
||||
[--observe-only] [--status-only] [--server-only] [-r PREFILE]
|
||||
[-R POSTFILE] [-I EXTRA_ASSUME_PROVIDED]
|
||||
[recipename/target ...]
|
||||
Usage: bitbake [options] [recipename/target recipe:do_task ...]
|
||||
|
||||
It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH
|
||||
which will provide the layer, BBFILES and other configuration information.
|
||||
Executes the specified task (default is 'build') for a given set of target recipes (.bb files).
|
||||
It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH which
|
||||
will provide the layer, BBFILES and other configuration information.
|
||||
|
||||
General options:
|
||||
recipename/target Execute the specified task (default is 'build') for
|
||||
these target recipes (.bb files).
|
||||
-s, --show-versions Show current and preferred versions of all recipes.
|
||||
-e, --environment Show the global or per-recipe environment complete
|
||||
with information about where variables were
|
||||
set/changed.
|
||||
-g, --graphviz Save dependency tree information for the specified
|
||||
targets in the dot syntax.
|
||||
-u UI, --ui UI The user interface to use (knotty, ncurses, taskexp,
|
||||
taskexp_ncurses or teamcity - default knotty).
|
||||
--version Show programs version and exit.
|
||||
-h, --help Show this help message and exit.
|
||||
|
||||
Task control options:
|
||||
-f, --force Force the specified targets/task to run (invalidating
|
||||
any existing stamp file).
|
||||
-c CMD, --cmd CMD Specify the task to execute. The exact options
|
||||
available depend on the metadata. Some examples might
|
||||
be 'compile' or 'populate_sysroot' or 'listtasks' may
|
||||
give a list of the tasks available.
|
||||
-C INVALIDATE_STAMP, --clear-stamp INVALIDATE_STAMP
|
||||
Invalidate the stamp for the specified task such as
|
||||
'compile' and then run the default task for the
|
||||
specified target(s).
|
||||
--runall RUNALL Run the specified task for any recipe in the taskgraph
|
||||
of the specified target (even if it wouldn't otherwise
|
||||
have run).
|
||||
--runonly RUNONLY Run only the specified task within the taskgraph of
|
||||
the specified targets (and any task dependencies those
|
||||
tasks may have).
|
||||
--no-setscene Do not run any setscene tasks. sstate will be ignored
|
||||
and everything needed, built.
|
||||
--skip-setscene Skip setscene tasks if they would be executed. Tasks
|
||||
previously restored from sstate will be kept, unlike
|
||||
--no-setscene.
|
||||
--setscene-only Only run setscene tasks, don't run any real tasks.
|
||||
|
||||
Execution control options:
|
||||
-n, --dry-run Don't execute, just go through the motions.
|
||||
-p, --parse-only Quit after parsing the BB recipes.
|
||||
Options:
|
||||
--version show program's version number and exit
|
||||
-h, --help show this help message and exit
|
||||
-b BUILDFILE, --buildfile=BUILDFILE
|
||||
Execute tasks from a specific .bb recipe directly.
|
||||
WARNING: Does not handle any dependencies from other
|
||||
recipes.
|
||||
-k, --continue Continue as much as possible after an error. While the
|
||||
target that failed and anything depending on it cannot
|
||||
be built, as much as possible will be built before
|
||||
stopping.
|
||||
-P, --profile Profile the command and save reports.
|
||||
-S SIGNATURE_HANDLER, --dump-signatures SIGNATURE_HANDLER
|
||||
Dump out the signature construction information, with
|
||||
no task execution. The SIGNATURE_HANDLER parameter is
|
||||
passed to the handler. Two common values are none and
|
||||
printdiff but the handler may define more/less. none
|
||||
means only dump the signature, printdiff means
|
||||
recursively compare the dumped signature with the most
|
||||
recent one in a local build or sstate cache (can be
|
||||
used to find out why tasks re-run when that is not
|
||||
expected)
|
||||
--revisions-changed Set the exit code depending on whether upstream
|
||||
floating revisions have changed or not.
|
||||
-b BUILDFILE, --buildfile BUILDFILE
|
||||
Execute tasks from a specific .bb recipe directly.
|
||||
WARNING: Does not handle any dependencies from other
|
||||
recipes.
|
||||
|
||||
Logging/output control options:
|
||||
-f, --force Force the specified targets/task to run (invalidating
|
||||
any existing stamp file).
|
||||
-c CMD, --cmd=CMD Specify the task to execute. The exact options
|
||||
available depend on the metadata. Some examples might
|
||||
be 'compile' or 'populate_sysroot' or 'listtasks' may
|
||||
give a list of the tasks available.
|
||||
-C INVALIDATE_STAMP, --clear-stamp=INVALIDATE_STAMP
|
||||
Invalidate the stamp for the specified task such as
|
||||
'compile' and then run the default task for the
|
||||
specified target(s).
|
||||
-r PREFILE, --read=PREFILE
|
||||
Read the specified file before bitbake.conf.
|
||||
-R POSTFILE, --postread=POSTFILE
|
||||
Read the specified file after bitbake.conf.
|
||||
-v, --verbose Enable tracing of shell tasks (with 'set -x'). Also
|
||||
print bb.note(...) messages to stdout (in addition to
|
||||
writing them to ${T}/log.do_<task>).
|
||||
-D, --debug Increase the debug level. You can specify this more
|
||||
than once. -D sets the debug level to 1, where only
|
||||
bb.debug(1, ...) messages are printed to stdout; -DD
|
||||
@@ -436,47 +392,65 @@ Following is the usage and syntax for BitBake::
|
||||
-D only affects output to stdout. All debug messages
|
||||
are written to ${T}/log.do_taskname, regardless of the
|
||||
debug level.
|
||||
-l DEBUG_DOMAINS, --log-domains DEBUG_DOMAINS
|
||||
Show debug logging for the specified logging domains.
|
||||
-v, --verbose Enable tracing of shell tasks (with 'set -x'). Also
|
||||
print bb.note(...) messages to stdout (in addition to
|
||||
writing them to ${T}/log.do_<task>).
|
||||
-q, --quiet Output less log message data to the terminal. You can
|
||||
specify this more than once.
|
||||
-w WRITEEVENTLOG, --write-log WRITEEVENTLOG
|
||||
Writes the event log of the build to a bitbake event
|
||||
json file. Use '' (empty string) to assign the name
|
||||
automatically.
|
||||
|
||||
Server options:
|
||||
-B BIND, --bind BIND The name/address for the bitbake xmlrpc server to bind
|
||||
-n, --dry-run Don't execute, just go through the motions.
|
||||
-S SIGNATURE_HANDLER, --dump-signatures=SIGNATURE_HANDLER
|
||||
Dump out the signature construction information, with
|
||||
no task execution. The SIGNATURE_HANDLER parameter is
|
||||
passed to the handler. Two common values are none and
|
||||
printdiff but the handler may define more/less. none
|
||||
means only dump the signature, printdiff means compare
|
||||
the dumped signature with the cached one.
|
||||
-p, --parse-only Quit after parsing the BB recipes.
|
||||
-s, --show-versions Show current and preferred versions of all recipes.
|
||||
-e, --environment Show the global or per-recipe environment complete
|
||||
with information about where variables were
|
||||
set/changed.
|
||||
-g, --graphviz Save dependency tree information for the specified
|
||||
targets in the dot syntax.
|
||||
-I EXTRA_ASSUME_PROVIDED, --ignore-deps=EXTRA_ASSUME_PROVIDED
|
||||
Assume these dependencies don't exist and are already
|
||||
provided (equivalent to ASSUME_PROVIDED). Useful to
|
||||
make dependency graphs more appealing
|
||||
-l DEBUG_DOMAINS, --log-domains=DEBUG_DOMAINS
|
||||
Show debug logging for the specified logging domains
|
||||
-P, --profile Profile the command and save reports.
|
||||
-u UI, --ui=UI The user interface to use (knotty, ncurses, taskexp or
|
||||
teamcity - default knotty).
|
||||
--token=XMLRPCTOKEN Specify the connection token to be used when
|
||||
connecting to a remote server.
|
||||
--revisions-changed Set the exit code depending on whether upstream
|
||||
floating revisions have changed or not.
|
||||
--server-only Run bitbake without a UI, only starting a server
|
||||
(cooker) process.
|
||||
-B BIND, --bind=BIND The name/address for the bitbake xmlrpc server to bind
|
||||
to.
|
||||
-T SERVER_TIMEOUT, --idle-timeout SERVER_TIMEOUT
|
||||
-T SERVER_TIMEOUT, --idle-timeout=SERVER_TIMEOUT
|
||||
Set timeout to unload bitbake server due to
|
||||
inactivity, set to -1 means no unload, default:
|
||||
Environment variable BB_SERVER_TIMEOUT.
|
||||
--remote-server REMOTE_SERVER
|
||||
--no-setscene Do not run any setscene tasks. sstate will be ignored
|
||||
and everything needed, built.
|
||||
--skip-setscene Skip setscene tasks if they would be executed. Tasks
|
||||
previously restored from sstate will be kept, unlike
|
||||
--no-setscene
|
||||
--setscene-only Only run setscene tasks, don't run any real tasks.
|
||||
--remote-server=REMOTE_SERVER
|
||||
Connect to the specified server.
|
||||
-m, --kill-server Terminate any running bitbake server.
|
||||
--token XMLRPCTOKEN Specify the connection token to be used when
|
||||
connecting to a remote server.
|
||||
--observe-only Connect to a server as an observing-only client.
|
||||
--status-only Check the status of the remote bitbake server.
|
||||
--server-only Run bitbake without a UI, only starting a server
|
||||
(cooker) process.
|
||||
|
||||
Configuration options:
|
||||
-r PREFILE, --read PREFILE
|
||||
Read the specified file before bitbake.conf.
|
||||
-R POSTFILE, --postread POSTFILE
|
||||
Read the specified file after bitbake.conf.
|
||||
-I EXTRA_ASSUME_PROVIDED, --ignore-deps EXTRA_ASSUME_PROVIDED
|
||||
Assume these dependencies don't exist and are already
|
||||
provided (equivalent to ASSUME_PROVIDED). Useful to
|
||||
make dependency graphs more appealing.
|
||||
|
||||
..
|
||||
Bitbake help output generated with "stty columns 80; bin/bitbake -h"
|
||||
-w WRITEEVENTLOG, --write-log=WRITEEVENTLOG
|
||||
Writes the event log of the build to a bitbake event
|
||||
json file. Use '' (empty string) to assign the name
|
||||
automatically.
|
||||
--runall=RUNALL Run the specified task for any recipe in the taskgraph
|
||||
of the specified target (even if it wouldn't otherwise
|
||||
have run).
|
||||
--runonly=RUNONLY Run only the specified task within the taskgraph of
|
||||
the specified targets (and any task dependencies those
|
||||
tasks may have).
|
||||
|
||||
.. _bitbake-examples:
|
||||
|
||||
|
||||
@@ -754,9 +754,7 @@ share the task.
|
||||
This section presents the mechanisms BitBake provides to allow you to
|
||||
share functionality between recipes. Specifically, the mechanisms
|
||||
include ``include``, ``inherit``, :term:`INHERIT`, and ``require``
|
||||
directives. There is also a higher-level abstraction called
|
||||
``configuration fragments`` that is enabled with ``addfragments``
|
||||
directive.
|
||||
directives.
|
||||
|
||||
Locating Include and Class Files
|
||||
--------------------------------
|
||||
@@ -773,8 +771,6 @@ In order for include and class files to be found by BitBake, they need
|
||||
to be located in a "classes" subdirectory that can be found in
|
||||
:term:`BBPATH`.
|
||||
|
||||
.. _ref-bitbake-user-manual-metadata-inherit:
|
||||
|
||||
``inherit`` Directive
|
||||
---------------------
|
||||
|
||||
@@ -813,43 +809,19 @@ An advantage with the inherit directive as compared to both the
|
||||
:ref:`include <bitbake-user-manual/bitbake-user-manual-metadata:\`\`include\`\` directive>` and :ref:`require <bitbake-user-manual/bitbake-user-manual-metadata:\`\`require\`\` directive>`
|
||||
directives is that you can inherit class files conditionally. You can
|
||||
accomplish this by using a variable expression after the ``inherit``
|
||||
statement.
|
||||
statement. Here is an example::
|
||||
|
||||
For inheriting classes conditionally, using the :ref:`inherit_defer
|
||||
<ref-bitbake-user-manual-metadata-inherit-defer>` directive is advised as
|
||||
:ref:`inherit_defer <ref-bitbake-user-manual-metadata-inherit-defer>` is
|
||||
evaluated at the end of parsing.
|
||||
|
||||
.. _ref-bitbake-user-manual-metadata-inherit-defer:
|
||||
|
||||
``inherit_defer`` Directive
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The :ref:`inherit_defer <ref-bitbake-user-manual-metadata-inherit-defer>`
|
||||
directive works like the :ref:`inherit
|
||||
<ref-bitbake-user-manual-metadata-inherit>` directive, except that it is only
|
||||
evaluated at the end of parsing. Its usage is recommended when a conditional
|
||||
expression is used.
|
||||
|
||||
This allows conditional expressions to be evaluated "late", meaning changes to
|
||||
the variable after the line is parsed will take effect. With the :ref:`inherit
|
||||
<ref-bitbake-user-manual-metadata-inherit>` directive this is not the case.
|
||||
|
||||
Here is an example::
|
||||
|
||||
inherit_defer ${VARNAME}
|
||||
inherit ${VARNAME}
|
||||
|
||||
If ``VARNAME`` is
|
||||
going to be set, it needs to be set before the ``inherit_defer`` statement is
|
||||
going to be set, it needs to be set before the ``inherit`` statement is
|
||||
parsed. One way to achieve a conditional inherit in this case is to use
|
||||
overrides::
|
||||
|
||||
VARIABLE = ""
|
||||
VARIABLE:someoverride = "myclass"
|
||||
|
||||
Another method is by using :ref:`anonymous Python
|
||||
<bitbake-user-manual/bitbake-user-manual-metadata:Anonymous Python Functions>`.
|
||||
Here is an example::
|
||||
Another method is by using anonymous Python. Here is an example::
|
||||
|
||||
python () {
|
||||
if condition == value:
|
||||
@@ -858,14 +830,11 @@ Here is an example::
|
||||
d.setVar('VARIABLE', '')
|
||||
}
|
||||
|
||||
Alternatively, you could use an inline Python expression in the
|
||||
Alternatively, you could use an in-line Python expression in the
|
||||
following form::
|
||||
|
||||
inherit_defer ${@'classname' if condition else ''}
|
||||
|
||||
Or::
|
||||
|
||||
inherit_defer ${@bb.utils.contains('VARIABLE', 'something', 'classname', '', d)}
|
||||
inherit ${@'classname' if condition else ''}
|
||||
inherit ${@functionname(params)}
|
||||
|
||||
In all cases, if the expression evaluates to an
|
||||
empty string, the statement does not trigger a syntax error because it
|
||||
@@ -900,33 +869,6 @@ definitions::
|
||||
of include . Doing so makes sure that an error is produced if the file cannot
|
||||
be found.
|
||||
|
||||
``include_all`` Directive
|
||||
-------------------------
|
||||
|
||||
The ``include_all`` directive works like the :ref:`include
|
||||
<bitbake-user-manual/bitbake-user-manual-metadata:\`\`include\`\` directive>`
|
||||
directive but will include all of the files that match the specified path in
|
||||
the enabled layers (layers part of :term:`BBLAYERS`).
|
||||
|
||||
For example, let's say a ``maintainers.inc`` file is present in different layers
|
||||
and is conventionally placed in the ``conf/distro/include`` directory of each
|
||||
layer. In that case the ``include_all`` directive can be used to include
|
||||
the ``maintainers.inc`` file for all of these layers::
|
||||
|
||||
include_all conf/distro/include/maintainers.inc
|
||||
|
||||
In other words, the ``maintainers.inc`` file for each layer is included through
|
||||
the :ref:`include <bitbake-user-manual/bitbake-user-manual-metadata:\`\`include\`\` directive>`
|
||||
directive.
|
||||
|
||||
BitBake will iterate through the colon-separated :term:`BBPATH` list to look for
|
||||
matching files to include, from left to right. As a consequence, matching files
|
||||
are included in that order.
|
||||
|
||||
As the ``include_all`` directive uses the :ref:`include
|
||||
<bitbake-user-manual/bitbake-user-manual-metadata:\`\`include\`\` directive>`
|
||||
directive in the background, no error is produced if no files are matched.
|
||||
|
||||
.. _require-inclusion:
|
||||
|
||||
``require`` Directive
|
||||
@@ -991,50 +933,6 @@ the ``autotools`` and ``pkgconfig`` classes::
|
||||
|
||||
INHERIT += "autotools pkgconfig"
|
||||
|
||||
``addfragments`` Directive
|
||||
--------------------------
|
||||
|
||||
This directive allows fine-tuning local configurations with configuration
|
||||
snippets contained in layers in a structured, controlled way. Typically it would
|
||||
go into ``bitbake.conf``, for example::
|
||||
|
||||
addfragments conf/fragments OE_FRAGMENTS OE_FRAGMENTS_METADATA_VARS
|
||||
|
||||
``addfragments`` takes three parameters:
|
||||
|
||||
- path prefix for fragment files inside the layer file tree that bitbake
|
||||
uses to construct full paths to the fragment files
|
||||
|
||||
- name of variable that holds the list of enabled fragments in an
|
||||
active build
|
||||
|
||||
- name of variable that contains a list of variable names containing
|
||||
fragment-specific metadata (such as descriptions)
|
||||
|
||||
This allows listing enabled configuration fragments in ``OE_FRAGMENTS``
|
||||
variable like this::
|
||||
|
||||
OE_FRAGMENTS = "core/domain/somefragment core/someotherfragment anotherlayer/anotherdomain/anotherfragment"
|
||||
|
||||
Fragment names listed in this variable must be prefixed by the layer name
|
||||
where a fragment file is located, defined by :term:`BBFILE_COLLECTIONS` in ``layer.conf``.
|
||||
|
||||
The implementation then expands this list into
|
||||
:ref:`require <bitbake-user-manual/bitbake-user-manual-metadata:\`\`require\`\` directive>`
|
||||
directives with full paths to respective layers::
|
||||
|
||||
require /path/to/core-layer/conf/fragments/domain/somefragment.conf
|
||||
require /path/to/core-layer/conf/fragments/someotherfragment.conf
|
||||
require /path/to/another-layer/conf/fragments/anotherdomain/anotherfragment.conf
|
||||
|
||||
The variable containing a list of fragment metadata variables could look like this::
|
||||
|
||||
OE_FRAGMENTS_METADATA_VARS = "BB_CONF_FRAGMENT_SUMMARY BB_CONF_FRAGMENT_DESCRIPTION"
|
||||
|
||||
The implementation will add a flag containing the fragment name to each of those variables
|
||||
when parsing fragments, so that the variables are namespaced by fragment name, and do not override
|
||||
each other when several fragments are enabled.
|
||||
|
||||
Functions
|
||||
=========
|
||||
|
||||
@@ -1621,12 +1519,6 @@ functionality of the task:
|
||||
released. You can use this variable flag to accomplish mutual
|
||||
exclusion.
|
||||
|
||||
- ``[network]``: When set to "1", allows a task to access the network. By
|
||||
default, only the ``do_fetch`` task is granted network access. Recipes
|
||||
shouldn't access the network outside of ``do_fetch`` as it usually
|
||||
undermines fetcher source mirroring, image and licence manifests, software
|
||||
auditing and supply chain security.
|
||||
|
||||
- ``[noexec]``: When set to "1", marks the task as being empty, with
|
||||
no execution required. You can use the ``[noexec]`` flag to set up
|
||||
tasks as dependency placeholders, or to disable tasks defined
|
||||
|
||||
@@ -1,91 +0,0 @@
|
||||
.. SPDX-License-Identifier: CC-BY-2.5
|
||||
|
||||
================
|
||||
Variable Context
|
||||
================
|
||||
|
||||
|
|
||||
|
||||
Variables might only have an impact or can be used in certain contexts. Some
|
||||
should only be used in global files like ``.conf``, while others are intended only
|
||||
for local files like ``.bb``. This chapter aims to describe some important variable
|
||||
contexts.
|
||||
|
||||
.. _ref-varcontext-configuration:
|
||||
|
||||
BitBake's own configuration
|
||||
===========================
|
||||
|
||||
Variables starting with ``BB_`` usually configure the behaviour of BitBake itself.
|
||||
For example, one could configure:
|
||||
|
||||
- System resources, like disk space to be used (:term:`BB_DISKMON_DIRS`),
|
||||
or the number of tasks to be run in parallel by BitBake (:term:`BB_NUMBER_THREADS`).
|
||||
|
||||
- How the fetchers shall behave, e.g., :term:`BB_FETCH_PREMIRRORONLY` is used
|
||||
by BitBake to determine if BitBake's fetcher shall search only
|
||||
:term:`PREMIRRORS` for files.
|
||||
|
||||
Those variables are usually configured globally.
|
||||
|
||||
BitBake configuration
|
||||
=====================
|
||||
|
||||
There are variables:
|
||||
|
||||
- Like :term:`B` or :term:`T`, that are used to specify directories used by
|
||||
BitBake during the build of a particular recipe. Those variables are
|
||||
specified in ``bitbake.conf``. Some, like :term:`B`, are quite often
|
||||
overwritten in recipes.
|
||||
|
||||
- Starting with ``FAKEROOT``, to configure how the ``fakeroot`` command is
|
||||
handled. Those are usually set by ``bitbake.conf`` and might get adapted in a
|
||||
``bbclass``.
|
||||
|
||||
- Detailing where BitBake will store and fetch information from, for
|
||||
data reuse between build runs like :term:`CACHE`, :term:`DL_DIR` or
|
||||
:term:`PERSISTENT_DIR`. Those are usually global.
|
||||
|
||||
|
||||
Layers and files
|
||||
================
|
||||
|
||||
Variables starting with ``LAYER`` configure how BitBake handles layers.
|
||||
Additionally, variables starting with ``BB`` configure how layers and files are
|
||||
handled. For example:
|
||||
|
||||
- :term:`LAYERDEPENDS` is used to configure on which layers a given layer
|
||||
depends.
|
||||
|
||||
- The configured layers are contained in :term:`BBLAYERS` and files in
|
||||
:term:`BBFILES`.
|
||||
|
||||
Those variables are often used in the files ``layer.conf`` and ``bblayers.conf``.
|
||||
|
||||
Recipes and packages
|
||||
====================
|
||||
|
||||
Variables handling recipes and packages can be split into:
|
||||
|
||||
- :term:`PN`, :term:`PV` or :term:`PF` for example, contain information about
|
||||
the name or revision of a recipe or package. Usually, the default set in
|
||||
``bitbake.conf`` is used, but those are from time to time overwritten in
|
||||
recipes.
|
||||
|
||||
- :term:`SUMMARY`, :term:`DESCRIPTION`, :term:`LICENSE` or :term:`HOMEPAGE`
|
||||
contain the expected information and should be set specifically for every
|
||||
recipe.
|
||||
|
||||
- In recipes, variables are also used to control build and runtime
|
||||
dependencies between recipes/packages with other recipes/packages. The
|
||||
most common should be: :term:`PROVIDES`, :term:`RPROVIDES`, :term:`DEPENDS`,
|
||||
and :term:`RDEPENDS`.
|
||||
|
||||
- There are further variables starting with ``SRC`` that specify the sources in
|
||||
a recipe like :term:`SRC_URI` or :term:`SRCDATE`. Those are also usually set
|
||||
in recipes.
|
||||
|
||||
- Which version or provider of a recipe should be given preference when
|
||||
multiple recipes would provide the same item, is controlled by variables
|
||||
starting with ``PREFERRED_``. Those are normally set in the configuration
|
||||
files of a ``MACHINE`` or ``DISTRO``.
|
||||
@@ -127,10 +127,17 @@ overview of their function and contents.
|
||||
Contains the name of the currently running task. The name does not
|
||||
include the ``do_`` prefix.
|
||||
|
||||
:term:`BB_CURRENT_MC`
|
||||
Contains the name of the current multiconfig a task is being run under.
|
||||
The name is taken from the multiconfig configuration file (a file
|
||||
``mc1.conf`` would make this variable equal to ``mc1``).
|
||||
:term:`BB_DANGLINGAPPENDS_WARNONLY`
|
||||
Defines how BitBake handles situations where an append file
|
||||
(``.bbappend``) has no corresponding recipe file (``.bb``). This
|
||||
condition often occurs when layers get out of sync (e.g. ``oe-core``
|
||||
bumps a recipe version and the old recipe no longer exists and the
|
||||
other layer has not been updated to the new version of the recipe
|
||||
yet).
|
||||
|
||||
The default fatal behavior is safest because it is the sane reaction
|
||||
given something is out of sync. It is important to realize when your
|
||||
changes are no longer being applied.
|
||||
|
||||
:term:`BB_DEFAULT_TASK`
|
||||
The default task to use when none is specified (e.g. with the ``-c``
|
||||
@@ -320,26 +327,11 @@ overview of their function and contents.
|
||||
mirror tarball. If the shallow mirror tarball cannot be fetched, it will
|
||||
try to fetch the full mirror tarball and use that.
|
||||
|
||||
This setting causes an initial shallow clone instead of an initial full bare clone.
|
||||
The amount of data transferred during the initial clone will be significantly reduced.
|
||||
|
||||
However, every time the source revision (referenced in :term:`SRCREV`)
|
||||
changes, regardless of whether the cache within the download directory
|
||||
(defined by :term:`DL_DIR`) has been cleaned up or not,
|
||||
the data transfer may be significantly higher because entirely
|
||||
new shallow clones are required for each source revision change.
|
||||
|
||||
Over time, numerous shallow clones may cumulatively transfer
|
||||
the same amount of data as an initial full bare clone.
|
||||
This is especially the case with very large repositories.
|
||||
|
||||
Existing initial full bare clones, created without this setting,
|
||||
will still be utilized.
|
||||
|
||||
If the Git error "Server does not allow request for unadvertised object"
|
||||
occurs, an initial full bare clone is fetched automatically.
|
||||
This may happen if the Git server does not allow the request
|
||||
or if the Git client has issues with this functionality.
|
||||
When a mirror tarball is not available, a full git clone will be performed
|
||||
regardless of whether this variable is set or not. Support for shallow
|
||||
clones is not currently implemented as git does not directly support
|
||||
shallow cloning a particular git commit hash (it only supports cloning
|
||||
from a tag or branch reference).
|
||||
|
||||
See also :term:`BB_GIT_SHALLOW_DEPTH` and
|
||||
:term:`BB_GENERATE_SHALLOW_TARBALLS`.
|
||||
@@ -432,7 +424,7 @@ overview of their function and contents.
|
||||
|
||||
Example usage::
|
||||
|
||||
BB_HASHSERVE_UPSTREAM = "hashserv.yoctoproject.org:8686"
|
||||
BB_HASHSERVE_UPSTREAM = "hashserv.yocto.io:8687"
|
||||
|
||||
:term:`BB_INVALIDCONF`
|
||||
Used in combination with the ``ConfigParsed`` event to trigger
|
||||
@@ -440,15 +432,6 @@ overview of their function and contents.
|
||||
``ConfigParsed`` event can set the variable to trigger the re-parse.
|
||||
You must be careful to avoid recursive loops with this functionality.
|
||||
|
||||
:term:`BB_LOADFACTOR_MAX`
Setting this to a value will cause BitBake to check the system load
average before executing new tasks. If the load average is above the
number of CPUs multiplied by this factor, no new task will be started
unless there is no task executing. A value of "1.5" has been found to
work reasonably. This is helpful for systems which don't have pressure
regulation enabled, which is more granular. Pressure values take
precedence over loadfactor.
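Not BitBake's actual implementation, but the check this variable controls can be pictured as a comparison between the 1-minute load average and the CPU count scaled by the factor (the helper name and default value here are illustrative):

    import os

    def over_load_factor(factor_max: float = 1.5) -> bool:
        """Return True when the 1-minute load average exceeds cpu_count * factor_max."""
        load1, _, _ = os.getloadavg()
        cpus = os.cpu_count() or 1
        return load1 > cpus * factor_max

    # BitBake would hold back new tasks while this is True, unless nothing is running at all.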
|
||||
|
||||
:term:`BB_LOGCONFIG`
|
||||
Specifies the name of a config file that contains the user logging
|
||||
configuration. See
|
||||
@@ -580,7 +563,7 @@ overview of their function and contents.
|
||||
:term:`BB_RUNFMT` variable is undefined and the run filenames get
|
||||
created using the following form::
|
||||
|
||||
run.{func}.{pid}
|
||||
run.{task}.{pid}
|
||||
|
||||
If you want to force run files to take a specific name, you can set this
|
||||
variable in a configuration file.
|
||||
@@ -707,12 +690,6 @@ overview of their function and contents.
|
||||
Within an executing task, this variable holds the hash of the task as
|
||||
returned by the currently enabled signature generator.
|
||||
|
||||
:term:`BB_USE_HOME_NPMRC`
|
||||
Controls whether or not BitBake uses the user's .npmrc file within their
|
||||
home directory within the npm fetcher. This can be used for authentication
|
||||
of private NPM registries, among other uses. This is turned off by default
|
||||
and requires the user to explicitly set it to "1" to enable.
|
||||
|
||||
:term:`BB_VERBOSE_LOGS`
|
||||
Controls how verbose BitBake is during builds. If set, shell scripts
|
||||
echo commands and shell script output appears on standard out
|
||||
@@ -780,10 +757,6 @@ overview of their function and contents.
|
||||
:term:`BBFILE_PRIORITY`
|
||||
Assigns the priority for recipe files in each layer.
|
||||
|
||||
This variable is used in the ``conf/layer.conf`` file and must be
|
||||
suffixed with a `_` followed by the name of the specific layer (e.g.
|
||||
``BBFILE_PRIORITY_emenlow``). Colon as separator is not supported.
|
||||
|
||||
This variable is useful in situations where the same recipe appears
|
||||
in more than one layer. Setting this variable allows you to
|
||||
prioritize a layer against other layers that contain the same recipe
|
||||
@@ -798,7 +771,7 @@ overview of their function and contents.
|
||||
higher precedence. For example, the value 6 has a higher precedence
|
||||
than the value 5. If not specified, the :term:`BBFILE_PRIORITY` variable
|
||||
is set based on layer dependencies (see the :term:`LAYERDEPENDS` variable
|
||||
for more information). The default priority, if unspecified for a
|
||||
for more information. The default priority, if unspecified for a
|
||||
layer with no dependencies, is the lowest defined priority + 1 (or 1
|
||||
if no priorities are defined).
|
||||
|
||||
@@ -947,9 +920,9 @@ overview of their function and contents.
|
||||
section.
|
||||
|
||||
:term:`BBPATH`
|
||||
A colon-separated list used by BitBake to locate class (``.bbclass``)
|
||||
and configuration (``.conf``) files. This variable is analogous to the
|
||||
``PATH`` variable.
|
||||
Used by BitBake to locate class (``.bbclass``) and configuration
|
||||
(``.conf``) files. This variable is analogous to the ``PATH``
|
||||
variable.
|
||||
|
||||
If you run BitBake from a directory outside of the build directory,
|
||||
you must be sure to set :term:`BBPATH` to point to the build directory.
|
||||
@@ -1099,11 +1072,6 @@ overview of their function and contents.
|
||||
environment variable. The value is a colon-separated list of
|
||||
directories that are searched left-to-right in order.
|
||||
|
||||
:term:`FILE_LAYERNAME`
|
||||
During parsing and task execution, this is set to the name of the
|
||||
layer containing the recipe file. Code can use this to identify which
|
||||
layer a recipe is from.
|
||||
|
||||
:term:`GITDIR`
|
||||
The directory in which a local copy of a Git repository is stored
|
||||
when it is cloned.
|
||||
@@ -1197,8 +1165,8 @@ overview of their function and contents.
|
||||
order.
|
||||
|
||||
:term:`OVERRIDES`
|
||||
A colon-separated list that BitBake uses to control what variables are
|
||||
overridden after BitBake parses recipes and configuration files.
|
||||
BitBake uses :term:`OVERRIDES` to control what variables are overridden
|
||||
after BitBake parses recipes and configuration files.
|
||||
|
||||
Following is a simple example that uses an overrides list based on
|
||||
machine architectures: OVERRIDES = "arm:x86:mips:powerpc" You can
|
||||
|
||||
@@ -13,7 +13,6 @@ BitBake User Manual
   bitbake-user-manual/bitbake-user-manual-intro
   bitbake-user-manual/bitbake-user-manual-execution
   bitbake-user-manual/bitbake-user-manual-metadata
   bitbake-user-manual/bitbake-user-manual-ref-variables-context
   bitbake-user-manual/bitbake-user-manual-fetching
   bitbake-user-manual/bitbake-user-manual-ref-variables
   bitbake-user-manual/bitbake-user-manual-hello

@@ -4,46 +4,28 @@
|
||||
BitBake Supported Release Manuals
|
||||
=================================
|
||||
|
||||
****************************
|
||||
Release Series 5.1 (styhead)
|
||||
****************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.10 User Manual </bitbake/2.10/>`
|
||||
|
||||
*******************************
|
||||
Release Series 5.0 (scarthgap)
|
||||
*******************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.8 User Manual </bitbake/2.8/>`
|
||||
|
||||
******************************
|
||||
Release Series 4.0 (kirkstone)
|
||||
******************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.0 User Manual </bitbake/2.0/>`
|
||||
|
||||
================================
|
||||
BitBake Outdated Release Manuals
|
||||
================================
|
||||
|
||||
*******************************
|
||||
Release Series 4.3 (nanbield)
|
||||
*******************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.6 User Manual </bitbake/2.6/>`
|
||||
|
||||
*******************************
|
||||
Release Series 4.2 (mickledore)
|
||||
*******************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.4 User Manual </bitbake/2.4/>`
|
||||
|
||||
*****************************
|
||||
Release Series 4.1 (langdale)
|
||||
*****************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.2 User Manual </bitbake/2.2/>`
|
||||
|
||||
*****************************
|
||||
Release Series 4.0 (kirstone)
|
||||
*****************************
|
||||
|
||||
- :yocto_docs:`BitBake 2.0 User Manual </bitbake/2.0/>`
|
||||
|
||||
****************************
|
||||
Release Series 3.1 (dunfell)
|
||||
****************************
|
||||
|
||||
- :yocto_docs:`BitBake 1.46 User Manual </bitbake/1.46/>`
|
||||
|
||||
================================
|
||||
BitBake Outdated Release Manuals
|
||||
================================
|
||||
|
||||
******************************
|
||||
Release Series 3.4 (honister)
|
||||
******************************
|
||||
@@ -62,11 +44,10 @@ Release Series 3.2 (gatesgarth)
|
||||
|
||||
- :yocto_docs:`BitBake 1.48 User Manual </bitbake/1.48/>`
|
||||
|
||||
****************************
|
||||
Release Series 3.1 (dunfell)
|
||||
****************************
|
||||
*******************************************
|
||||
Release Series 3.1 (dunfell first versions)
|
||||
*******************************************
|
||||
|
||||
- :yocto_docs:`BitBake 1.46 User Manual </bitbake/1.46/>`
|
||||
- :yocto_docs:`3.1 BitBake User Manual </3.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.1 BitBake User Manual </3.1.1/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
- :yocto_docs:`3.1.2 BitBake User Manual </3.1.2/bitbake-user-manual/bitbake-user-manual.html>`
|
||||
|
||||
@@ -36,9 +36,8 @@ class COWDictMeta(COWMeta):
__marker__ = tuple()

def __str__(cls):
ignored_keys = set(["__count__", "__doc__", "__module__", "__firstlineno__", "__static_attributes__"])
keys = set(cls.__dict__.keys()) - ignored_keys
return "<COWDict Level: %i Current Keys: %i>" % (cls.__count__, len(keys))
# FIXME: I have magic numbers!
return "<COWDict Level: %i Current Keys: %i>" % (cls.__count__, len(cls.__dict__) - 3)

__repr__ = __str__

@@ -162,9 +161,8 @@ class COWDictMeta(COWMeta):

class COWSetMeta(COWDictMeta):
def __str__(cls):
ignored_keys = set(["__count__", "__doc__", "__module__", "__firstlineno__", "__static_attributes__"])
keys = set(cls.__dict__.keys()) - ignored_keys
return "<COWSet Level: %i Current Keys: %i>" % (cls.__count__, len(keys))
# FIXME: I have magic numbers!
return "<COWSet Level: %i Current Keys: %i>" % (cls.__count__, len(cls.__dict__) - 3)

__repr__ = __str__

@@ -9,19 +9,12 @@
# SPDX-License-Identifier: GPL-2.0-only
#

__version__ = "2.12.0"
__version__ = "2.4.0"

import sys
if sys.version_info < (3, 9, 0):
raise RuntimeError("Sorry, python 3.9.0 or later is required for this version of bitbake")
if sys.version_info < (3, 8, 0):
raise RuntimeError("Sorry, python 3.8.0 or later is required for this version of bitbake")

if sys.version_info < (3, 10, 0):
# With python 3.8 and 3.9, we see errors of "libgcc_s.so.1 must be installed for pthread_cancel to work"
# https://stackoverflow.com/questions/64797838/libgcc-s-so-1-must-be-installed-for-pthread-cancel-to-work
# https://bugs.ams1.psf.io/issue42888
# so ensure libgcc_s is loaded early on
import ctypes
libgcc_s = ctypes.CDLL('libgcc_s.so.1')

class BBHandledException(Exception):
"""
@@ -36,7 +29,6 @@ class BBHandledException(Exception):
|
||||
|
||||
import os
|
||||
import logging
|
||||
from collections import namedtuple
|
||||
|
||||
|
||||
class NullHandler(logging.Handler):
|
||||
@@ -104,6 +96,26 @@ class BBLoggerAdapter(logging.LoggerAdapter, BBLoggerMixin):
|
||||
self.setup_bblogger(logger.name)
|
||||
super().__init__(logger, *args, **kwargs)
|
||||
|
||||
if sys.version_info < (3, 6):
|
||||
# These properties were added in Python 3.6. Add them in older versions
|
||||
# for compatibility
|
||||
@property
|
||||
def manager(self):
|
||||
return self.logger.manager
|
||||
|
||||
@manager.setter
|
||||
def manager(self, value):
|
||||
self.logger.manager = value
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self.logger.name
|
||||
|
||||
def __repr__(self):
|
||||
logger = self.logger
|
||||
level = logger.getLevelName(logger.getEffectiveLevel())
|
||||
return '<%s %s (%s)>' % (self.__class__.__name__, logger.name, level)
|
||||
|
||||
logging.LoggerAdapter = BBLoggerAdapter
|
||||
|
||||
logger = logging.getLogger("BitBake")
|
||||
@@ -194,6 +206,7 @@ def deprecated(func, name=None, advice=""):
|
||||
# For compatibility
|
||||
def deprecate_import(current, modulename, fromlist, renames = None):
|
||||
"""Import objects from one module into another, wrapping them with a DeprecationWarning"""
|
||||
import sys
|
||||
|
||||
module = __import__(modulename, fromlist = fromlist)
|
||||
for position, objname in enumerate(fromlist):
|
||||
@@ -207,14 +220,3 @@ def deprecate_import(current, modulename, fromlist, renames = None):
|
||||
|
||||
setattr(sys.modules[current], newname, newobj)
|
||||
|
||||
TaskData = namedtuple("TaskData", [
|
||||
"pn",
|
||||
"taskname",
|
||||
"fn",
|
||||
"deps",
|
||||
"provides",
|
||||
"taskhash",
|
||||
"unihash",
|
||||
"hashfn",
|
||||
"taskhash_deps",
|
||||
])
|
||||
|
||||
@@ -1,213 +0,0 @@
|
||||
#! /usr/bin/env python3
|
||||
#
|
||||
# Copyright 2023 by Garmin Ltd. or its subsidiaries
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
|
||||
import sys
|
||||
import ctypes
|
||||
import os
|
||||
import errno
|
||||
import pwd
|
||||
import grp
|
||||
|
||||
libacl = ctypes.CDLL("libacl.so.1", use_errno=True)
|
||||
|
||||
|
||||
ACL_TYPE_ACCESS = 0x8000
|
||||
ACL_TYPE_DEFAULT = 0x4000
|
||||
|
||||
ACL_FIRST_ENTRY = 0
|
||||
ACL_NEXT_ENTRY = 1
|
||||
|
||||
ACL_UNDEFINED_TAG = 0x00
|
||||
ACL_USER_OBJ = 0x01
|
||||
ACL_USER = 0x02
|
||||
ACL_GROUP_OBJ = 0x04
|
||||
ACL_GROUP = 0x08
|
||||
ACL_MASK = 0x10
|
||||
ACL_OTHER = 0x20
|
||||
|
||||
ACL_READ = 0x04
|
||||
ACL_WRITE = 0x02
|
||||
ACL_EXECUTE = 0x01
|
||||
|
||||
acl_t = ctypes.c_void_p
|
||||
acl_entry_t = ctypes.c_void_p
|
||||
acl_permset_t = ctypes.c_void_p
|
||||
acl_perm_t = ctypes.c_uint
|
||||
|
||||
acl_tag_t = ctypes.c_int
|
||||
|
||||
libacl.acl_free.argtypes = [acl_t]
|
||||
|
||||
|
||||
def acl_free(acl):
|
||||
libacl.acl_free(acl)
|
||||
|
||||
|
||||
libacl.acl_get_file.restype = acl_t
|
||||
libacl.acl_get_file.argtypes = [ctypes.c_char_p, ctypes.c_uint]
|
||||
|
||||
|
||||
def acl_get_file(path, typ):
|
||||
acl = libacl.acl_get_file(os.fsencode(path), typ)
|
||||
if acl is None:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err), str(path))
|
||||
|
||||
return acl
|
||||
|
||||
|
||||
libacl.acl_get_entry.argtypes = [acl_t, ctypes.c_int, ctypes.c_void_p]
|
||||
|
||||
|
||||
def acl_get_entry(acl, entry_id):
|
||||
entry = acl_entry_t()
|
||||
ret = libacl.acl_get_entry(acl, entry_id, ctypes.byref(entry))
|
||||
if ret < 0:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err))
|
||||
|
||||
if ret == 0:
|
||||
return None
|
||||
|
||||
return entry
|
||||
|
||||
|
||||
libacl.acl_get_tag_type.argtypes = [acl_entry_t, ctypes.c_void_p]
|
||||
|
||||
|
||||
def acl_get_tag_type(entry_d):
|
||||
tag = acl_tag_t()
|
||||
ret = libacl.acl_get_tag_type(entry_d, ctypes.byref(tag))
|
||||
if ret < 0:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err))
|
||||
return tag.value
|
||||
|
||||
|
||||
libacl.acl_get_qualifier.restype = ctypes.c_void_p
|
||||
libacl.acl_get_qualifier.argtypes = [acl_entry_t]
|
||||
|
||||
|
||||
def acl_get_qualifier(entry_d):
|
||||
ret = libacl.acl_get_qualifier(entry_d)
|
||||
if ret is None:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err))
|
||||
return ctypes.c_void_p(ret)
|
||||
|
||||
|
||||
libacl.acl_get_permset.argtypes = [acl_entry_t, ctypes.c_void_p]
|
||||
|
||||
|
||||
def acl_get_permset(entry_d):
|
||||
permset = acl_permset_t()
|
||||
ret = libacl.acl_get_permset(entry_d, ctypes.byref(permset))
|
||||
if ret < 0:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err))
|
||||
|
||||
return permset
|
||||
|
||||
|
||||
libacl.acl_get_perm.argtypes = [acl_permset_t, acl_perm_t]
|
||||
|
||||
|
||||
def acl_get_perm(permset_d, perm):
|
||||
ret = libacl.acl_get_perm(permset_d, perm)
|
||||
if ret < 0:
|
||||
err = ctypes.get_errno()
|
||||
raise OSError(err, os.strerror(err))
|
||||
return bool(ret)
|
||||
|
||||
|
||||
class Entry(object):
|
||||
def __init__(self, tag, qualifier, mode):
|
||||
self.tag = tag
|
||||
self.qualifier = qualifier
|
||||
self.mode = mode
|
||||
|
||||
def __str__(self):
|
||||
typ = ""
|
||||
qual = ""
|
||||
if self.tag == ACL_USER:
|
||||
typ = "user"
|
||||
qual = pwd.getpwuid(self.qualifier).pw_name
|
||||
elif self.tag == ACL_GROUP:
|
||||
typ = "group"
|
||||
qual = grp.getgrgid(self.qualifier).gr_name
|
||||
elif self.tag == ACL_USER_OBJ:
|
||||
typ = "user"
|
||||
elif self.tag == ACL_GROUP_OBJ:
|
||||
typ = "group"
|
||||
elif self.tag == ACL_MASK:
|
||||
typ = "mask"
|
||||
elif self.tag == ACL_OTHER:
|
||||
typ = "other"
|
||||
|
||||
r = "r" if self.mode & ACL_READ else "-"
|
||||
w = "w" if self.mode & ACL_WRITE else "-"
|
||||
x = "x" if self.mode & ACL_EXECUTE else "-"
|
||||
|
||||
return f"{typ}:{qual}:{r}{w}{x}"
|
||||
|
||||
|
||||
class ACL(object):
|
||||
def __init__(self, acl):
|
||||
self.acl = acl
|
||||
|
||||
def __del__(self):
|
||||
acl_free(self.acl)
|
||||
|
||||
def entries(self):
|
||||
entry_id = ACL_FIRST_ENTRY
|
||||
while True:
|
||||
entry = acl_get_entry(self.acl, entry_id)
|
||||
if entry is None:
|
||||
break
|
||||
|
||||
permset = acl_get_permset(entry)
|
||||
|
||||
mode = 0
|
||||
for m in (ACL_READ, ACL_WRITE, ACL_EXECUTE):
|
||||
if acl_get_perm(permset, m):
|
||||
mode |= m
|
||||
|
||||
qualifier = None
|
||||
tag = acl_get_tag_type(entry)
|
||||
|
||||
if tag == ACL_USER or tag == ACL_GROUP:
|
||||
qual = acl_get_qualifier(entry)
|
||||
qualifier = ctypes.cast(qual, ctypes.POINTER(ctypes.c_int))[0]
|
||||
|
||||
yield Entry(tag, qualifier, mode)
|
||||
|
||||
entry_id = ACL_NEXT_ENTRY
|
||||
|
||||
@classmethod
|
||||
def from_path(cls, path, typ):
|
||||
acl = acl_get_file(path, typ)
|
||||
return cls(acl)
|
||||
|
||||
|
||||
def main():
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("path", help="File Path", type=Path)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
acl = ACL.from_path(args.path, ACL_TYPE_ACCESS)
|
||||
for entry in acl.entries():
|
||||
print(str(entry))
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
@@ -4,13 +4,30 @@
# SPDX-License-Identifier: GPL-2.0-only
#

import itertools
import json

# The Python async server defaults to a 64K receive buffer, so we hardcode our
# maximum chunk size. It would be better if the client and server reported to
# each other what the maximum chunk sizes were, but that will slow down the
# connection setup with a round trip delay so I'd rather not do that unless it
# is necessary
DEFAULT_MAX_CHUNK = 32 * 1024


def chunkify(msg, max_chunk):
if len(msg) < max_chunk - 1:
yield ''.join((msg, "\n"))
else:
yield ''.join((json.dumps({
'chunk-stream': None
}), "\n"))

args = [iter(msg)] * (max_chunk - 1)
for m in map(''.join, itertools.zip_longest(*args, fillvalue='')):
yield ''.join(itertools.chain(m, "\n"))
yield "\n"

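A small self-contained check of the chunking scheme the comment above describes (the chunkify generator is repeated verbatim from the hunk above so this runs standalone; the message and chunk size are illustrative):

import itertools
import json

def chunkify(msg, max_chunk):
    if len(msg) < max_chunk - 1:
        yield ''.join((msg, "\n"))
    else:
        yield ''.join((json.dumps({'chunk-stream': None}), "\n"))
        args = [iter(msg)] * (max_chunk - 1)
        for m in map(''.join, itertools.zip_longest(*args, fillvalue='')):
            yield ''.join(itertools.chain(m, "\n"))
        yield "\n"

msg = json.dumps({"ping": {}})
chunks = list(chunkify(msg, max_chunk=8))

# The first line announces a chunk stream, the following lines each carry at
# most max_chunk - 1 characters, and an empty line terminates the stream.
assert json.loads(chunks[0]) == {"chunk-stream": None}
assert chunks[-1] == "\n"
assert json.loads("".join(c.rstrip("\n") for c in chunks[1:-1])) == {"ping": {}}
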
from .client import AsyncClient, Client
from .serv import AsyncServer, AsyncServerConnection
from .connection import DEFAULT_MAX_CHUNK
from .exceptions import (
ClientError,
ServerError,
ConnectionClosedError,
InvokeError,
)
from .serv import AsyncServer, AsyncServerConnection, ClientError, ServerError

@@ -10,66 +10,22 @@ import json
import os
import socket
import sys
import re
import contextlib
from threading import Thread
from .connection import StreamConnection, WebsocketConnection, DEFAULT_MAX_CHUNK
from .exceptions import ConnectionClosedError, InvokeError

UNIX_PREFIX = "unix://"
WS_PREFIX = "ws://"
WSS_PREFIX = "wss://"

ADDR_TYPE_UNIX = 0
ADDR_TYPE_TCP = 1
ADDR_TYPE_WS = 2

WEBSOCKETS_MIN_VERSION = (9, 1)
# Need websockets 10 with python 3.10+
if sys.version_info >= (3, 10, 0):
WEBSOCKETS_MIN_VERSION = (10, 0)


def parse_address(addr):
if addr.startswith(UNIX_PREFIX):
return (ADDR_TYPE_UNIX, (addr[len(UNIX_PREFIX) :],))
elif addr.startswith(WS_PREFIX) or addr.startswith(WSS_PREFIX):
return (ADDR_TYPE_WS, (addr,))
else:
m = re.match(r"\[(?P<host>[^\]]*)\]:(?P<port>\d+)$", addr)
if m is not None:
host = m.group("host")
port = m.group("port")
else:
host, port = addr.split(":")

return (ADDR_TYPE_TCP, (host, int(port)))
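
Expected behaviour of parse_address() above, as a quick sketch (assumes bitbake's lib/ directory is on sys.path so bb.asyncrpc.client is importable; the addresses are made up):

from bb.asyncrpc.client import (
    parse_address, ADDR_TYPE_UNIX, ADDR_TYPE_TCP, ADDR_TYPE_WS,
)

assert parse_address("unix:///tmp/bb.sock") == (ADDR_TYPE_UNIX, ("/tmp/bb.sock",))
assert parse_address("ws://server:8686") == (ADDR_TYPE_WS, ("ws://server:8686",))
assert parse_address("[::1]:8686") == (ADDR_TYPE_TCP, ("::1", 8686))
assert parse_address("server:8686") == (ADDR_TYPE_TCP, ("server", 8686))
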
from . import chunkify, DEFAULT_MAX_CHUNK
|
||||
|
||||
|
||||
class AsyncClient(object):
|
||||
def __init__(
|
||||
self,
|
||||
proto_name,
|
||||
proto_version,
|
||||
logger,
|
||||
timeout=30,
|
||||
server_headers=False,
|
||||
headers={},
|
||||
):
|
||||
self.socket = None
|
||||
def __init__(self, proto_name, proto_version, logger, timeout=30):
|
||||
self.reader = None
|
||||
self.writer = None
|
||||
self.max_chunk = DEFAULT_MAX_CHUNK
|
||||
self.proto_name = proto_name
|
||||
self.proto_version = proto_version
|
||||
self.logger = logger
|
||||
self.timeout = timeout
|
||||
self.needs_server_headers = server_headers
|
||||
self.server_headers = {}
|
||||
self.headers = headers
|
||||
|
||||
async def connect_tcp(self, address, port):
|
||||
async def connect_sock():
|
||||
reader, writer = await asyncio.open_connection(address, port)
|
||||
return StreamConnection(reader, writer, self.timeout, self.max_chunk)
|
||||
return await asyncio.open_connection(address, port)
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
@@ -84,86 +40,27 @@ class AsyncClient(object):
|
||||
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
|
||||
sock.connect(os.path.basename(path))
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
reader, writer = await asyncio.open_unix_connection(sock=sock)
|
||||
return StreamConnection(reader, writer, self.timeout, self.max_chunk)
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
async def connect_websocket(self, uri):
|
||||
import websockets
|
||||
|
||||
try:
|
||||
version = tuple(
|
||||
int(v)
|
||||
for v in websockets.__version__.split(".")[
|
||||
0 : len(WEBSOCKETS_MIN_VERSION)
|
||||
]
|
||||
)
|
||||
except ValueError:
|
||||
raise ImportError(
|
||||
f"Unable to parse websockets version '{websockets.__version__}'"
|
||||
)
|
||||
|
||||
if version < WEBSOCKETS_MIN_VERSION:
|
||||
min_ver_str = ".".join(str(v) for v in WEBSOCKETS_MIN_VERSION)
|
||||
raise ImportError(
|
||||
f"Websockets version {websockets.__version__} is less than minimum required version {min_ver_str}"
|
||||
)
|
||||
|
||||
async def connect_sock():
|
||||
try:
|
||||
websocket = await websockets.connect(
|
||||
uri,
|
||||
ping_interval=None,
|
||||
open_timeout=self.timeout,
|
||||
)
|
||||
except asyncio.exceptions.TimeoutError:
|
||||
raise ConnectionError("Timeout while connecting to websocket")
|
||||
except (OSError, websockets.InvalidHandshake, websockets.InvalidURI) as exc:
|
||||
raise ConnectionError(f"Could not connect to websocket: {exc}") from exc
|
||||
return WebsocketConnection(websocket, self.timeout)
|
||||
os.chdir(cwd)
|
||||
return await asyncio.open_unix_connection(sock=sock)
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
async def setup_connection(self):
|
||||
# Send headers
|
||||
await self.socket.send("%s %s" % (self.proto_name, self.proto_version))
|
||||
await self.socket.send(
|
||||
"needs-headers: %s" % ("true" if self.needs_server_headers else "false")
|
||||
)
|
||||
for k, v in self.headers.items():
|
||||
await self.socket.send("%s: %s" % (k, v))
|
||||
|
||||
# End of headers
|
||||
await self.socket.send("")
|
||||
|
||||
self.server_headers = {}
|
||||
if self.needs_server_headers:
|
||||
while True:
|
||||
line = await self.socket.recv()
|
||||
if not line:
|
||||
# End headers
|
||||
break
|
||||
tag, value = line.split(":", 1)
|
||||
self.server_headers[tag.lower()] = value.strip()
|
||||
|
||||
async def get_header(self, tag, default):
|
||||
await self.connect()
|
||||
return self.server_headers.get(tag, default)
|
||||
s = '%s %s\n\n' % (self.proto_name, self.proto_version)
|
||||
self.writer.write(s.encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
|
||||
async def connect(self):
|
||||
if self.socket is None:
|
||||
self.socket = await self._connect_sock()
|
||||
if self.reader is None or self.writer is None:
|
||||
(self.reader, self.writer) = await self._connect_sock()
|
||||
await self.setup_connection()
|
||||
|
||||
async def disconnect(self):
|
||||
if self.socket is not None:
|
||||
await self.socket.close()
|
||||
self.socket = None
|
||||
|
||||
async def close(self):
|
||||
await self.disconnect()
|
||||
self.reader = None
|
||||
|
||||
if self.writer is not None:
|
||||
self.writer.close()
|
||||
self.writer = None
|
||||
|
||||
async def _send_wrapper(self, proc):
|
||||
count = 0
|
||||
@@ -174,7 +71,6 @@ class AsyncClient(object):
|
||||
except (
|
||||
OSError,
|
||||
ConnectionError,
|
||||
ConnectionClosedError,
|
||||
json.JSONDecodeError,
|
||||
UnicodeDecodeError,
|
||||
) as e:
|
||||
@@ -186,27 +82,49 @@ class AsyncClient(object):
|
||||
await self.close()
|
||||
count += 1
|
||||
|
||||
def check_invoke_error(self, msg):
|
||||
if isinstance(msg, dict) and "invoke-error" in msg:
|
||||
raise InvokeError(msg["invoke-error"]["message"])
|
||||
async def send_message(self, msg):
|
||||
async def get_line():
|
||||
try:
|
||||
line = await asyncio.wait_for(self.reader.readline(), self.timeout)
|
||||
except asyncio.TimeoutError:
|
||||
raise ConnectionError("Timed out waiting for server")
|
||||
|
||||
if not line:
|
||||
raise ConnectionError("Connection closed")
|
||||
|
||||
line = line.decode("utf-8")
|
||||
|
||||
if not line.endswith("\n"):
|
||||
raise ConnectionError("Bad message %r" % (line))
|
||||
|
||||
return line
|
||||
|
||||
async def invoke(self, msg):
|
||||
async def proc():
|
||||
await self.socket.send_message(msg)
|
||||
return await self.socket.recv_message()
|
||||
for c in chunkify(json.dumps(msg), self.max_chunk):
|
||||
self.writer.write(c.encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
|
||||
result = await self._send_wrapper(proc)
|
||||
self.check_invoke_error(result)
|
||||
return result
|
||||
l = await get_line()
|
||||
|
||||
m = json.loads(l)
|
||||
if m and "chunk-stream" in m:
|
||||
lines = []
|
||||
while True:
|
||||
l = (await get_line()).rstrip("\n")
|
||||
if not l:
|
||||
break
|
||||
lines.append(l)
|
||||
|
||||
m = json.loads("".join(lines))
|
||||
|
||||
return m
|
||||
|
||||
return await self._send_wrapper(proc)
|
||||
|
||||
async def ping(self):
|
||||
return await self.invoke({"ping": {}})
|
||||
|
||||
async def __aenter__(self):
|
||||
return self
|
||||
|
||||
async def __aexit__(self, exc_type, exc_value, traceback):
|
||||
await self.close()
|
||||
return await self.send_message(
|
||||
{'ping': {}}
|
||||
)
|
||||
|
||||
|
||||
class Client(object):
|
||||
@@ -224,7 +142,7 @@ class Client(object):
|
||||
# required (but harmless) with it.
|
||||
asyncio.set_event_loop(self.loop)
|
||||
|
||||
self._add_methods("connect_tcp", "ping")
|
||||
self._add_methods('connect_tcp', 'ping')
|
||||
|
||||
@abc.abstractmethod
|
||||
def _get_async_client(self):
|
||||
@@ -253,19 +171,8 @@ class Client(object):
|
||||
def max_chunk(self, value):
|
||||
self.client.max_chunk = value
|
||||
|
||||
def disconnect(self):
|
||||
self.loop.run_until_complete(self.client.close())
|
||||
|
||||
def close(self):
|
||||
if self.loop:
|
||||
self.loop.run_until_complete(self.client.close())
|
||||
self.loop.run_until_complete(self.client.close())
|
||||
if sys.version_info >= (3, 6):
|
||||
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
|
||||
self.loop.close()
|
||||
self.loop = None
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
self.close()
|
||||
return False
|
||||
self.loop.close()
|
||||
|
||||
@@ -1,146 +0,0 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import asyncio
|
||||
import itertools
|
||||
import json
|
||||
from datetime import datetime
|
||||
from .exceptions import ClientError, ConnectionClosedError
|
||||
|
||||
|
||||
# The Python async server defaults to a 64K receive buffer, so we hardcode our
|
||||
# maximum chunk size. It would be better if the client and server reported to
|
||||
# each other what the maximum chunk sizes were, but that will slow down the
|
||||
# connection setup with a round trip delay so I'd rather not do that unless it
|
||||
# is necessary
|
||||
DEFAULT_MAX_CHUNK = 32 * 1024
|
||||
|
||||
|
||||
def chunkify(msg, max_chunk):
|
||||
if len(msg) < max_chunk - 1:
|
||||
yield "".join((msg, "\n"))
|
||||
else:
|
||||
yield "".join((json.dumps({"chunk-stream": None}), "\n"))
|
||||
|
||||
args = [iter(msg)] * (max_chunk - 1)
|
||||
for m in map("".join, itertools.zip_longest(*args, fillvalue="")):
|
||||
yield "".join(itertools.chain(m, "\n"))
|
||||
yield "\n"
|
||||
|
||||
|
||||
def json_serialize(obj):
|
||||
if isinstance(obj, datetime):
|
||||
return obj.isoformat()
|
||||
raise TypeError("Type %s not serializeable" % type(obj))
|
||||
|
||||
|
||||
class StreamConnection(object):
|
||||
def __init__(self, reader, writer, timeout, max_chunk=DEFAULT_MAX_CHUNK):
|
||||
self.reader = reader
|
||||
self.writer = writer
|
||||
self.timeout = timeout
|
||||
self.max_chunk = max_chunk
|
||||
|
||||
@property
|
||||
def address(self):
|
||||
return self.writer.get_extra_info("peername")
|
||||
|
||||
async def send_message(self, msg):
|
||||
for c in chunkify(json.dumps(msg, default=json_serialize), self.max_chunk):
|
||||
self.writer.write(c.encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
|
||||
async def recv_message(self):
|
||||
l = await self.recv()
|
||||
|
||||
m = json.loads(l)
|
||||
if not m:
|
||||
return m
|
||||
|
||||
if "chunk-stream" in m:
|
||||
lines = []
|
||||
while True:
|
||||
l = await self.recv()
|
||||
if not l:
|
||||
break
|
||||
lines.append(l)
|
||||
|
||||
m = json.loads("".join(lines))
|
||||
|
||||
return m
|
||||
|
||||
async def send(self, msg):
|
||||
self.writer.write(("%s\n" % msg).encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
|
||||
async def recv(self):
|
||||
if self.timeout < 0:
|
||||
line = await self.reader.readline()
|
||||
else:
|
||||
try:
|
||||
line = await asyncio.wait_for(self.reader.readline(), self.timeout)
|
||||
except asyncio.TimeoutError:
|
||||
raise ConnectionError("Timed out waiting for data")
|
||||
|
||||
if not line:
|
||||
raise ConnectionClosedError("Connection closed")
|
||||
|
||||
line = line.decode("utf-8")
|
||||
|
||||
if not line.endswith("\n"):
|
||||
raise ConnectionError("Bad message %r" % (line))
|
||||
|
||||
return line.rstrip()
|
||||
|
||||
async def close(self):
|
||||
self.reader = None
|
||||
if self.writer is not None:
|
||||
self.writer.close()
|
||||
self.writer = None
|
||||
|
||||
|
||||
class WebsocketConnection(object):
|
||||
def __init__(self, socket, timeout):
|
||||
self.socket = socket
|
||||
self.timeout = timeout
|
||||
|
||||
@property
|
||||
def address(self):
|
||||
return ":".join(str(s) for s in self.socket.remote_address)
|
||||
|
||||
async def send_message(self, msg):
|
||||
await self.send(json.dumps(msg, default=json_serialize))
|
||||
|
||||
async def recv_message(self):
|
||||
m = await self.recv()
|
||||
return json.loads(m)
|
||||
|
||||
async def send(self, msg):
|
||||
import websockets.exceptions
|
||||
|
||||
try:
|
||||
await self.socket.send(msg)
|
||||
except websockets.exceptions.ConnectionClosed:
|
||||
raise ConnectionClosedError("Connection closed")
|
||||
|
||||
async def recv(self):
|
||||
import websockets.exceptions
|
||||
|
||||
try:
|
||||
if self.timeout < 0:
|
||||
return await self.socket.recv()
|
||||
|
||||
try:
|
||||
return await asyncio.wait_for(self.socket.recv(), self.timeout)
|
||||
except asyncio.TimeoutError:
|
||||
raise ConnectionError("Timed out waiting for data")
|
||||
except websockets.exceptions.ConnectionClosed:
|
||||
raise ConnectionClosedError("Connection closed")
|
||||
|
||||
async def close(self):
|
||||
if self.socket is not None:
|
||||
await self.socket.close()
|
||||
self.socket = None
|
||||
@@ -1,21 +0,0 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
class ClientError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class InvokeError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ServerError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ConnectionClosedError(Exception):
|
||||
pass
|
||||
@@ -12,353 +12,241 @@ import signal
|
||||
import socket
|
||||
import sys
|
||||
import multiprocessing
|
||||
import logging
|
||||
from .connection import StreamConnection, WebsocketConnection
|
||||
from .exceptions import ClientError, ServerError, ConnectionClosedError, InvokeError
|
||||
from . import chunkify, DEFAULT_MAX_CHUNK
|
||||
|
||||
|
||||
class ClientLoggerAdapter(logging.LoggerAdapter):
|
||||
def process(self, msg, kwargs):
|
||||
return f"[Client {self.extra['address']}] {msg}", kwargs
|
||||
class ClientError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ServerError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class AsyncServerConnection(object):
|
||||
# If a handler returns this object (e.g. `return self.NO_RESPONSE`), no
|
||||
# return message will be automatically be sent back to the client
|
||||
NO_RESPONSE = object()
|
||||
|
||||
def __init__(self, socket, proto_name, logger):
|
||||
self.socket = socket
|
||||
def __init__(self, reader, writer, proto_name, logger):
|
||||
self.reader = reader
|
||||
self.writer = writer
|
||||
self.proto_name = proto_name
|
||||
self.max_chunk = DEFAULT_MAX_CHUNK
|
||||
self.handlers = {
|
||||
"ping": self.handle_ping,
|
||||
'chunk-stream': self.handle_chunk,
|
||||
'ping': self.handle_ping,
|
||||
}
|
||||
self.logger = ClientLoggerAdapter(
|
||||
logger,
|
||||
{
|
||||
"address": socket.address,
|
||||
},
|
||||
)
|
||||
self.client_headers = {}
|
||||
|
||||
async def close(self):
|
||||
await self.socket.close()
|
||||
|
||||
async def handle_headers(self, headers):
|
||||
return {}
|
||||
self.logger = logger
|
||||
|
||||
async def process_requests(self):
|
||||
try:
|
||||
self.logger.info("Client %r connected" % (self.socket.address,))
|
||||
self.addr = self.writer.get_extra_info('peername')
|
||||
self.logger.debug('Client %r connected' % (self.addr,))
|
||||
|
||||
# Read protocol and version
|
||||
client_protocol = await self.socket.recv()
|
||||
client_protocol = await self.reader.readline()
|
||||
if not client_protocol:
|
||||
return
|
||||
|
||||
(client_proto_name, client_proto_version) = client_protocol.split()
|
||||
(client_proto_name, client_proto_version) = client_protocol.decode('utf-8').rstrip().split()
|
||||
if client_proto_name != self.proto_name:
|
||||
self.logger.debug("Rejecting invalid protocol %s" % (self.proto_name))
|
||||
self.logger.debug('Rejecting invalid protocol %s' % (self.proto_name))
|
||||
return
|
||||
|
||||
self.proto_version = tuple(int(v) for v in client_proto_version.split("."))
|
||||
self.proto_version = tuple(int(v) for v in client_proto_version.split('.'))
|
||||
if not self.validate_proto_version():
|
||||
self.logger.debug(
|
||||
"Rejecting invalid protocol version %s" % (client_proto_version)
|
||||
)
|
||||
self.logger.debug('Rejecting invalid protocol version %s' % (client_proto_version))
|
||||
return
|
||||
|
||||
# Read headers
|
||||
self.client_headers = {}
|
||||
# Read headers. Currently, no headers are implemented, so look for
|
||||
# an empty line to signal the end of the headers
|
||||
while True:
|
||||
header = await self.socket.recv()
|
||||
if not header:
|
||||
# Empty line. End of headers
|
||||
break
|
||||
tag, value = header.split(":", 1)
|
||||
self.client_headers[tag.lower()] = value.strip()
|
||||
line = await self.reader.readline()
|
||||
if not line:
|
||||
return
|
||||
|
||||
if self.client_headers.get("needs-headers", "false") == "true":
|
||||
for k, v in (await self.handle_headers(self.client_headers)).items():
|
||||
await self.socket.send("%s: %s" % (k, v))
|
||||
await self.socket.send("")
|
||||
line = line.decode('utf-8').rstrip()
|
||||
if not line:
|
||||
break
|
||||
|
||||
# Handle messages
|
||||
while True:
|
||||
d = await self.socket.recv_message()
|
||||
d = await self.read_message()
|
||||
if d is None:
|
||||
break
|
||||
try:
|
||||
response = await self.dispatch_message(d)
|
||||
except InvokeError as e:
|
||||
await self.socket.send_message(
|
||||
{"invoke-error": {"message": str(e)}}
|
||||
)
|
||||
break
|
||||
|
||||
if response is not self.NO_RESPONSE:
|
||||
await self.socket.send_message(response)
|
||||
|
||||
except ConnectionClosedError as e:
|
||||
self.logger.info(str(e))
|
||||
except (ClientError, ConnectionError) as e:
|
||||
await self.dispatch_message(d)
|
||||
await self.writer.drain()
|
||||
except ClientError as e:
|
||||
self.logger.error(str(e))
|
||||
finally:
|
||||
await self.close()
|
||||
self.writer.close()
|
||||
|
||||
async def dispatch_message(self, msg):
|
||||
for k in self.handlers.keys():
|
||||
if k in msg:
|
||||
self.logger.debug("Handling %s" % k)
|
||||
return await self.handlers[k](msg[k])
|
||||
self.logger.debug('Handling %s' % k)
|
||||
await self.handlers[k](msg[k])
|
||||
return
|
||||
|
||||
raise ClientError("Unrecognized command %r" % msg)
|
||||
|
||||
async def handle_ping(self, request):
|
||||
return {"alive": True}
|
||||
def write_message(self, msg):
|
||||
for c in chunkify(json.dumps(msg), self.max_chunk):
|
||||
self.writer.write(c.encode('utf-8'))
|
||||
|
||||
async def read_message(self):
|
||||
l = await self.reader.readline()
|
||||
if not l:
|
||||
return None
|
||||
|
||||
class StreamServer(object):
|
||||
def __init__(self, handler, logger):
|
||||
self.handler = handler
|
||||
self.logger = logger
|
||||
self.closed = False
|
||||
|
||||
async def handle_stream_client(self, reader, writer):
|
||||
# writer.transport.set_write_buffer_limits(0)
|
||||
socket = StreamConnection(reader, writer, -1)
|
||||
if self.closed:
|
||||
await socket.close()
|
||||
return
|
||||
|
||||
await self.handler(socket)
|
||||
|
||||
async def stop(self):
|
||||
self.closed = True
|
||||
|
||||
|
||||
class TCPStreamServer(StreamServer):
|
||||
def __init__(self, host, port, handler, logger, *, reuseport=False):
|
||||
super().__init__(handler, logger)
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.reuseport = reuseport
|
||||
|
||||
def start(self, loop):
|
||||
self.server = loop.run_until_complete(
|
||||
asyncio.start_server(
|
||||
self.handle_stream_client,
|
||||
self.host,
|
||||
self.port,
|
||||
reuse_port=self.reuseport,
|
||||
)
|
||||
)
|
||||
|
||||
for s in self.server.sockets:
|
||||
self.logger.debug("Listening on %r" % (s.getsockname(),))
|
||||
# Newer python does this automatically. Do it manually here for
|
||||
# maximum compatibility
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_QUICKACK, 1)
|
||||
|
||||
# Enable keep alives. This prevents broken client connections
|
||||
# from persisting on the server for long periods of time.
|
||||
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 30)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 4)
|
||||
|
||||
name = self.server.sockets[0].getsockname()
|
||||
if self.server.sockets[0].family == socket.AF_INET6:
|
||||
self.address = "[%s]:%d" % (name[0], name[1])
|
||||
else:
|
||||
self.address = "%s:%d" % (name[0], name[1])
|
||||
|
||||
return [self.server.wait_closed()]
|
||||
|
||||
async def stop(self):
|
||||
await super().stop()
|
||||
self.server.close()
|
||||
|
||||
def cleanup(self):
|
||||
pass
|
||||
|
||||
|
||||
class UnixStreamServer(StreamServer):
|
||||
def __init__(self, path, handler, logger):
|
||||
super().__init__(handler, logger)
|
||||
self.path = path
|
||||
|
||||
def start(self, loop):
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
# Work around path length limits in AF_UNIX
|
||||
os.chdir(os.path.dirname(self.path))
|
||||
self.server = loop.run_until_complete(
|
||||
asyncio.start_unix_server(
|
||||
self.handle_stream_client, os.path.basename(self.path)
|
||||
)
|
||||
)
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
message = l.decode('utf-8')
|
||||
|
||||
self.logger.debug("Listening on %r" % self.path)
|
||||
self.address = "unix://%s" % os.path.abspath(self.path)
|
||||
return [self.server.wait_closed()]
|
||||
if not message.endswith('\n'):
|
||||
return None
|
||||
|
||||
async def stop(self):
|
||||
await super().stop()
|
||||
self.server.close()
|
||||
return json.loads(message)
|
||||
except (json.JSONDecodeError, UnicodeDecodeError) as e:
|
||||
self.logger.error('Bad message from client: %r' % message)
|
||||
raise e
|
||||
|
||||
def cleanup(self):
|
||||
os.unlink(self.path)
|
||||
async def handle_chunk(self, request):
|
||||
lines = []
|
||||
try:
|
||||
while True:
|
||||
l = await self.reader.readline()
|
||||
l = l.rstrip(b"\n").decode("utf-8")
|
||||
if not l:
|
||||
break
|
||||
lines.append(l)
|
||||
|
||||
msg = json.loads(''.join(lines))
|
||||
except (json.JSONDecodeError, UnicodeDecodeError) as e:
|
||||
self.logger.error('Bad message from client: %r' % lines)
|
||||
raise e
|
||||
|
||||
class WebsocketsServer(object):
|
||||
def __init__(self, host, port, handler, logger, *, reuseport=False):
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.handler = handler
|
||||
self.logger = logger
|
||||
self.reuseport = reuseport
|
||||
if 'chunk-stream' in msg:
|
||||
raise ClientError("Nested chunks are not allowed")
|
||||
|
||||
def start(self, loop):
|
||||
import websockets.server
|
||||
await self.dispatch_message(msg)
|
||||
|
||||
self.server = loop.run_until_complete(
|
||||
websockets.server.serve(
|
||||
self.client_handler,
|
||||
self.host,
|
||||
self.port,
|
||||
ping_interval=None,
|
||||
reuse_port=self.reuseport,
|
||||
)
|
||||
)
|
||||
|
||||
for s in self.server.sockets:
|
||||
self.logger.debug("Listening on %r" % (s.getsockname(),))
|
||||
|
||||
# Enable keep alives. This prevents broken client connections
|
||||
# from persisting on the server for long periods of time.
|
||||
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 30)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 4)
|
||||
|
||||
name = self.server.sockets[0].getsockname()
|
||||
if self.server.sockets[0].family == socket.AF_INET6:
|
||||
self.address = "ws://[%s]:%d" % (name[0], name[1])
|
||||
else:
|
||||
self.address = "ws://%s:%d" % (name[0], name[1])
|
||||
|
||||
return [self.server.wait_closed()]
|
||||
|
||||
async def stop(self):
|
||||
self.server.close()
|
||||
|
||||
def cleanup(self):
|
||||
pass
|
||||
|
||||
async def client_handler(self, websocket):
|
||||
socket = WebsocketConnection(websocket, -1)
|
||||
await self.handler(socket)
|
||||
async def handle_ping(self, request):
|
||||
response = {'alive': True}
|
||||
self.write_message(response)
|
||||
|
||||
|
||||
class AsyncServer(object):
|
||||
def __init__(self, logger):
|
||||
self._cleanup_socket = None
|
||||
self.logger = logger
|
||||
self.start = None
|
||||
self.address = None
|
||||
self.loop = None
|
||||
self.run_tasks = []
|
||||
|
||||
def start_tcp_server(self, host, port, *, reuseport=False):
|
||||
self.server = TCPStreamServer(
|
||||
host,
|
||||
port,
|
||||
self._client_handler,
|
||||
self.logger,
|
||||
reuseport=reuseport,
|
||||
)
|
||||
def start_tcp_server(self, host, port):
|
||||
def start_tcp():
|
||||
self.server = self.loop.run_until_complete(
|
||||
asyncio.start_server(self.handle_client, host, port)
|
||||
)
|
||||
|
||||
for s in self.server.sockets:
|
||||
self.logger.debug('Listening on %r' % (s.getsockname(),))
|
||||
# Newer python does this automatically. Do it manually here for
|
||||
# maximum compatibility
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_QUICKACK, 1)
|
||||
|
||||
# Enable keep alives. This prevents broken client connections
|
||||
# from persisting on the server for long periods of time.
|
||||
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 30)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15)
|
||||
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 4)
|
||||
|
||||
name = self.server.sockets[0].getsockname()
|
||||
if self.server.sockets[0].family == socket.AF_INET6:
|
||||
self.address = "[%s]:%d" % (name[0], name[1])
|
||||
else:
|
||||
self.address = "%s:%d" % (name[0], name[1])
|
||||
|
||||
self.start = start_tcp
|
||||
|
||||
def start_unix_server(self, path):
|
||||
self.server = UnixStreamServer(path, self._client_handler, self.logger)
|
||||
def cleanup():
|
||||
os.unlink(path)
|
||||
|
||||
def start_websocket_server(self, host, port, reuseport=False):
|
||||
self.server = WebsocketsServer(
|
||||
host,
|
||||
port,
|
||||
self._client_handler,
|
||||
self.logger,
|
||||
reuseport=reuseport,
|
||||
)
|
||||
def start_unix():
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
# Work around path length limits in AF_UNIX
|
||||
os.chdir(os.path.dirname(path))
|
||||
self.server = self.loop.run_until_complete(
|
||||
asyncio.start_unix_server(self.handle_client, os.path.basename(path))
|
||||
)
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
|
||||
async def _client_handler(self, socket):
|
||||
address = socket.address
|
||||
self.logger.debug('Listening on %r' % path)
|
||||
|
||||
self._cleanup_socket = cleanup
|
||||
self.address = "unix://%s" % os.path.abspath(path)
|
||||
|
||||
self.start = start_unix
|
||||
|
||||
@abc.abstractmethod
|
||||
def accept_client(self, reader, writer):
|
||||
pass
|
||||
|
||||
async def handle_client(self, reader, writer):
|
||||
# writer.transport.set_write_buffer_limits(0)
|
||||
try:
|
||||
client = self.accept_client(socket)
|
||||
client = self.accept_client(reader, writer)
|
||||
await client.process_requests()
|
||||
except Exception as e:
|
||||
import traceback
|
||||
|
||||
self.logger.error(
|
||||
"Error from client %s: %s" % (address, str(e)), exc_info=True
|
||||
)
|
||||
self.logger.error('Error from client: %s' % str(e), exc_info=True)
|
||||
traceback.print_exc()
|
||||
finally:
|
||||
self.logger.debug("Client %s disconnected", address)
|
||||
await socket.close()
|
||||
writer.close()
|
||||
self.logger.debug('Client disconnected')
|
||||
|
||||
@abc.abstractmethod
|
||||
def accept_client(self, socket):
|
||||
pass
|
||||
|
||||
async def stop(self):
|
||||
self.logger.debug("Stopping server")
|
||||
await self.server.stop()
|
||||
|
||||
def start(self):
|
||||
tasks = self.server.start(self.loop)
|
||||
self.address = self.server.address
|
||||
return tasks
|
||||
def run_loop_forever(self):
|
||||
try:
|
||||
self.loop.run_forever()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
def signal_handler(self):
|
||||
self.logger.debug("Got exit signal")
|
||||
self.loop.create_task(self.stop())
|
||||
self.loop.stop()
|
||||
|
||||
def _serve_forever(self, tasks):
|
||||
def _serve_forever(self):
|
||||
try:
|
||||
self.loop.add_signal_handler(signal.SIGTERM, self.signal_handler)
|
||||
self.loop.add_signal_handler(signal.SIGINT, self.signal_handler)
|
||||
self.loop.add_signal_handler(signal.SIGQUIT, self.signal_handler)
|
||||
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGTERM])
|
||||
|
||||
self.loop.run_until_complete(asyncio.gather(*tasks))
|
||||
self.run_loop_forever()
|
||||
self.server.close()
|
||||
|
||||
self.logger.debug("Server shutting down")
|
||||
self.loop.run_until_complete(self.server.wait_closed())
|
||||
self.logger.debug('Server shutting down')
|
||||
finally:
|
||||
self.server.cleanup()
|
||||
if self._cleanup_socket is not None:
|
||||
self._cleanup_socket()
|
||||
|
||||
def serve_forever(self):
|
||||
"""
|
||||
Serve requests in the current process
|
||||
"""
|
||||
self._create_loop()
|
||||
tasks = self.start()
|
||||
self._serve_forever(tasks)
|
||||
self.loop.close()
|
||||
|
||||
def _create_loop(self):
|
||||
# Create loop and override any loop that may have existed in
|
||||
# a parent process. It is possible that the usecases of
|
||||
# serve_forever might be constrained enough to allow using
|
||||
# get_event_loop here, but better safe than sorry for now.
|
||||
self.loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(self.loop)
|
||||
self.start()
|
||||
self._serve_forever()
|
||||
|
||||
def serve_as_process(self, *, prefunc=None, args=(), log_level=None):
|
||||
def serve_as_process(self, *, prefunc=None, args=()):
|
||||
"""
|
||||
Serve requests in a child process
|
||||
"""
|
||||
|
||||
def run(queue):
|
||||
# Create loop and override any loop that may have existed
|
||||
# in a parent process. Without doing this and instead
|
||||
@@ -371,24 +259,21 @@ class AsyncServer(object):
|
||||
# more general, though, as any potential use of asyncio in
|
||||
# Cooker could create a loop that needs to replaced in this
|
||||
# new process.
|
||||
self._create_loop()
|
||||
self.loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(self.loop)
|
||||
try:
|
||||
self.address = None
|
||||
tasks = self.start()
|
||||
self.start()
|
||||
finally:
|
||||
# Always put the server address to wake up the parent task
|
||||
queue.put(self.address)
|
||||
queue.close()
|
||||
|
||||
if prefunc is not None:
|
||||
prefunc(self, *args)
|
||||
|
||||
if log_level is not None:
|
||||
self.logger.setLevel(log_level)
|
||||
self._serve_forever()
|
||||
|
||||
self._serve_forever(tasks)
|
||||
|
||||
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
|
||||
if sys.version_info >= (3, 6):
|
||||
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
|
||||
self.loop.close()
|
||||
|
||||
queue = multiprocessing.Queue()
|
||||
|
||||
@@ -197,8 +197,6 @@ def exec_func(func, d, dirs = None):
|
||||
for cdir in d.expand(cleandirs).split():
|
||||
bb.utils.remove(cdir, True)
|
||||
bb.utils.mkdirhier(cdir)
|
||||
if cdir == oldcwd:
|
||||
os.chdir(cdir)
|
||||
|
||||
if flags and dirs is None:
|
||||
dirs = flags.get('dirs')
|
||||
@@ -397,7 +395,7 @@ def create_progress_handler(func, progress, logfile, d):
|
||||
# Use specified regex
|
||||
return bb.progress.OutOfProgressHandler(d, regex=progress.split(':', 1)[1], outfile=logfile)
|
||||
elif progress.startswith("custom:"):
|
||||
# Use a custom progress handler that was injected via other means
|
||||
# Use a custom progress handler that was injected via OE_EXTRA_IMPORTS or __builtins__
|
||||
import functools
|
||||
from types import ModuleType
|
||||
|
||||
@@ -743,7 +741,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
|
||||
if quieterr:
|
||||
if not handled:
|
||||
logger.warning(str(exc))
|
||||
logger.warning(repr(exc))
|
||||
event.fire(TaskFailedSilent(task, fn, logfn, localdata), localdata)
|
||||
else:
|
||||
errprinted = errchk.triggered
|
||||
@@ -752,7 +750,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
if verboseStdoutLogging or handled:
|
||||
errprinted = True
|
||||
if not handled:
|
||||
logger.error(str(exc))
|
||||
logger.error(repr(exc))
|
||||
event.fire(TaskFailed(task, fn, logfn, localdata, errprinted), localdata)
|
||||
return 1
|
||||
|
||||
@@ -932,13 +930,9 @@ def add_tasks(tasklist, d):
# don't assume holding a reference
d.setVar('_task_deps', task_deps)

def ensure_task_prefix(name):
if name[:3] != "do_":
name = "do_" + name
return name

def addtask(task, before, after, d):
task = ensure_task_prefix(task)
if task[:3] != "do_":
task = "do_" + task

d.setVarFlag(task, "task", 1)
bbtasks = d.getVar('__BBTASKS', False) or []
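
The helper introduced above simply normalizes task names to carry the "do_" prefix exactly once; a minimal sketch of the behaviour that addtask and deltask now rely on:

def ensure_task_prefix(name):
    if name[:3] != "do_":
        name = "do_" + name
    return name

assert ensure_task_prefix("compile") == "do_compile"
assert ensure_task_prefix("do_compile") == "do_compile"
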
@@ -950,20 +944,19 @@ def addtask(task, before, after, d):
|
||||
if after is not None:
|
||||
# set up deps for function
|
||||
for entry in after.split():
|
||||
entry = ensure_task_prefix(entry)
|
||||
if entry not in existing:
|
||||
existing.append(entry)
|
||||
d.setVarFlag(task, "deps", existing)
|
||||
if before is not None:
|
||||
# set up things that depend on this func
|
||||
for entry in before.split():
|
||||
entry = ensure_task_prefix(entry)
|
||||
existing = d.getVarFlag(entry, "deps", False) or []
|
||||
if task not in existing:
|
||||
d.setVarFlag(entry, "deps", [task] + existing)
|
||||
|
||||
def deltask(task, d):
|
||||
task = ensure_task_prefix(task)
|
||||
if task[:3] != "do_":
|
||||
task = "do_" + task
|
||||
|
||||
bbtasks = d.getVar('__BBTASKS', False) or []
|
||||
if task in bbtasks:
|
||||
@@ -1028,9 +1021,3 @@ def tasksbetween(task_start, task_end, d):
|
||||
chain.pop()
|
||||
follow_chain(task_start, task_end)
|
||||
return outtasks
|
||||
|
||||
def listtasks(d):
|
||||
"""
|
||||
Return the list of tasks in the current recipe.
|
||||
"""
|
||||
return tuple(d.getVar('__BBTASKS', False) or ())
|
||||
|
||||
@@ -28,7 +28,7 @@ import shutil
|
||||
|
||||
logger = logging.getLogger("BitBake.Cache")
|
||||
|
||||
__cache_version__ = "156"
|
||||
__cache_version__ = "155"
|
||||
|
||||
def getCacheFile(path, filename, mc, data_hash):
|
||||
mcspec = ''
|
||||
@@ -344,7 +344,9 @@ def virtualfn2realfn(virtualfn):
"""
mc = ""
if virtualfn.startswith('mc:') and virtualfn.count(':') >= 2:
(_, mc, virtualfn) = virtualfn.split(':', 2)
elems = virtualfn.split(':')
mc = elems[1]
virtualfn = ":".join(elems[2:])

fn = virtualfn
cls = ""
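
A short sketch of the multiconfig prefix handling shown above: "mc:<name>:<path>" splits into the multiconfig name and the real recipe path (the recipe path below is illustrative):

virtualfn = "mc:musl:/srv/poky/meta/recipes-core/busybox/busybox_1.36.1.bb"

mc = ""
if virtualfn.startswith('mc:') and virtualfn.count(':') >= 2:
    (_, mc, virtualfn) = virtualfn.split(':', 2)

assert mc == "musl"
assert virtualfn == "/srv/poky/meta/recipes-core/busybox/busybox_1.36.1.bb"
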
@@ -367,7 +369,7 @@ def realfn2virtual(realfn, cls, mc):
|
||||
|
||||
def variant2virtual(realfn, variant):
|
||||
"""
|
||||
Convert a real filename + a variant to a virtual filename
|
||||
Convert a real filename + the associated subclass keyword to a virtual filename
|
||||
"""
|
||||
if variant == "":
|
||||
return realfn
|
||||
@@ -395,7 +397,7 @@ class Cache(object):
|
||||
# It will be used later for deciding whether we
|
||||
# need extra cache file dump/load support
|
||||
self.mc = mc
|
||||
self.logger = PrefixLoggerAdapter("Cache: %s: " % (mc if mc else ''), logger)
|
||||
self.logger = PrefixLoggerAdapter("Cache: %s: " % (mc if mc else "default"), logger)
|
||||
self.caches_array = caches_array
|
||||
self.cachedir = self.data.getVar("CACHE")
|
||||
self.clean = set()
|
||||
@@ -441,7 +443,7 @@ class Cache(object):
|
||||
else:
|
||||
symlink = os.path.join(self.cachedir, "bb_cache.dat")
|
||||
|
||||
if os.path.exists(symlink) or os.path.islink(symlink):
|
||||
if os.path.exists(symlink):
|
||||
bb.utils.remove(symlink)
|
||||
try:
|
||||
os.symlink(os.path.basename(self.cachefile), symlink)
|
||||
@@ -512,11 +514,11 @@ class Cache(object):
|
||||
|
||||
return len(self.depends_cache)
|
||||
|
||||
def parse(self, filename, appends, layername):
|
||||
def parse(self, filename, appends):
|
||||
"""Parse the specified filename, returning the recipe information"""
|
||||
self.logger.debug("Parsing %s", filename)
|
||||
infos = []
|
||||
datastores = self.databuilder.parseRecipeVariants(filename, appends, mc=self.mc, layername=layername)
|
||||
datastores = self.databuilder.parseRecipeVariants(filename, appends, mc=self.mc)
|
||||
depends = []
|
||||
variants = []
|
||||
# Process the "real" fn last so we can store variants list
|
||||
@@ -779,6 +781,25 @@ class MulticonfigCache(Mapping):
|
||||
for k in self.__caches:
|
||||
yield k
|
||||
|
||||
def init(cooker):
|
||||
"""
|
||||
The Objective: Cache the minimum amount of data possible yet get to the
|
||||
stage of building packages (i.e. tryBuild) without reparsing any .bb files.
|
||||
|
||||
To do this, we intercept getVar calls and only cache the variables we see
|
||||
being accessed. We rely on the cache getVar calls being made for all
|
||||
variables bitbake might need to use to reach this stage. For each cached
|
||||
file we need to track:
|
||||
|
||||
* Its mtime
|
||||
* The mtimes of all its dependencies
|
||||
* Whether it caused a parse.SkipRecipe exception
|
||||
|
||||
Files causing parsing errors are evicted from the cache.
|
||||
|
||||
"""
|
||||
return Cache(cooker.configuration.data, cooker.configuration.data_hash)
|
||||
|
||||
|
||||
class CacheData(object):
|
||||
"""
|
||||
@@ -847,16 +868,6 @@ class MultiProcessCache(object):
|
||||
data = [{}]
|
||||
return data
|
||||
|
||||
def clear_cache(self):
|
||||
if not self.cachefile:
|
||||
bb.fatal("Can't clear invalid cachefile")
|
||||
|
||||
self.cachedata = self.create_cachedata()
|
||||
self.cachedata_extras = self.create_cachedata()
|
||||
with bb.utils.fileslocked([self.cachefile + ".lock"]):
|
||||
bb.utils.remove(self.cachefile)
|
||||
bb.utils.remove(self.cachefile + "-*")
|
||||
|
||||
def save_extras(self):
|
||||
if not self.cachefile:
|
||||
return
|
||||
|
||||
@@ -142,28 +142,3 @@ class FileChecksumCache(MultiProcessCache):
|
||||
|
||||
checksums.sort(key=operator.itemgetter(1))
|
||||
return checksums
|
||||
|
||||
class RevisionsCache(MultiProcessCache):
|
||||
cache_file_name = "local_srcrevisions.dat"
|
||||
CACHE_VERSION = 1
|
||||
|
||||
def __init__(self):
|
||||
MultiProcessCache.__init__(self)
|
||||
|
||||
def get_revs(self):
|
||||
return self.cachedata[0]
|
||||
|
||||
def get_rev(self, k):
|
||||
if k in self.cachedata_extras[0]:
|
||||
return self.cachedata_extras[0][k]
|
||||
if k in self.cachedata[0]:
|
||||
return self.cachedata[0][k]
|
||||
return None
|
||||
|
||||
def set_rev(self, k, v):
|
||||
self.cachedata[0][k] = v
|
||||
self.cachedata_extras[0][k] = v
|
||||
|
||||
def merge_data(self, source, dest):
|
||||
for h in source[0]:
|
||||
dest[0][h] = source[0][h]
|
||||
|
||||
@@ -62,7 +62,6 @@ def check_indent(codestr):
|
||||
modulecode_deps = {}
|
||||
|
||||
def add_module_functions(fn, functions, namespace):
|
||||
import os
|
||||
fstat = os.stat(fn)
|
||||
fixedhash = fn + ":" + str(fstat.st_size) + ":" + str(fstat.st_mtime)
|
||||
for f in functions:
|
||||
@@ -72,20 +71,7 @@ def add_module_functions(fn, functions, namespace):
|
||||
parser.parse_python(None, filename=fn, lineno=1, fixedhash=fixedhash+f)
|
||||
#bb.warn("Cached %s" % f)
|
||||
except KeyError:
|
||||
try:
|
||||
targetfn = inspect.getsourcefile(functions[f])
|
||||
except TypeError:
|
||||
# Builtin
|
||||
continue
|
||||
if fn != targetfn:
|
||||
# Skip references to other modules outside this file
|
||||
#bb.warn("Skipping %s" % name)
|
||||
continue
|
||||
try:
|
||||
lines, lineno = inspect.getsourcelines(functions[f])
|
||||
except TypeError:
|
||||
# Builtin
|
||||
continue
|
||||
lines, lineno = inspect.getsourcelines(functions[f])
|
||||
src = "".join(lines)
|
||||
parser.parse_python(src, filename=fn, lineno=lineno, fixedhash=fixedhash+f)
|
||||
#bb.warn("Not cached %s" % f)
|
||||
@@ -95,17 +81,14 @@ def add_module_functions(fn, functions, namespace):
|
||||
if e in functions:
|
||||
execs.remove(e)
|
||||
execs.add(namespace + "." + e)
|
||||
visitorcode = None
|
||||
if hasattr(functions[f], 'visitorcode'):
|
||||
visitorcode = getattr(functions[f], "visitorcode")
|
||||
modulecode_deps[name] = [parser.references.copy(), execs, parser.var_execs.copy(), parser.contains.copy(), parser.extra, visitorcode]
|
||||
#bb.warn("%s: %s\nRefs:%s Execs: %s %s %s" % (name, fn, parser.references, parser.execs, parser.var_execs, parser.contains))
|
||||
modulecode_deps[name] = [parser.references.copy(), execs, parser.var_execs.copy(), parser.contains.copy()]
|
||||
#bb.warn("%s: %s\nRefs:%s Execs: %s %s %s" % (name, src, parser.references, parser.execs, parser.var_execs, parser.contains))
|
||||
|
||||
def update_module_dependencies(d):
|
||||
for mod in modulecode_deps:
|
||||
excludes = set((d.getVarFlag(mod, "vardepsexclude") or "").split())
|
||||
if excludes:
|
||||
modulecode_deps[mod] = [modulecode_deps[mod][0] - excludes, modulecode_deps[mod][1] - excludes, modulecode_deps[mod][2] - excludes, modulecode_deps[mod][3], modulecode_deps[mod][4], modulecode_deps[mod][5]]
|
||||
modulecode_deps[mod] = [modulecode_deps[mod][0] - excludes, modulecode_deps[mod][1] - excludes, modulecode_deps[mod][2] - excludes, modulecode_deps[mod][3]]
|
||||
|
||||
# A custom getstate/setstate using tuples is actually worth 15% cachesize by
|
||||
# avoiding duplication of the attribute names!
|
||||
@@ -128,22 +111,21 @@ class SetCache(object):
|
||||
codecache = SetCache()
|
||||
|
||||
class pythonCacheLine(object):
|
||||
def __init__(self, refs, execs, contains, extra):
|
||||
def __init__(self, refs, execs, contains):
|
||||
self.refs = codecache.internSet(refs)
|
||||
self.execs = codecache.internSet(execs)
|
||||
self.contains = {}
|
||||
for c in contains:
|
||||
self.contains[c] = codecache.internSet(contains[c])
|
||||
self.extra = extra
|
||||
|
||||
def __getstate__(self):
|
||||
return (self.refs, self.execs, self.contains, self.extra)
|
||||
return (self.refs, self.execs, self.contains)
|
||||
|
||||
def __setstate__(self, state):
|
||||
(refs, execs, contains, extra) = state
|
||||
self.__init__(refs, execs, contains, extra)
|
||||
(refs, execs, contains) = state
|
||||
self.__init__(refs, execs, contains)
|
||||
def __hash__(self):
|
||||
l = (hash(self.refs), hash(self.execs), hash(self.extra))
|
||||
l = (hash(self.refs), hash(self.execs))
|
||||
for c in sorted(self.contains.keys()):
|
||||
l = l + (c, hash(self.contains[c]))
|
||||
return hash(l)
|
||||
@@ -172,7 +154,7 @@ class CodeParserCache(MultiProcessCache):
|
||||
# so that an existing cache gets invalidated. Additionally you'll need
|
||||
# to increment __cache_version__ in cache.py in order to ensure that old
|
||||
# recipe caches don't trigger "Taskhash mismatch" errors.
|
||||
CACHE_VERSION = 14
|
||||
CACHE_VERSION = 11
|
||||
|
||||
def __init__(self):
|
||||
MultiProcessCache.__init__(self)
|
||||
@@ -186,8 +168,8 @@ class CodeParserCache(MultiProcessCache):
|
||||
self.pythoncachelines = {}
|
||||
self.shellcachelines = {}
|
||||
|
||||
def newPythonCacheLine(self, refs, execs, contains, extra):
|
||||
cacheline = pythonCacheLine(refs, execs, contains, extra)
|
||||
def newPythonCacheLine(self, refs, execs, contains):
|
||||
cacheline = pythonCacheLine(refs, execs, contains)
|
||||
h = hash(cacheline)
|
||||
if h in self.pythoncachelines:
|
||||
return self.pythoncachelines[h]
|
||||
@@ -272,28 +254,20 @@ class PythonParser():
|
||||
|
||||
def visit_Call(self, node):
|
||||
name = self.called_node_name(node.func)
|
||||
if name and name in modulecode_deps and modulecode_deps[name][5]:
|
||||
visitorcode = modulecode_deps[name][5]
|
||||
contains, execs, warn = visitorcode(name, node.args)
|
||||
for i in contains:
|
||||
self.contains[i] = contains[i]
|
||||
self.execs |= execs
|
||||
if warn:
|
||||
self.warn(node.func, warn)
|
||||
elif name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
|
||||
if isinstance(node.args[0], ast.Constant) and isinstance(node.args[0].value, str):
|
||||
varname = node.args[0].value
|
||||
if name in self.containsfuncs and isinstance(node.args[1], ast.Constant):
|
||||
if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
varname = node.args[0].s
|
||||
if name in self.containsfuncs and isinstance(node.args[1], ast.Str):
|
||||
if varname not in self.contains:
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname].add(node.args[1].value)
|
||||
elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Constant):
|
||||
self.contains[varname].add(node.args[1].s)
|
||||
elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Str):
|
||||
if varname not in self.contains:
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname].update(node.args[1].value.split())
|
||||
self.contains[varname].update(node.args[1].s.split())
|
||||
elif name.endswith(self.getvarflags):
|
||||
if isinstance(node.args[1], ast.Constant):
|
||||
self.references.add('%s[%s]' % (varname, node.args[1].value))
|
||||
if isinstance(node.args[1], ast.Str):
|
||||
self.references.add('%s[%s]' % (varname, node.args[1].s))
|
||||
else:
|
||||
self.warn(node.func, node.args[1])
|
||||
else:
|
||||
@@ -301,8 +275,8 @@ class PythonParser():
|
||||
else:
|
||||
self.warn(node.func, node.args[0])
|
||||
elif name and name.endswith(".expand"):
|
||||
if isinstance(node.args[0], ast.Constant):
|
||||
value = node.args[0].value
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
value = node.args[0].s
|
||||
d = bb.data.init()
|
||||
parser = d.expandWithRefs(value, self.name)
|
||||
self.references |= parser.references
|
||||
@@ -312,8 +286,8 @@ class PythonParser():
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname] |= parser.contains[varname]
|
||||
elif name in self.execfuncs:
|
||||
if isinstance(node.args[0], ast.Constant):
|
||||
self.var_execs.add(node.args[0].value)
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
self.var_execs.add(node.args[0].s)
|
||||
else:
|
||||
self.warn(node.func, node.args[0])
|
||||
elif name and isinstance(node.func, (ast.Name, ast.Attribute)):
|
||||
@@ -363,7 +337,6 @@ class PythonParser():
|
||||
self.contains = {}
|
||||
for i in codeparsercache.pythoncache[h].contains:
|
||||
self.contains[i] = set(codeparsercache.pythoncache[h].contains[i])
|
||||
self.extra = codeparsercache.pythoncache[h].extra
|
||||
return
if h in codeparsercache.pythoncacheextras:
|
||||
@@ -372,7 +345,6 @@ class PythonParser():
|
||||
self.contains = {}
|
||||
for i in codeparsercache.pythoncacheextras[h].contains:
|
||||
self.contains[i] = set(codeparsercache.pythoncacheextras[h].contains[i])
|
||||
self.extra = codeparsercache.pythoncacheextras[h].extra
|
||||
return
if fixedhash and not node:
|
||||
@@ -391,11 +363,8 @@ class PythonParser():
|
||||
self.visit_Call(n)
|
||||
|
||||
self.execs.update(self.var_execs)
|
||||
self.extra = None
|
||||
if fixedhash:
|
||||
self.extra = bbhash(str(node))
codeparsercache.pythoncacheextras[h] = codeparsercache.newPythonCacheLine(self.references, self.execs, self.contains, self.extra)
|
||||
codeparsercache.pythoncacheextras[h] = codeparsercache.newPythonCacheLine(self.references, self.execs, self.contains)
class ShellParser():
|
||||
def __init__(self, name, log):
|
||||
@@ -514,34 +483,19 @@ class ShellParser():
|
||||
"""
|
||||
|
||||
words = list(words)
|
||||
for word in words:
|
||||
for word in list(words):
|
||||
wtree = pyshlex.make_wordtree(word[1])
|
||||
for part in wtree:
|
||||
if not isinstance(part, list):
|
||||
continue
|
||||
|
||||
candidates = [part]
|
||||
if part[0] in ('`', '$('):
|
||||
command = pyshlex.wordtree_as_string(part[1:-1])
|
||||
self._parse_shell(command)
|
||||
|
||||
# If command is of type:
|
||||
#
|
||||
# var="... $(cmd [...]) ..."
|
||||
#
|
||||
# Then iterate on what's between the quotes and if we find a
|
||||
# list, make that what we check for below.
|
||||
if len(part) >= 3 and part[0] == '"':
|
||||
for p in part[1:-1]:
|
||||
if isinstance(p, list):
|
||||
candidates.append(p)
|
||||
|
||||
for candidate in candidates:
|
||||
if len(candidate) >= 2:
|
||||
if candidate[0] in ('`', '$('):
|
||||
command = pyshlex.wordtree_as_string(candidate[1:-1])
|
||||
self._parse_shell(command)
|
||||
|
||||
if word[0] in ("cmd_name", "cmd_word"):
|
||||
if word in words:
|
||||
words.remove(word)
|
||||
if word[0] in ("cmd_name", "cmd_word"):
|
||||
if word in words:
|
||||
words.remove(word)
|
||||
|
||||
usetoken = False
|
||||
for word in words:
|
||||
|
||||
@@ -24,7 +24,6 @@ import io
|
||||
import bb.event
|
||||
import bb.cooker
|
||||
import bb.remotedata
|
||||
import bb.parse
class DataStoreConnectionHandle(object):
|
||||
def __init__(self, dsindex=0):
|
||||
@@ -66,7 +65,7 @@ class Command:
|
||||
command = commandline.pop(0)
# Ensure cooker is ready for commands
|
||||
if command not in ["updateConfig", "setFeatures", "ping"]:
|
||||
if command != "updateConfig" and command != "setFeatures":
|
||||
try:
|
||||
self.cooker.init_configdata()
|
||||
if not self.remotedatastores:
|
||||
@@ -86,6 +85,7 @@ class Command:
|
||||
if not hasattr(command_method, 'readonly') or not getattr(command_method, 'readonly'):
|
||||
return None, "Not able to execute not readonly commands in readonly mode"
|
||||
try:
|
||||
self.cooker.process_inotify_updates_apply()
|
||||
if getattr(command_method, 'needconfig', True):
|
||||
self.cooker.updateCacheSync()
|
||||
result = command_method(self, commandline)
|
||||
@@ -109,7 +109,8 @@ class Command:
def runAsyncCommand(self, _, process_server, halt):
|
||||
try:
|
||||
if self.cooker.state in (bb.cooker.State.ERROR, bb.cooker.State.SHUTDOWN, bb.cooker.State.FORCE_SHUTDOWN):
|
||||
self.cooker.process_inotify_updates_apply()
|
||||
if self.cooker.state in (bb.cooker.state.error, bb.cooker.state.shutdown, bb.cooker.state.forceshutdown):
|
||||
# updateCache will trigger a shutdown of the parser
|
||||
# and then raise BBHandledException triggering an exit
|
||||
self.cooker.updateCache()
|
||||
@@ -119,7 +120,7 @@ class Command:
|
||||
(command, options) = cmd
|
||||
commandmethod = getattr(CommandsAsync, command)
|
||||
needcache = getattr( commandmethod, "needcache" )
|
||||
if needcache and self.cooker.state != bb.cooker.State.RUNNING:
|
||||
if needcache and self.cooker.state != bb.cooker.state.running:
|
||||
self.cooker.updateCache()
|
||||
return True
|
||||
else:
|
||||
@@ -143,14 +144,14 @@ class Command:
|
||||
return bb.server.process.idleFinish(traceback.format_exc())
def finishAsyncCommand(self, msg=None, code=None):
|
||||
self.cooker.finishcommand()
|
||||
self.process_server.clear_async_cmd()
|
||||
if msg or msg == "":
|
||||
bb.event.fire(CommandFailed(msg), self.cooker.data)
|
||||
elif code:
|
||||
bb.event.fire(CommandExit(code), self.cooker.data)
|
||||
else:
|
||||
bb.event.fire(CommandCompleted(), self.cooker.data)
|
||||
self.cooker.finishcommand()
|
||||
self.process_server.clear_async_cmd()
|
||||
|
||||
def reset(self):
|
||||
if self.remotedatastores:
|
||||
@@ -168,8 +169,6 @@ class CommandsSync:
|
||||
Allow a UI to check the server is still alive
|
||||
"""
|
||||
return "Still alive!"
|
||||
ping.needconfig = False
|
||||
ping.readonly = True
def stateShutdown(self, command, params):
|
||||
"""
|
||||
@@ -308,11 +307,6 @@ class CommandsSync:
|
||||
return ret
|
||||
getLayerPriorities.readonly = True
def revalidateCaches(self, command, params):
|
||||
"""Called by UI clients when metadata may have changed"""
|
||||
command.cooker.revalidateCaches()
|
||||
revalidateCaches.needconfig = False
def getRecipes(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
@@ -421,30 +415,15 @@ class CommandsSync:
|
||||
return command.cooker.recipecaches[mc].pkg_dp
|
||||
getDefaultPreference.readonly = True
def getSkippedRecipes(self, command, params):
|
||||
"""
|
||||
Get the map of skipped recipes for the specified multiconfig/mc name (`params[0]`).
|
||||
|
||||
Invoked by `bb.tinfoil.Tinfoil.get_skipped_recipes`
|
||||
|
||||
:param command: Internally used parameter.
|
||||
:param params: Parameter array. params[0] is multiconfig/mc name. If not given, then default mc '' is assumed.
|
||||
:return: Dict whose keys are virtualfns and values are `bb.cooker.SkippedPackage`
|
||||
"""
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
# Return list sorted by reverse priority order
|
||||
import bb.cache
|
||||
def sortkey(x):
|
||||
vfn, _ = x
|
||||
realfn, _, item_mc = bb.cache.virtualfn2realfn(vfn)
|
||||
return -command.cooker.collections[item_mc].calc_bbfile_priority(realfn)[0], vfn
|
||||
realfn, _, mc = bb.cache.virtualfn2realfn(vfn)
|
||||
return (-command.cooker.collections[mc].calc_bbfile_priority(realfn)[0], vfn)
|
||||
|
||||
skipdict = OrderedDict(sorted(command.cooker.skiplist_by_mc[mc].items(), key=sortkey))
|
||||
skipdict = OrderedDict(sorted(command.cooker.skiplist.items(), key=sortkey))
|
||||
return list(skipdict.items())
|
||||
getSkippedRecipes.readonly = True
|
||||
|
||||
@@ -566,8 +545,8 @@ class CommandsSync:
|
||||
and return a datastore object representing the environment
|
||||
for the recipe.
|
||||
"""
|
||||
virtualfn = params[0]
|
||||
(fn, cls, mc) = bb.cache.virtualfn2realfn(virtualfn)
|
||||
fn = params[0]
|
||||
mc = bb.runqueue.mc_from_tid(fn)
|
||||
appends = params[1]
|
||||
appendlist = params[2]
|
||||
if len(params) > 3:
|
||||
@@ -582,7 +561,6 @@ class CommandsSync:
|
||||
appendfiles = command.cooker.collections[mc].get_file_appends(fn)
|
||||
else:
|
||||
appendfiles = []
|
||||
layername = command.cooker.collections[mc].calc_bbfile_priority(fn)[2]
|
||||
# We are calling bb.cache locally here rather than on the server,
|
||||
# but that's OK because it doesn't actually need anything from
|
||||
# the server barring the global datastore (which we have a remote
|
||||
@@ -590,21 +568,14 @@ class CommandsSync:
|
||||
if config_data:
|
||||
# We have to use a different function here if we're passing in a datastore
|
||||
# NOTE: we took a copy above, so we don't do it here again
|
||||
envdata = command.cooker.databuilder._parse_recipe(config_data, fn, appendfiles, mc, layername)[cls]
|
||||
envdata = command.cooker.databuilder._parse_recipe(config_data, fn, appendfiles, mc)['']
|
||||
else:
|
||||
# Use the standard path
|
||||
envdata = command.cooker.databuilder.parseRecipe(virtualfn, appendfiles, layername)
|
||||
envdata = command.cooker.databuilder.parseRecipe(fn, appendfiles)
|
||||
idx = command.remotedatastores.store(envdata)
|
||||
return DataStoreConnectionHandle(idx)
|
||||
parseRecipeFile.readonly = True
|
||||
|
||||
def finalizeData(self, command, params):
|
||||
newdata = command.cooker.data.createCopy()
|
||||
bb.data.expandKeys(newdata)
|
||||
bb.parse.ast.runAnonFuncs(newdata)
|
||||
idx = command.remotedatastores.store(newdata)
|
||||
return DataStoreConnectionHandle(idx)
|
||||
|
||||
class CommandsAsync:
|
||||
"""
|
||||
A class of asynchronous commands
|
||||
@@ -800,14 +771,7 @@ class CommandsAsync:
|
||||
(mc, pn) = bb.runqueue.split_mc(params[0])
|
||||
taskname = params[1]
|
||||
sigs = params[2]
|
||||
bb.siggen.check_siggen_version(bb.siggen)
|
||||
res = bb.siggen.find_siginfo(pn, taskname, sigs, command.cooker.databuilder.mcdata[mc])
|
||||
bb.event.fire(bb.event.FindSigInfoResult(res), command.cooker.databuilder.mcdata[mc])
|
||||
command.finishAsyncCommand()
|
||||
findSigInfo.needcache = False
|
||||
|
||||
def getTaskSignatures(self, command, params):
|
||||
res = command.cooker.getTaskSignatures(params[0], params[1])
|
||||
bb.event.fire(bb.event.GetTaskSignatureResult(res), command.cooker.data)
|
||||
command.finishAsyncCommand()
|
||||
getTaskSignatures.needcache = True
|
||||
|
||||
@@ -13,7 +13,7 @@ def open(*args, **kwargs):

class LZ4File(bb.compress._pipecompress.PipeFile):
    def get_compress(self):
        return ["lz4", "-z", "-c"]
        return ["lz4c", "-z", "-c"]

    def get_decompress(self):
        return ["lz4", "-d", "-c"]
        return ["lz4c", "-d", "-c"]

@@ -8,7 +8,7 @@
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
import enum
|
||||
|
||||
import sys, os, glob, os.path, re, time
|
||||
import itertools
|
||||
import logging
|
||||
@@ -17,11 +17,12 @@ import threading
|
||||
from io import StringIO, UnsupportedOperation
|
||||
from contextlib import closing
|
||||
from collections import defaultdict, namedtuple
|
||||
import bb, bb.command
|
||||
import bb, bb.exceptions, bb.command
|
||||
from bb import utils, data, parse, event, cache, providers, taskdata, runqueue, build
|
||||
import queue
|
||||
import signal
|
||||
import prserv.serv
|
||||
import pyinotify
|
||||
import json
|
||||
import pickle
|
||||
import codecs
|
||||
@@ -48,15 +49,16 @@ class CollectionError(bb.BBHandledException):
    Exception raised when layer configuration is incorrect
    """

class state:
    initial, parsing, running, shutdown, forceshutdown, stopped, error = list(range(7))

class State(enum.Enum):
    INITIAL = 0,
    PARSING = 1,
    RUNNING = 2,
    SHUTDOWN = 3,
    FORCE_SHUTDOWN = 4,
    STOPPED = 5,
    ERROR = 6
    @classmethod
    def get_name(cls, code):
        for name in dir(cls):
            value = getattr(cls, name)
            if type(value) == type(cls.initial) and value == code:
                return name
        raise ValueError("Invalid status code: %s" % code)

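The hunk above swaps the old integer-constant state class for an enum.Enum. A minimal standalone Python sketch of what that change buys (illustrative only; the class names below mirror the diff, but the snippet is not BitBake code and uses plain integer values rather than the tuple-valued members shown above):

import enum

class OldState:
    # mirrors the removed integer-constant style
    initial, parsing, running, shutdown, forceshutdown, stopped, error = list(range(7))

class State(enum.Enum):
    # mirrors the new enum-based style
    INITIAL = 0
    PARSING = 1
    RUNNING = 2

print(OldState.running == 2)   # True: plain ints compare equal to any integer
print(State.RUNNING == 2)      # False: enum members only equal themselves
print(State.RUNNING.name)      # "RUNNING": no get_name() helper needed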
class SkippedPackage:
|
||||
@@ -101,15 +103,12 @@ class CookerFeatures(object):
|
||||
|
||||
class EventWriter:
|
||||
def __init__(self, cooker, eventfile):
|
||||
self.file_inited = None
|
||||
self.cooker = cooker
|
||||
self.eventfile = eventfile
|
||||
self.event_queue = []
|
||||
|
||||
def write_variables(self):
|
||||
with open(self.eventfile, "a") as f:
|
||||
f.write("%s\n" % json.dumps({ "allvariables" : self.cooker.getAllKeysWithFlags(["doc", "func"])}))
|
||||
|
||||
def send(self, event):
|
||||
def write_event(self, event):
|
||||
with open(self.eventfile, "a") as f:
|
||||
try:
|
||||
str_event = codecs.encode(pickle.dumps(event), 'base64').decode('utf-8')
|
||||
@@ -119,6 +118,28 @@ class EventWriter:
                import traceback
                print(err, traceback.format_exc())

    def send(self, event):
        if self.file_inited:
            # we have the file, just write the event
            self.write_event(event)
        else:
            # init on bb.event.BuildStarted
            name = "%s.%s" % (event.__module__, event.__class__.__name__)
            if name in ("bb.event.BuildStarted", "bb.cooker.CookerExit"):
                with open(self.eventfile, "w") as f:
                    f.write("%s\n" % json.dumps({ "allvariables" : self.cooker.getAllKeysWithFlags(["doc", "func"])}))

                self.file_inited = True

                # write pending events
                for evt in self.event_queue:
                    self.write_event(evt)

                # also write the current event
                self.write_event(event)
            else:
                # queue all events until the file is inited
                self.event_queue.append(event)

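The new send() above buffers events until a BuildStarted (or CookerExit) event creates the log file, then flushes the queue. A small standalone sketch of that buffering pattern, using a hypothetical BufferedWriter class rather than the real EventWriter:

class BufferedWriter:
    def __init__(self, path):
        self.path = path
        self.file_inited = False
        self.event_queue = []

    def write_event(self, event):
        # append one line per event
        with open(self.path, "a") as f:
            f.write("%s\n" % event)

    def send(self, event, is_start=False):
        if self.file_inited:
            self.write_event(event)
        elif is_start:
            # the first "start"-style event creates the file, then the queue is flushed
            open(self.path, "w").close()
            self.file_inited = True
            for evt in self.event_queue:
                self.write_event(evt)
            self.write_event(event)
        else:
            # queue everything seen before the start event
            self.event_queue.append(event)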
#============================================================================#
|
||||
# BBCooker
|
||||
@@ -130,11 +151,8 @@ class BBCooker:
|
||||
|
||||
def __init__(self, featureSet=None, server=None):
|
||||
self.recipecaches = None
|
||||
self.baseconfig_valid = False
|
||||
self.parsecache_valid = False
|
||||
self.eventlog = None
|
||||
# The skiplists, one per multiconfig
|
||||
self.skiplist_by_mc = defaultdict(dict)
|
||||
self.skiplist = {}
|
||||
self.featureset = CookerFeatures()
|
||||
if featureSet:
|
||||
for f in featureSet:
|
||||
@@ -153,9 +171,17 @@ class BBCooker:
|
||||
self.waitIdle = server.wait_for_idle
bb.debug(1, "BBCooker starting %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
|
||||
self.configwatched = {}
|
||||
self.parsewatched = {}
|
||||
self.configwatcher = None
|
||||
self.confignotifier = None
self.watchmask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_CREATE | pyinotify.IN_DELETE | \
|
||||
pyinotify.IN_DELETE_SELF | pyinotify.IN_MODIFY | pyinotify.IN_MOVE_SELF | \
|
||||
pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO
|
||||
|
||||
self.watcher = None
|
||||
self.notifier = None
|
||||
|
||||
# If being called by something like tinfoil, we need to clean cached data
|
||||
# which may now be invalid
|
||||
@@ -166,6 +192,8 @@ class BBCooker:
|
||||
self.hashserv = None
|
||||
self.hashservaddr = None
|
||||
|
||||
self.inotify_modified_files = []
|
||||
|
||||
# TOSTOP must not be set or our children will hang when they output
|
||||
try:
|
||||
fd = sys.stdout.fileno()
|
||||
@@ -180,7 +208,7 @@ class BBCooker:
|
||||
pass
|
||||
|
||||
self.command = bb.command.Command(self, self.process_server)
|
||||
self.state = State.INITIAL
|
||||
self.state = state.initial
|
||||
|
||||
self.parser = None
|
||||
|
||||
@@ -189,59 +217,158 @@ class BBCooker:
|
||||
signal.signal(signal.SIGHUP, self.sigterm_exception)
|
||||
|
||||
bb.debug(1, "BBCooker startup complete %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
|
||||
self.inotify_threadlock = threading.Lock()
|
||||
|
||||
def init_configdata(self):
|
||||
if not hasattr(self, "data"):
|
||||
self.initConfigurationData()
|
||||
bb.debug(1, "BBCooker parsed base configuration %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
self.handlePRServ()
|
||||
|
||||
def _baseconfig_set(self, value):
|
||||
if value and not self.baseconfig_valid:
|
||||
bb.server.process.serverlog("Base config valid")
|
||||
elif not value and self.baseconfig_valid:
|
||||
bb.server.process.serverlog("Base config invalidated")
|
||||
self.baseconfig_valid = value
|
||||
def setupConfigWatcher(self):
|
||||
with bb.utils.lock_timeout(self.inotify_threadlock):
|
||||
if self.configwatcher:
|
||||
self.configwatcher.close()
|
||||
self.confignotifier = None
|
||||
self.configwatcher = None
|
||||
self.configwatcher = pyinotify.WatchManager()
|
||||
self.configwatcher.bbseen = set()
|
||||
self.configwatcher.bbwatchedfiles = set()
|
||||
self.confignotifier = pyinotify.Notifier(self.configwatcher, self.config_notifications)
|
||||
|
||||
def _parsecache_set(self, value):
|
||||
if value and not self.parsecache_valid:
|
||||
bb.server.process.serverlog("Parse cache valid")
|
||||
elif not value and self.parsecache_valid:
|
||||
bb.server.process.serverlog("Parse cache invalidated")
|
||||
self.parsecache_valid = value
|
||||
def setupParserWatcher(self):
|
||||
with bb.utils.lock_timeout(self.inotify_threadlock):
|
||||
if self.watcher:
|
||||
self.watcher.close()
|
||||
self.notifier = None
|
||||
self.watcher = None
|
||||
self.watcher = pyinotify.WatchManager()
|
||||
self.watcher.bbseen = set()
|
||||
self.watcher.bbwatchedfiles = set()
|
||||
self.notifier = pyinotify.Notifier(self.watcher, self.notifications)
|
||||
|
||||
def add_filewatch(self, deps, configwatcher=False):
|
||||
if configwatcher:
|
||||
watcher = self.configwatched
|
||||
else:
|
||||
watcher = self.parsewatched
|
||||
def process_inotify_updates(self):
|
||||
with bb.utils.lock_timeout(self.inotify_threadlock):
|
||||
for n in [self.confignotifier, self.notifier]:
|
||||
if n and n.check_events(timeout=0):
|
||||
# read notified events and enqueue them
|
||||
n.read_events()
|
||||
|
||||
def process_inotify_updates_apply(self):
|
||||
with bb.utils.lock_timeout(self.inotify_threadlock):
|
||||
for n in [self.confignotifier, self.notifier]:
|
||||
if n and n.check_events(timeout=0):
|
||||
n.read_events()
|
||||
n.process_events()
|
||||
|
||||
def config_notifications(self, event):
|
||||
if event.maskname == "IN_Q_OVERFLOW":
|
||||
bb.warn("inotify event queue overflowed, invalidating caches.")
|
||||
self.parsecache_valid = False
|
||||
self.baseconfig_valid = False
|
||||
bb.parse.clear_cache()
|
||||
return
|
||||
if not event.pathname in self.configwatcher.bbwatchedfiles:
|
||||
return
|
||||
if "IN_ISDIR" in event.maskname:
|
||||
if "IN_CREATE" in event.maskname or "IN_DELETE" in event.maskname:
|
||||
if event.pathname in self.configwatcher.bbseen:
|
||||
self.configwatcher.bbseen.remove(event.pathname)
|
||||
# Could remove all entries starting with the directory but for now...
|
||||
bb.parse.clear_cache()
|
||||
if not event.pathname in self.inotify_modified_files:
|
||||
self.inotify_modified_files.append(event.pathname)
|
||||
self.baseconfig_valid = False
|
||||
|
||||
def notifications(self, event):
|
||||
if event.maskname == "IN_Q_OVERFLOW":
|
||||
bb.warn("inotify event queue overflowed, invalidating caches.")
|
||||
self.parsecache_valid = False
|
||||
bb.parse.clear_cache()
|
||||
return
|
||||
if event.pathname.endswith("bitbake-cookerdaemon.log") \
|
||||
or event.pathname.endswith("bitbake.lock"):
|
||||
return
|
||||
if "IN_ISDIR" in event.maskname:
|
||||
if "IN_CREATE" in event.maskname or "IN_DELETE" in event.maskname:
|
||||
if event.pathname in self.watcher.bbseen:
|
||||
self.watcher.bbseen.remove(event.pathname)
|
||||
# Could remove all entries starting with the directory but for now...
|
||||
bb.parse.clear_cache()
|
||||
if not event.pathname in self.inotify_modified_files:
|
||||
self.inotify_modified_files.append(event.pathname)
|
||||
self.parsecache_valid = False
|
||||
|
||||
def add_filewatch(self, deps, watcher=None, dirs=False):
|
||||
if not watcher:
|
||||
watcher = self.watcher
|
||||
for i in deps:
|
||||
f = i[0]
|
||||
mtime = i[1]
|
||||
watcher[f] = mtime
|
||||
watcher.bbwatchedfiles.add(i[0])
|
||||
if dirs:
|
||||
f = i[0]
|
||||
else:
|
||||
f = os.path.dirname(i[0])
|
||||
if f in watcher.bbseen:
|
||||
continue
|
||||
watcher.bbseen.add(f)
|
||||
watchtarget = None
|
||||
while True:
|
||||
# We try and add watches for files that don't exist but if they did, would influence
|
||||
# the parser. The parent directory of these files may not exist, in which case we need
|
||||
# to watch any parent that does exist for changes.
|
||||
try:
|
||||
watcher.add_watch(f, self.watchmask, quiet=False)
|
||||
if watchtarget:
|
||||
watcher.bbwatchedfiles.add(watchtarget)
|
||||
break
|
||||
except pyinotify.WatchManagerError as e:
|
||||
if 'ENOENT' in str(e):
|
||||
watchtarget = f
|
||||
f = os.path.dirname(f)
|
||||
if f in watcher.bbseen:
|
||||
break
|
||||
watcher.bbseen.add(f)
|
||||
continue
|
||||
if 'ENOSPC' in str(e):
|
||||
providerlog.error("No space left on device or exceeds fs.inotify.max_user_watches?")
|
||||
providerlog.error("To check max_user_watches: sysctl -n fs.inotify.max_user_watches.")
|
||||
providerlog.error("To modify max_user_watches: sysctl -n -w fs.inotify.max_user_watches=<value>.")
|
||||
providerlog.error("Root privilege is required to modify max_user_watches.")
|
||||
raise
|
||||
|
||||
def handle_inotify_updates(self):
|
||||
# reload files for which we got notifications
|
||||
for p in self.inotify_modified_files:
|
||||
bb.parse.update_cache(p)
|
||||
if p in bb.parse.BBHandler.cached_statements:
|
||||
del bb.parse.BBHandler.cached_statements[p]
|
||||
self.inotify_modified_files = []
|
||||
|
||||
def sigterm_exception(self, signum, stackframe):
|
||||
if signum == signal.SIGTERM:
|
||||
bb.warn("Cooker received SIGTERM, shutting down...")
|
||||
elif signum == signal.SIGHUP:
|
||||
bb.warn("Cooker received SIGHUP, shutting down...")
|
||||
self.state = State.FORCE_SHUTDOWN
|
||||
self.state = state.forceshutdown
|
||||
bb.event._should_exit.set()
|
||||
|
||||
def setFeatures(self, features):
|
||||
# we only accept a new feature set if we're in state initial, so we can reset without problems
|
||||
if not self.state in [State.INITIAL, State.SHUTDOWN, State.FORCE_SHUTDOWN, State.STOPPED, State.ERROR]:
|
||||
if not self.state in [state.initial, state.shutdown, state.forceshutdown, state.stopped, state.error]:
|
||||
raise Exception("Illegal state for feature set change")
|
||||
original_featureset = list(self.featureset)
|
||||
for feature in features:
|
||||
self.featureset.setFeature(feature)
|
||||
bb.debug(1, "Features set %s (was %s)" % (original_featureset, list(self.featureset)))
|
||||
if (original_featureset != list(self.featureset)) and self.state != State.ERROR and hasattr(self, "data"):
|
||||
if (original_featureset != list(self.featureset)) and self.state != state.error and hasattr(self, "data"):
|
||||
self.reset()
|
||||
|
||||
def initConfigurationData(self):
|
||||
self.state = State.INITIAL
|
||||
|
||||
self.state = state.initial
|
||||
self.caches_array = []
|
||||
|
||||
sys.path = self.orig_syspath.copy()
|
||||
@@ -249,7 +376,8 @@ class BBCooker:
|
||||
if mod not in self.orig_sysmodules:
|
||||
del sys.modules[mod]
|
||||
|
||||
self.configwatched = {}
|
||||
self.handle_inotify_updates()
|
||||
self.setupConfigWatcher()
|
||||
|
||||
# Need to preserve BB_CONSOLELOG over resets
|
||||
consolelog = None
|
||||
@@ -280,12 +408,9 @@ class BBCooker:
|
||||
self.databuilder = bb.cookerdata.CookerDataBuilder(self.configuration, False)
|
||||
self.databuilder.parseBaseConfiguration()
|
||||
self.data = self.databuilder.data
|
||||
self.data_hash = self.databuilder.data_hash
|
||||
self.extraconfigdata = {}
|
||||
|
||||
eventlog = self.data.getVar("BB_DEFAULT_EVENTLOG")
|
||||
if not self.configuration.writeeventlog and eventlog:
|
||||
self.setupEventLog(eventlog)
|
||||
|
||||
if consolelog:
|
||||
self.data.setVar("BB_CONSOLELOG", consolelog)
|
||||
|
||||
@@ -295,10 +420,10 @@ class BBCooker:
|
||||
self.disableDataTracking()
|
||||
|
||||
for mc in self.databuilder.mcdata.values():
|
||||
self.add_filewatch(mc.getVar("__base_depends", False), configwatcher=True)
|
||||
self.add_filewatch(mc.getVar("__base_depends", False), self.configwatcher)
|
||||
|
||||
self._baseconfig_set(True)
|
||||
self._parsecache_set(False)
|
||||
self.baseconfig_valid = True
|
||||
self.parsecache_valid = False
|
||||
|
||||
def handlePRServ(self):
|
||||
# Setup a PR Server based on the new configuration
|
||||
@@ -313,19 +438,13 @@ class BBCooker:
|
||||
dbfile = (self.data.getVar("PERSISTENT_DIR") or self.data.getVar("CACHE")) + "/hashserv.db"
|
||||
upstream = self.data.getVar("BB_HASHSERVE_UPSTREAM") or None
|
||||
if upstream:
|
||||
import socket
|
||||
try:
|
||||
with hashserv.create_client(upstream) as client:
|
||||
client.ping()
|
||||
except ImportError as e:
|
||||
bb.fatal(""""Unable to use hash equivalence server at '%s' due to missing or incorrect python module:
|
||||
%s
|
||||
Please install the needed module on the build host, or use an environment containing it (e.g a pip venv or OpenEmbedded's buildtools tarball).
|
||||
You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in significantly longer build times as bitbake will be unable to reuse prebuilt sstate artefacts."""
|
||||
sock = socket.create_connection(upstream.split(":"), 5)
|
||||
sock.close()
|
||||
except socket.error as e:
|
||||
bb.warn("BB_HASHSERVE_UPSTREAM is not valid, unable to connect hash equivalence server at '%s': %s"
|
||||
% (upstream, repr(e)))
|
||||
except ConnectionError as e:
|
||||
bb.warn("Unable to connect to hash equivalence server at '%s', please correct or remove BB_HASHSERVE_UPSTREAM:\n%s"
|
||||
% (upstream, repr(e)))
|
||||
upstream = None
|
||||
|
||||
self.hashservaddr = "unix://%s/hashserve.sock" % self.data.getVar("TOPDIR")
|
||||
self.hashserv = hashserv.create_server(
|
||||
@@ -334,7 +453,7 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
sync=False,
|
||||
upstream=upstream,
|
||||
)
|
||||
self.hashserv.serve_as_process(log_level=logging.WARNING)
|
||||
self.hashserv.serve_as_process()
|
||||
for mc in self.databuilder.mcdata:
|
||||
self.databuilder.mcorigdata[mc].setVar("BB_HASHSERVE", self.hashservaddr)
|
||||
self.databuilder.mcdata[mc].setVar("BB_HASHSERVE", self.hashservaddr)
|
||||
@@ -351,34 +470,6 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
if hasattr(self, "data"):
|
||||
self.data.disableTracking()
|
||||
|
||||
def revalidateCaches(self):
|
||||
bb.parse.clear_cache()
|
||||
|
||||
clean = True
|
||||
for f in self.configwatched:
|
||||
if not bb.parse.check_mtime(f, self.configwatched[f]):
|
||||
bb.server.process.serverlog("Found %s changed, invalid cache" % f)
|
||||
self._baseconfig_set(False)
|
||||
self._parsecache_set(False)
|
||||
clean = False
|
||||
break
|
||||
|
||||
if clean:
|
||||
for f in self.parsewatched:
|
||||
if not bb.parse.check_mtime(f, self.parsewatched[f]):
|
||||
bb.server.process.serverlog("Found %s changed, invalid cache" % f)
|
||||
self._parsecache_set(False)
|
||||
clean = False
|
||||
break
|
||||
|
||||
if not clean:
|
||||
bb.parse.BBHandler.cached_statements = {}
|
||||
|
||||
# If writes were made to any of the data stores, we need to recalculate the data
|
||||
# store cache
|
||||
if hasattr(self, "databuilder"):
|
||||
self.databuilder.calc_datastore_hashes()
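revalidateCaches() above now walks the recorded mtimes in configwatched and parsewatched instead of relying on inotify notifications. A rough standalone sketch of that mtime-tracking idea, with hypothetical helper names (not the BitBake implementation):

import os

watched = {}   # path -> mtime recorded when the cache was built

def record(path):
    try:
        watched[path] = os.stat(path).st_mtime
    except OSError:
        watched[path] = 0   # missing files are tracked too

def cache_still_valid():
    for path, old_mtime in watched.items():
        try:
            cur = os.stat(path).st_mtime
        except OSError:
            cur = 0
        if cur != old_mtime:
            return False   # a tracked file changed, so the cache is stale
    return True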
def parseConfiguration(self):
|
||||
self.updateCacheSync()
|
||||
|
||||
@@ -397,24 +488,8 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
self.recipecaches[mc] = bb.cache.CacheData(self.caches_array)
|
||||
|
||||
self.handleCollections(self.data.getVar("BBFILE_COLLECTIONS"))
|
||||
self.collections = {}
|
||||
for mc in self.multiconfigs:
|
||||
self.collections[mc] = CookerCollectFiles(self.bbfile_config_priorities, mc)
|
||||
|
||||
self._parsecache_set(False)
|
||||
|
||||
def setupEventLog(self, eventlog):
|
||||
if self.eventlog and self.eventlog[0] != eventlog:
|
||||
bb.event.unregister_UIHhandler(self.eventlog[1])
|
||||
self.eventlog = None
|
||||
if not self.eventlog or self.eventlog[0] != eventlog:
|
||||
# we log all events to a file if so directed
|
||||
# register the log file writer as UI Handler
|
||||
if not os.path.exists(os.path.dirname(eventlog)):
|
||||
bb.utils.mkdirhier(os.path.dirname(eventlog))
|
||||
writer = EventWriter(self, eventlog)
|
||||
EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event'])
|
||||
self.eventlog = (eventlog, bb.event.register_UIHhandler(EventLogWriteHandler(writer)), writer)
|
||||
self.parsecache_valid = False
|
||||
|
||||
def updateConfigOpts(self, options, environment, cmdline):
|
||||
self.ui_cmdline = cmdline
|
||||
@@ -435,7 +510,14 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
setattr(self.configuration, o, options[o])
|
||||
|
||||
if self.configuration.writeeventlog:
|
||||
self.setupEventLog(self.configuration.writeeventlog)
|
||||
if self.eventlog and self.eventlog[0] != self.configuration.writeeventlog:
|
||||
bb.event.unregister_UIHhandler(self.eventlog[1])
|
||||
if not self.eventlog or self.eventlog[0] != self.configuration.writeeventlog:
|
||||
# we log all events to a file if so directed
|
||||
# register the log file writer as UI Handler
|
||||
writer = EventWriter(self, self.configuration.writeeventlog)
|
||||
EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event'])
|
||||
self.eventlog = (self.configuration.writeeventlog, bb.event.register_UIHhandler(EventLogWriteHandler(writer)))
|
||||
|
||||
bb.msg.loggerDefaultLogLevel = self.configuration.default_loglevel
|
||||
bb.msg.loggerDefaultDomains = self.configuration.debug_domains
|
||||
@@ -465,7 +547,6 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
# Now update all the variables not in the datastore to match
|
||||
self.configuration.env = environment
|
||||
|
||||
self.revalidateCaches()
|
||||
if not clean:
|
||||
logger.debug("Base environment change, triggering reparse")
|
||||
self.reset()
|
||||
@@ -543,14 +624,13 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
|
||||
if fn:
|
||||
try:
|
||||
layername = self.collections[mc].calc_bbfile_priority(fn)[2]
|
||||
envdata = self.databuilder.parseRecipe(fn, self.collections[mc].get_file_appends(fn), layername)
|
||||
envdata = self.databuilder.parseRecipe(fn, self.collections[mc].get_file_appends(fn))
|
||||
except Exception as e:
|
||||
parselog.exception("Unable to read %s", fn)
|
||||
raise
|
||||
else:
|
||||
if not mc in self.databuilder.mcdata:
|
||||
bb.fatal('No multiconfig named "%s" found' % mc)
|
||||
bb.fatal('Not multiconfig named "%s" found' % mc)
|
||||
envdata = self.databuilder.mcdata[mc]
|
||||
data.expandKeys(envdata)
|
||||
parse.ast.runAnonFuncs(envdata)
|
||||
@@ -621,8 +701,8 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
localdata = {}
|
||||
|
||||
for mc in self.multiconfigs:
|
||||
taskdata[mc] = bb.taskdata.TaskData(halt, skiplist=self.skiplist_by_mc[mc], allowincomplete=allowincomplete)
|
||||
localdata[mc] = bb.data.createCopy(self.databuilder.mcdata[mc])
|
||||
taskdata[mc] = bb.taskdata.TaskData(halt, skiplist=self.skiplist, allowincomplete=allowincomplete)
|
||||
localdata[mc] = data.createCopy(self.databuilder.mcdata[mc])
|
||||
bb.data.expandKeys(localdata[mc])
|
||||
|
||||
current = 0
|
||||
@@ -689,14 +769,14 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
bb.event.fire(bb.event.TreeDataPreparationCompleted(len(fulltargetlist)), self.data)
|
||||
return taskdata, runlist
|
||||
|
||||
def prepareTreeData(self, pkgs_to_build, task, halt=False):
|
||||
def prepareTreeData(self, pkgs_to_build, task):
|
||||
"""
|
||||
Prepare a runqueue and taskdata object for iteration over pkgs_to_build
|
||||
"""
|
||||
|
||||
# We set halt to False here to prevent unbuildable targets raising
|
||||
# an exception when we're just generating data
|
||||
taskdata, runlist = self.buildTaskData(pkgs_to_build, task, halt, allowincomplete=True)
|
||||
taskdata, runlist = self.buildTaskData(pkgs_to_build, task, False, allowincomplete=True)
|
||||
|
||||
return runlist, taskdata
|
||||
|
||||
@@ -710,7 +790,7 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
if not task.startswith("do_"):
|
||||
task = "do_%s" % task
|
||||
|
||||
runlist, taskdata = self.prepareTreeData(pkgs_to_build, task, halt=True)
|
||||
runlist, taskdata = self.prepareTreeData(pkgs_to_build, task)
|
||||
rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
|
||||
rq.rqdata.prepare()
|
||||
return self.buildDependTree(rq, taskdata)
|
||||
@@ -905,11 +985,10 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
|
||||
depgraph = self.generateTaskDepTreeData(pkgs_to_build, task)
|
||||
|
||||
pns = depgraph["pn"].keys()
|
||||
if pns:
|
||||
with open('pn-buildlist', 'w') as f:
|
||||
f.write("%s\n" % "\n".join(sorted(pns)))
|
||||
logger.info("PN build list saved to 'pn-buildlist'")
|
||||
with open('pn-buildlist', 'w') as f:
|
||||
for pn in depgraph["pn"]:
|
||||
f.write(pn + "\n")
|
||||
logger.info("PN build list saved to 'pn-buildlist'")
|
||||
|
||||
# Remove old format output files to ensure no confusion with stale data
|
||||
try:
|
||||
@@ -943,7 +1022,7 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
for mc in self.multiconfigs:
|
||||
# First get list of recipes, including skipped
|
||||
recipefns = list(self.recipecaches[mc].pkg_fn.keys())
|
||||
recipefns.extend(self.skiplist_by_mc[mc].keys())
|
||||
recipefns.extend(self.skiplist.keys())
|
||||
|
||||
# Work out list of bbappends that have been applied
|
||||
applied_appends = []
|
||||
@@ -962,7 +1041,13 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
'\n '.join(appends_without_recipes[mc])))
|
||||
|
||||
if msgs:
|
||||
bb.fatal("\n".join(msgs))
|
||||
msg = "\n".join(msgs)
|
||||
warn_only = self.databuilder.mcdata[mc].getVar("BB_DANGLINGAPPENDS_WARNONLY", \
|
||||
False) or "no"
|
||||
if warn_only.lower() in ("1", "yes", "true"):
|
||||
bb.warn(msg)
|
||||
else:
|
||||
bb.fatal(msg)
|
||||
|
||||
def handlePrefProviders(self):
|
||||
|
||||
@@ -1277,8 +1362,8 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
if bf.startswith("/") or bf.startswith("../"):
|
||||
bf = os.path.abspath(bf)
|
||||
|
||||
collections = {mc: CookerCollectFiles(self.bbfile_config_priorities, mc)}
|
||||
filelist, masked, searchdirs = collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc])
|
||||
self.collections = {mc: CookerCollectFiles(self.bbfile_config_priorities, mc)}
|
||||
filelist, masked, searchdirs = self.collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc])
|
||||
try:
|
||||
os.stat(bf)
|
||||
bf = os.path.abspath(bf)
|
||||
@@ -1342,10 +1427,9 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
self.buildSetVars()
|
||||
self.reset_mtime_caches()
|
||||
|
||||
bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.databuilder.data_hash, self.caches_array)
|
||||
bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.data_hash, self.caches_array)
|
||||
|
||||
layername = self.collections[mc].calc_bbfile_priority(fn)[2]
|
||||
infos = bb_caches[mc].parse(fn, self.collections[mc].get_file_appends(fn), layername)
|
||||
infos = bb_caches[mc].parse(fn, self.collections[mc].get_file_appends(fn))
|
||||
infos = dict(infos)
|
||||
|
||||
fn = bb.cache.realfn2virtual(fn, cls, mc)
|
||||
@@ -1390,8 +1474,6 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
buildname = self.databuilder.mcdata[mc].getVar("BUILDNAME")
|
||||
if fireevents:
|
||||
bb.event.fire(bb.event.BuildStarted(buildname, [item]), self.databuilder.mcdata[mc])
|
||||
if self.eventlog:
|
||||
self.eventlog[2].write_variables()
|
||||
bb.event.enable_heartbeat()
|
||||
|
||||
# Execute the runqueue
|
||||
@@ -1403,11 +1485,11 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
|
||||
msg = None
|
||||
interrupted = 0
|
||||
if halt or self.state == State.FORCE_SHUTDOWN:
|
||||
if halt or self.state == state.forceshutdown:
|
||||
rq.finish_runqueue(True)
|
||||
msg = "Forced shutdown"
|
||||
interrupted = 2
|
||||
elif self.state == State.SHUTDOWN:
|
||||
elif self.state == state.shutdown:
|
||||
rq.finish_runqueue(False)
|
||||
msg = "Stopped build"
|
||||
interrupted = 1
|
||||
@@ -1427,7 +1509,7 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, item, failures, interrupted), self.databuilder.mcdata[mc])
|
||||
bb.event.disable_heartbeat()
|
||||
# We trashed self.recipecaches above
|
||||
self._parsecache_set(False)
|
||||
self.parsecache_valid = False
|
||||
self.configuration.limited_deps = False
|
||||
bb.parse.siggen.reset(self.data)
|
||||
if quietlog:
|
||||
@@ -1439,36 +1521,6 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
|
||||
self.idleCallBackRegister(buildFileIdle, rq)
def getTaskSignatures(self, target, tasks):
|
||||
sig = []
|
||||
getAllTaskSignatures = False
|
||||
|
||||
if not tasks:
|
||||
tasks = ["do_build"]
|
||||
getAllTaskSignatures = True
|
||||
|
||||
for task in tasks:
|
||||
taskdata, runlist = self.buildTaskData(target, task, self.configuration.halt)
|
||||
rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
|
||||
rq.rqdata.prepare()
|
||||
|
||||
for l in runlist:
|
||||
mc, pn, taskname, fn = l
|
||||
|
||||
taskdep = rq.rqdata.dataCaches[mc].task_deps[fn]
|
||||
for t in taskdep['tasks']:
|
||||
if t in taskdep['nostamp'] or "setscene" in t:
|
||||
continue
|
||||
tid = bb.runqueue.build_tid(mc, fn, t)
|
||||
|
||||
if t in task or getAllTaskSignatures:
|
||||
try:
|
||||
sig.append([pn, t, rq.rqdata.get_task_unihash(tid)])
|
||||
except KeyError:
|
||||
sig.append(self.getTaskSignatures(target, [t])[0])
|
||||
|
||||
return sig
|
||||
|
||||
def buildTargets(self, targets, task):
|
||||
"""
|
||||
Attempt to build the targets specified
|
||||
@@ -1477,12 +1529,12 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
def buildTargetsIdle(server, rq, halt):
|
||||
msg = None
|
||||
interrupted = 0
|
||||
if halt or self.state == State.FORCE_SHUTDOWN:
|
||||
if halt or self.state == state.forceshutdown:
|
||||
bb.event._should_exit.set()
|
||||
rq.finish_runqueue(True)
|
||||
msg = "Forced shutdown"
|
||||
interrupted = 2
|
||||
elif self.state == State.SHUTDOWN:
|
||||
elif self.state == state.shutdown:
|
||||
rq.finish_runqueue(False)
|
||||
msg = "Stopped build"
|
||||
interrupted = 1
|
||||
@@ -1534,8 +1586,6 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
|
||||
for mc in self.multiconfigs:
|
||||
bb.event.fire(bb.event.BuildStarted(buildname, ntargets), self.databuilder.mcdata[mc])
|
||||
if self.eventlog:
|
||||
self.eventlog[2].write_variables()
|
||||
bb.event.enable_heartbeat()
|
||||
|
||||
rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
|
||||
@@ -1546,13 +1596,7 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
|
||||
|
||||
def getAllKeysWithFlags(self, flaglist):
|
||||
def dummy_autorev(d):
|
||||
return
|
||||
|
||||
dump = {}
|
||||
# Horrible but for now we need to avoid any sideeffects of autorev being called
|
||||
saved = bb.fetch2.get_autorev
|
||||
bb.fetch2.get_autorev = dummy_autorev
|
||||
for k in self.data.keys():
|
||||
try:
|
||||
expand = True
|
||||
@@ -1572,14 +1616,15 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
dump[k][d] = None
|
||||
except Exception as e:
|
||||
print(e)
|
||||
bb.fetch2.get_autorev = saved
|
||||
return dump
|
||||
|
||||
|
||||
def updateCacheSync(self):
|
||||
if self.state == State.RUNNING:
|
||||
if self.state == state.running:
|
||||
return
|
||||
|
||||
self.handle_inotify_updates()
|
||||
|
||||
if not self.baseconfig_valid:
|
||||
logger.debug("Reloading base configuration data")
|
||||
self.initConfigurationData()
|
||||
@@ -1587,21 +1632,20 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
|
||||
# This is called for all async commands when self.state != running
|
||||
def updateCache(self):
|
||||
if self.state == State.RUNNING:
|
||||
if self.state == state.running:
|
||||
return
|
||||
|
||||
if self.state in (State.SHUTDOWN, State.FORCE_SHUTDOWN, State.ERROR):
|
||||
if self.state in (state.shutdown, state.forceshutdown, state.error):
|
||||
if hasattr(self.parser, 'shutdown'):
|
||||
self.parser.shutdown(clean=False)
|
||||
self.parser.final_cleanup()
|
||||
raise bb.BBHandledException()
|
||||
|
||||
if self.state != State.PARSING:
|
||||
if self.state != state.parsing:
|
||||
self.updateCacheSync()
|
||||
|
||||
if self.state != State.PARSING and not self.parsecache_valid:
|
||||
bb.server.process.serverlog("Parsing started")
|
||||
self.parsewatched = {}
|
||||
if self.state != state.parsing and not self.parsecache_valid:
|
||||
self.setupParserWatcher()
|
||||
|
||||
bb.parse.siggen.reset(self.data)
|
||||
self.parseConfiguration ()
|
||||
@@ -1616,27 +1660,29 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
for dep in self.configuration.extra_assume_provided:
|
||||
self.recipecaches[mc].ignored_dependencies.add(dep)
|
||||
|
||||
self.collections = {}
|
||||
|
||||
mcfilelist = {}
|
||||
total_masked = 0
|
||||
searchdirs = set()
|
||||
for mc in self.multiconfigs:
|
||||
self.collections[mc] = CookerCollectFiles(self.bbfile_config_priorities, mc)
|
||||
(filelist, masked, search) = self.collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc])
|
||||
|
||||
mcfilelist[mc] = filelist
|
||||
total_masked += masked
|
||||
searchdirs |= set(search)
|
||||
|
||||
# Add mtimes for directories searched for bb/bbappend files
|
||||
# Add inotify watches for directories searched for bb/bbappend files
|
||||
for dirent in searchdirs:
|
||||
self.add_filewatch([(dirent, bb.parse.cached_mtime_noerror(dirent))])
|
||||
self.add_filewatch([[dirent]], dirs=True)
|
||||
|
||||
self.parser = CookerParser(self, mcfilelist, total_masked)
|
||||
self._parsecache_set(True)
|
||||
self.parsecache_valid = True
|
||||
|
||||
self.state = State.PARSING
|
||||
self.state = state.parsing
|
||||
|
||||
if not self.parser.parse_next():
|
||||
bb.server.process.serverlog("Parsing completed")
|
||||
collectlog.debug("parsing complete")
|
||||
if self.parser.error:
|
||||
raise bb.BBHandledException()
|
||||
@@ -1644,7 +1690,7 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
self.handlePrefProviders()
|
||||
for mc in self.multiconfigs:
|
||||
self.recipecaches[mc].bbfile_priority = self.collections[mc].collection_priorities(self.recipecaches[mc].pkg_fn, self.parser.mcfilelist[mc], self.data)
|
||||
self.state = State.RUNNING
|
||||
self.state = state.running
|
||||
|
||||
# Send an event listing all stamps reachable after parsing
|
||||
# which the metadata may use to clean up stale data
|
||||
@@ -1717,10 +1763,10 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
|
||||
def shutdown(self, force=False):
|
||||
if force:
|
||||
self.state = State.FORCE_SHUTDOWN
|
||||
self.state = state.forceshutdown
|
||||
bb.event._should_exit.set()
|
||||
else:
|
||||
self.state = State.SHUTDOWN
|
||||
self.state = state.shutdown
|
||||
|
||||
if self.parser:
|
||||
self.parser.shutdown(clean=False)
|
||||
@@ -1730,7 +1776,7 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
if hasattr(self.parser, 'shutdown'):
|
||||
self.parser.shutdown(clean=False)
|
||||
self.parser.final_cleanup()
|
||||
self.state = State.INITIAL
|
||||
self.state = state.initial
|
||||
bb.event._should_exit.clear()
|
||||
|
||||
def reset(self):
|
||||
@@ -1750,7 +1796,8 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
|
||||
self.data = self.databuilder.data
|
||||
# In theory tinfoil could have modified the base data before parsing,
|
||||
# ideally need to track if anything did modify the datastore
|
||||
self._parsecache_set(False)
|
||||
self.parsecache_valid = False
|
||||
|
||||
|
||||
class CookerExit(bb.event.Event):
|
||||
"""
|
||||
@@ -1771,10 +1818,10 @@ class CookerCollectFiles(object):
        self.bbfile_config_priorities = sorted(priorities, key=lambda tup: tup[1], reverse=True)

    def calc_bbfile_priority(self, filename):
        for layername, _, regex, pri in self.bbfile_config_priorities:
        for _, _, regex, pri in self.bbfile_config_priorities:
            if regex.match(filename):
                return pri, regex, layername
        return 0, None, None
                return pri, regex
        return 0, None

def get_bbfiles(self):
|
||||
"""Get list of default .bb files by reading out the current directory"""
|
||||
@@ -1793,7 +1840,7 @@ class CookerCollectFiles(object):
|
||||
for ignored in ('SCCS', 'CVS', '.svn'):
|
||||
if ignored in dirs:
|
||||
dirs.remove(ignored)
|
||||
found += [os.path.join(dir, f) for f in files if (f.endswith(('.bb', '.bbappend')))]
|
||||
found += [os.path.join(dir, f) for f in files if (f.endswith(['.bb', '.bbappend']))]
|
||||
|
||||
return found
|
||||
|
||||
@@ -1816,9 +1863,9 @@ class CookerCollectFiles(object):
|
||||
collectlog.error("no recipe files to build, check your BBPATH and BBFILES?")
|
||||
bb.event.fire(CookerExit(), eventdata)
# We need to track where we look so that we can know when the cache is invalid. There
|
||||
# is no nice way to do this, this is horrid. We intercept the os.listdir() and os.scandir()
|
||||
# calls while we run glob().
|
||||
# We need to track where we look so that we can add inotify watches. There
|
||||
# is no nice way to do this, this is horrid. We intercept the os.listdir()
|
||||
# (or os.scandir() for python 3.6+) calls while we run glob().
|
||||
origlistdir = os.listdir
|
||||
if hasattr(os, 'scandir'):
|
||||
origscandir = os.scandir
|
||||
@@ -1947,7 +1994,7 @@ class CookerCollectFiles(object):
|
||||
# Calculate priorities for each file
|
||||
for p in pkgfns:
|
||||
realfn, cls, mc = bb.cache.virtualfn2realfn(p)
|
||||
priorities[p], regex, _ = self.calc_bbfile_priority(realfn)
|
||||
priorities[p], regex = self.calc_bbfile_priority(realfn)
|
||||
if regex in unmatched_regex:
|
||||
matched_regex.add(regex)
|
||||
unmatched_regex.remove(regex)
|
||||
@@ -2084,7 +2131,7 @@ class Parser(multiprocessing.Process):
|
||||
self.results.close()
|
||||
self.results.join_thread()
|
||||
|
||||
def parse(self, mc, cache, filename, appends, layername):
|
||||
def parse(self, mc, cache, filename, appends):
|
||||
try:
|
||||
origfilter = bb.event.LogHandler.filter
|
||||
# Record the filename we're parsing into any events generated
|
||||
@@ -2098,10 +2145,11 @@ class Parser(multiprocessing.Process):
|
||||
bb.event.set_class_handlers(self.handlers.copy())
|
||||
bb.event.LogHandler.filter = parse_filter
|
||||
|
||||
return True, mc, cache.parse(filename, appends, layername)
|
||||
return True, mc, cache.parse(filename, appends)
|
||||
except Exception as exc:
|
||||
tb = sys.exc_info()[2]
|
||||
exc.recipe = filename
|
||||
exc.traceback = list(bb.exceptions.extract_traceback(tb, context=3))
|
||||
return True, None, exc
|
||||
# Need to turn BaseExceptions into Exceptions here so we gracefully shutdown
|
||||
# and for example a worker thread doesn't just exit on its own in response to
|
||||
@@ -2116,7 +2164,7 @@ class CookerParser(object):
|
||||
self.mcfilelist = mcfilelist
|
||||
self.cooker = cooker
|
||||
self.cfgdata = cooker.data
|
||||
self.cfghash = cooker.databuilder.data_hash
|
||||
self.cfghash = cooker.data_hash
|
||||
self.cfgbuilder = cooker.databuilder
|
||||
|
||||
# Accounting statistics
|
||||
@@ -2137,11 +2185,10 @@ class CookerParser(object):
|
||||
for mc in self.cooker.multiconfigs:
|
||||
for filename in self.mcfilelist[mc]:
|
||||
appends = self.cooker.collections[mc].get_file_appends(filename)
|
||||
layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2]
|
||||
if not self.bb_caches[mc].cacheValid(filename, appends):
|
||||
self.willparse.add((mc, self.bb_caches[mc], filename, appends, layername))
|
||||
self.willparse.add((mc, self.bb_caches[mc], filename, appends))
|
||||
else:
|
||||
self.fromcache.add((mc, self.bb_caches[mc], filename, appends, layername))
|
||||
self.fromcache.add((mc, self.bb_caches[mc], filename, appends))
|
||||
|
||||
self.total = len(self.fromcache) + len(self.willparse)
|
||||
self.toparse = len(self.willparse)
|
||||
@@ -2228,8 +2275,9 @@ class CookerParser(object):
|
||||
|
||||
for process in self.processes:
|
||||
process.join()
|
||||
# clean up zombies
|
||||
process.close()
|
||||
# Added in 3.7, cleans up zombies
|
||||
if hasattr(process, "close"):
|
||||
process.close()
|
||||
|
||||
bb.codeparser.parser_cache_save()
|
||||
bb.codeparser.parser_cache_savemerge()
|
||||
@@ -2239,20 +2287,19 @@ class CookerParser(object):
|
||||
profiles = []
|
||||
for i in self.process_names:
|
||||
logfile = "profile-parse-%s.log" % i
|
||||
if os.path.exists(logfile) and os.path.getsize(logfile):
|
||||
if os.path.exists(logfile):
|
||||
profiles.append(logfile)
|
||||
|
||||
if profiles:
|
||||
pout = "profile-parse.log.processed"
|
||||
bb.utils.process_profilelog(profiles, pout = pout)
|
||||
print("Processed parsing statistics saved to %s" % (pout))
|
||||
pout = "profile-parse.log.processed"
|
||||
bb.utils.process_profilelog(profiles, pout = pout)
|
||||
print("Processed parsing statistics saved to %s" % (pout))
|
||||
|
||||
def final_cleanup(self):
|
||||
if self.syncthread:
|
||||
self.syncthread.join()
|
||||
|
||||
def load_cached(self):
|
||||
for mc, cache, filename, appends, layername in self.fromcache:
|
||||
for mc, cache, filename, appends in self.fromcache:
|
||||
infos = cache.loadCached(filename, appends)
|
||||
yield False, mc, infos
|
||||
|
||||
@@ -2302,12 +2349,8 @@ class CookerParser(object):
|
||||
return False
|
||||
except ParsingFailure as exc:
|
||||
self.error += 1
|
||||
|
||||
exc_desc = str(exc)
|
||||
if isinstance(exc, SystemExit) and not isinstance(exc.code, str):
|
||||
exc_desc = 'Exited with "%d"' % exc.code
|
||||
|
||||
logger.error('Unable to parse %s: %s' % (exc.recipe, exc_desc))
|
||||
logger.error('Unable to parse %s: %s' %
|
||||
(exc.recipe, bb.exceptions.to_string(exc.realexception)))
|
||||
self.shutdown(clean=False)
|
||||
return False
|
||||
except bb.parse.ParseError as exc:
|
||||
@@ -2316,33 +2359,20 @@ class CookerParser(object):
|
||||
self.shutdown(clean=False, eventmsg=str(exc))
|
||||
return False
|
||||
except bb.data_smart.ExpansionError as exc:
|
||||
def skip_frames(f, fn_prefix):
|
||||
while f and f.tb_frame.f_code.co_filename.startswith(fn_prefix):
|
||||
f = f.tb_next
|
||||
return f
|
||||
|
||||
self.error += 1
|
||||
bbdir = os.path.dirname(__file__) + os.sep
|
||||
etype, value, tb = sys.exc_info()
|
||||
|
||||
# Remove any frames where the code comes from bitbake. This
|
||||
# prevents deep (and pretty useless) backtraces for expansion error
|
||||
tb = skip_frames(tb, bbdir)
|
||||
cur = tb
|
||||
while cur:
|
||||
cur.tb_next = skip_frames(cur.tb_next, bbdir)
|
||||
cur = cur.tb_next
|
||||
|
||||
etype, value, _ = sys.exc_info()
|
||||
tb = list(itertools.dropwhile(lambda e: e.filename.startswith(bbdir), exc.traceback))
|
||||
logger.error('ExpansionError during parsing %s', value.recipe,
|
||||
exc_info=(etype, value, tb))
|
||||
self.shutdown(clean=False)
|
||||
return False
|
||||
except Exception as exc:
|
||||
self.error += 1
|
||||
_, value, _ = sys.exc_info()
|
||||
etype, value, tb = sys.exc_info()
|
||||
if hasattr(value, "recipe"):
|
||||
logger.error('Unable to parse %s' % value.recipe,
|
||||
exc_info=sys.exc_info())
|
||||
exc_info=(etype, value, exc.traceback))
|
||||
else:
|
||||
# Most likely, an exception occurred during raising an exception
|
||||
import traceback
|
||||
@@ -2363,7 +2393,7 @@ class CookerParser(object):
|
||||
for virtualfn, info_array in result:
|
||||
if info_array[0].skipped:
|
||||
self.skipped += 1
|
||||
self.cooker.skiplist_by_mc[mc][virtualfn] = SkippedPackage(info_array[0])
|
||||
self.cooker.skiplist[virtualfn] = SkippedPackage(info_array[0])
|
||||
self.bb_caches[mc].add_info(virtualfn, info_array, self.cooker.recipecaches[mc],
|
||||
parsed=parsed, watcher = self.cooker.add_filewatch)
|
||||
return True
|
||||
@@ -2372,10 +2402,9 @@ class CookerParser(object):
|
||||
bb.cache.SiggenRecipeInfo.reset()
|
||||
to_reparse = set()
|
||||
for mc in self.cooker.multiconfigs:
|
||||
layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2]
|
||||
to_reparse.add((mc, filename, self.cooker.collections[mc].get_file_appends(filename), layername))
|
||||
to_reparse.add((mc, filename, self.cooker.collections[mc].get_file_appends(filename)))
|
||||
|
||||
for mc, filename, appends, layername in to_reparse:
|
||||
infos = self.bb_caches[mc].parse(filename, appends, layername)
|
||||
for mc, filename, appends in to_reparse:
|
||||
infos = self.bb_caches[mc].parse(filename, appends)
|
||||
for vfn, info_array in infos:
|
||||
self.cooker.recipecaches[mc].add_from_recipeinfo(vfn, info_array)
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
|
||||
#
|
||||
# Copyright (C) 2003, 2004 Chris Larson
|
||||
# Copyright (C) 2003, 2004 Phil Blundell
|
||||
@@ -255,21 +254,14 @@ class CookerDataBuilder(object):
|
||||
self.data = self.basedata
|
||||
self.mcdata = {}
|
||||
|
||||
def calc_datastore_hashes(self):
|
||||
data_hash = hashlib.sha256()
|
||||
data_hash.update(self.data.get_hash().encode('utf-8'))
|
||||
multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
|
||||
for config in multiconfig:
|
||||
data_hash.update(self.mcdata[config].get_hash().encode('utf-8'))
|
||||
self.data_hash = data_hash.hexdigest()
|
||||
|
||||
def parseBaseConfiguration(self, worker=False):
|
||||
mcdata = {}
|
||||
data_hash = hashlib.sha256()
|
||||
try:
|
||||
self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles)
|
||||
|
||||
servercontext = self.data.getVar("BB_WORKERCONTEXT", False) is None and not worker
|
||||
bb.fetch.fetcher_init(self.data, servercontext)
|
||||
if self.data.getVar("BB_WORKERCONTEXT", False) is None and not worker:
|
||||
bb.fetch.fetcher_init(self.data)
|
||||
bb.parse.init_parser(self.data)
|
||||
|
||||
bb.event.fire(bb.event.ConfigParsed(), self.data)
|
||||
@@ -287,6 +279,7 @@ class CookerDataBuilder(object):
|
||||
bb.event.fire(bb.event.ConfigParsed(), self.data)
|
||||
|
||||
bb.parse.init_parser(self.data)
|
||||
data_hash.update(self.data.get_hash().encode('utf-8'))
|
||||
mcdata[''] = self.data
|
||||
|
||||
multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
|
||||
@@ -296,9 +289,11 @@ class CookerDataBuilder(object):
|
||||
parsed_mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
|
||||
bb.event.fire(bb.event.ConfigParsed(), parsed_mcdata)
|
||||
mcdata[config] = parsed_mcdata
|
||||
data_hash.update(parsed_mcdata.get_hash().encode('utf-8'))
|
||||
if multiconfig:
|
||||
bb.event.fire(bb.event.MultiConfigParsed(mcdata), self.data)
|
||||
|
||||
self.data_hash = data_hash.hexdigest()
|
||||
except bb.data_smart.ExpansionError as e:
|
||||
logger.error(str(e))
|
||||
raise bb.BBHandledException()
|
||||
@@ -333,7 +328,6 @@ class CookerDataBuilder(object):
|
||||
for mc in mcdata:
|
||||
self.mcdata[mc] = bb.data.createCopy(mcdata[mc])
|
||||
self.data = self.mcdata['']
|
||||
self.calc_datastore_hashes()
|
||||
|
||||
def reset(self):
|
||||
# We may not have run parseBaseConfiguration() yet
|
||||
@@ -346,7 +340,7 @@ class CookerDataBuilder(object):
|
||||
def _findLayerConf(self, data):
|
||||
return findConfigFile("bblayers.conf", data)
|
||||
|
||||
def parseConfigurationFiles(self, prefiles, postfiles, mc = ""):
|
||||
def parseConfigurationFiles(self, prefiles, postfiles, mc = "default"):
|
||||
data = bb.data.createCopy(self.basedata)
|
||||
data.setVar("BB_CURRENT_MC", mc)
|
||||
|
||||
@@ -500,19 +494,18 @@ class CookerDataBuilder(object):
|
||||
return data
|
||||
|
||||
@staticmethod
|
||||
def _parse_recipe(bb_data, bbfile, appends, mc, layername):
|
||||
def _parse_recipe(bb_data, bbfile, appends, mc=''):
|
||||
bb_data.setVar("__BBMULTICONFIG", mc)
|
||||
bb_data.setVar("FILE_LAYERNAME", layername)
|
||||
|
||||
bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
|
||||
bb.parse.cached_mtime_noerror(bbfile_loc)
|
||||
|
||||
if appends:
|
||||
bb_data.setVar('__BBAPPEND', " ".join(appends))
|
||||
bb_data = bb.parse.handle(bbfile, bb_data)
|
||||
return bb_data
|
||||
|
||||
return bb.parse.handle(bbfile, bb_data)
|
||||
|
||||
def parseRecipeVariants(self, bbfile, appends, virtonly=False, mc=None, layername=None):
|
||||
def parseRecipeVariants(self, bbfile, appends, virtonly=False, mc=None):
|
||||
"""
|
||||
Load and parse one .bb build file
|
||||
Return the data and whether parsing resulted in the file being skipped
|
||||
@@ -522,31 +515,32 @@ class CookerDataBuilder(object):
|
||||
(bbfile, virtual, mc) = bb.cache.virtualfn2realfn(bbfile)
|
||||
bb_data = self.mcdata[mc].createCopy()
|
||||
bb_data.setVar("__ONLYFINALISE", virtual or "default")
|
||||
return self._parse_recipe(bb_data, bbfile, appends, mc, layername)
|
||||
datastores = self._parse_recipe(bb_data, bbfile, appends, mc)
|
||||
return datastores
|
||||
|
||||
if mc is not None:
|
||||
bb_data = self.mcdata[mc].createCopy()
|
||||
return self._parse_recipe(bb_data, bbfile, appends, mc, layername)
|
||||
return self._parse_recipe(bb_data, bbfile, appends, mc)
|
||||
|
||||
bb_data = self.data.createCopy()
|
||||
datastores = self._parse_recipe(bb_data, bbfile, appends, '', layername)
|
||||
datastores = self._parse_recipe(bb_data, bbfile, appends)
|
||||
|
||||
for mc in self.mcdata:
|
||||
if not mc:
|
||||
continue
|
||||
bb_data = self.mcdata[mc].createCopy()
|
||||
newstores = self._parse_recipe(bb_data, bbfile, appends, mc, layername)
|
||||
newstores = self._parse_recipe(bb_data, bbfile, appends, mc)
|
||||
for ns in newstores:
|
||||
datastores["mc:%s:%s" % (mc, ns)] = newstores[ns]
|
||||
|
||||
return datastores
|
||||
|
||||
def parseRecipe(self, virtualfn, appends, layername):
|
||||
def parseRecipe(self, virtualfn, appends):
|
||||
"""
|
||||
Return a complete set of data for fn.
|
||||
To do this, we need to parse the file.
|
||||
"""
|
||||
logger.debug("Parsing %s (full)" % virtualfn)
|
||||
(fn, virtual, mc) = bb.cache.virtualfn2realfn(virtualfn)
|
||||
datastores = self.parseRecipeVariants(virtualfn, appends, virtonly=True, layername=layername)
|
||||
return datastores[virtual]
|
||||
bb_data = self.parseRecipeVariants(virtualfn, appends, virtonly=True)
|
||||
return bb_data[virtual]
|
||||
|
||||
@@ -285,7 +285,6 @@ def build_dependencies(key, keys, mod_funcs, shelldeps, varflagsexcl, ignored_va
|
||||
value += "\n_remove of %s" % r
|
||||
deps |= r2.references
|
||||
deps = deps | (keys & r2.execs)
|
||||
value = handle_contains(value, r2.contains, exclusions, d)
|
||||
return value
|
||||
|
||||
deps = set()
|
||||
@@ -293,7 +292,7 @@ def build_dependencies(key, keys, mod_funcs, shelldeps, varflagsexcl, ignored_va
|
||||
if key in mod_funcs:
|
||||
exclusions = set()
|
||||
moddep = bb.codeparser.modulecode_deps[key]
|
||||
value = handle_contains(moddep[4], moddep[3], exclusions, d)
|
||||
value = handle_contains("", moddep[3], exclusions, d)
|
||||
return frozenset((moddep[0] | keys & moddep[1]) - ignored_vars), value
|
||||
|
||||
if key[-1] == ']':
|
||||
|
||||
@@ -16,10 +16,7 @@ BitBake build tools.
|
||||
#
|
||||
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
|
||||
|
||||
import builtins
|
||||
import copy
|
||||
import re
|
||||
import sys
|
||||
import copy, re, sys, traceback
|
||||
from collections.abc import MutableMapping
|
||||
import logging
|
||||
import hashlib
|
||||
@@ -31,7 +28,7 @@ logger = logging.getLogger("BitBake.Data")
|
||||
|
||||
__setvar_keyword__ = [":append", ":prepend", ":remove"]
|
||||
__setvar_regexp__ = re.compile(r'(?P<base>.*?)(?P<keyword>:append|:prepend|:remove)(:(?P<add>[^A-Z]*))?$')
|
||||
__expand_var_regexp__ = re.compile(r"\${[a-zA-Z0-9\-_+./~:]+}")
|
||||
__expand_var_regexp__ = re.compile(r"\${[a-zA-Z0-9\-_+./~:]+?}")
|
||||
__expand_python_regexp__ = re.compile(r"\${@(?:{.*?}|.)+?}")
|
||||
__whitespace_split__ = re.compile(r'(\s)')
|
||||
__override_regexp__ = re.compile(r'[a-z0-9]+')
|
||||
@@ -106,68 +103,66 @@ class VariableParse:
|
||||
self.contains = {}
|
||||
|
||||
def var_sub(self, match):
|
||||
key = match.group()[2:-1]
|
||||
if self.varname and key:
|
||||
if self.varname == key:
|
||||
raise Exception("variable %s references itself!" % self.varname)
|
||||
var = self.d.getVarFlag(key, "_content")
|
||||
self.references.add(key)
|
||||
if var is not None:
|
||||
return var
|
||||
else:
|
||||
return match.group()
|
||||
key = match.group()[2:-1]
|
||||
if self.varname and key:
|
||||
if self.varname == key:
|
||||
raise Exception("variable %s references itself!" % self.varname)
|
||||
var = self.d.getVarFlag(key, "_content")
|
||||
self.references.add(key)
|
||||
if var is not None:
|
||||
return var
|
||||
else:
|
||||
return match.group()
|
||||
|
||||
def python_sub(self, match):
|
||||
if isinstance(match, str):
|
||||
code = match
|
||||
else:
|
||||
code = match.group()[3:-1]
|
||||
|
||||
# Do not run code that contains one or more unexpanded variables
|
||||
# instead return the code with the characters we removed put back
|
||||
if __expand_var_regexp__.findall(code):
|
||||
return "${@" + code + "}"
|
||||
|
||||
if self.varname:
|
||||
varname = 'Var <%s>' % self.varname
|
||||
else:
|
||||
varname = '<expansion>'
|
||||
codeobj = compile(code.strip(), varname, "eval")
|
||||
|
||||
parser = bb.codeparser.PythonParser(self.varname, logger)
|
||||
parser.parse_python(code)
|
||||
if self.varname:
|
||||
vardeps = self.d.getVarFlag(self.varname, "vardeps")
|
||||
if vardeps is None:
|
||||
parser.log.flush()
|
||||
else:
|
||||
parser.log.flush()
|
||||
self.references |= parser.references
|
||||
self.execs |= parser.execs
|
||||
|
||||
for k in parser.contains:
|
||||
if k not in self.contains:
|
||||
self.contains[k] = parser.contains[k].copy()
|
||||
if isinstance(match, str):
|
||||
code = match
|
||||
else:
|
||||
self.contains[k].update(parser.contains[k])
|
||||
value = utils.better_eval(codeobj, DataContext(self.d), {'d' : self.d})
|
||||
return str(value)
|
||||
code = match.group()[3:-1]
|
||||
|
||||
# Do not run code that contains one or more unexpanded variables
|
||||
# instead return the code with the characters we removed put back
|
||||
if __expand_var_regexp__.findall(code):
|
||||
return "${@" + code + "}"
|
||||
|
||||
if self.varname:
|
||||
varname = 'Var <%s>' % self.varname
|
||||
else:
|
||||
varname = '<expansion>'
|
||||
codeobj = compile(code.strip(), varname, "eval")
|
||||
|
||||
parser = bb.codeparser.PythonParser(self.varname, logger)
|
||||
parser.parse_python(code)
|
||||
if self.varname:
|
||||
vardeps = self.d.getVarFlag(self.varname, "vardeps")
|
||||
if vardeps is None:
|
||||
parser.log.flush()
|
||||
else:
|
||||
parser.log.flush()
|
||||
self.references |= parser.references
|
||||
self.execs |= parser.execs
|
||||
|
||||
for k in parser.contains:
|
||||
if k not in self.contains:
|
||||
self.contains[k] = parser.contains[k].copy()
|
||||
else:
|
||||
self.contains[k].update(parser.contains[k])
|
||||
value = utils.better_eval(codeobj, DataContext(self.d), {'d' : self.d})
|
||||
return str(value)
|
||||
|
||||
|
||||
class DataContext(dict):
|
||||
excluded = set([i for i in dir(builtins) if not i.startswith('_')] + ['oe'])
|
||||
|
||||
def __init__(self, metadata, **kwargs):
|
||||
self.metadata = metadata
|
||||
dict.__init__(self, **kwargs)
|
||||
self['d'] = metadata
|
||||
self.context = set(bb.utils.get_context())
|
||||
|
||||
def __missing__(self, key):
|
||||
if key in self.excluded or key in self.context:
|
||||
# Skip commonly accessed invalid variables
|
||||
if key in ['bb', 'oe', 'int', 'bool', 'time', 'str', 'os']:
|
||||
raise KeyError(key)
|
||||
|
||||
value = self.metadata.getVar(key)
|
||||
if value is None:
|
||||
if value is None or self.metadata.getVarFlag(key, 'func', False):
|
||||
raise KeyError(key)
|
||||
else:
|
||||
return value
|
||||
@@ -272,9 +267,12 @@ class VariableHistory(object):
|
||||
return
|
||||
if 'op' not in loginfo or not loginfo['op']:
|
||||
loginfo['op'] = 'set'
|
||||
if 'detail' in loginfo:
|
||||
loginfo['detail'] = str(loginfo['detail'])
|
||||
if 'variable' not in loginfo or 'file' not in loginfo:
|
||||
raise ValueError("record() missing variable or file.")
|
||||
var = loginfo['variable']
|
||||
|
||||
if var not in self.variables:
|
||||
self.variables[var] = []
|
||||
if not isinstance(self.variables[var], list):
|
||||
@@ -333,8 +331,7 @@ class VariableHistory(object):
|
||||
flag = '[%s] ' % (event['flag'])
|
||||
else:
|
||||
flag = ''
|
||||
o.write("# %s %s:%s%s\n# %s\"%s\"\n" % \
|
||||
(event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', str(event['detail']))))
|
||||
o.write("# %s %s:%s%s\n# %s\"%s\"\n" % (event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', event['detail'])))
|
||||
if len(history) > 1:
|
||||
o.write("# pre-expansion value:\n")
|
||||
o.write('# "%s"\n' % (commentVal))
|
||||
@@ -388,7 +385,7 @@ class VariableHistory(object):
|
||||
if isset and event['op'] == 'set?':
|
||||
continue
|
||||
isset = True
|
||||
items = d.expand(str(event['detail'])).split()
|
||||
items = d.expand(event['detail']).split()
|
||||
for item in items:
|
||||
# This is a little crude but is belt-and-braces to avoid us
|
||||
# having to handle every possible operation type specifically
|
||||
@@ -580,10 +577,12 @@ class DataSmart(MutableMapping):
|
||||
else:
|
||||
loginfo['op'] = keyword
|
||||
self.varhistory.record(**loginfo)
|
||||
# todo make sure keyword is not __doc__ or __module__
|
||||
# pay the cookie monster
|
||||
|
||||
# more cookies for the cookie monster
|
||||
self._setvar_update_overrides(base, **loginfo)
|
||||
if ':' in var:
|
||||
self._setvar_update_overrides(base, **loginfo)
|
||||
|
||||
if base in self.overridevars:
|
||||
self._setvar_update_overridevars(var, value)
|
||||
@@ -636,7 +635,6 @@ class DataSmart(MutableMapping):
|
||||
nextnew.update(vardata.contains.keys())
|
||||
new = nextnew
|
||||
self.overrides = None
|
||||
self.expand_cache = {}
|
||||
|
||||
def _setvar_update_overrides(self, var, **loginfo):
|
||||
# aka pay the cookie monster
|
||||
@@ -826,8 +824,6 @@ class DataSmart(MutableMapping):
|
||||
value = copy.copy(local_var[flag])
|
||||
elif flag == "_content" and "_defaultval" in local_var and not noweakdefault:
|
||||
value = copy.copy(local_var["_defaultval"])
|
||||
elif "_defaultval_flag_"+flag in local_var and not noweakdefault:
|
||||
value = copy.copy(local_var["_defaultval_flag_"+flag])
|
||||
|
||||
|
||||
if flag == "_content" and local_var is not None and ":append" in local_var and not parsing:
|
||||
@@ -919,8 +915,6 @@ class DataSmart(MutableMapping):
|
||||
self.varhistory.record(**loginfo)
|
||||
|
||||
del self.dict[var][flag]
|
||||
if ("_defaultval_flag_" + flag) in self.dict[var]:
|
||||
del self.dict[var]["_defaultval_flag_" + flag]
|
||||
|
||||
def appendVarFlag(self, var, flag, value, **loginfo):
|
||||
loginfo['op'] = 'append'
|
||||
@@ -955,22 +949,17 @@ class DataSmart(MutableMapping):
|
||||
flags = {}
|
||||
|
||||
if local_var:
|
||||
for i, val in local_var.items():
|
||||
if i.startswith("_defaultval_flag_") and not internalflags:
|
||||
i = i[len("_defaultval_flag_"):]
|
||||
if i not in local_var:
|
||||
flags[i] = val
|
||||
elif i.startswith(("_", ":")) and not internalflags:
|
||||
for i in local_var:
|
||||
if i.startswith(("_", ":")) and not internalflags:
|
||||
continue
|
||||
else:
|
||||
flags[i] = val
|
||||
|
||||
flags[i] = local_var[i]
|
||||
if expand and i in expand:
|
||||
flags[i] = self.expand(flags[i], var + "[" + i + "]")
|
||||
if len(flags) == 0:
|
||||
return None
|
||||
return flags
|
||||
|
||||
|
||||
def delVarFlags(self, var, **loginfo):
|
||||
self.expand_cache = {}
|
||||
if not var in self.dict:
|
||||
@@ -1120,10 +1109,5 @@ class DataSmart(MutableMapping):
|
||||
value = d.getVar(i, False) or ""
|
||||
data.update({i:value})
|
||||
|
||||
moddeps = bb.codeparser.modulecode_deps
|
||||
for dep in sorted(moddeps):
|
||||
# Ignore visitor code, sort sets
|
||||
data.update({'moddep[%s]' % dep : [sorted(moddeps[dep][0]), sorted(moddeps[dep][1]), sorted(moddeps[dep][2]), sorted(moddeps[dep][3]), moddeps[dep][4]]})
|
||||
|
||||
data_str = str([(k, data[k]) for k in sorted(data.keys())])
|
||||
return hashlib.sha256(data_str.encode("utf-8")).hexdigest()
|
||||
|
||||
@@ -19,6 +19,7 @@ import sys
|
||||
import threading
|
||||
import traceback
|
||||
|
||||
import bb.exceptions
|
||||
import bb.utils
|
||||
|
||||
# This is the pid for which we should generate the event. This is set when
|
||||
@@ -194,12 +195,7 @@ def fire_ui_handlers(event, d):
|
||||
ui_queue.append(event)
|
||||
return
|
||||
|
||||
with bb.utils.lock_timeout_nocheck(_thread_lock) as lock:
|
||||
if not lock:
|
||||
# If we can't get the lock, we may be recursively called, queue and return
|
||||
ui_queue.append(event)
|
||||
return
|
||||
|
||||
with bb.utils.lock_timeout(_thread_lock):
|
||||
errors = []
|
||||
for h in _ui_handlers:
|
||||
#print "Sending event %s" % event
|
||||
@@ -218,9 +214,6 @@ def fire_ui_handlers(event, d):
|
||||
for h in errors:
|
||||
del _ui_handlers[h]
|
||||
|
||||
while ui_queue:
|
||||
fire_ui_handlers(ui_queue.pop(), d)
|
||||
|
||||
def fire(event, d):
|
||||
"""Fire off an Event"""
|
||||
|
||||
@@ -264,15 +257,14 @@ def register(name, handler, mask=None, filename=None, lineno=None, data=None):
|
||||
# handle string containing python code
|
||||
if isinstance(handler, str):
|
||||
tmp = "def %s(e, d):\n%s" % (name, handler)
|
||||
# Inject empty lines to make code match lineno in filename
|
||||
if lineno is not None:
|
||||
tmp = "\n" * (lineno-1) + tmp
|
||||
try:
|
||||
code = bb.methodpool.compile_cache(tmp)
|
||||
if not code:
|
||||
if filename is None:
|
||||
filename = "%s(e, d)" % name
|
||||
code = compile(tmp, filename, "exec", ast.PyCF_ONLY_AST)
|
||||
if lineno is not None:
|
||||
ast.increment_lineno(code, lineno-1)
|
||||
code = compile(code, filename, "exec")
|
||||
bb.methodpool.compile_cache_add(tmp, code)
|
||||
except SyntaxError:
|
||||
@@ -766,7 +758,13 @@ class LogHandler(logging.Handler):

def emit(self, record):
if record.exc_info:
record.bb_exc_formatted = traceback.format_exception(*record.exc_info)
etype, value, tb = record.exc_info
if hasattr(tb, 'tb_next'):
tb = list(bb.exceptions.extract_traceback(tb, context=3))
# Need to turn the value into something the logging system can pickle
record.bb_exc_info = (etype, value, tb)
record.bb_exc_formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
value = str(value)
record.exc_info = None
fire(record, None)
|
||||
@@ -859,14 +857,6 @@ class FindSigInfoResult(Event):
|
||||
Event.__init__(self)
|
||||
self.result = result
|
||||
|
||||
class GetTaskSignatureResult(Event):
|
||||
"""
|
||||
Event to return results from GetTaskSignatures command
|
||||
"""
|
||||
def __init__(self, sig):
|
||||
Event.__init__(self)
|
||||
self.sig = sig
|
||||
|
||||
class ParseError(Event):
|
||||
"""
|
||||
Event to indicate parse failed
|
||||
|
||||
96
bitbake/lib/bb/exceptions.py
Normal file
@@ -0,0 +1,96 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import inspect
|
||||
import traceback
|
||||
import bb.namedtuple_with_abc
|
||||
from collections import namedtuple
|
||||
|
||||
|
||||
class TracebackEntry(namedtuple.abc):
|
||||
"""Pickleable representation of a traceback entry"""
|
||||
_fields = 'filename lineno function args code_context index'
|
||||
_header = ' File "{0.filename}", line {0.lineno}, in {0.function}{0.args}'
|
||||
|
||||
def format(self, formatter=None):
|
||||
if not self.code_context:
|
||||
return self._header.format(self) + '\n'
|
||||
|
||||
formatted = [self._header.format(self) + ':\n']
|
||||
|
||||
for lineindex, line in enumerate(self.code_context):
|
||||
if formatter:
|
||||
line = formatter(line)
|
||||
|
||||
if lineindex == self.index:
|
||||
formatted.append(' >%s' % line)
|
||||
else:
|
||||
formatted.append(' %s' % line)
|
||||
return formatted
|
||||
|
||||
def __str__(self):
|
||||
return ''.join(self.format())
|
||||
|
||||
def _get_frame_args(frame):
|
||||
"""Get the formatted arguments and class (if available) for a frame"""
|
||||
arginfo = inspect.getargvalues(frame)
|
||||
|
||||
try:
|
||||
if not arginfo.args:
|
||||
return '', None
|
||||
# There have been reports from the field of python 2.6 which doesn't
|
||||
# return a namedtuple here but simply a tuple so fallback gracefully if
|
||||
# args isn't present.
|
||||
except AttributeError:
|
||||
return '', None
|
||||
|
||||
firstarg = arginfo.args[0]
|
||||
if firstarg == 'self':
|
||||
self = arginfo.locals['self']
|
||||
cls = self.__class__.__name__
|
||||
|
||||
arginfo.args.pop(0)
|
||||
del arginfo.locals['self']
|
||||
else:
|
||||
cls = None
|
||||
|
||||
formatted = inspect.formatargvalues(*arginfo)
|
||||
return formatted, cls
|
||||
|
||||
def extract_traceback(tb, context=1):
|
||||
frames = inspect.getinnerframes(tb, context)
|
||||
for frame, filename, lineno, function, code_context, index in frames:
|
||||
formatted_args, cls = _get_frame_args(frame)
|
||||
if cls:
|
||||
function = '%s.%s' % (cls, function)
|
||||
yield TracebackEntry(filename, lineno, function, formatted_args,
|
||||
code_context, index)
|
||||
|
||||
def format_extracted(extracted, formatter=None, limit=None):
|
||||
if limit:
|
||||
extracted = extracted[-limit:]
|
||||
|
||||
formatted = []
|
||||
for tracebackinfo in extracted:
|
||||
formatted.extend(tracebackinfo.format(formatter))
|
||||
return formatted
|
||||
|
||||
|
||||
def format_exception(etype, value, tb, context=1, limit=None, formatter=None):
|
||||
formatted = ['Traceback (most recent call last):\n']
|
||||
|
||||
if hasattr(tb, 'tb_next'):
|
||||
tb = extract_traceback(tb, context)
|
||||
|
||||
formatted.extend(format_extracted(tb, formatter, limit))
|
||||
formatted.extend(traceback.format_exception_only(etype, value))
|
||||
return formatted
|
||||
|
||||
def to_string(exc):
|
||||
if isinstance(exc, SystemExit):
|
||||
if not isinstance(exc.code, str):
|
||||
return 'Exited with "%d"' % exc.code
|
||||
return str(exc)
|
||||
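The LogHandler.emit() hunk earlier consumes these helpers via bb.exceptions.format_exception(etype, value, tb, limit=5). As a minimal sketch (assuming a tree where bb.exceptions is still shipped), the helpers can be exercised directly like this:

# Sketch only: assumes the bb package (and bb.exceptions) is importable.
import sys
import bb.exceptions

try:
    raise RuntimeError("boom")
except RuntimeError:
    etype, value, tb = sys.exc_info()
    # Same call shape as LogHandler.emit(): a trimmed, pickle-friendly traceback
    formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
    sys.stderr.write("".join(formatted))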
@@ -23,18 +23,17 @@ import collections
|
||||
import subprocess
|
||||
import pickle
|
||||
import errno
|
||||
import bb.utils
|
||||
import bb.persist_data, bb.utils
|
||||
import bb.checksum
|
||||
import bb.process
|
||||
import bb.event
|
||||
|
||||
__version__ = "2"
|
||||
_checksum_cache = bb.checksum.FileChecksumCache()
|
||||
_revisions_cache = bb.checksum.RevisionsCache()
|
||||
|
||||
logger = logging.getLogger("BitBake.Fetcher")
|
||||
|
||||
CHECKSUM_LIST = [ "goh1", "md5", "sha256", "sha1", "sha384", "sha512" ]
|
||||
CHECKSUM_LIST = [ "md5", "sha256", "sha1", "sha384", "sha512" ]
|
||||
SHOWN_CHECKSUM_LIST = ["sha256"]
|
||||
|
||||
class BBFetchException(Exception):
|
||||
@@ -238,7 +237,7 @@ class URI(object):
|
||||
# to RFC compliant URL format. E.g.:
|
||||
# file://foo.diff -> file:foo.diff
|
||||
if urlp.scheme in self._netloc_forbidden:
|
||||
uri = re.sub(r"(?<=:)//(?!/)", "", uri, count=1)
|
||||
uri = re.sub("(?<=:)//(?!/)", "", uri, 1)
|
||||
reparse = 1
|
||||
|
||||
if reparse:
|
||||
@@ -291,12 +290,12 @@ class URI(object):
|
||||
|
||||
def _param_str_split(self, string, elmdelim, kvdelim="="):
|
||||
ret = collections.OrderedDict()
|
||||
for k, v in [x.split(kvdelim, 1) if kvdelim in x else (x, None) for x in string.split(elmdelim) if x]:
|
||||
for k, v in [x.split(kvdelim, 1) for x in string.split(elmdelim) if x]:
|
||||
ret[k] = v
|
||||
return ret
|
||||
|
||||
def _param_str_join(self, dict_, elmdelim, kvdelim="="):
|
||||
return elmdelim.join([kvdelim.join([k, v]) if v else k for k, v in dict_.items()])
|
||||
return elmdelim.join([kvdelim.join([k, v]) for k, v in dict_.items()])
|
||||
|
||||
@property
|
||||
def hostport(self):
|
||||
@@ -353,14 +352,6 @@ def decodeurl(url):
|
||||
user, password, parameters).
|
||||
"""
|
||||
|
||||
uri = URI(url)
|
||||
path = uri.path if uri.path else "/"
|
||||
return uri.scheme, uri.hostport, path, uri.username, uri.password, uri.params
|
||||
|
||||
def decodemirrorurl(url):
|
||||
"""Decodes a mirror URL into the tokens (scheme, network location, path,
|
||||
user, password, parameters).
|
||||
"""
|
||||
m = re.compile('(?P<type>[^:]*)://((?P<user>[^/;]+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url)
|
||||
if not m:
|
||||
raise MalformedUrl(url)
|
||||
@@ -379,9 +370,6 @@ def decodemirrorurl(url):
|
||||
elif type.lower() == 'file':
|
||||
host = ""
|
||||
path = location
|
||||
if user:
|
||||
path = user + '@' + path
|
||||
user = ""
|
||||
else:
|
||||
host = location
|
||||
path = "/"
|
||||
@@ -400,7 +388,7 @@ def decodemirrorurl(url):
|
||||
if s:
|
||||
if not '=' in s:
|
||||
raise MalformedUrl(url, "The URL: '%s' is invalid: parameter %s does not specify a value (missing '=')" % (url, s))
|
||||
s1, s2 = s.split('=', 1)
|
||||
s1, s2 = s.split('=')
|
||||
p[s1] = s2
|
||||
|
||||
return type, host, urllib.parse.unquote(path), user, pswd, p
|
||||
@@ -414,34 +402,32 @@ def encodeurl(decoded):
|
||||
|
||||
if not type:
|
||||
raise MissingParameterError('type', "encoded from the data %s" % str(decoded))
|
||||
uri = URI()
|
||||
uri.scheme = type
|
||||
url = ['%s://' % type]
|
||||
if user and type != "file":
|
||||
uri.username = user
|
||||
url.append("%s" % user)
|
||||
if pswd:
|
||||
uri.password = pswd
|
||||
url.append(":%s" % pswd)
|
||||
url.append("@")
|
||||
if host and type != "file":
|
||||
uri.hostname = host
|
||||
url.append("%s" % host)
|
||||
if path:
|
||||
# Standardise path to ensure comparisons work
|
||||
while '//' in path:
|
||||
path = path.replace("//", "/")
|
||||
uri.path = path
|
||||
if type == "file":
|
||||
# Use old not IETF compliant style
|
||||
uri.relative = False
|
||||
url.append("%s" % urllib.parse.quote(path))
|
||||
if p:
|
||||
uri.params = p
|
||||
for parm in p:
|
||||
url.append(";%s=%s" % (parm, p[parm]))
|
||||
|
||||
return str(uri)
|
||||
return "".join(url)
|
||||
|
||||
def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
|
||||
if not ud.url or not uri_find or not uri_replace:
|
||||
logger.error("uri_replace: passed an undefined value, not replacing")
|
||||
return None
|
||||
uri_decoded = list(decodemirrorurl(ud.url))
|
||||
uri_find_decoded = list(decodemirrorurl(uri_find))
|
||||
uri_replace_decoded = list(decodemirrorurl(uri_replace))
|
||||
uri_decoded = list(decodeurl(ud.url))
|
||||
uri_find_decoded = list(decodeurl(uri_find))
|
||||
uri_replace_decoded = list(decodeurl(uri_replace))
|
||||
logger.debug2("For url %s comparing %s to %s" % (uri_decoded, uri_find_decoded, uri_replace_decoded))
|
||||
result_decoded = ['', '', '', '', '', {}]
|
||||
# 0 - type, 1 - host, 2 - path, 3 - user, 4- pswd, 5 - params
|
||||
@@ -474,7 +460,7 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
|
||||
for k in replacements:
|
||||
uri_replace_decoded[loc] = uri_replace_decoded[loc].replace(k, replacements[k])
|
||||
#bb.note("%s %s %s" % (regexp, uri_replace_decoded[loc], uri_decoded[loc]))
|
||||
result_decoded[loc] = re.sub(regexp, uri_replace_decoded[loc], uri_decoded[loc], count=1)
|
||||
result_decoded[loc] = re.sub(regexp, uri_replace_decoded[loc], uri_decoded[loc], 1)
|
||||
if loc == 2:
|
||||
# Handle path manipulations
|
||||
basename = None
|
||||
@@ -507,23 +493,18 @@ methods = []
|
||||
urldata_cache = {}
|
||||
saved_headrevs = {}
|
||||
|
||||
def fetcher_init(d, servercontext=True):
|
||||
def fetcher_init(d):
|
||||
"""
|
||||
Called to initialize the fetchers once the configuration data is known.
|
||||
Calls before this must not hit the cache.
|
||||
"""
|
||||
|
||||
_checksum_cache.init_cache(d.getVar("BB_CACHEDIR"))
|
||||
_revisions_cache.init_cache(d.getVar("BB_CACHEDIR"))
|
||||
|
||||
if not servercontext:
|
||||
return
|
||||
|
||||
revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
|
||||
try:
|
||||
# fetcher_init is called multiple times, so make sure we only save the
|
||||
# revs the first time it is called.
|
||||
if not bb.fetch2.saved_headrevs:
|
||||
bb.fetch2.saved_headrevs = _revisions_cache.get_revs()
|
||||
bb.fetch2.saved_headrevs = dict(revs)
|
||||
except:
|
||||
pass
|
||||
|
||||
@@ -533,10 +514,11 @@ def fetcher_init(d, servercontext=True):
|
||||
logger.debug("Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
elif srcrev_policy == "clear":
|
||||
logger.debug("Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
_revisions_cache.clear_cache()
|
||||
revs.clear()
|
||||
else:
|
||||
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
|
||||
|
||||
_checksum_cache.init_cache(d.getVar("BB_CACHEDIR"))
|
||||
|
||||
for m in methods:
|
||||
if hasattr(m, "init"):
|
||||
@@ -544,11 +526,9 @@ def fetcher_init(d, servercontext=True):
|
||||
|
||||
def fetcher_parse_save():
|
||||
_checksum_cache.save_extras()
|
||||
_revisions_cache.save_extras()
|
||||
|
||||
def fetcher_parse_done():
|
||||
_checksum_cache.save_merge()
|
||||
_revisions_cache.save_merge()
|
||||
|
||||
def fetcher_compare_revisions(d):
|
||||
"""
|
||||
@@ -556,7 +536,7 @@ def fetcher_compare_revisions(d):
|
||||
when bitbake was started and return true if they have changed.
|
||||
"""
|
||||
|
||||
headrevs = _revisions_cache.get_revs()
|
||||
headrevs = dict(bb.persist_data.persist('BB_URI_HEADREVS', d))
|
||||
return headrevs != bb.fetch2.saved_headrevs
|
||||
|
||||
def mirror_from_string(data):
|
||||
@@ -773,7 +753,7 @@ def get_autorev(d):
|
||||
d.setVar("__BBAUTOREV_SEEN", True)
|
||||
return "AUTOINC"
|
||||
|
||||
def _get_srcrev(d, method_name='sortable_revision'):
|
||||
def get_srcrev(d, method_name='sortable_revision'):
|
||||
"""
|
||||
Return the revision string, usually for use in the version string (PV) of the current package
|
||||
Most packages usually only have one SCM so we just pass on the call.
|
||||
@@ -794,7 +774,6 @@ def _get_srcrev(d, method_name='sortable_revision'):
|
||||
d.setVar("__BBINSRCREV", True)
|
||||
|
||||
scms = []
|
||||
revs = []
|
||||
fetcher = Fetch(d.getVar('SRC_URI').split(), d)
|
||||
urldata = fetcher.ud
|
||||
for u in urldata:
|
||||
@@ -802,19 +781,16 @@ def _get_srcrev(d, method_name='sortable_revision'):
|
||||
scms.append(u)
|
||||
|
||||
if not scms:
|
||||
d.delVar("__BBINSRCREV")
|
||||
return "", revs
|
||||
raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI")
|
||||
|
||||
|
||||
if len(scms) == 1:
|
||||
autoinc, rev = getattr(urldata[scms[0]].method, method_name)(urldata[scms[0]], d, urldata[scms[0]].name)
|
||||
revs.append(rev)
|
||||
if len(scms) == 1 and len(urldata[scms[0]].names) == 1:
|
||||
autoinc, rev = getattr(urldata[scms[0]].method, method_name)(urldata[scms[0]], d, urldata[scms[0]].names[0])
|
||||
if len(rev) > 10:
|
||||
rev = rev[:10]
|
||||
d.delVar("__BBINSRCREV")
|
||||
if autoinc:
|
||||
return "AUTOINC+" + rev, revs
|
||||
return rev, revs
|
||||
return "AUTOINC+" + rev
|
||||
return rev
|
||||
|
||||
#
|
||||
# Multiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT
|
||||
@@ -828,12 +804,12 @@ def _get_srcrev(d, method_name='sortable_revision'):
|
||||
seenautoinc = False
|
||||
for scm in scms:
|
||||
ud = urldata[scm]
|
||||
autoinc, rev = getattr(ud.method, method_name)(ud, d, ud.name)
|
||||
revs.append(rev)
|
||||
seenautoinc = seenautoinc or autoinc
|
||||
if len(rev) > 10:
|
||||
rev = rev[:10]
|
||||
name_to_rev[ud.name] = rev
|
||||
for name in ud.names:
|
||||
autoinc, rev = getattr(ud.method, method_name)(ud, d, name)
|
||||
seenautoinc = seenautoinc or autoinc
|
||||
if len(rev) > 10:
|
||||
rev = rev[:10]
|
||||
name_to_rev[name] = rev
|
||||
# Replace names by revisions in the SRCREV_FORMAT string. The approach used
|
||||
# here can handle names being prefixes of other names and names appearing
|
||||
# as substrings in revisions (in which case the name should not be
|
||||
@@ -847,21 +823,7 @@ def _get_srcrev(d, method_name='sortable_revision'):
|
||||
format = "AUTOINC+" + format
|
||||
|
||||
d.delVar("__BBINSRCREV")
|
||||
return format, revs
|
||||
|
||||
def get_hashvalue(d, method_name='sortable_revision'):
|
||||
pkgv, revs = _get_srcrev(d, method_name=method_name)
|
||||
return " ".join(revs)
|
||||
|
||||
def get_pkgv_string(d, method_name='sortable_revision'):
|
||||
pkgv, revs = _get_srcrev(d, method_name=method_name)
|
||||
return pkgv
|
||||
|
||||
def get_srcrev(d, method_name='sortable_revision'):
|
||||
pkgv, revs = _get_srcrev(d, method_name=method_name)
|
||||
if not pkgv:
|
||||
raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI")
|
||||
return pkgv
|
||||
return format
|
||||
|
||||
def localpath(url, d):
|
||||
fetcher = bb.fetch2.Fetch([url], d)
|
||||
@@ -891,13 +853,8 @@ FETCH_EXPORT_VARS = ['HOME', 'PATH',
|
||||
'AWS_PROFILE',
|
||||
'AWS_ACCESS_KEY_ID',
|
||||
'AWS_SECRET_ACCESS_KEY',
|
||||
'AWS_ROLE_ARN',
|
||||
'AWS_WEB_IDENTITY_TOKEN_FILE',
|
||||
'AWS_DEFAULT_REGION',
|
||||
'AWS_SESSION_TOKEN',
|
||||
'GIT_CACHE_PATH',
|
||||
'REMOTE_CONTAINERS_IPC',
|
||||
'GITHUB_TOKEN',
|
||||
'SSL_CERT_DIR']
|
||||
|
||||
def get_fetcher_environment(d):
|
||||
@@ -963,10 +920,7 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
|
||||
elif e.stderr:
|
||||
output = "output:\n%s" % e.stderr
|
||||
else:
|
||||
if log:
|
||||
output = "see logfile for output"
|
||||
else:
|
||||
output = "no output"
|
||||
output = "no output"
|
||||
error_message = "Fetch command %s failed with exit code %s, %s" % (e.command, e.exitcode, output)
|
||||
except bb.process.CmdError as e:
|
||||
error_message = "Fetch command %s could not be run:\n%s" % (e.command, e.msg)
|
||||
@@ -1138,8 +1092,7 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):
|
||||
logger.debug("Mirror fetch failure for url %s (original url: %s)" % (ud.url, origud.url))
|
||||
logger.debug(str(e))
|
||||
try:
|
||||
if ud.method.cleanup_upon_failure():
|
||||
ud.method.clean(ud, ld)
|
||||
ud.method.clean(ud, ld)
|
||||
except UnboundLocalError:
|
||||
pass
|
||||
return False
|
||||
@@ -1194,7 +1147,7 @@ def trusted_network(d, url):
|
||||
if bb.utils.to_boolean(d.getVar("BB_NO_NETWORK")):
|
||||
return True
|
||||
|
||||
pkgname = d.getVar('PN')
|
||||
pkgname = d.expand(d.getVar('PN', False))
|
||||
trusted_hosts = None
|
||||
if pkgname:
|
||||
trusted_hosts = d.getVarFlag('BB_ALLOWED_NETWORKS', pkgname, False)
|
||||
@@ -1247,17 +1200,20 @@ def srcrev_internal_helper(ud, d, name):
|
||||
if srcrev and srcrev != "INVALID":
|
||||
break
|
||||
|
||||
if 'rev' in ud.parm:
|
||||
parmrev = ud.parm['rev']
|
||||
if 'rev' in ud.parm and 'tag' in ud.parm:
|
||||
raise FetchError("Please specify a ;rev= parameter or a ;tag= parameter in the url %s but not both." % (ud.url))
|
||||
|
||||
if 'rev' in ud.parm or 'tag' in ud.parm:
|
||||
if 'rev' in ud.parm:
|
||||
parmrev = ud.parm['rev']
|
||||
else:
|
||||
parmrev = ud.parm['tag']
|
||||
if srcrev == "INVALID" or not srcrev:
|
||||
return parmrev
|
||||
if srcrev != parmrev:
|
||||
raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please specify one valid value" % (srcrev, parmrev))
|
||||
return parmrev
|
||||
|
||||
if 'tag' in ud.parm and (srcrev == "INVALID" or not srcrev):
|
||||
return ud.parm['tag']
|
||||
|
||||
if srcrev == "INVALID" or not srcrev:
|
||||
raise FetchError("Please set a valid SRCREV for url %s (possible key names are %s, or use a ;rev=X URL parameter)" % (str(attempts), ud.url), ud.url)
|
||||
if srcrev == "AUTOINC":
|
||||
@@ -1278,9 +1234,9 @@ def get_checksum_file_list(d):
|
||||
ud = fetch.ud[u]
|
||||
if ud and isinstance(ud.method, local.Local):
|
||||
found = False
|
||||
paths = ud.method.localfile_searchpaths(ud, d)
|
||||
paths = ud.method.localpaths(ud, d)
|
||||
for f in paths:
|
||||
pth = ud.path
|
||||
pth = ud.decodedurl
|
||||
if os.path.exists(f):
|
||||
found = True
|
||||
filelist.append(f + ":" + str(os.path.exists(f)))
|
||||
@@ -1325,28 +1281,23 @@ class FetchData(object):
|
||||
self.setup = False
|
||||
|
||||
def configure_checksum(checksum_id):
|
||||
checksum_plain_name = "%ssum" % checksum_id
|
||||
if "name" in self.parm:
|
||||
checksum_name = "%s.%ssum" % (self.parm["name"], checksum_id)
|
||||
else:
|
||||
checksum_name = checksum_plain_name
|
||||
checksum_name = "%ssum" % checksum_id
|
||||
|
||||
setattr(self, "%s_name" % checksum_id, checksum_name)
|
||||
|
||||
if checksum_name in self.parm:
|
||||
checksum_expected = self.parm[checksum_name]
|
||||
elif checksum_plain_name in self.parm:
|
||||
checksum_expected = self.parm[checksum_plain_name]
|
||||
checksum_name = checksum_plain_name
|
||||
elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3", "az", "crate", "gs", "gomod", "npm"]:
|
||||
elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3", "az", "crate"]:
|
||||
checksum_expected = None
|
||||
else:
|
||||
checksum_expected = d.getVarFlag("SRC_URI", checksum_name)
|
||||
|
||||
setattr(self, "%s_name" % checksum_id, checksum_name)
|
||||
setattr(self, "%s_expected" % checksum_id, checksum_expected)
|
||||
|
||||
self.name = self.parm.get("name",'default')
|
||||
if "," in self.name:
|
||||
raise ParameterError("The fetcher no longer supports multiple name parameters in a single url", self.url)
|
||||
self.names = self.parm.get("name",'default').split(',')
|
||||
|
||||
self.method = None
|
||||
for m in methods:
|
||||
@@ -1398,7 +1349,13 @@ class FetchData(object):
|
||||
self.lockfile = basepath + '.lock'
|
||||
|
||||
def setup_revisions(self, d):
|
||||
self.revision = srcrev_internal_helper(self, d, self.name)
|
||||
self.revisions = {}
|
||||
for name in self.names:
|
||||
self.revisions[name] = srcrev_internal_helper(self, d, name)
|
||||
|
||||
# add compatibility code for non name specified case
|
||||
if len(self.names) == 1:
|
||||
self.revision = self.revisions[self.names[0]]
|
||||
|
||||
def setup_localpath(self, d):
|
||||
if not self.localpath:
|
||||
@@ -1445,9 +1402,6 @@ class FetchMethod(object):
|
||||
Is localpath something that can be represented by a checksum?
|
||||
"""
|
||||
|
||||
# We cannot compute checksums for None
|
||||
if urldata.localpath is None:
|
||||
return False
|
||||
# We cannot compute checksums for directories
|
||||
if os.path.isdir(urldata.localpath):
|
||||
return False
|
||||
@@ -1460,12 +1414,6 @@ class FetchMethod(object):
|
||||
"""
|
||||
return False
|
||||
|
||||
def cleanup_upon_failure(self):
|
||||
"""
|
||||
When a fetch fails, should clean() be called?
|
||||
"""
|
||||
return True
|
||||
|
||||
def verify_donestamp(self, ud, d):
|
||||
"""
|
||||
Verify the donestamp file
|
||||
@@ -1526,7 +1474,7 @@ class FetchMethod(object):
|
||||
(file, urldata.parm.get('unpack')))
|
||||
|
||||
base, ext = os.path.splitext(file)
|
||||
if ext in ['.gz', '.bz2', '.Z', '.xz', '.lz', '.zst']:
|
||||
if ext in ['.gz', '.bz2', '.Z', '.xz', '.lz']:
|
||||
efile = os.path.join(rootdir, os.path.basename(base))
|
||||
else:
|
||||
efile = file
|
||||
@@ -1608,7 +1556,6 @@ class FetchMethod(object):
|
||||
unpackdir = rootdir
|
||||
|
||||
if not unpack or not cmd:
|
||||
urldata.unpack_tracer.unpack("file-copy", unpackdir)
|
||||
# If file == dest, then avoid any copies, as we already put the file into dest!
|
||||
dest = os.path.join(unpackdir, os.path.basename(file))
|
||||
if file != dest and not (os.path.exists(dest) and os.path.samefile(file, dest)):
|
||||
@@ -1622,9 +1569,7 @@ class FetchMethod(object):
|
||||
if urlpath.find("/") != -1:
|
||||
destdir = urlpath.rsplit("/", 1)[0] + '/'
|
||||
bb.utils.mkdirhier("%s/%s" % (unpackdir, destdir))
|
||||
cmd = 'cp --force --preserve=timestamps --no-dereference --recursive -H "%s" "%s"' % (file, destdir)
|
||||
else:
|
||||
urldata.unpack_tracer.unpack("archive-extract", unpackdir)
|
||||
cmd = 'cp -fpPRH "%s" "%s"' % (file, destdir)
|
||||
|
||||
if not cmd:
|
||||
return
|
||||
@@ -1678,13 +1623,13 @@ class FetchMethod(object):
|
||||
if not hasattr(self, "_latest_revision"):
|
||||
raise ParameterError("The fetcher for this URL does not support _latest_revision", ud.url)
|
||||
|
||||
revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
|
||||
key = self.generate_revision_key(ud, d, name)
|
||||
|
||||
rev = _revisions_cache.get_rev(key)
|
||||
if rev is None:
|
||||
rev = self._latest_revision(ud, d, name)
|
||||
_revisions_cache.set_rev(key, rev)
|
||||
return rev
|
||||
try:
|
||||
return revs[key]
|
||||
except KeyError:
|
||||
revs[key] = rev = self._latest_revision(ud, d, name)
|
||||
return rev
|
||||
|
||||
def sortable_revision(self, ud, d, name):
|
||||
latest_rev = self._build_revision(ud, d, name)
|
||||
@@ -1716,55 +1661,6 @@ class FetchMethod(object):
|
||||
"""
|
||||
return []
|
||||
|
||||
|
||||
class DummyUnpackTracer(object):
"""
Abstract API definition for a class that traces unpacked source files back
to their respective upstream SRC_URI entries, for software composition
analysis, license compliance and detailed SBOM generation purposes.
User may load their own unpack tracer class (instead of the dummy
one) by setting the BB_UNPACK_TRACER_CLASS config parameter.
"""
def start(self, unpackdir, urldata_dict, d):
|
||||
"""
|
||||
Start tracing the core Fetch.unpack process, using an index to map
|
||||
unpacked files to each SRC_URI entry.
|
||||
This method is called by Fetch.unpack and it may receive nested calls by
|
||||
gitsm and npmsw fetchers, that expand SRC_URI entries by adding implicit
|
||||
URLs and by recursively calling Fetch.unpack from new (nested) Fetch
|
||||
instances.
|
||||
"""
|
||||
return
|
||||
def start_url(self, url):
|
||||
"""Start tracing url unpack process.
|
||||
This method is called by Fetch.unpack before the fetcher-specific unpack
|
||||
method starts, and it may receive nested calls by gitsm and npmsw
|
||||
fetchers.
|
||||
"""
|
||||
return
|
||||
def unpack(self, unpack_type, destdir):
|
||||
"""
|
||||
Set unpack_type and destdir for current url.
|
||||
This method is called by the fetcher-specific unpack method after url
|
||||
tracing started.
|
||||
"""
|
||||
return
|
||||
def finish_url(self, url):
|
||||
"""Finish tracing url unpack process and update the file index.
|
||||
This method is called by Fetch.unpack after the fetcher-specific unpack
|
||||
method finished its job, and it may receive nested calls by gitsm
|
||||
and npmsw fetchers.
|
||||
"""
|
||||
return
|
||||
def complete(self):
|
||||
"""
|
||||
Finish tracing the Fetch.unpack process, and check if all nested
|
||||
Fetch.unpack calls (if any) have been completed; if so, save collected
|
||||
metadata.
|
||||
"""
|
||||
return
|
||||
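The docstrings above spell out the tracer hook points (start, start_url, unpack, finish_url, complete) and note that a replacement class can be selected with BB_UNPACK_TRACER_CLASS. A minimal sketch of such a user-supplied tracer follows; the module name, class name and output file are illustrative, not part of BitBake:

# Hypothetical layer module, selected via e.g.
#   BB_UNPACK_TRACER_CLASS = "unpack_trace.JsonUnpackTracer"
import json
import os

class JsonUnpackTracer:
    """Record which SRC_URI entry each unpack step belonged to (sketch only)."""
    def __init__(self):
        self.entries = []
        self.current_url = None

    def start(self, unpackdir, urldata_dict, d):
        # Called by Fetch.unpack(); remember where files will land.
        self.unpackdir = unpackdir

    def start_url(self, url):
        self.current_url = url

    def unpack(self, unpack_type, destdir):
        # Called from the fetcher-specific unpack method.
        self.entries.append({"url": self.current_url,
                             "type": unpack_type,
                             "destdir": destdir})

    def finish_url(self, url):
        self.current_url = None

    def complete(self):
        # Nested gitsm/npmsw unpacks may trigger this more than once; last write wins.
        with open(os.path.join(self.unpackdir, "unpack-trace.json"), "w") as f:
            json.dump(self.entries, f, indent=2)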
|
||||
|
||||
class Fetch(object):
|
||||
def __init__(self, urls, d, cache = True, localonly = False, connection_cache = None):
|
||||
if localonly and cache:
|
||||
@@ -1785,30 +1681,10 @@ class Fetch(object):
|
||||
if key in urldata_cache:
|
||||
self.ud = urldata_cache[key]
|
||||
|
||||
# the unpack_tracer object needs to be made available to possible nested
|
||||
# Fetch instances (when those are created by gitsm and npmsw fetchers)
|
||||
# so we set it as a global variable
|
||||
global unpack_tracer
|
||||
try:
|
||||
unpack_tracer
|
||||
except NameError:
|
||||
class_path = d.getVar("BB_UNPACK_TRACER_CLASS")
|
||||
if class_path:
|
||||
# use user-defined unpack tracer class
|
||||
import importlib
|
||||
module_name, _, class_name = class_path.rpartition(".")
|
||||
module = importlib.import_module(module_name)
|
||||
class_ = getattr(module, class_name)
|
||||
unpack_tracer = class_()
|
||||
else:
|
||||
# fall back to the dummy/abstract class
|
||||
unpack_tracer = DummyUnpackTracer()
|
||||
|
||||
for url in urls:
|
||||
if url not in self.ud:
|
||||
try:
|
||||
self.ud[url] = FetchData(url, d, localonly)
|
||||
self.ud[url].unpack_tracer = unpack_tracer
|
||||
except NonLocalMethod:
|
||||
if localonly:
|
||||
self.ud[url] = None
|
||||
@@ -1822,7 +1698,7 @@ class Fetch(object):
|
||||
self.ud[url] = FetchData(url, self.d)
|
||||
|
||||
self.ud[url].setup_localpath(self.d)
|
||||
return self.ud[url].localpath
|
||||
return self.d.expand(self.ud[url].localpath)
|
||||
|
||||
def localpaths(self):
|
||||
"""
|
||||
@@ -1875,28 +1751,25 @@ class Fetch(object):
|
||||
logger.debug(str(e))
|
||||
done = False
|
||||
|
||||
d = self.d
|
||||
if premirroronly:
|
||||
# Only disable the network in a copy
|
||||
d = bb.data.createCopy(self.d)
|
||||
d.setVar("BB_NO_NETWORK", "1")
|
||||
self.d.setVar("BB_NO_NETWORK", "1")
|
||||
|
||||
firsterr = None
|
||||
verified_stamp = False
|
||||
if done:
|
||||
verified_stamp = m.verify_donestamp(ud, d)
|
||||
if not done and (not verified_stamp or m.need_update(ud, d)):
|
||||
verified_stamp = m.verify_donestamp(ud, self.d)
|
||||
if not done and (not verified_stamp or m.need_update(ud, self.d)):
|
||||
try:
|
||||
if not trusted_network(d, ud.url):
|
||||
if not trusted_network(self.d, ud.url):
|
||||
raise UntrustedUrl(ud.url)
|
||||
logger.debug("Trying Upstream")
|
||||
m.download(ud, d)
|
||||
m.download(ud, self.d)
|
||||
if hasattr(m, "build_mirror_data"):
|
||||
m.build_mirror_data(ud, d)
|
||||
m.build_mirror_data(ud, self.d)
|
||||
done = True
|
||||
# early checksum verify, so that if checksum mismatched,
|
||||
# fetcher still have chance to fetch from mirror
|
||||
m.update_donestamp(ud, d)
|
||||
m.update_donestamp(ud, self.d)
|
||||
|
||||
except bb.fetch2.NetworkAccess:
|
||||
raise
|
||||
@@ -1914,18 +1787,18 @@ class Fetch(object):
|
||||
logger.debug(str(e))
|
||||
firsterr = e
|
||||
# Remove any incomplete fetch
|
||||
if not verified_stamp and m.cleanup_upon_failure():
|
||||
m.clean(ud, d)
|
||||
if not verified_stamp:
|
||||
m.clean(ud, self.d)
|
||||
logger.debug("Trying MIRRORS")
|
||||
mirrors = mirror_from_string(d.getVar('MIRRORS'))
|
||||
done = m.try_mirrors(self, ud, d, mirrors)
|
||||
mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
|
||||
done = m.try_mirrors(self, ud, self.d, mirrors)
|
||||
|
||||
if not done or not m.done(ud, d):
|
||||
if not done or not m.done(ud, self.d):
|
||||
if firsterr:
|
||||
logger.error(str(firsterr))
|
||||
raise FetchError("Unable to fetch URL from any source.", u)
|
||||
|
||||
m.update_donestamp(ud, d)
|
||||
m.update_donestamp(ud, self.d)
|
||||
|
||||
except IOError as e:
|
||||
if e.errno in [errno.ESTALE]:
|
||||
@@ -1977,7 +1850,7 @@ class Fetch(object):
|
||||
ret = m.try_mirrors(self, ud, self.d, mirrors, True)
|
||||
|
||||
if not ret:
|
||||
raise FetchError("URL doesn't work", u)
|
||||
raise FetchError("URL %s doesn't work" % u, u)
|
||||
|
||||
def unpack(self, root, urls=None):
|
||||
"""
|
||||
@@ -1987,8 +1860,6 @@ class Fetch(object):
|
||||
if not urls:
|
||||
urls = self.urls
|
||||
|
||||
unpack_tracer.start(root, self.ud, self.d)
|
||||
|
||||
for u in urls:
|
||||
ud = self.ud[u]
|
||||
ud.setup_localpath(self.d)
|
||||
@@ -1996,15 +1867,11 @@ class Fetch(object):
|
||||
if ud.lockfile:
|
||||
lf = bb.utils.lockfile(ud.lockfile)
|
||||
|
||||
unpack_tracer.start_url(u)
|
||||
ud.method.unpack(ud, root, self.d)
|
||||
unpack_tracer.finish_url(u)
|
||||
|
||||
if ud.lockfile:
|
||||
bb.utils.unlockfile(lf)
|
||||
|
||||
unpack_tracer.complete()
|
||||
|
||||
def clean(self, urls=None):
|
||||
"""
|
||||
Clean files that the fetcher gets or places
|
||||
@@ -2106,8 +1973,6 @@ from . import npm
|
||||
from . import npmsw
|
||||
from . import az
|
||||
from . import crate
|
||||
from . import gcp
|
||||
from . import gomod
|
||||
|
||||
methods.append(local.Local())
|
||||
methods.append(wget.Wget())
|
||||
@@ -2129,6 +1994,3 @@ methods.append(npm.Npm())
|
||||
methods.append(npmsw.NpmShrinkWrap())
|
||||
methods.append(az.Az())
|
||||
methods.append(crate.Crate())
|
||||
methods.append(gcp.GCP())
|
||||
methods.append(gomod.GoMod())
|
||||
methods.append(gomod.GoModGit())
|
||||
|
||||
@@ -66,12 +66,11 @@ class Az(Wget):
|
||||
else:
|
||||
azuri = '%s%s%s' % ('https://', ud.host, ud.path)
|
||||
|
||||
dldir = d.getVar("DL_DIR")
|
||||
if os.path.exists(ud.localpath):
|
||||
# file exists, but we didn't complete it... trying again.
|
||||
fetchcmd += " -c -P %s '%s'" % (dldir, azuri)
|
||||
fetchcmd += d.expand(" -c -P ${DL_DIR} '%s'" % azuri)
|
||||
else:
|
||||
fetchcmd += " -P %s '%s'" % (dldir, azuri)
|
||||
fetchcmd += d.expand(" -P ${DL_DIR} '%s'" % azuri)
|
||||
|
||||
try:
|
||||
self._runwget(ud, d, fetchcmd, False)
|
||||
|
||||
@@ -108,7 +108,7 @@ class ClearCase(FetchMethod):
|
||||
ud.module.replace("/", "."),
|
||||
ud.label.replace("/", "."))
|
||||
|
||||
ud.viewname = "%s-view%s" % (ud.identifier, d.getVar("DATETIME"))
|
||||
ud.viewname = "%s-view%s" % (ud.identifier, d.getVar("DATETIME", d, True))
|
||||
ud.csname = "%s-config-spec" % (ud.identifier)
|
||||
ud.ccasedir = os.path.join(d.getVar("DL_DIR"), ud.type)
|
||||
ud.viewdir = os.path.join(ud.ccasedir, ud.viewname)
|
||||
@@ -130,6 +130,8 @@ class ClearCase(FetchMethod):
|
||||
self.debug("configspecfile = %s" % ud.configspecfile)
|
||||
self.debug("localfile = %s" % ud.localfile)
|
||||
|
||||
ud.localfile = os.path.join(d.getVar("DL_DIR"), ud.localfile)
|
||||
|
||||
def _build_ccase_command(self, ud, command):
|
||||
"""
|
||||
Build up a commandline based on ud
|
||||
@@ -194,7 +196,7 @@ class ClearCase(FetchMethod):
|
||||
|
||||
def need_update(self, ud, d):
|
||||
if ("LATEST" in ud.label) or (ud.customspec and "LATEST" in ud.customspec):
|
||||
ud.identifier += "-%s" % d.getVar("DATETIME")
|
||||
ud.identifier += "-%s" % d.getVar("DATETIME",d, True)
|
||||
return True
|
||||
if os.path.exists(ud.localpath):
|
||||
return False
|
||||
|
||||
@@ -59,18 +59,17 @@ class Crate(Wget):
|
||||
# version is expected to be the last token
|
||||
# but ignore possible url parameters which will be used
|
||||
# by the top fetcher class
|
||||
version = parts[-1].split(";")[0]
|
||||
version, _, _ = parts[len(parts) -1].partition(";")
|
||||
# second to last field is name
|
||||
name = parts[-2]
|
||||
name = parts[len(parts) - 2]
|
||||
# host (this is to allow custom crate registries to be specified)
|
||||
host = '/'.join(parts[2:-2])
|
||||
host = '/'.join(parts[2:len(parts) - 2])
|
||||
|
||||
# if using upstream just fix it up nicely
|
||||
if host == 'crates.io':
|
||||
host = 'crates.io/api/v1/crates'
|
||||
|
||||
ud.url = "https://%s/%s/%s/download" % (host, name, version)
|
||||
ud.versionsurl = "https://%s/%s/versions" % (host, name)
|
||||
ud.parm['downloadfilename'] = "%s-%s.crate" % (name, version)
|
||||
if 'name' not in ud.parm:
|
||||
ud.parm['name'] = '%s-%s' % (name, version)
|
||||
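The comments in the hunk above describe how a crate:// URL is decomposed into version, name and registry host before being rewritten to a download URL. A short sketch mirroring that logic for an illustrative URL (crate name and version are made up):

# Mirrors the decomposition above; "glob"/"0.3.1" are illustrative values.
url = "crate://crates.io/glob/0.3.1"
parts = url.split("/")
version = parts[-1].split(";")[0]    # "0.3.1"; any ;params are left to the top fetcher
name = parts[-2]                     # "glob"
host = "/".join(parts[2:-2])         # "crates.io"
if host == "crates.io":
    host = "crates.io/api/v1/crates" # upstream registry fix-up
print("https://%s/%s/%s/download" % (host, name, version))
# -> https://crates.io/api/v1/crates/glob/0.3.1/download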
@@ -99,13 +98,11 @@ class Crate(Wget):
|
||||
save_cwd = os.getcwd()
|
||||
os.chdir(rootdir)
|
||||
|
||||
bp = d.getVar('BP')
|
||||
if bp == ud.parm.get('name'):
|
||||
pn = d.getVar('BPN')
|
||||
if pn == ud.parm.get('name'):
|
||||
cmd = "tar -xz --no-same-owner -f %s" % thefile
|
||||
ud.unpack_tracer.unpack("crate-extract", rootdir)
|
||||
else:
|
||||
cargo_bitbake = self._cargo_bitbake_path(rootdir)
|
||||
ud.unpack_tracer.unpack("cargo-extract", cargo_bitbake)
|
||||
|
||||
cmd = "tar -xz --no-same-owner -f %s -C %s" % (thefile, cargo_bitbake)
|
||||
|
||||
@@ -140,11 +137,3 @@ class Crate(Wget):
|
||||
mdpath = os.path.join(bbpath, cratepath, mdfile)
|
||||
with open(mdpath, "w") as f:
|
||||
json.dump(metadata, f)
|
||||
|
||||
def latest_versionstring(self, ud, d):
|
||||
from functools import cmp_to_key
|
||||
json_data = json.loads(self._fetch_index(ud.versionsurl, ud, d))
|
||||
versions = [(0, i["num"], "") for i in json_data["versions"]]
|
||||
versions = sorted(versions, key=cmp_to_key(bb.utils.vercmp))
|
||||
|
||||
return (versions[-1][1], "")
|
||||
|
||||
@@ -1,102 +0,0 @@
"""
BitBake 'Fetch' implementation for Google Cloud Platform Storage.

Class for fetching files from Google Cloud Storage using the
Google Cloud Storage Python Client. The GCS Python Client must
be correctly installed, configured and authenticated prior to use.
Additionally, gsutil must also be installed.

"""

# Copyright (C) 2023, Snap Inc.
|
||||
#
|
||||
# Based in part on bb.fetch2.s3:
|
||||
# Copyright (C) 2017 Andre McCurdy
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
|
||||
|
||||
import os
|
||||
import bb
|
||||
import urllib.parse, urllib.error
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import logger
|
||||
|
||||
class GCP(FetchMethod):
|
||||
"""
|
||||
Class to fetch urls via GCP's Python API.
|
||||
"""
|
||||
def __init__(self):
|
||||
self.gcp_client = None
|
||||
|
||||
def supports(self, ud, d):
|
||||
"""
|
||||
Check to see if a given url can be fetched with GCP.
|
||||
"""
|
||||
return ud.type in ['gs']
|
||||
|
||||
def recommends_checksum(self, urldata):
|
||||
return True
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
if 'downloadfilename' in ud.parm:
|
||||
ud.basename = ud.parm['downloadfilename']
|
||||
else:
|
||||
ud.basename = os.path.basename(ud.path)
|
||||
|
||||
ud.localfile = ud.basename
|
||||
|
||||
def get_gcp_client(self):
|
||||
from google.cloud import storage
|
||||
self.gcp_client = storage.Client(project=None)
|
||||
|
||||
def download(self, ud, d):
|
||||
"""
|
||||
Fetch urls using the GCP API.
|
||||
Assumes localpath was called first.
|
||||
"""
|
||||
from google.api_core.exceptions import NotFound
|
||||
logger.debug2(f"Trying to download gs://{ud.host}{ud.path} to {ud.localpath}")
|
||||
if self.gcp_client is None:
|
||||
self.get_gcp_client()
|
||||
|
||||
bb.fetch2.check_network_access(d, "blob.download_to_filename", f"gs://{ud.host}{ud.path}")
|
||||
|
||||
# Path sometimes has leading slash, so strip it
|
||||
path = ud.path.lstrip("/")
|
||||
blob = self.gcp_client.bucket(ud.host).blob(path)
|
||||
try:
|
||||
blob.download_to_filename(ud.localpath)
|
||||
except NotFound:
|
||||
raise FetchError("The GCP API threw a NotFound exception")
|
||||
|
||||
# Additional sanity checks copied from the wget class (although there
|
||||
# are no known issues which mean these are required, treat the GCP API
|
||||
# tool with a little healthy suspicion).
|
||||
if not os.path.exists(ud.localpath):
|
||||
raise FetchError(f"The GCP API returned success for gs://{ud.host}{ud.path} but {ud.localpath} doesn't exist?!")
|
||||
|
||||
if os.path.getsize(ud.localpath) == 0:
|
||||
os.remove(ud.localpath)
|
||||
raise FetchError(f"The downloaded file for gs://{ud.host}{ud.path} resulted in a zero size file?! Deleting and failing since this isn't right.")
|
||||
|
||||
return True
|
||||
|
||||
def checkstatus(self, fetch, ud, d):
|
||||
"""
|
||||
Check the status of a URL.
|
||||
"""
|
||||
logger.debug2(f"Checking status of gs://{ud.host}{ud.path}")
|
||||
if self.gcp_client is None:
|
||||
self.get_gcp_client()
|
||||
|
||||
bb.fetch2.check_network_access(d, "gcp_client.bucket(ud.host).blob(path).exists()", f"gs://{ud.host}{ud.path}")
|
||||
|
||||
# Path sometimes has leading slash, so strip it
|
||||
path = ud.path.lstrip("/")
|
||||
if self.gcp_client.bucket(ud.host).blob(path).exists() == False:
|
||||
raise FetchError(f"The GCP API reported that gs://{ud.host}{ud.path} does not exist")
|
||||
else:
|
||||
return True
|
||||
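The class above registers for gs:// URLs through the generic fetcher front-end. A minimal sketch of driving it from Python; the bucket and object names are placeholders, d is assumed to be an already-initialised datastore, and checksums (which this fetcher recommends) would normally come from SRC_URI flags:

# Sketch only: "example-bucket" and the object path are placeholders.
import bb.fetch2

uri = "gs://example-bucket/tarballs/foo-1.0.tar.gz"
fetcher = bb.fetch2.Fetch([uri], d)   # d: configured BitBake datastore (assumed)
fetcher.download()                    # dispatched to GCP.download() via supports()
print(fetcher.localpath(uri))         # downloaded file under DL_DIR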
@@ -9,6 +9,15 @@ Supported SRC_URI options are:
- branch
The git branch to retrieve from. The default is "master"

This option also supports multiple branch fetching, with branches
separated by commas. In multiple branches case, the name option
must have the same number of names to match the branches, which is
used to specify the SRC_REV for the branch
e.g:
SRC_URI="git://some.host/somepath;branch=branchX,branchY;name=nameX,nameY"
SRCREV_nameX = "xxxxxxxxxxxxxxxxxxxx"
SRCREV_nameY = "YYYYYYYYYYYYYYYYYYYY"

- tag
The git tag to retrieve. The default is "master"

@@ -39,23 +48,10 @@ Supported SRC_URI options are:
instead of branch.
The default is "0", set nobranch=1 if needed.

- subpath
Limit the checkout to a specific subpath of the tree.
By default, checkout the whole tree, set subpath=<path> if needed

- destsuffix
The name of the path in which to place the checkout.
By default, the path is git/, set destsuffix=<suffix> if needed

- usehead
For local git:// urls to use the current branch HEAD as the revision for use with
AUTOREV. Implies nobranch.

- lfs
Enable the checkout to use LFS for large files. This will download all LFS files
in the download step, as the unpack step does not have network access.
The default is "1", set lfs=0 to skip.

"""

# Copyright (C) 2005 Richard Purdie
|
||||
@@ -69,17 +65,14 @@ import fnmatch
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
import urllib
|
||||
import bb
|
||||
import bb.progress
|
||||
from contextlib import contextmanager
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import logger
|
||||
from bb.fetch2 import trusted_network
|
||||
|
||||
|
||||
sha1_re = re.compile(r'^[0-9a-f]{40}$')
|
||||
@@ -142,9 +135,6 @@ class Git(FetchMethod):
|
||||
def supports_checksum(self, urldata):
|
||||
return False
|
||||
|
||||
def cleanup_upon_failure(self):
|
||||
return False
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
"""
|
||||
init git specific variable within url data
|
||||
@@ -183,10 +173,13 @@ class Git(FetchMethod):
|
||||
if ud.bareclone:
|
||||
ud.nocheckout = 1
|
||||
|
||||
ud.unresolvedrev = ""
|
||||
ud.branch = ud.parm.get("branch", "")
|
||||
if not ud.branch and not ud.nobranch:
|
||||
raise bb.fetch2.ParameterError("The url does not set any branch parameter or set nobranch=1.", ud.url)
|
||||
ud.unresolvedrev = {}
|
||||
branches = ud.parm.get("branch", "").split(',')
|
||||
if branches == [""] and not ud.nobranch:
|
||||
bb.warn("URL: %s does not set any branch parameter. The future default branch used by tools and repositories is uncertain and we will therefore soon require this is set in all git urls." % ud.url)
|
||||
branches = ["master"]
|
||||
if len(branches) != len(ud.names):
|
||||
raise bb.fetch2.ParameterError("The number of name and branch parameters is not balanced", ud.url)
|
||||
|
||||
ud.noshared = d.getVar("BB_GIT_NOSHARED") == "1"
|
||||
|
||||
@@ -196,7 +189,6 @@ class Git(FetchMethod):
|
||||
if ud.bareclone:
|
||||
ud.cloneflags += " --mirror"
|
||||
|
||||
ud.shallow_skip_fast = False
|
||||
ud.shallow = d.getVar("BB_GIT_SHALLOW") == "1"
|
||||
ud.shallow_extra_refs = (d.getVar("BB_GIT_SHALLOW_EXTRA_REFS") or "").split()
|
||||
|
||||
@@ -215,27 +207,32 @@ class Git(FetchMethod):
|
||||
|
||||
revs_default = d.getVar("BB_GIT_SHALLOW_REVS")
|
||||
ud.shallow_revs = []
|
||||
ud.branches = {}
|
||||
for pos, name in enumerate(ud.names):
|
||||
branch = branches[pos]
|
||||
ud.branches[name] = branch
|
||||
ud.unresolvedrev[name] = branch
|
||||
|
||||
ud.unresolvedrev = ud.branch
|
||||
shallow_depth = d.getVar("BB_GIT_SHALLOW_DEPTH_%s" % name)
|
||||
if shallow_depth is not None:
|
||||
try:
|
||||
shallow_depth = int(shallow_depth or 0)
|
||||
except ValueError:
|
||||
raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth))
|
||||
else:
|
||||
if shallow_depth < 0:
|
||||
raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth))
|
||||
ud.shallow_depths[name] = shallow_depth
|
||||
|
||||
shallow_depth = d.getVar("BB_GIT_SHALLOW_DEPTH_%s" % ud.name)
|
||||
if shallow_depth is not None:
|
||||
try:
|
||||
shallow_depth = int(shallow_depth or 0)
|
||||
except ValueError:
|
||||
raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (ud.name, shallow_depth))
|
||||
else:
|
||||
if shallow_depth < 0:
|
||||
raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (ud.name, shallow_depth))
|
||||
ud.shallow_depths[ud.name] = shallow_depth
|
||||
revs = d.getVar("BB_GIT_SHALLOW_REVS_%s" % name)
|
||||
if revs is not None:
|
||||
ud.shallow_revs.extend(revs.split())
|
||||
elif revs_default is not None:
|
||||
ud.shallow_revs.extend(revs_default.split())
|
||||
|
||||
revs = d.getVar("BB_GIT_SHALLOW_REVS_%s" % ud.name)
|
||||
if revs is not None:
|
||||
ud.shallow_revs.extend(revs.split())
|
||||
elif revs_default is not None:
|
||||
ud.shallow_revs.extend(revs_default.split())
|
||||
|
||||
if ud.shallow and not ud.shallow_revs and ud.shallow_depths[ud.name] == 0:
|
||||
if (ud.shallow and
|
||||
not ud.shallow_revs and
|
||||
all(ud.shallow_depths[n] == 0 for n in ud.names)):
|
||||
# Shallow disabled for this URL
|
||||
ud.shallow = False
|
||||
|
||||
@@ -244,9 +241,10 @@ class Git(FetchMethod):
|
||||
# rev of this repository. This will get resolved into a revision
|
||||
# later. If an actual revision happens to have also been provided
|
||||
# then this setting will be overridden.
|
||||
ud.unresolvedrev = 'HEAD'
|
||||
for name in ud.names:
|
||||
ud.unresolvedrev[name] = 'HEAD'
|
||||
|
||||
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c gc.autoDetach=false -c core.pager=cat -c safe.bareRepository=all -c clone.defaultRemoteName=origin"
|
||||
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c gc.autoDetach=false -c core.pager=cat"
|
||||
|
||||
write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0"
|
||||
ud.write_tarballs = write_tarballs != "0" or ud.rebaseable
|
||||
@@ -254,13 +252,14 @@ class Git(FetchMethod):
|
||||
|
||||
ud.setup_revisions(d)
|
||||
|
||||
# Ensure any revision that doesn't look like a SHA-1 is translated into one
|
||||
if not sha1_re.match(ud.revision or ''):
|
||||
if ud.revision:
|
||||
ud.unresolvedrev = ud.revision
|
||||
ud.revision = self.latest_revision(ud, d, ud.name)
|
||||
for name in ud.names:
|
||||
# Ensure any revision that doesn't look like a SHA-1 is translated into one
|
||||
if not sha1_re.match(ud.revisions[name] or ''):
|
||||
if ud.revisions[name]:
|
||||
ud.unresolvedrev[name] = ud.revisions[name]
|
||||
ud.revisions[name] = self.latest_revision(ud, d, name)
|
||||
|
||||
gitsrcname = '%s%s' % (ud.host.replace(':', '.'), ud.path.replace('/', '.').replace('*', '.').replace(' ','_').replace('(', '_').replace(')', '_'))
|
||||
gitsrcname = '%s%s' % (ud.host.replace(':', '.'), ud.path.replace('/', '.').replace('*', '.').replace(' ','_'))
|
||||
if gitsrcname.startswith('.'):
|
||||
gitsrcname = gitsrcname[1:]
|
||||
|
||||
@@ -269,7 +268,8 @@ class Git(FetchMethod):
|
||||
# upstream repo in the future, the mirror will remain intact and still
|
||||
# contain the revision
|
||||
if ud.rebaseable:
|
||||
gitsrcname = gitsrcname + '_' + ud.revision
|
||||
for name in ud.names:
|
||||
gitsrcname = gitsrcname + '_' + ud.revisions[name]
|
||||
|
||||
dl_dir = d.getVar("DL_DIR")
|
||||
gitdir = d.getVar("GITDIR") or (dl_dir + "/git2")
|
||||
@@ -287,14 +287,15 @@ class Git(FetchMethod):
|
||||
if ud.shallow_revs:
|
||||
tarballname = "%s_%s" % (tarballname, "_".join(sorted(ud.shallow_revs)))
|
||||
|
||||
tarballname = "%s_%s" % (tarballname, ud.revision[:7])
|
||||
depth = ud.shallow_depths[ud.name]
|
||||
if depth:
|
||||
tarballname = "%s-%s" % (tarballname, depth)
|
||||
for name, revision in sorted(ud.revisions.items()):
|
||||
tarballname = "%s_%s" % (tarballname, ud.revisions[name][:7])
|
||||
depth = ud.shallow_depths[name]
|
||||
if depth:
|
||||
tarballname = "%s-%s" % (tarballname, depth)
|
||||
|
||||
shallow_refs = []
|
||||
if not ud.nobranch:
|
||||
shallow_refs.append(ud.branch)
|
||||
shallow_refs.extend(ud.branches.values())
|
||||
if ud.shallow_extra_refs:
|
||||
shallow_refs.extend(r.replace('refs/heads/', '').replace('*', 'ALL') for r in ud.shallow_extra_refs)
|
||||
if shallow_refs:
|
||||
@@ -309,26 +310,16 @@ class Git(FetchMethod):
|
||||
return ud.clonedir
|
||||
|
||||
def need_update(self, ud, d):
|
||||
return self.clonedir_need_update(ud, d) \
|
||||
or self.shallow_tarball_need_update(ud) \
|
||||
or self.tarball_need_update(ud) \
|
||||
or self.lfs_need_update(ud, d)
|
||||
return self.clonedir_need_update(ud, d) or self.shallow_tarball_need_update(ud) or self.tarball_need_update(ud)
|
||||
|
||||
def clonedir_need_update(self, ud, d):
|
||||
if not os.path.exists(ud.clonedir):
|
||||
return True
|
||||
if ud.shallow and ud.write_shallow_tarballs and self.clonedir_need_shallow_revs(ud, d):
|
||||
return True
|
||||
if not self._contains_ref(ud, d, ud.name, ud.clonedir):
|
||||
return True
|
||||
return False
|
||||
|
||||
def lfs_need_update(self, ud, d):
|
||||
if self.clonedir_need_update(ud, d):
|
||||
return True
|
||||
|
||||
if not self._lfs_objects_downloaded(ud, d, ud.name, ud.clonedir):
|
||||
return True
|
||||
for name in ud.names:
|
||||
if not self._contains_ref(ud, d, name, ud.clonedir):
|
||||
return True
|
||||
return False
|
||||
|
||||
def clonedir_need_shallow_revs(self, ud, d):
|
||||
@@ -350,16 +341,6 @@ class Git(FetchMethod):
|
||||
# is not possible
|
||||
if bb.utils.to_boolean(d.getVar("BB_FETCH_PREMIRRORONLY")):
|
||||
return True
|
||||
# If the url is not in trusted network, that is, BB_NO_NETWORK is set to 0
|
||||
# and BB_ALLOWED_NETWORKS does not contain the host that ud.url uses, then
|
||||
# we need to try premirrors first as using upstream is destined to fail.
|
||||
if not trusted_network(d, ud.url):
|
||||
return True
|
||||
# the following check is to ensure incremental fetch in downloads, this is
|
||||
# because the premirror might be old and does not contain the new rev required,
|
||||
# and this will cause a total removal and new clone. So if we can reach to
|
||||
# network, we prefer upstream over premirror, though the premirror might contain
|
||||
# the new rev.
|
||||
if os.path.exists(ud.clonedir):
|
||||
return False
|
||||
return True
|
||||
@@ -380,40 +361,12 @@ class Git(FetchMethod):
|
||||
else:
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
|
||||
runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=tmpdir)
|
||||
output = runfetchcmd("%s remote" % ud.basecmd, d, quiet=True, workdir=ud.clonedir)
|
||||
if 'mirror' in output:
|
||||
runfetchcmd("%s remote rm mirror" % ud.basecmd, d, workdir=ud.clonedir)
|
||||
runfetchcmd("%s remote add --mirror=fetch mirror %s" % (ud.basecmd, tmpdir), d, workdir=ud.clonedir)
|
||||
fetch_cmd = "LANG=C %s fetch -f --update-head-ok --progress mirror " % (ud.basecmd)
|
||||
fetch_cmd = "LANG=C %s fetch -f --progress %s " % (ud.basecmd, shlex.quote(tmpdir))
|
||||
runfetchcmd(fetch_cmd, d, workdir=ud.clonedir)
|
||||
repourl = self._get_repo_url(ud)
|
||||
|
||||
needs_clone = False
|
||||
if os.path.exists(ud.clonedir):
|
||||
# The directory may exist, but not be the top level of a bare git
|
||||
# repository in which case it needs to be deleted and re-cloned.
|
||||
try:
|
||||
# Since clones can be bare, use --absolute-git-dir instead of --show-toplevel
|
||||
output = runfetchcmd("LANG=C %s rev-parse --absolute-git-dir" % ud.basecmd, d, workdir=ud.clonedir)
|
||||
toplevel = output.rstrip()
|
||||
|
||||
if not bb.utils.path_is_descendant(toplevel, ud.clonedir):
|
||||
logger.warning("Top level directory '%s' is not a descendant of '%s'. Re-cloning", toplevel, ud.clonedir)
|
||||
needs_clone = True
|
||||
except bb.fetch2.FetchError as e:
|
||||
logger.warning("Unable to get top level for %s (not a git directory?): %s", ud.clonedir, e)
|
||||
needs_clone = True
|
||||
except FileNotFoundError as e:
|
||||
logger.warning("%s", e)
|
||||
needs_clone = True
|
||||
|
||||
if needs_clone:
|
||||
shutil.rmtree(ud.clonedir)
|
||||
else:
|
||||
needs_clone = True
|
||||
|
||||
# If the repo still doesn't exist, fallback to cloning it
|
||||
if needs_clone:
|
||||
if not os.path.exists(ud.clonedir):
|
||||
# We do this since git will use a "-l" option automatically for local urls where possible,
|
||||
# but it doesn't work when git/objects is a symlink, only works when it is a directory.
|
||||
if repourl.startswith("file://"):
|
||||
@@ -425,24 +378,6 @@ class Git(FetchMethod):
|
||||
if ud.proto.lower() != 'file':
|
||||
bb.fetch2.check_network_access(d, clone_cmd, ud.url)
|
||||
progresshandler = GitProgressHandler(d)
|
||||
|
||||
# Try creating a fast initial shallow clone
|
||||
# Enabling ud.shallow_skip_fast will skip this
|
||||
# If the Git error "Server does not allow request for unadvertised object"
|
||||
# occurs, shallow_skip_fast is enabled automatically.
|
||||
# This may happen if the Git server does not allow the request
|
||||
# or if the Git client has issues with this functionality.
|
||||
if ud.shallow and not ud.shallow_skip_fast:
|
||||
try:
|
||||
self.clone_shallow_with_tarball(ud, d)
|
||||
# When the shallow clone has succeeded, use the shallow tarball
|
||||
ud.localpath = ud.fullshallow
|
||||
return
|
||||
except:
|
||||
logger.warning("Creating fast initial shallow clone failed, try initial regular clone now.")
|
||||
|
||||
# When skipping the fast initial shallow clone or when it failed:
|
||||
# Try again with an initial regular clone
|
||||
runfetchcmd(clone_cmd, d, log=progresshandler)
|
||||
|
||||
# Update the checkout if needed
|
||||
@@ -470,15 +405,16 @@ class Git(FetchMethod):
|
||||
if exc.errno != errno.ENOENT:
|
||||
raise
|
||||
|
||||
if not self._contains_ref(ud, d, ud.name, ud.clonedir):
|
||||
raise bb.fetch2.FetchError("Unable to find revision %s in branch %s even from upstream" % (ud.revision, ud.branch))
|
||||
for name in ud.names:
|
||||
if not self._contains_ref(ud, d, name, ud.clonedir):
|
||||
raise bb.fetch2.FetchError("Unable to find revision %s in branch %s even from upstream" % (ud.revisions[name], ud.branches[name]))
|
||||
|
||||
if ud.shallow and ud.write_shallow_tarballs:
|
||||
missing_rev = self.clonedir_need_shallow_revs(ud, d)
|
||||
if missing_rev:
|
||||
raise bb.fetch2.FetchError("Unable to find revision %s even from upstream" % missing_rev)
|
||||
|
||||
if self.lfs_need_update(ud, d):
|
||||
if self._contains_lfs(ud, d, ud.clonedir) and self._need_lfs(ud):
|
||||
# Unpack temporary working copy, use it to run 'git checkout' to force pre-fetching
|
||||
# of all LFS blobs needed at the srcrev.
|
||||
#
|
||||
@@ -501,170 +437,106 @@ class Git(FetchMethod):
|
||||
# Only do this if the unpack resulted in a .git/lfs directory being
|
||||
# created; this only happens if at least one blob needed to be
|
||||
# downloaded.
|
||||
if os.path.exists(os.path.join(ud.destdir, ".git", "lfs")):
|
||||
runfetchcmd("tar -cf - lfs | tar -xf - -C %s" % ud.clonedir, d, workdir="%s/.git" % ud.destdir)
|
||||
|
||||
def lfs_fetch(self, ud, d, clonedir, revision, fetchall=False, progresshandler=None):
|
||||
"""Helper method for fetching Git LFS data"""
|
||||
try:
|
||||
if self._need_lfs(ud) and self._contains_lfs(ud, d, clonedir) and self._find_git_lfs(d) and len(revision):
|
||||
# Using worktree with the revision because .lfsconfig may exist
|
||||
worktree_add_cmd = "%s worktree add wt %s" % (ud.basecmd, revision)
|
||||
runfetchcmd(worktree_add_cmd, d, log=progresshandler, workdir=clonedir)
|
||||
lfs_fetch_cmd = "%s lfs fetch %s" % (ud.basecmd, "--all" if fetchall else "")
|
||||
runfetchcmd(lfs_fetch_cmd, d, log=progresshandler, workdir=(clonedir + "/wt"))
|
||||
worktree_rem_cmd = "%s worktree remove -f wt" % ud.basecmd
|
||||
runfetchcmd(worktree_rem_cmd, d, log=progresshandler, workdir=clonedir)
|
||||
except:
|
||||
logger.warning("Fetching LFS did not succeed.")
|
||||
|
||||
@contextmanager
|
||||
def create_atomic(self, filename):
|
||||
"""Create as a temp file and move atomically into position to avoid races"""
|
||||
fd, tfile = tempfile.mkstemp(dir=os.path.dirname(filename))
|
||||
try:
|
||||
yield tfile
|
||||
umask = os.umask(0o666)
|
||||
os.umask(umask)
|
||||
os.chmod(tfile, (0o666 & ~umask))
|
||||
os.rename(tfile, filename)
|
||||
finally:
|
||||
os.close(fd)
|
||||
if os.path.exists(os.path.join(tmpdir, "git", ".git", "lfs")):
|
||||
runfetchcmd("tar -cf - lfs | tar -xf - -C %s" % ud.clonedir, d, workdir="%s/git/.git" % tmpdir)
|
||||
|
||||
def build_mirror_data(self, ud, d):
|
||||
|
||||
# Create as a temp file and move atomically into position to avoid races
|
||||
@contextmanager
|
||||
def create_atomic(filename):
|
||||
fd, tfile = tempfile.mkstemp(dir=os.path.dirname(filename))
|
||||
try:
|
||||
yield tfile
|
||||
umask = os.umask(0o666)
|
||||
os.umask(umask)
|
||||
os.chmod(tfile, (0o666 & ~umask))
|
||||
os.rename(tfile, filename)
|
||||
finally:
|
||||
os.close(fd)
|
||||
|
||||
if ud.shallow and ud.write_shallow_tarballs:
|
||||
if not os.path.exists(ud.fullshallow):
|
||||
if os.path.islink(ud.fullshallow):
|
||||
os.unlink(ud.fullshallow)
|
||||
self.clone_shallow_with_tarball(ud, d)
|
||||
tempdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
|
||||
shallowclone = os.path.join(tempdir, 'git')
|
||||
try:
|
||||
self.clone_shallow_local(ud, shallowclone, d)
|
||||
|
||||
logger.info("Creating tarball of git repository")
|
||||
with create_atomic(ud.fullshallow) as tfile:
|
||||
runfetchcmd("tar -czf %s ." % tfile, d, workdir=shallowclone)
|
||||
runfetchcmd("touch %s.done" % ud.fullshallow, d)
|
||||
finally:
|
||||
bb.utils.remove(tempdir, recurse=True)
|
||||
elif ud.write_tarballs and not os.path.exists(ud.fullmirror):
|
||||
if os.path.islink(ud.fullmirror):
|
||||
os.unlink(ud.fullmirror)
|
||||
|
||||
logger.info("Creating tarball of git repository")
|
||||
with self.create_atomic(ud.fullmirror) as tfile:
|
||||
mtime = runfetchcmd("{} log --all -1 --format=%cD".format(ud.basecmd), d,
|
||||
with create_atomic(ud.fullmirror) as tfile:
|
||||
mtime = runfetchcmd("git log --all -1 --format=%cD", d,
|
||||
quiet=True, workdir=ud.clonedir)
|
||||
runfetchcmd("tar -czf %s --owner oe:0 --group oe:0 --mtime \"%s\" ."
|
||||
% (tfile, mtime), d, workdir=ud.clonedir)
|
||||
runfetchcmd("touch %s.done" % ud.fullmirror, d)
|
||||
|
||||
def clone_shallow_with_tarball(self, ud, d):
|
||||
ret = False
|
||||
tempdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
|
||||
shallowclone = os.path.join(tempdir, 'git')
|
||||
try:
|
||||
try:
|
||||
self.clone_shallow_local(ud, shallowclone, d)
|
||||
except:
|
||||
logger.warning("Fash shallow clone failed, try to skip fast mode now.")
|
||||
bb.utils.remove(tempdir, recurse=True)
|
||||
os.mkdir(tempdir)
|
||||
ud.shallow_skip_fast = True
|
||||
self.clone_shallow_local(ud, shallowclone, d)
|
||||
logger.info("Creating tarball of git repository")
|
||||
with self.create_atomic(ud.fullshallow) as tfile:
|
||||
runfetchcmd("tar -czf %s ." % tfile, d, workdir=shallowclone)
|
||||
runfetchcmd("touch %s.done" % ud.fullshallow, d)
|
||||
ret = True
|
||||
finally:
|
||||
bb.utils.remove(tempdir, recurse=True)
|
||||
|
||||
return ret
|
||||
|
||||
def clone_shallow_local(self, ud, dest, d):
|
||||
"""
|
||||
Shallow fetch from ud.clonedir (${DL_DIR}/git2/<gitrepo> by default):
|
||||
- For BB_GIT_SHALLOW_DEPTH: git fetch --depth <depth> rev
|
||||
- For BB_GIT_SHALLOW_REVS: git fetch --shallow-exclude=<revs> rev
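As an illustrative sketch (not part of this change; values are made up and
"default" is assumed to be the SRC_URI entry name), these are typically set
in a configuration file such as local.conf:
BB_GIT_SHALLOW = "1"
BB_GIT_SHALLOW_DEPTH_default = "1"
or, instead of a depth, exclude history reachable from known revisions:
BB_GIT_SHALLOW_REVS = "xxxxxxxxxxxxxxxxxxxx"
(a depth and shallow-exclude revisions cannot be combined; see the check below)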
|
||||
"""
|
||||
"""Clone the repo and make it shallow.
|
||||
|
||||
progresshandler = GitProgressHandler(d)
|
||||
repourl = self._get_repo_url(ud)
|
||||
bb.utils.mkdirhier(dest)
|
||||
init_cmd = "%s init -q" % ud.basecmd
|
||||
if ud.bareclone:
|
||||
init_cmd += " --bare"
|
||||
runfetchcmd(init_cmd, d, workdir=dest)
|
||||
# Use repourl when creating a fast initial shallow clone
|
||||
# Prefer already existing full bare clones if available
|
||||
if not ud.shallow_skip_fast and not os.path.exists(ud.clonedir):
|
||||
remote = shlex.quote(repourl)
|
||||
else:
|
||||
remote = ud.clonedir
|
||||
runfetchcmd("%s remote add origin %s" % (ud.basecmd, remote), d, workdir=dest)
|
||||
The upstream url of the new clone isn't set at this time, as it'll be
|
||||
set correctly when unpacked."""
|
||||
runfetchcmd("%s clone %s %s %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, dest), d)
|
||||
|
||||
# Check the histories which should be excluded
|
||||
shallow_exclude = ''
|
||||
for revision in ud.shallow_revs:
|
||||
shallow_exclude += " --shallow-exclude=%s" % revision
|
||||
to_parse, shallow_branches = [], []
|
||||
for name in ud.names:
|
||||
revision = ud.revisions[name]
|
||||
depth = ud.shallow_depths[name]
|
||||
if depth:
|
||||
to_parse.append('%s~%d^{}' % (revision, depth - 1))
|
||||
|
||||
revision = ud.revision
|
||||
depth = ud.shallow_depths[ud.name]
|
||||
# For nobranch, we need a ref, otherwise the commits will be
|
||||
# removed, and for non-nobranch, we truncate the branch to our
|
||||
# srcrev, to avoid keeping unnecessary history beyond that.
|
||||
branch = ud.branches[name]
|
||||
if ud.nobranch:
|
||||
ref = "refs/shallow/%s" % name
|
||||
elif ud.bareclone:
|
||||
ref = "refs/heads/%s" % branch
|
||||
else:
|
||||
ref = "refs/remotes/origin/%s" % branch
|
||||
|
||||
# The --depth and --shallow-exclude can't be used together
|
||||
if depth and shallow_exclude:
|
||||
raise bb.fetch2.FetchError("BB_GIT_SHALLOW_REVS is set, but BB_GIT_SHALLOW_DEPTH is not 0.")
|
||||
shallow_branches.append(ref)
|
||||
runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest)
|
||||
|
||||
# For nobranch, we need a ref, otherwise the commits will be
|
||||
# removed, and for non-nobranch, we truncate the branch to our
|
||||
# srcrev, to avoid keeping unnecessary history beyond that.
|
||||
branch = ud.branch
|
||||
if ud.nobranch:
|
||||
ref = "refs/shallow/%s" % ud.name
|
||||
elif ud.bareclone:
|
||||
ref = "refs/heads/%s" % branch
|
||||
else:
|
||||
ref = "refs/remotes/origin/%s" % branch
|
||||
# Map srcrev+depths to revisions
|
||||
parsed_depths = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join(to_parse)), d, workdir=dest)
|
||||
|
||||
fetch_cmd = "%s fetch origin %s" % (ud.basecmd, revision)
|
||||
if depth:
|
||||
fetch_cmd += " --depth %s" % depth
|
||||
|
||||
if shallow_exclude:
|
||||
fetch_cmd += shallow_exclude
|
||||
|
||||
# Advertise the revision for older git versions such as 2.25.1:
|
||||
# error: Server does not allow request for unadvertised object.
|
||||
# The ud.clonedir is a local temporary dir, will be removed when
|
||||
# fetch is done, so we can do anything on it.
|
||||
adv_cmd = 'git branch -f advertise-%s %s' % (revision, revision)
|
||||
if ud.shallow_skip_fast:
|
||||
runfetchcmd(adv_cmd, d, workdir=ud.clonedir)
|
||||
|
||||
runfetchcmd(fetch_cmd, d, workdir=dest)
|
||||
runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest)
|
||||
# Fetch Git LFS data for fast shallow clones
|
||||
if not ud.shallow_skip_fast:
|
||||
self.lfs_fetch(ud, d, dest, ud.revision)
|
||||
# Resolve specified revisions
|
||||
parsed_revs = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join('"%s^{}"' % r for r in ud.shallow_revs)), d, workdir=dest)
|
||||
shallow_revisions = parsed_depths.splitlines() + parsed_revs.splitlines()
|
||||
|
||||
# Apply extra ref wildcards
|
||||
all_refs_remote = runfetchcmd("%s ls-remote origin 'refs/*'" % ud.basecmd, \
|
||||
d, workdir=dest).splitlines()
|
||||
all_refs = []
|
||||
for line in all_refs_remote:
|
||||
all_refs.append(line.split()[-1])
|
||||
extra_refs = []
|
||||
if 'tag' in ud.parm:
|
||||
extra_refs.append(ud.parm['tag'])
|
||||
all_refs = runfetchcmd('%s for-each-ref "--format=%%(refname)"' % ud.basecmd,
|
||||
d, workdir=dest).splitlines()
|
||||
for r in ud.shallow_extra_refs:
|
||||
if not ud.bareclone:
|
||||
r = r.replace('refs/heads/', 'refs/remotes/origin/')
|
||||
|
||||
if '*' in r:
|
||||
matches = filter(lambda a: fnmatch.fnmatchcase(a, r), all_refs)
|
||||
extra_refs.extend(matches)
|
||||
shallow_branches.extend(matches)
|
||||
else:
|
||||
extra_refs.append(r)
|
||||
shallow_branches.append(r)
|
||||
|
||||
for ref in extra_refs:
|
||||
ref_fetch = os.path.basename(ref)
|
||||
runfetchcmd("%s fetch origin --depth 1 %s" % (ud.basecmd, ref_fetch), d, workdir=dest)
|
||||
revision = runfetchcmd("%s rev-parse FETCH_HEAD" % ud.basecmd, d, workdir=dest)
|
||||
runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest)
|
||||
|
||||
# The url is local ud.clonedir, set it to upstream one
|
||||
runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, shlex.quote(repourl)), d, workdir=dest)
|
||||
# Make the repository shallow
|
||||
shallow_cmd = [self.make_shallow_path, '-s']
|
||||
for b in shallow_branches:
|
||||
shallow_cmd.append('-r')
|
||||
shallow_cmd.append(b)
|
||||
shallow_cmd.extend(shallow_revisions)
|
||||
runfetchcmd(subprocess.list2cmdline(shallow_cmd), d, workdir=dest)
|
||||
|
||||
def unpack(self, ud, destdir, d):
|
||||
""" unpack the downloaded src to destdir"""
|
||||
@@ -692,8 +564,6 @@ class Git(FetchMethod):
|
||||
destdir = ud.destdir = os.path.join(destdir, destsuffix)
|
||||
if os.path.exists(destdir):
|
||||
bb.utils.prunedir(destdir)
|
||||
if not ud.bareclone:
|
||||
ud.unpack_tracer.unpack("git", destdir)
|
||||
|
||||
need_lfs = self._need_lfs(ud)
|
||||
|
||||
@@ -724,14 +594,6 @@ class Git(FetchMethod):
|
||||
if not source_found:
|
||||
raise bb.fetch2.UnpackError("No up to date source found: " + "; ".join(source_error), ud.url)
|
||||
|
||||
# If there is a tag parameter in the url and we also have a fixed srcrev, check the tag
|
||||
# matches the revision
|
||||
if 'tag' in ud.parm and sha1_re.match(ud.revision):
|
||||
output = runfetchcmd("%s rev-list -n 1 %s" % (ud.basecmd, ud.parm['tag']), d, workdir=destdir)
|
||||
output = output.strip()
|
||||
if output != ud.revision:
|
||||
raise bb.fetch2.FetchError("The revision the git tag '%s' resolved to didn't match the SRCREV in use (%s vs %s)" % (ud.parm['tag'], output, ud.revision), ud.url)
|
||||
|
||||
repourl = self._get_repo_url(ud)
|
||||
runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, shlex.quote(repourl)), d, workdir=destdir)
|
||||
|
||||
@@ -740,22 +602,20 @@ class Git(FetchMethod):
|
||||
raise bb.fetch2.FetchError("Repository %s has LFS content, install git-lfs on host to download (or set lfs=0 to ignore it)" % (repourl))
|
||||
elif not need_lfs:
|
||||
bb.note("Repository %s has LFS content but it is not being fetched" % (repourl))
|
||||
else:
|
||||
runfetchcmd("%s lfs install --local" % ud.basecmd, d, workdir=destdir)
|
||||
|
||||
if not ud.nocheckout:
|
||||
if subpath:
|
||||
runfetchcmd("%s read-tree %s%s" % (ud.basecmd, ud.revision, readpathspec), d,
|
||||
runfetchcmd("%s read-tree %s%s" % (ud.basecmd, ud.revisions[ud.names[0]], readpathspec), d,
|
||||
workdir=destdir)
|
||||
runfetchcmd("%s checkout-index -q -f -a" % ud.basecmd, d, workdir=destdir)
|
||||
elif not ud.nobranch:
|
||||
branchname = ud.branch
|
||||
branchname = ud.branches[ud.names[0]]
|
||||
runfetchcmd("%s checkout -B %s %s" % (ud.basecmd, branchname, \
|
||||
ud.revision), d, workdir=destdir)
|
||||
ud.revisions[ud.names[0]]), d, workdir=destdir)
|
||||
runfetchcmd("%s branch %s --set-upstream-to origin/%s" % (ud.basecmd, branchname, \
|
||||
branchname), d, workdir=destdir)
|
||||
else:
|
||||
runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revision), d, workdir=destdir)
|
||||
runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revisions[ud.names[0]]), d, workdir=destdir)
|
||||
|
||||
return True
|
||||
|
||||
@@ -769,13 +629,8 @@ class Git(FetchMethod):
|
||||
clonedir = os.path.realpath(ud.localpath)
|
||||
to_remove.append(clonedir)
|
||||
|
||||
# Remove shallow mirror tarball
|
||||
if ud.shallow:
|
||||
to_remove.append(ud.fullshallow)
|
||||
to_remove.append(ud.fullshallow + ".done")
|
||||
|
||||
for r in to_remove:
|
||||
if os.path.exists(r) or os.path.islink(r):
|
||||
if os.path.exists(r):
|
||||
bb.note('Removing %s' % r)
|
||||
bb.utils.remove(r, True)
|
||||
|
||||
@@ -786,10 +641,10 @@ class Git(FetchMethod):
|
||||
cmd = ""
|
||||
if ud.nobranch:
|
||||
cmd = "%s log --pretty=oneline -n 1 %s -- 2> /dev/null | wc -l" % (
|
||||
ud.basecmd, ud.revision)
|
||||
ud.basecmd, ud.revisions[name])
|
||||
else:
|
||||
cmd = "%s branch --contains %s --list %s 2> /dev/null | wc -l" % (
|
||||
ud.basecmd, ud.revision, ud.branch)
|
||||
ud.basecmd, ud.revisions[name], ud.branches[name])
|
||||
try:
|
||||
output = runfetchcmd(cmd, d, quiet=True, workdir=wd)
|
||||
except bb.fetch2.FetchError:
|
||||
@@ -798,35 +653,6 @@ class Git(FetchMethod):
|
||||
raise bb.fetch2.FetchError("The command '%s' gave output with more then 1 line unexpectedly, output: '%s'" % (cmd, output))
|
||||
return output.split()[0] != "0"
|
||||
|
||||
def _lfs_objects_downloaded(self, ud, d, name, wd):
|
||||
"""
|
||||
Verifies whether the LFS objects for requested revisions have already been downloaded
|
||||
"""
|
||||
# Bail out early if this repository doesn't use LFS
|
||||
if not self._need_lfs(ud) or not self._contains_lfs(ud, d, wd):
|
||||
return True
|
||||
|
||||
# The Git LFS specification specifies ([1]) the LFS folder layout so it should be safe to check for file
|
||||
# existence.
|
||||
# [1] https://github.com/git-lfs/git-lfs/blob/main/docs/spec.md#intercepting-git
|
||||
cmd = "%s lfs ls-files -l %s" \
|
||||
% (ud.basecmd, ud.revision)
|
||||
output = runfetchcmd(cmd, d, quiet=True, workdir=wd).rstrip()
|
||||
# Do not do any further matching if no objects are managed by LFS
|
||||
if not output:
|
||||
return True
|
||||
|
||||
# Match all lines beginning with the hexadecimal OID
|
||||
oid_regex = re.compile("^(([a-fA-F0-9]{2})([a-fA-F0-9]{2})[A-Fa-f0-9]+)")
|
||||
for line in output.split("\n"):
|
||||
oid = re.search(oid_regex, line)
|
||||
if not oid:
|
||||
bb.warn("git lfs ls-files output '%s' did not match expected format." % line)
|
||||
if not os.path.exists(os.path.join(wd, "lfs", "objects", oid.group(2), oid.group(3), oid.group(1))):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def _need_lfs(self, ud):
|
||||
return ud.parm.get("lfs", "1") == "1"
|
||||
|
||||
@@ -835,14 +661,11 @@ class Git(FetchMethod):
|
||||
Check if the repository has 'lfs' (large file) content
|
||||
"""
|
||||
|
||||
if ud.nobranch:
|
||||
# If no branch is specified, use the current git commit
|
||||
refname = self._build_revision(ud, d, ud.name)
|
||||
elif wd == ud.clonedir:
|
||||
# The bare clonedir doesn't use the remote names; it has the branch immediately.
|
||||
refname = ud.branch
|
||||
# The bare clonedir doesn't use the remote names; it has the branch immediately.
|
||||
if wd == ud.clonedir:
|
||||
refname = ud.branches[ud.names[0]]
|
||||
else:
|
||||
refname = "origin/%s" % ud.branch
|
||||
refname = "origin/%s" % ud.branches[ud.names[0]]
|
||||
|
||||
cmd = "%s grep lfs %s:.gitattributes | wc -l" % (
|
||||
ud.basecmd, refname)
|
||||
@@ -859,6 +682,7 @@ class Git(FetchMethod):
|
||||
"""
|
||||
Return True if git-lfs can be found, False otherwise.
|
||||
"""
|
||||
import shutil
|
||||
return shutil.which("git-lfs", path=d.getVar('PATH')) is not None
|
||||
|
||||
def _get_repo_url(self, ud):
|
||||
@@ -874,14 +698,14 @@ class Git(FetchMethod):
|
||||
username = ud.user + '@'
|
||||
else:
|
||||
username = ""
|
||||
return "%s://%s%s%s" % (ud.proto, username, ud.host, urllib.parse.quote(ud.path))
|
||||
return "%s://%s%s%s" % (ud.proto, username, ud.host, ud.path)
|
||||
|
||||
def _revision_key(self, ud, d, name):
|
||||
"""
|
||||
Return a unique key for the url
|
||||
"""
|
||||
# Collapse adjacent slashes
|
||||
return "git:" + ud.host + slash_re.sub(".", ud.path) + ud.unresolvedrev
|
||||
return "git:" + ud.host + slash_re.sub(".", ud.path) + ud.unresolvedrev[name]
|
||||
|
||||
def _lsremote(self, ud, d, search):
|
||||
"""
|
||||
@@ -914,26 +738,26 @@ class Git(FetchMethod):
|
||||
Compute the HEAD revision for the url
|
||||
"""
|
||||
if not d.getVar("__BBSRCREV_SEEN"):
|
||||
raise bb.fetch2.FetchError("Recipe uses a floating tag/branch '%s' for repo '%s' without a fixed SRCREV yet doesn't call bb.fetch2.get_srcrev() (use SRCPV in PV for OE)." % (ud.unresolvedrev, ud.host+ud.path))
|
||||
raise bb.fetch2.FetchError("Recipe uses a floating tag/branch '%s' for repo '%s' without a fixed SRCREV yet doesn't call bb.fetch2.get_srcrev() (use SRCPV in PV for OE)." % (ud.unresolvedrev[name], ud.host+ud.path))
|
||||
|
||||
# Ensure we mark as not cached
|
||||
bb.fetch2.mark_recipe_nocache(d)
|
||||
|
||||
output = self._lsremote(ud, d, "")
|
||||
# Tags of the form ^{} may not work, need to fallback to other form
|
||||
if ud.unresolvedrev[:5] == "refs/" or ud.usehead:
|
||||
head = ud.unresolvedrev
|
||||
tag = ud.unresolvedrev
|
||||
if ud.unresolvedrev[name][:5] == "refs/" or ud.usehead:
|
||||
head = ud.unresolvedrev[name]
|
||||
tag = ud.unresolvedrev[name]
|
||||
else:
|
||||
head = "refs/heads/%s" % ud.unresolvedrev
|
||||
tag = "refs/tags/%s" % ud.unresolvedrev
|
||||
head = "refs/heads/%s" % ud.unresolvedrev[name]
|
||||
tag = "refs/tags/%s" % ud.unresolvedrev[name]
|
||||
for s in [head, tag + "^{}", tag]:
|
||||
for l in output.strip().split('\n'):
|
||||
sha1, ref = l.split()
|
||||
if s == ref:
|
||||
return sha1
|
||||
raise bb.fetch2.FetchError("Unable to resolve '%s' in upstream git repository in git ls-remote output for %s" % \
|
||||
(ud.unresolvedrev, ud.host+ud.path))
|
||||
(ud.unresolvedrev[name], ud.host+ud.path))
|
||||
|
||||
def latest_versionstring(self, ud, d):
|
||||
"""
|
||||
@@ -943,48 +767,44 @@ class Git(FetchMethod):
|
||||
"""
|
||||
pupver = ('', '')
|
||||
|
||||
tagregex = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or r"(?P<pver>([0-9][\.|_]?)+)")
|
||||
try:
|
||||
output = self._lsremote(ud, d, "refs/tags/*")
|
||||
except (bb.fetch2.FetchError, bb.fetch2.NetworkAccess) as e:
|
||||
bb.note("Could not list remote: %s" % str(e))
|
||||
return pupver
|
||||
|
||||
rev_tag_re = re.compile(r"([0-9a-f]{40})\s+refs/tags/(.*)")
|
||||
pver_re = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or r"(?P<pver>([0-9][\.|_]?)+)")
|
||||
nonrel_re = re.compile(r"(alpha|beta|rc|final)+")
|
||||
|
||||
verstring = ""
|
||||
revision = ""
|
||||
for line in output.split("\n"):
|
||||
if not line:
|
||||
break
|
||||
|
||||
m = rev_tag_re.match(line)
|
||||
if not m:
|
||||
continue
|
||||
|
||||
(revision, tag) = m.groups()
|
||||
|
||||
tag_head = line.split("/")[-1]
|
||||
# Ignore non-released branches
|
||||
if nonrel_re.search(tag):
|
||||
m = re.search(r"(alpha|beta|rc|final)+", tag_head)
|
||||
if m:
|
||||
continue
|
||||
|
||||
# search for version in the line
|
||||
m = pver_re.search(tag)
|
||||
if not m:
|
||||
tag = tagregex.search(tag_head)
|
||||
if tag is None:
|
||||
continue
|
||||
|
||||
pver = m.group('pver').replace("_", ".")
|
||||
tag = tag.group('pver')
|
||||
tag = tag.replace("_", ".")
|
||||
|
||||
if verstring and bb.utils.vercmp(("0", pver, ""), ("0", verstring, "")) < 0:
|
||||
if verstring and bb.utils.vercmp(("0", tag, ""), ("0", verstring, "")) < 0:
|
||||
continue
|
||||
|
||||
verstring = pver
|
||||
verstring = tag
|
||||
revision = line.split()[0]
|
||||
pupver = (verstring, revision)
|
||||
|
||||
return pupver
|
||||
|
||||
def _build_revision(self, ud, d, name):
|
||||
return ud.revision
|
||||
return ud.revisions[name]
|
||||
|
||||
def gitpkgv_revision(self, ud, d, name):
|
||||
"""
|
||||
@@ -998,8 +818,9 @@ class Git(FetchMethod):
|
||||
commits = None
|
||||
else:
|
||||
if not os.path.exists(rev_file) or not os.path.getsize(rev_file):
|
||||
from pipes import quote
|
||||
commits = bb.fetch2.runfetchcmd(
|
||||
"git rev-list %s -- | wc -l" % shlex.quote(rev),
|
||||
"git rev-list %s -- | wc -l" % quote(rev),
|
||||
d, quiet=True).strip().lstrip('0')
|
||||
if commits:
|
||||
open(rev_file, "w").write("%d\n" % int(commits))
|
||||
|
||||
@@ -62,35 +62,36 @@ class GitSM(Git):
|
||||
return modules
|
||||
|
||||
# Collect the defined submodules, and their attributes
|
||||
try:
|
||||
gitmodules = runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revision), d, quiet=True, workdir=workdir)
|
||||
except:
|
||||
# No submodules to update
|
||||
gitmodules = ""
|
||||
|
||||
for m, md in parse_gitmodules(gitmodules).items():
|
||||
for name in ud.names:
|
||||
try:
|
||||
module_hash = runfetchcmd("%s ls-tree -z -d %s %s" % (ud.basecmd, ud.revision, md['path']), d, quiet=True, workdir=workdir)
|
||||
gitmodules = runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True, workdir=workdir)
|
||||
except:
|
||||
# If the command fails, we don't have a valid file to check. If it doesn't
|
||||
# fail -- it still might be a failure, see next check...
|
||||
module_hash = ""
|
||||
|
||||
if not module_hash:
|
||||
logger.debug("submodule %s is defined, but is not initialized in the repository. Skipping", m)
|
||||
# No submodules to update
|
||||
continue
|
||||
|
||||
submodules.append(m)
|
||||
paths[m] = md['path']
|
||||
revision[m] = ud.revision
|
||||
uris[m] = md['url']
|
||||
subrevision[m] = module_hash.split()[2]
|
||||
for m, md in parse_gitmodules(gitmodules).items():
|
||||
try:
|
||||
module_hash = runfetchcmd("%s ls-tree -z -d %s %s" % (ud.basecmd, ud.revisions[name], md['path']), d, quiet=True, workdir=workdir)
|
||||
except:
|
||||
# If the command fails, we don't have a valid file to check. If it doesn't
|
||||
# fail -- it still might be a failure, see next check...
|
||||
module_hash = ""
|
||||
|
||||
# Convert relative to absolute uri based on parent uri
|
||||
if uris[m].startswith('..') or uris[m].startswith('./'):
|
||||
newud = copy.copy(ud)
|
||||
newud.path = os.path.normpath(os.path.join(newud.path, uris[m]))
|
||||
uris[m] = Git._get_repo_url(self, newud)
|
||||
if not module_hash:
|
||||
logger.debug("submodule %s is defined, but is not initialized in the repository. Skipping", m)
|
||||
continue
|
||||
|
||||
submodules.append(m)
|
||||
paths[m] = md['path']
|
||||
revision[m] = ud.revisions[name]
|
||||
uris[m] = md['url']
|
||||
subrevision[m] = module_hash.split()[2]
|
||||
|
||||
# Convert relative to absolute uri based on parent uri
|
||||
if uris[m].startswith('..') or uris[m].startswith('./'):
|
||||
newud = copy.copy(ud)
|
||||
newud.path = os.path.normpath(os.path.join(newud.path, uris[m]))
|
||||
uris[m] = Git._get_repo_url(self, newud)
|
||||
|
||||
for module in submodules:
|
||||
# Translate the module url into a SRC_URI
|
||||
@@ -122,13 +123,6 @@ class GitSM(Git):
|
||||
url += ";name=%s" % module
|
||||
url += ";subpath=%s" % module
|
||||
url += ";nobranch=1"
|
||||
url += ";lfs=%s" % self._need_lfs(ud)
|
||||
# Note that adding "user=" here to give credentials to the
|
||||
# submodule is not supported. Since using SRC_URI to give git://
|
||||
# URL a password is not supported, one has to use one of the
# recommended ways (e.g. ~/.netrc or SSH config), which do specify
|
||||
# the user (See comment in git.py).
|
||||
# So, we will not take patches adding "user=" support here.
|
||||
|
||||
ld = d.createCopy()
|
||||
# Not necessary to set SRC_URI, since we're passing the URI to
|
||||
@@ -146,22 +140,6 @@ class GitSM(Git):
|
||||
|
||||
return submodules != []
|
||||
|
||||
def call_process_submodules(self, ud, d, extra_check, subfunc):
|
||||
# If we're using a shallow mirror tarball it needs to be
|
||||
# unpacked temporarily so that we can examine the .gitmodules file
|
||||
# Unpack even when ud.clonedir is not available,
|
||||
# which may occur during a fast shallow clone
|
||||
unpack = extra_check or not os.path.exists(ud.clonedir)
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and unpack:
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
try:
|
||||
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
|
||||
self.process_submodules(ud, tmpdir, subfunc, d)
|
||||
finally:
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, subfunc, d)
|
||||
|
||||
def need_update(self, ud, d):
|
||||
if Git.need_update(self, ud, d):
|
||||
return True
|
||||
@@ -179,7 +157,15 @@ class GitSM(Git):
|
||||
logger.error('gitsm: submodule update check failed: %s %s' % (type(e).__name__, str(e)))
|
||||
need_update_result = True
|
||||
|
||||
self.call_process_submodules(ud, d, not os.path.exists(ud.clonedir), need_update_submodule)
|
||||
# If we're using a shallow mirror tarball it needs to be unpacked
|
||||
# temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and not os.path.exists(ud.clonedir):
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
|
||||
self.process_submodules(ud, tmpdir, need_update_submodule, d)
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, need_update_submodule, d)
|
||||
|
||||
if need_update_list:
|
||||
logger.debug('gitsm: Submodules requiring update: %s' % (' '.join(need_update_list)))
|
||||
@@ -202,7 +188,16 @@ class GitSM(Git):
|
||||
raise
|
||||
|
||||
Git.download(self, ud, d)
|
||||
self.call_process_submodules(ud, d, self.need_update(ud, d), download_submodule)
|
||||
|
||||
# If we're using a shallow mirror tarball it needs to be unpacked
|
||||
# temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and self.need_update(ud, d):
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
|
||||
self.process_submodules(ud, tmpdir, download_submodule, d)
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, download_submodule, d)
|
||||
|
||||
def unpack(self, ud, destdir, d):
|
||||
def unpack_submodules(ud, url, module, modpath, workdir, d):
|
||||
@@ -216,10 +211,6 @@ class GitSM(Git):
|
||||
|
||||
try:
|
||||
newfetch = Fetch([url], d, cache=False)
|
||||
# modpath is needed by unpack tracer to calculate submodule
|
||||
# checkout dir
|
||||
new_ud = newfetch.ud[url]
|
||||
new_ud.modpath = modpath
|
||||
newfetch.unpack(root=os.path.dirname(os.path.join(repo_conf, 'modules', module)))
|
||||
except Exception as e:
|
||||
logger.error('gitsm: submodule unpack failed: %s %s' % (type(e).__name__, str(e)))
|
||||
@@ -245,28 +236,13 @@ class GitSM(Git):
|
||||
ret = self.process_submodules(ud, ud.destdir, unpack_submodules, d)
|
||||
|
||||
if not ud.bareclone and ret:
|
||||
# All submodules should already be downloaded and configured in the tree. This simply
|
||||
# sets up the configuration and checks out the files. The main project config should
|
||||
# remain unmodified, and no download from the internet should occur. As such, lfs smudge
|
||||
# should also be skipped as these files were already smudged in the fetch stage if lfs
|
||||
# was enabled.
|
||||
runfetchcmd("GIT_LFS_SKIP_SMUDGE=1 %s submodule update --recursive --no-fetch" % (ud.basecmd), d, quiet=True, workdir=ud.destdir)
|
||||
def clean(self, ud, d):
|
||||
def clean_submodule(ud, url, module, modpath, workdir, d):
|
||||
url += ";bareclone=1;nobranch=1"
|
||||
try:
|
||||
newfetch = Fetch([url], d, cache=False)
|
||||
newfetch.clean()
|
||||
except Exception as e:
|
||||
logger.warning('gitsm: submodule clean failed: %s %s' % (type(e).__name__, str(e)))
|
||||
|
||||
self.call_process_submodules(ud, d, True, clean_submodule)
|
||||
|
||||
# Clean top git dir
|
||||
Git.clean(self, ud, d)
|
||||
# All submodules should already be downloaded and configured in the tree. This simply sets
|
||||
# up the configuration and checks out the files. The main project config should remain
|
||||
# unmodified, and no download from the internet should occur.
|
||||
runfetchcmd("%s submodule update --recursive --no-fetch" % (ud.basecmd), d, quiet=True, workdir=ud.destdir)
|
||||
|
||||
def implicit_urldata(self, ud, d):
|
||||
import subprocess
|
||||
import shutil, subprocess, tempfile
|
||||
|
||||
urldata = []
|
||||
def add_submodule(ud, url, module, modpath, workdir, d):
|
||||
@@ -274,6 +250,14 @@ class GitSM(Git):
|
||||
newfetch = Fetch([url], d, cache=False)
|
||||
urldata.extend(newfetch.expanded_urldata())
|
||||
|
||||
self.call_process_submodules(ud, d, ud.method.need_update(ud, d), add_submodule)
|
||||
# If we're using a shallow mirror tarball it needs to be unpacked
|
||||
# temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and ud.method.need_update(ud, d):
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
subprocess.check_call("tar -xzf %s" % ud.fullshallow, cwd=tmpdir, shell=True)
|
||||
self.process_submodules(ud, tmpdir, add_submodule, d)
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, add_submodule, d)
|
||||
|
||||
return urldata
|
||||
|
||||
@@ -1,273 +0,0 @@
|
||||
"""
|
||||
BitBake 'Fetch' implementation for Go modules
|
||||
|
||||
The gomod/gomodgit fetchers are used to download Go modules to the module cache
|
||||
from a module proxy or directly from a version control repository.
|
||||
|
||||
Example SRC_URI:
|
||||
|
||||
SRC_URI += "gomod://golang.org/x/net;version=v0.9.0;sha256sum=..."
|
||||
SRC_URI += "gomodgit://golang.org/x/net;version=v0.9.0;repo=go.googlesource.com/net;srcrev=..."
|
||||
|
||||
Required SRC_URI parameters:
|
||||
|
||||
- version
|
||||
The version of the module.
|
||||
|
||||
Optional SRC_URI parameters:
|
||||
|
||||
- mod
|
||||
Fetch and unpack the go.mod file only instead of the complete module.
|
||||
The go command may need to download go.mod files for many different modules
|
||||
when computing the build list, and go.mod files are much smaller than
|
||||
module zip files.
|
||||
The default is "0", set mod=1 for the go.mod file only.
|
||||
|
||||
- sha256sum
|
||||
The checksum of the module zip file, or the go.mod file in case of fetching
|
||||
only the go.mod file. Alternatively, set the SRC_URI variable flag for
|
||||
"module@version.sha256sum".
|
||||
|
||||
- protocol
|
||||
The method used when fetching directly from a version control repository.
|
||||
The default is "https" for git.
|
||||
|
||||
- repo
|
||||
The URL when fetching directly from a version control repository. Required
|
||||
when the URL is different from the module path.
|
||||
|
||||
- srcrev
|
||||
The revision identifier used when fetching directly from a version control
|
||||
repository. Alternatively, set the SRCREV variable for "module@version".
|
||||
|
||||
- subdir
|
||||
The module subdirectory when fetching directly from a version control
|
||||
repository. Required when the module is not located in the root of the
|
||||
repository.
|
||||
|
||||
Related variables:
|
||||
|
||||
- GO_MOD_PROXY
|
||||
The module proxy used by the fetcher.
|
||||
|
||||
- GO_MOD_CACHE_DIR
|
||||
The directory where the module cache is located.
|
||||
This must match the exported GOMODCACHE variable for the go command to find
|
||||
the downloaded modules.
|
||||
|
||||
See the Go modules reference, https://go.dev/ref/mod, for more information
|
||||
about the module cache, module proxies and version control systems.
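
Illustrative sketch (not part of this change) of the alternative forms named
above, with placeholder checksum/revision values:

SRC_URI += "gomod://golang.org/x/net;version=v0.9.0"
SRC_URI[golang.org/x/net@v0.9.0.sha256sum] = "<sha256 of the module zip>"

SRC_URI += "gomodgit://golang.org/x/net;version=v0.9.0;repo=go.googlesource.com/net"
SRCREV_golang.org/x/net@v0.9.0 = "<40-character commit hash>"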
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import zipfile
|
||||
|
||||
import bb
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import MissingParameterError
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import subprocess_setup
|
||||
from bb.fetch2.git import Git
|
||||
from bb.fetch2.wget import Wget
|
||||
|
||||
|
||||
def escape(path):
|
||||
"""Escape capital letters using exclamation points."""
|
||||
return re.sub(r'([A-Z])', lambda m: '!' + m.group(1).lower(), path)
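# Illustrative only (not part of this change): with the escaping above,
#   escape("github.com/BurntSushi/toml") -> "github.com/!burnt!sushi/toml"
# which matches the case-encoding the Go module cache uses on disk.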
|
||||
|
||||
|
||||
class GoMod(Wget):
|
||||
"""Class to fetch Go modules from a Go module proxy via wget"""
|
||||
|
||||
def supports(self, ud, d):
|
||||
"""Check to see if a given URL is for this fetcher."""
|
||||
return ud.type == 'gomod'
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
"""Set up to download the module from the module proxy.
|
||||
|
||||
Set up to download the module zip file to the module cache directory
|
||||
and unpack the go.mod file (unless downloading only the go.mod file):
|
||||
|
||||
cache/download/<module>/@v/<version>.zip: The module zip file.
|
||||
cache/download/<module>/@v/<version>.mod: The go.mod file.
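
For example (illustrative only, not part of this change), with the default
GO_MOD_CACHE_DIR of "pkg/mod", golang.org/x/net at v0.9.0 ends up as:
pkg/mod/cache/download/golang.org/x/net/@v/v0.9.0.zip
pkg/mod/cache/download/golang.org/x/net/@v/v0.9.0.mod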
|
||||
"""
|
||||
|
||||
proxy = d.getVar('GO_MOD_PROXY') or 'proxy.golang.org'
|
||||
moddir = d.getVar('GO_MOD_CACHE_DIR') or 'pkg/mod'
|
||||
|
||||
if 'version' not in ud.parm:
|
||||
raise MissingParameterError('version', ud.url)
|
||||
|
||||
module = ud.host
|
||||
if ud.path != '/':
|
||||
module += ud.path
|
||||
ud.parm['module'] = module
|
||||
version = ud.parm['version']
|
||||
|
||||
# Set URL and filename for wget download
|
||||
if ud.parm.get('mod', '0') == '1':
|
||||
ext = '.mod'
|
||||
else:
|
||||
ext = '.zip'
|
||||
path = escape(f"{module}/@v/{version}{ext}")
|
||||
ud.url = bb.fetch2.encodeurl(
|
||||
('https', proxy, '/' + path, None, None, None))
|
||||
ud.parm['downloadfilename'] = f"{module.replace('/', '.')}@{version}{ext}"
|
||||
|
||||
# Set name for checksum verification
|
||||
ud.parm['name'] = f"{module}@{version}"
|
||||
|
||||
# Set path for unpack
|
||||
ud.parm['unpackpath'] = os.path.join(moddir, 'cache/download', path)
|
||||
|
||||
super().urldata_init(ud, d)
|
||||
|
||||
def unpack(self, ud, rootdir, d):
|
||||
"""Unpack the module in the module cache."""
|
||||
|
||||
# Unpack the module zip file or go.mod file
|
||||
unpackpath = os.path.join(rootdir, ud.parm['unpackpath'])
|
||||
unpackdir = os.path.dirname(unpackpath)
|
||||
bb.utils.mkdirhier(unpackdir)
|
||||
ud.unpack_tracer.unpack("file-copy", unpackdir)
|
||||
cmd = f"cp {ud.localpath} {unpackpath}"
|
||||
path = d.getVar('PATH')
|
||||
if path:
|
||||
cmd = f"PATH={path} {cmd}"
|
||||
name = os.path.basename(unpackpath)
|
||||
bb.note(f"Unpacking {name} to {unpackdir}/")
|
||||
subprocess.check_call(cmd, shell=True, preexec_fn=subprocess_setup)
|
||||
|
||||
if name.endswith('.zip'):
|
||||
# Unpack the go.mod file from the zip file
|
||||
module = ud.parm['module']
|
||||
name = name.rsplit('.', 1)[0] + '.mod'
|
||||
bb.note(f"Unpacking {name} to {unpackdir}/")
|
||||
with zipfile.ZipFile(ud.localpath) as zf:
|
||||
with open(os.path.join(unpackdir, name), mode='wb') as mf:
|
||||
try:
|
||||
f = module + '@' + ud.parm['version'] + '/go.mod'
|
||||
shutil.copyfileobj(zf.open(f), mf)
|
||||
except KeyError:
|
||||
# If the module does not have a go.mod file, synthesize
|
||||
# one containing only a module statement.
|
||||
mf.write(f'module {module}\n'.encode())
|
||||
|
||||
|
||||
class GoModGit(Git):
|
||||
"""Class to fetch Go modules directly from a git repository"""
|
||||
|
||||
def supports(self, ud, d):
|
||||
"""Check to see if a given URL is for this fetcher."""
|
||||
return ud.type == 'gomodgit'
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
"""Set up to download the module from the git repository.
|
||||
|
||||
Set up to download the git repository to the module cache directory and
|
||||
unpack the module zip file and the go.mod file:
|
||||
|
||||
cache/vcs/<hash>: The bare git repository.
|
||||
cache/download/<module>/@v/<version>.zip: The module zip file.
|
||||
cache/download/<module>/@v/<version>.mod: The go.mod file.
|
||||
"""
|
||||
|
||||
moddir = d.getVar('GO_MOD_CACHE_DIR') or 'pkg/mod'
|
||||
|
||||
if 'version' not in ud.parm:
|
||||
raise MissingParameterError('version', ud.url)
|
||||
|
||||
module = ud.host
|
||||
if ud.path != '/':
|
||||
module += ud.path
|
||||
ud.parm['module'] = module
|
||||
|
||||
# Set host, path and srcrev for git download
|
||||
if 'repo' in ud.parm:
|
||||
repo = ud.parm['repo']
|
||||
idx = repo.find('/')
|
||||
if idx != -1:
|
||||
ud.host = repo[:idx]
|
||||
ud.path = repo[idx:]
|
||||
else:
|
||||
ud.host = repo
|
||||
ud.path = ''
|
||||
if 'protocol' not in ud.parm:
|
||||
ud.parm['protocol'] = 'https'
|
||||
ud.name = f"{module}@{ud.parm['version']}"
|
||||
srcrev = d.getVar('SRCREV_' + ud.name)
|
||||
if srcrev:
|
||||
if 'srcrev' not in ud.parm:
|
||||
ud.parm['srcrev'] = srcrev
|
||||
else:
|
||||
if 'srcrev' in ud.parm:
|
||||
d.setVar('SRCREV_' + ud.name, ud.parm['srcrev'])
|
||||
if 'branch' not in ud.parm:
|
||||
ud.parm['nobranch'] = '1'
|
||||
|
||||
# Set subpath, subdir and bareclone for git unpack
|
||||
if 'subdir' in ud.parm:
|
||||
ud.parm['subpath'] = ud.parm['subdir']
|
||||
key = f"git3:{ud.parm['protocol']}://{ud.host}{ud.path}".encode()
|
||||
ud.parm['key'] = key
|
||||
ud.parm['subdir'] = os.path.join(moddir, 'cache/vcs',
|
||||
hashlib.sha256(key).hexdigest())
|
||||
ud.parm['bareclone'] = '1'
|
||||
|
||||
super().urldata_init(ud, d)
|
||||
|
||||
def unpack(self, ud, rootdir, d):
|
||||
"""Unpack the module in the module cache."""
|
||||
|
||||
# Unpack the bare git repository
|
||||
super().unpack(ud, rootdir, d)
|
||||
|
||||
moddir = d.getVar('GO_MOD_CACHE_DIR') or 'pkg/mod'
|
||||
|
||||
# Create the info file
|
||||
module = ud.parm['module']
|
||||
repodir = os.path.join(rootdir, ud.parm['subdir'])
|
||||
with open(repodir + '.info', 'wb') as f:
|
||||
f.write(ud.parm['key'])
|
||||
|
||||
# Unpack the go.mod file from the repository
|
||||
unpackdir = os.path.join(rootdir, moddir, 'cache/download',
|
||||
escape(module), '@v')
|
||||
bb.utils.mkdirhier(unpackdir)
|
||||
srcrev = ud.parm['srcrev']
|
||||
version = ud.parm['version']
|
||||
escaped_version = escape(version)
|
||||
cmd = f"git ls-tree -r --name-only '{srcrev}'"
|
||||
if 'subpath' in ud.parm:
|
||||
cmd += f" '{ud.parm['subpath']}'"
|
||||
files = runfetchcmd(cmd, d, workdir=repodir).split()
|
||||
name = escaped_version + '.mod'
|
||||
bb.note(f"Unpacking {name} to {unpackdir}/")
|
||||
with open(os.path.join(unpackdir, name), mode='wb') as mf:
|
||||
f = 'go.mod'
|
||||
if 'subpath' in ud.parm:
|
||||
f = os.path.join(ud.parm['subpath'], f)
|
||||
if f in files:
|
||||
cmd = ['git', 'cat-file', 'blob', srcrev + ':' + f]
|
||||
subprocess.check_call(cmd, stdout=mf, cwd=repodir,
|
||||
preexec_fn=subprocess_setup)
|
||||
else:
|
||||
# If the module does not have a go.mod file, synthesize one
|
||||
# containing only a module statement.
|
||||
mf.write(f'module {module}\n'.encode())
|
||||
|
||||
# Synthesize the module zip file from the repository
|
||||
name = escaped_version + '.zip'
|
||||
bb.note(f"Unpacking {name} to {unpackdir}/")
|
||||
with zipfile.ZipFile(os.path.join(unpackdir, name), mode='w') as zf:
|
||||
prefix = module + '@' + version + '/'
|
||||
for f in files:
|
||||
cmd = ['git', 'cat-file', 'blob', srcrev + ':' + f]
|
||||
data = subprocess.check_output(cmd, cwd=repodir,
|
||||
preexec_fn=subprocess_setup)
|
||||
zf.writestr(prefix + f, data)
|
||||
@@ -242,7 +242,6 @@ class Hg(FetchMethod):
|
||||
revflag = "-r %s" % ud.revision
|
||||
subdir = ud.parm.get("destsuffix", ud.module)
|
||||
codir = "%s/%s" % (destdir, subdir)
|
||||
ud.unpack_tracer.unpack("hg", codir)
|
||||
|
||||
scmdata = ud.parm.get("scmdata", "")
|
||||
if scmdata != "nokeep":
|
||||
|
||||
@@ -29,10 +29,11 @@ class Local(FetchMethod):
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
# We don't set localfile as for this fetcher the file is already local!
|
||||
ud.basename = os.path.basename(ud.path)
|
||||
ud.basepath = ud.path
|
||||
ud.decodedurl = urllib.parse.unquote(ud.url.split("://")[1].split(";")[0])
|
||||
ud.basename = os.path.basename(ud.decodedurl)
|
||||
ud.basepath = ud.decodedurl
|
||||
ud.needdonestamp = False
|
||||
if "*" in ud.path:
|
||||
if "*" in ud.decodedurl:
|
||||
raise bb.fetch2.ParameterError("file:// urls using globbing are no longer supported. Please place the files in a directory and reference that instead.", ud.url)
|
||||
return
|
||||
|
||||
@@ -40,23 +41,21 @@ class Local(FetchMethod):
|
||||
"""
|
||||
Return the local filename of a given url assuming a successful fetch.
|
||||
"""
|
||||
return self.localfile_searchpaths(urldata, d)[-1]
|
||||
return self.localpaths(urldata, d)[-1]
|
||||
|
||||
def localfile_searchpaths(self, urldata, d):
|
||||
def localpaths(self, urldata, d):
|
||||
"""
|
||||
Return the local filename of a given url assuming a successful fetch.
|
||||
"""
|
||||
searched = []
|
||||
path = urldata.path
|
||||
path = urldata.decodedurl
|
||||
newpath = path
|
||||
if path[0] == "/":
|
||||
logger.debug2("Using absolute %s" % (path))
|
||||
return [path]
|
||||
filespath = d.getVar('FILESPATH')
|
||||
if filespath:
|
||||
logger.debug2("Searching for %s in paths:\n %s" % (path, "\n ".join(filespath.split(":"))))
|
||||
newpath, hist = bb.utils.which(filespath, path, history=True)
|
||||
logger.debug2("Using %s for %s" % (newpath, path))
|
||||
searched.extend(hist)
|
||||
return searched
|
||||
|
||||
|
||||
@@ -42,15 +42,11 @@ from bb.utils import is_semver
|
||||
|
||||
def npm_package(package):
|
||||
"""Convert the npm package name to remove unsupported character"""
|
||||
# For scoped package names ('@user/package') the '/' is replaced by a '-'.
|
||||
# This is similar to what 'npm pack' does, but 'npm pack' also strips the
|
||||
# leading '@', which can lead to ambiguous package names.
|
||||
name = re.sub("/", "-", package)
|
||||
name = name.lower()
|
||||
name = re.sub(r"[^\-a-z0-9@]", "", name)
|
||||
name = name.strip("-")
|
||||
return name
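# Illustrative only (not part of this change): with the normalization above,
# a scoped name keeps its leading '@' while other unsupported characters are
# dropped, e.g. npm_package("@Types/Express_4") -> "@types-express4"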
|
||||
|
||||
# Scoped package names (with the @) use the same naming convention
|
||||
# as the 'npm pack' command.
|
||||
if package.startswith("@"):
|
||||
return re.sub("/", "-", package[1:])
|
||||
return package
|
||||
|
||||
def npm_filename(package, version):
|
||||
"""Get the filename of a npm package"""
|
||||
@@ -91,12 +87,6 @@ class NpmEnvironment(object):
|
||||
self.d = d
|
||||
|
||||
self.user_config = tempfile.NamedTemporaryFile(mode="w", buffering=1)
|
||||
|
||||
hn = self._home_npmrc(d)
|
||||
if hn is not None:
|
||||
with open(hn, 'r') as hnf:
|
||||
self.user_config.write(hnf.read())
|
||||
|
||||
for key, value in configs:
|
||||
self.user_config.write("%s=%s\n" % (key, value))
|
||||
|
||||
@@ -109,20 +99,10 @@ class NpmEnvironment(object):
|
||||
if self.user_config:
|
||||
self.user_config.close()
|
||||
|
||||
def _home_npmrc(self, d):
|
||||
"""Function to return user's HOME .npmrc file (or None if it doesn't exist)"""
|
||||
home_npmrc_file = os.path.join(os.environ.get("HOME"), ".npmrc")
|
||||
if d.getVar("BB_USE_HOME_NPMRC") == "1" and os.path.exists(home_npmrc_file):
|
||||
bb.warn(f"BB_USE_HOME_NPMRC flag set and valid .npmrc detected - "\
|
||||
f"npm fetcher will use {home_npmrc_file}")
|
||||
return home_npmrc_file
|
||||
return None
|
||||
|
||||
def run(self, cmd, args=None, configs=None, workdir=None):
|
||||
"""Run npm command in a controlled environment"""
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
d = bb.data.createCopy(self.d)
|
||||
d.setVar("PATH", d.getVar("PATH")) # PATH might contain $HOME - evaluate it before patching
|
||||
d.setVar("HOME", tmpdir)
|
||||
|
||||
if not workdir:
|
||||
@@ -181,7 +161,7 @@ class Npm(FetchMethod):
|
||||
# Using the 'downloadfilename' parameter as local filename
|
||||
# or the npm package name.
|
||||
if "downloadfilename" in ud.parm:
|
||||
ud.localfile = npm_localfile(ud.parm["downloadfilename"])
|
||||
ud.localfile = npm_localfile(d.expand(ud.parm["downloadfilename"]))
|
||||
else:
|
||||
ud.localfile = npm_localfile(ud.package, ud.version)
|
||||
|
||||
@@ -314,7 +294,6 @@ class Npm(FetchMethod):
|
||||
destsuffix = ud.parm.get("destsuffix", "npm")
|
||||
destdir = os.path.join(rootdir, destsuffix)
|
||||
npm_unpack(ud.localpath, destdir, d)
|
||||
ud.unpack_tracer.unpack("npm", destdir)
|
||||
|
||||
def clean(self, ud, d):
|
||||
"""Clean any existing full or partial download"""
|
||||
|
||||
@@ -37,26 +37,24 @@ def foreach_dependencies(shrinkwrap, callback=None, dev=False):
|
||||
"""
|
||||
Run a callback for each dependencies of a shrinkwrap file.
|
||||
The callback is using the format:
|
||||
callback(name, data, location)
|
||||
callback(name, params, deptree)
|
||||
with:
|
||||
name = the package name (string)
|
||||
data = the package data (dictionary)
|
||||
location = the location of the package (string)
|
||||
params = the package parameters (dictionary)
|
||||
deptree = the package dependency tree (array of strings)
|
||||
"""
|
||||
packages = shrinkwrap.get("packages")
|
||||
if not packages:
|
||||
raise FetchError("Invalid shrinkwrap file format")
|
||||
def _walk_deps(deps, deptree):
|
||||
for name in deps:
|
||||
subtree = [*deptree, name]
|
||||
_walk_deps(deps[name].get("dependencies", {}), subtree)
|
||||
if callback is not None:
|
||||
if deps[name].get("dev", False) and not dev:
|
||||
continue
|
||||
elif deps[name].get("bundled", False):
|
||||
continue
|
||||
callback(name, deps[name], subtree)
|
||||
|
||||
for location, data in packages.items():
|
||||
# Skip empty main and local link target packages
|
||||
if not location.startswith('node_modules/'):
|
||||
continue
|
||||
elif not dev and data.get("dev", False):
|
||||
continue
|
||||
elif data.get("inBundle", False):
|
||||
continue
|
||||
name = location.split('node_modules/')[-1]
|
||||
callback(name, data, location)
|
||||
_walk_deps(shrinkwrap.get("dependencies", {}), [])
|
||||
|
||||
class NpmShrinkWrap(FetchMethod):
|
||||
"""Class to fetch all package from a shrinkwrap file"""
|
||||
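As a usage illustration for the callback contract described in the docstring above (the 'packages'-keyed lock-file layout, i.e. callback(name, data, location)), a hedged sketch with a made-up shrinkwrap fragment:

shrinkwrap = {
    "packages": {
        "": {"name": "my-app"},  # main package entry, skipped by the walk
        "node_modules/foo": {"version": "1.0.0", "integrity": "sha512-..."},
        "node_modules/foo/node_modules/bar": {"version": "2.0.0", "dev": True},
    }
}

def collect(name, data, location):
    print(name, data.get("version"), "at", location)

# Mirrors the non-recursive walk shown above: only 'node_modules/...' entries,
# with dev dependencies skipped unless explicitly requested.
for location, data in shrinkwrap["packages"].items():
    if not location.startswith("node_modules/"):
        continue
    if data.get("dev", False):
        continue
    collect(location.split("node_modules/")[-1], data, location)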
@@ -77,24 +75,20 @@ class NpmShrinkWrap(FetchMethod):
|
||||
# Resolve the dependencies
|
||||
ud.deps = []
|
||||
|
||||
def _resolve_dependency(name, params, destsuffix):
|
||||
def _resolve_dependency(name, params, deptree):
|
||||
url = None
|
||||
localpath = None
|
||||
extrapaths = []
|
||||
destsubdirs = [os.path.join("node_modules", dep) for dep in deptree]
|
||||
destsuffix = os.path.join(*destsubdirs)
|
||||
unpack = True
|
||||
|
||||
integrity = params.get("integrity")
|
||||
resolved = params.get("resolved")
|
||||
version = params.get("version")
|
||||
link = params.get("link", False)
|
||||
|
||||
# Handle link sources
|
||||
if link:
|
||||
localpath = resolved
|
||||
unpack = False
|
||||
integrity = params.get("integrity", None)
|
||||
resolved = params.get("resolved", None)
|
||||
version = params.get("version", None)
|
||||
|
||||
# Handle registry sources
|
||||
elif version and is_semver(version) and integrity:
|
||||
if is_semver(version) and integrity:
|
||||
# Handle duplicate dependencies without url
|
||||
if not resolved:
|
||||
return
|
||||
@@ -122,10 +116,10 @@ class NpmShrinkWrap(FetchMethod):
|
||||
extrapaths.append(resolvefile)
|
||||
|
||||
# Handle http tarball sources
|
||||
elif resolved.startswith("http") and integrity:
|
||||
localfile = npm_localfile(os.path.basename(resolved))
|
||||
elif version.startswith("http") and integrity:
|
||||
localfile = npm_localfile(os.path.basename(version))
|
||||
|
||||
uri = URI(resolved)
|
||||
uri = URI(version)
|
||||
uri.params["downloadfilename"] = localfile
|
||||
|
||||
checksum_name, checksum_expected = npm_integrity(integrity)
|
||||
@@ -135,12 +129,28 @@ class NpmShrinkWrap(FetchMethod):
|
||||
|
||||
localpath = os.path.join(d.getVar("DL_DIR"), localfile)
|
||||
|
||||
# Handle local tarball sources
|
||||
elif resolved.startswith("file"):
|
||||
localpath = resolved[5:]
|
||||
# Handle local tarball and link sources
|
||||
elif version.startswith("file"):
|
||||
localpath = version[5:]
|
||||
if not version.endswith(".tgz"):
|
||||
unpack = False
|
||||
|
||||
# Handle git sources
|
||||
elif resolved.startswith("git"):
|
||||
elif version.startswith(("git", "bitbucket","gist")) or (
|
||||
not version.endswith((".tgz", ".tar", ".tar.gz"))
|
||||
and not version.startswith((".", "@", "/"))
|
||||
and "/" in version
|
||||
):
|
||||
if version.startswith("github:"):
|
||||
version = "git+https://github.com/" + version[len("github:"):]
|
||||
elif version.startswith("gist:"):
|
||||
version = "git+https://gist.github.com/" + version[len("gist:"):]
|
||||
elif version.startswith("bitbucket:"):
|
||||
version = "git+https://bitbucket.org/" + version[len("bitbucket:"):]
|
||||
elif version.startswith("gitlab:"):
|
||||
version = "git+https://gitlab.com/" + version[len("gitlab:"):]
|
||||
elif not version.startswith(("git+","git:")):
|
||||
version = "git+https://github.com/" + version
|
||||
regex = re.compile(r"""
|
||||
^
|
||||
git\+
|
||||
@@ -152,16 +162,16 @@ class NpmShrinkWrap(FetchMethod):
|
||||
$
|
||||
""", re.VERBOSE)
|
||||
|
||||
match = regex.match(resolved)
|
||||
match = regex.match(version)
|
||||
|
||||
if not match:
|
||||
raise ParameterError("Invalid git url: %s" % resolved, ud.url)
|
||||
raise ParameterError("Invalid git url: %s" % version, ud.url)
|
||||
|
||||
groups = match.groupdict()
|
||||
|
||||
uri = URI("git://" + str(groups["url"]))
|
||||
uri.params["protocol"] = str(groups["protocol"])
|
||||
uri.params["rev"] = str(groups["rev"])
|
||||
uri.params["nobranch"] = "1"
|
||||
uri.params["destsuffix"] = destsuffix
|
||||
|
||||
url = str(uri)
|
||||
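The shorthand handling a few lines up is effectively a small normalisation step of its own: npm accepts 'github:', 'gist:', 'bitbucket:' and 'gitlab:' prefixes (or a bare 'user/repo'), all of which are rewritten to a 'git+https://' URL before the git regex runs. A standalone restatement of that mapping (sketch only):

def normalize_git_spec(version):
    prefixes = {
        "github:": "git+https://github.com/",
        "gist:": "git+https://gist.github.com/",
        "bitbucket:": "git+https://bitbucket.org/",
        "gitlab:": "git+https://gitlab.com/",
    }
    for prefix, base in prefixes.items():
        if version.startswith(prefix):
            return base + version[len(prefix):]
    if not version.startswith(("git+", "git:")):
        # bare 'user/repo' shorthand defaults to GitHub
        return "git+https://github.com/" + version
    return version

assert normalize_git_spec("user/repo") == "git+https://github.com/user/repo"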
@@ -169,9 +179,7 @@ class NpmShrinkWrap(FetchMethod):
|
||||
else:
|
||||
raise ParameterError("Unsupported dependency: %s" % name, ud.url)
|
||||
|
||||
# name is needed by unpack tracer for module mapping
|
||||
ud.deps.append({
|
||||
"name": name,
|
||||
"url": url,
|
||||
"localpath": localpath,
|
||||
"extrapaths": extrapaths,
|
||||
@@ -205,15 +213,13 @@ class NpmShrinkWrap(FetchMethod):
|
||||
@staticmethod
|
||||
def _foreach_proxy_method(ud, handle):
|
||||
returns = []
|
||||
#Check if there are dependencies before try to fetch them
|
||||
if len(ud.deps) > 0:
|
||||
for proxy_url in ud.proxy.urls:
|
||||
proxy_ud = ud.proxy.ud[proxy_url]
|
||||
proxy_d = ud.proxy.d
|
||||
proxy_ud.setup_localpath(proxy_d)
|
||||
lf = lockfile(proxy_ud.lockfile)
|
||||
returns.append(handle(proxy_ud.method, proxy_ud, proxy_d))
|
||||
unlockfile(lf)
|
||||
for proxy_url in ud.proxy.urls:
|
||||
proxy_ud = ud.proxy.ud[proxy_url]
|
||||
proxy_d = ud.proxy.d
|
||||
proxy_ud.setup_localpath(proxy_d)
|
||||
lf = lockfile(proxy_ud.lockfile)
|
||||
returns.append(handle(proxy_ud.method, proxy_ud, proxy_d))
|
||||
unlockfile(lf)
|
||||
return returns
|
||||
|
||||
def verify_donestamp(self, ud, d):
|
||||
@@ -246,11 +252,10 @@ class NpmShrinkWrap(FetchMethod):
|
||||
|
||||
def unpack(self, ud, rootdir, d):
|
||||
"""Unpack the downloaded dependencies"""
|
||||
destdir = rootdir
|
||||
destdir = d.getVar("S")
|
||||
destsuffix = ud.parm.get("destsuffix")
|
||||
if destsuffix:
|
||||
destdir = os.path.join(rootdir, destsuffix)
|
||||
ud.unpack_tracer.unpack("npm-shrinkwrap", destdir)
|
||||
|
||||
bb.utils.mkdirhier(destdir)
|
||||
bb.utils.copyfile(ud.shrinkwrap_file,
|
||||
|
||||
@@ -77,7 +77,7 @@ class S3(FetchMethod):
|
||||
else:
|
||||
ud.basename = os.path.basename(ud.path)
|
||||
|
||||
ud.localfile = ud.basename
|
||||
ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
|
||||
|
||||
ud.basecmd = d.getVar("FETCHCMD_s3") or "/usr/bin/env aws s3"
|
||||
|
||||
|
||||
@@ -77,7 +77,7 @@ class SFTP(FetchMethod):
|
||||
else:
|
||||
ud.basename = os.path.basename(ud.path)
|
||||
|
||||
ud.localfile = ud.basename
|
||||
ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
|
||||
|
||||
def download(self, ud, d):
|
||||
"""Fetch urls"""
|
||||
|
||||
@@ -73,7 +73,8 @@ class SSH(FetchMethod):
|
||||
path = m.group('path')
|
||||
path = urllib.parse.unquote(path)
|
||||
host = m.group('host')
|
||||
urldata.localfile = os.path.basename(os.path.normpath(path))
|
||||
urldata.localpath = os.path.join(d.getVar('DL_DIR'),
|
||||
os.path.basename(os.path.normpath(path)))
|
||||
|
||||
def download(self, urldata, d):
|
||||
dldir = d.getVar('DL_DIR')
|
||||
|
||||
@@ -210,6 +210,3 @@ class Svn(FetchMethod):
|
||||
|
||||
def _build_revision(self, ud, d):
|
||||
return ud.revision
|
||||
|
||||
def supports_checksum(self, urldata):
|
||||
return False
|
||||
|
||||
@@ -53,6 +53,11 @@ class WgetProgressHandler(bb.progress.LineFilterProgressHandler):
|
||||
class Wget(FetchMethod):
|
||||
"""Class to fetch urls via 'wget'"""
|
||||
|
||||
# CDNs like CloudFlare may do a 'browser integrity test' which can fail
|
||||
# with the standard wget/urllib User-Agent, so pretend to be a modern
|
||||
# browser.
|
||||
user_agent = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:84.0) Gecko/20100101 Firefox/84.0"
|
||||
|
||||
def check_certs(self, d):
|
||||
"""
|
||||
Should certificates be checked?
|
||||
@@ -78,14 +83,11 @@ class Wget(FetchMethod):
|
||||
else:
|
||||
ud.basename = os.path.basename(ud.path)
|
||||
|
||||
ud.localfile = ud.basename
|
||||
ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
|
||||
if not ud.localfile:
|
||||
ud.localfile = ud.host + ud.path.replace("/", ".")
|
||||
ud.localfile = d.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", "."))
|
||||
|
||||
self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget --tries=2 --timeout=100"
|
||||
|
||||
if ud.type == 'ftp' or ud.type == 'ftps':
|
||||
self.basecmd += " --passive-ftp"
|
||||
self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30 --passive-ftp"
|
||||
|
||||
if not self.check_certs(d):
|
||||
self.basecmd += " --no-check-certificate"
|
||||
@@ -96,17 +98,16 @@ class Wget(FetchMethod):
|
||||
|
||||
logger.debug2("Fetching %s using command '%s'" % (ud.url, command))
|
||||
bb.fetch2.check_network_access(d, command, ud.url)
|
||||
runfetchcmd(command + ' --progress=dot --verbose', d, quiet, log=progresshandler, workdir=workdir)
|
||||
runfetchcmd(command + ' --progress=dot -v', d, quiet, log=progresshandler, workdir=workdir)
|
||||
|
||||
def download(self, ud, d):
|
||||
"""Fetch urls"""
|
||||
|
||||
fetchcmd = self.basecmd
|
||||
|
||||
dldir = os.path.realpath(d.getVar("DL_DIR"))
|
||||
localpath = os.path.join(dldir, ud.localfile) + ".tmp"
|
||||
localpath = os.path.join(d.getVar("DL_DIR"), ud.localfile) + ".tmp"
|
||||
bb.utils.mkdirhier(os.path.dirname(localpath))
|
||||
fetchcmd += " --output-document=%s" % shlex.quote(localpath)
|
||||
fetchcmd += " -O %s" % shlex.quote(localpath)
|
||||
|
||||
if ud.user and ud.pswd:
|
||||
fetchcmd += " --auth-no-challenge"
|
||||
@@ -122,18 +123,14 @@ class Wget(FetchMethod):
|
||||
fetchcmd += " --user=%s --password=%s" % (ud.user, ud.pswd)
|
||||
|
||||
uri = ud.url.split(";")[0]
|
||||
fetchcmd += " --continue --directory-prefix=%s '%s'" % (dldir, uri)
|
||||
if os.path.exists(ud.localpath):
|
||||
# file exists, but we didnt complete it.. trying again..
|
||||
fetchcmd += d.expand(" -c -P ${DL_DIR} '%s'" % uri)
|
||||
else:
|
||||
fetchcmd += d.expand(" -P ${DL_DIR} '%s'" % uri)
|
||||
|
||||
self._runwget(ud, d, fetchcmd, False)
|
||||
|
||||
# Sanity check since wget can pretend it succeed when it didn't
|
||||
# Also, this used to happen if sourceforge sent us to the mirror page
|
||||
if not os.path.exists(localpath):
|
||||
raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, localpath), uri)
|
||||
|
||||
if os.path.getsize(localpath) == 0:
|
||||
os.remove(localpath)
|
||||
raise FetchError("The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right." % (uri), uri)
|
||||
|
||||
# Try and verify any checksum now, meaning if it isn't correct, we don't remove the
|
||||
# original file, which might be a race (imagine two recipes referencing the same
|
||||
# source, one with an incorrect checksum)
|
||||
@@ -143,6 +140,15 @@ class Wget(FetchMethod):
|
||||
# Our lock prevents multiple writers but mirroring code may grab incomplete files
|
||||
os.rename(localpath, localpath[:-4])
|
||||
|
||||
# Sanity check since wget can pretend it succeed when it didn't
|
||||
# Also, this used to happen if sourceforge sent us to the mirror page
|
||||
if not os.path.exists(ud.localpath):
|
||||
raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, ud.localpath), uri)
|
||||
|
||||
if os.path.getsize(ud.localpath) == 0:
|
||||
os.remove(ud.localpath)
|
||||
raise FetchError("The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right." % (uri), uri)
|
||||
|
||||
return True
|
||||
|
||||
def checkstatus(self, fetch, ud, d, try_again=True):
|
||||
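The download path in the newer code above follows a common "fetch to a temporary name, validate, then rename into place" pattern. A generic sketch of that pattern (not the BitBake implementation; names are illustrative):

import os

def finalize_download(tmp_path):
    """Promote <file>.tmp to <file> only if it looks like a complete download."""
    if not os.path.exists(tmp_path):
        raise RuntimeError("fetch reported success but %s does not exist" % tmp_path)
    if os.path.getsize(tmp_path) == 0:
        os.remove(tmp_path)
        raise RuntimeError("fetch produced a zero-size file: %s" % tmp_path)
    # rename is atomic on the same filesystem, so readers never see a partial file
    os.rename(tmp_path, tmp_path[:-len(".tmp")])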
@@ -234,12 +240,7 @@ class Wget(FetchMethod):
|
||||
fetch.connection_cache.remove_connection(h.host, h.port)
|
||||
raise urllib.error.URLError(err)
|
||||
else:
|
||||
try:
|
||||
r = h.getresponse()
|
||||
except TimeoutError as e:
|
||||
if fetch.connection_cache:
|
||||
fetch.connection_cache.remove_connection(h.host, h.port)
|
||||
raise TimeoutError(e)
|
||||
r = h.getresponse()
|
||||
|
||||
# Pick apart the HTTPResponse object to get the addinfourl
|
||||
# object initialized properly.
|
||||
@@ -300,45 +301,13 @@ class Wget(FetchMethod):
|
||||
|
||||
class FixedHTTPRedirectHandler(urllib.request.HTTPRedirectHandler):
|
||||
"""
|
||||
urllib2.HTTPRedirectHandler before 3.13 has two flaws:
|
||||
|
||||
It resets the method to GET on redirect when we want to follow
|
||||
redirects using the original method (typically HEAD). This was fixed
|
||||
in 759e8e7.
|
||||
|
||||
It also doesn't handle 308 (Permanent Redirect). This was fixed in
|
||||
c379bc5.
|
||||
|
||||
Until we depend on Python 3.13 onwards, copy the redirect_request
|
||||
method to fix these issues.
|
||||
urllib2.HTTPRedirectHandler resets the method to GET on redirect,
|
||||
when we want to follow redirects using the original method.
|
||||
"""
|
||||
def redirect_request(self, req, fp, code, msg, headers, newurl):
|
||||
m = req.get_method()
|
||||
if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
|
||||
or code in (301, 302, 303) and m == "POST")):
|
||||
raise urllib.HTTPError(req.full_url, code, msg, headers, fp)
|
||||
|
||||
# Strictly (according to RFC 2616), 301 or 302 in response to
|
||||
# a POST MUST NOT cause a redirection without confirmation
|
||||
# from the user (of urllib.request, in this case). In practice,
|
||||
# essentially all clients do redirect in this case, so we do
|
||||
# the same.
|
||||
|
||||
# Be conciliant with URIs containing a space. This is mainly
|
||||
# redundant with the more complete encoding done in http_error_302(),
|
||||
# but it is kept for compatibility with other callers.
|
||||
newurl = newurl.replace(' ', '%20')
|
||||
|
||||
CONTENT_HEADERS = ("content-length", "content-type")
|
||||
newheaders = {k: v for k, v in req.headers.items()
|
||||
if k.lower() not in CONTENT_HEADERS}
|
||||
return urllib.request.Request(newurl,
|
||||
method="HEAD" if m == "HEAD" else "GET",
|
||||
headers=newheaders,
|
||||
origin_req_host=req.origin_req_host,
|
||||
unverifiable=True)
|
||||
|
||||
http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302
|
||||
newreq = urllib.request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, newurl)
|
||||
newreq.get_method = req.get_method
|
||||
return newreq
|
||||
|
||||
# We need to update the environment here as both the proxy and HTTPS
|
||||
# handlers need variables set. The proxy needs http_proxy and friends to
|
||||
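For reference, a custom redirect handler like the one above is wired into urllib by passing it to build_opener; the sketch below shows the minimal plumbing with standard-library calls only, and deliberately simplifies the behaviour:

import urllib.request

class HeadPreservingRedirectHandler(urllib.request.HTTPRedirectHandler):
    # Keep HEAD requests as HEAD across redirects instead of falling back to GET.
    def redirect_request(self, req, fp, code, msg, headers, newurl):
        new = super().redirect_request(req, fp, code, msg, headers, newurl)
        if new is not None and req.get_method() == "HEAD":
            new.get_method = lambda: "HEAD"
        return new

opener = urllib.request.build_opener(HeadPreservingRedirectHandler)
request = urllib.request.Request("https://example.com/", method="HEAD")
# opener.open(request) would now follow redirects while still issuing HEAD requests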
@@ -371,14 +340,14 @@ class Wget(FetchMethod):
|
||||
opener = urllib.request.build_opener(*handlers)
|
||||
|
||||
try:
|
||||
parts = urllib.parse.urlparse(ud.url.split(";")[0])
|
||||
uri = "{}://{}{}".format(parts.scheme, parts.netloc, parts.path)
|
||||
uri_base = ud.url.split(";")[0]
|
||||
uri = "{}://{}{}".format(urllib.parse.urlparse(uri_base).scheme, ud.host, ud.path)
|
||||
r = urllib.request.Request(uri)
|
||||
r.get_method = lambda: "HEAD"
|
||||
# Some servers (FusionForge, as used on Alioth) require that the
|
||||
# optional Accept header is set.
|
||||
r.add_header("Accept", "*/*")
|
||||
r.add_header("User-Agent", "bitbake/{}".format(bb.__version__))
|
||||
r.add_header("User-Agent", self.user_agent)
|
||||
def add_basic_auth(login_str, request):
|
||||
'''Adds Basic auth to http request, pass in login:password as string'''
|
||||
import base64
|
||||
@@ -398,7 +367,7 @@ class Wget(FetchMethod):
|
||||
except (FileNotFoundError, netrc.NetrcParseError):
|
||||
pass
|
||||
|
||||
with opener.open(r, timeout=100) as response:
|
||||
with opener.open(r, timeout=30) as response:
|
||||
pass
|
||||
except (urllib.error.URLError, ConnectionResetError, TimeoutError) as e:
|
||||
if try_again:
|
||||
@@ -406,7 +375,7 @@ class Wget(FetchMethod):
|
||||
return self.checkstatus(fetch, ud, d, False)
|
||||
else:
|
||||
# debug for now to avoid spamming the logs in e.g. remote sstate searches
|
||||
logger.debug2("checkstatus() urlopen failed for %s: %s" % (uri,e))
|
||||
logger.debug2("checkstatus() urlopen failed: %s" % e)
|
||||
return False
|
||||
|
||||
return True
|
||||
@@ -485,7 +454,7 @@ class Wget(FetchMethod):
|
||||
f = tempfile.NamedTemporaryFile()
|
||||
with tempfile.TemporaryDirectory(prefix="wget-index-") as workdir, tempfile.NamedTemporaryFile(dir=workdir, prefix="wget-listing-") as f:
|
||||
fetchcmd = self.basecmd
|
||||
fetchcmd += " --output-document=%s '%s'" % (f.name, uri)
|
||||
fetchcmd += " -O " + f.name + " --user-agent='" + self.user_agent + "' '" + uri + "'"
|
||||
try:
|
||||
self._runwget(ud, d, fetchcmd, True, workdir=workdir)
|
||||
fetchresult = f.read()
|
||||
@@ -645,17 +614,13 @@ class Wget(FetchMethod):
|
||||
|
||||
sanity check to ensure same name and type.
|
||||
"""
|
||||
if 'downloadfilename' in ud.parm:
|
||||
package = ud.parm['downloadfilename']
|
||||
else:
|
||||
package = ud.path.split("/")[-1]
|
||||
package = ud.path.split("/")[-1]
|
||||
current_version = ['', d.getVar('PV'), '']
|
||||
|
||||
"""possible to have no version in pkg name, such as spectrum-fw"""
|
||||
if not re.search(r"\d+", package):
|
||||
current_version[1] = re.sub('_', '.', current_version[1])
|
||||
current_version[1] = re.sub('-', '.', current_version[1])
|
||||
bb.debug(3, "latest_versionstring: no version found in %s" % package)
|
||||
return (current_version[1], '')
|
||||
|
||||
package_regex = self._init_regexes(package, ud, d)
|
||||
|
||||
@@ -217,9 +217,7 @@ def create_bitbake_parser():
|
||||
"execution. The SIGNATURE_HANDLER parameter is passed to the "
|
||||
"handler. Two common values are none and printdiff but the handler "
|
||||
"may define more/less. none means only dump the signature, printdiff"
|
||||
" means recursively compare the dumped signature with the most recent"
|
||||
" one in a local build or sstate cache (can be used to find out why tasks re-run"
|
||||
" when that is not expected)")
|
||||
" means compare the dumped signature with the cached one.")
|
||||
|
||||
exec_group.add_argument("--revisions-changed", action="store_true",
|
||||
help="Set the exit code depending on whether upstream floating "
|
||||
|
||||
@@ -234,10 +234,9 @@ class diskMonitor:
|
||||
freeInode = st.f_favail
|
||||
|
||||
if minInode and freeInode < minInode:
|
||||
# Some filesystems use dynamic inodes so can't run out.
|
||||
# This is reported by the inode count being 0 (btrfs) or the free
|
||||
# inode count being -1 (cephfs).
|
||||
if st.f_files == 0 or st.f_favail == -1:
|
||||
# Some filesystems use dynamic inodes so can't run out
|
||||
# (e.g. btrfs). This is reported by the inode count being 0.
|
||||
if st.f_files == 0:
|
||||
self.devDict[k][2] = None
|
||||
continue
|
||||
# Always show warning, the self.checked would always be False if the action is WARN
|
||||
|
||||
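The inode guard above can be exercised directly with os.statvfs; a small hedged sketch of the same check (threshold value made up):

import os

def low_on_inodes(path, min_free_inodes=10000):
    st = os.statvfs(path)
    # Dynamic-inode filesystems report 0 total inodes (btrfs) or -1 free (cephfs);
    # they cannot run out, so never flag them.
    if st.f_files == 0 or st.f_favail == -1:
        return False
    return st.f_favail < min_free_inodes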
@@ -89,6 +89,10 @@ class BBLogFormatter(logging.Formatter):
|
||||
msg = logging.Formatter.format(self, record)
|
||||
if hasattr(record, 'bb_exc_formatted'):
|
||||
msg += '\n' + ''.join(record.bb_exc_formatted)
|
||||
elif hasattr(record, 'bb_exc_info'):
|
||||
etype, value, tb = record.bb_exc_info
|
||||
formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
|
||||
msg += '\n' + ''.join(formatted)
|
||||
return msg
|
||||
|
||||
def colorize(self, record):
|
||||
@@ -226,7 +230,7 @@ def logger_create(name, output=sys.stderr, level=logging.INFO, preserve_handlers
|
||||
console = logging.StreamHandler(output)
|
||||
console.addFilter(bb.msg.LogFilterShowOnce())
|
||||
format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
|
||||
if color == 'always' or (color == 'auto' and output.isatty() and os.environ.get('NO_COLOR', '') == ''):
|
||||
if color == 'always' or (color == 'auto' and output.isatty()):
|
||||
format.enable_color()
|
||||
console.setFormatter(format)
|
||||
if preserve_handlers:
|
||||
|
||||
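The colour decision in the newer logger_create() above honours the NO_COLOR convention; reduced to a helper it looks roughly like this (sketch, function name invented):

import os
import sys

def want_color(mode, stream=sys.stderr):
    if mode == "always":
        return True
    return (mode == "auto"
            and stream.isatty()
            and os.environ.get("NO_COLOR", "") == "")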
@@ -49,32 +49,20 @@ class SkipPackage(SkipRecipe):
|
||||
__mtime_cache = {}
|
||||
def cached_mtime(f):
|
||||
if f not in __mtime_cache:
|
||||
res = os.stat(f)
|
||||
__mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
|
||||
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
|
||||
return __mtime_cache[f]
|
||||
|
||||
def cached_mtime_noerror(f):
|
||||
if f not in __mtime_cache:
|
||||
try:
|
||||
res = os.stat(f)
|
||||
__mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
|
||||
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
|
||||
except OSError:
|
||||
return 0
|
||||
return __mtime_cache[f]
|
||||
|
||||
def check_mtime(f, mtime):
|
||||
try:
|
||||
res = os.stat(f)
|
||||
current_mtime = (res.st_mtime_ns, res.st_size, res.st_ino)
|
||||
__mtime_cache[f] = current_mtime
|
||||
except OSError:
|
||||
current_mtime = 0
|
||||
return current_mtime == mtime
|
||||
|
||||
def update_mtime(f):
|
||||
try:
|
||||
res = os.stat(f)
|
||||
__mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
|
||||
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
|
||||
except OSError:
|
||||
if f in __mtime_cache:
|
||||
del __mtime_cache[f]
|
||||
|
||||
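The newer cache above keys file state on an (mtime_ns, size, inode) triple rather than the modification time alone, which also catches edits that preserve the timestamp. A self-contained sketch of that comparison:

import os

_state_cache = {}

def file_changed(path):
    """Return True if the file differs from the last recorded (mtime_ns, size, inode) state."""
    try:
        st = os.stat(path)
        current = (st.st_mtime_ns, st.st_size, st.st_ino)
    except OSError:
        current = 0
    changed = _state_cache.get(path) != current
    _state_cache[path] = current
    return changed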
@@ -43,21 +43,6 @@ class IncludeNode(AstNode):
|
||||
else:
|
||||
bb.parse.ConfHandler.include(self.filename, s, self.lineno, data, False)
|
||||
|
||||
class IncludeAllNode(AstNode):
|
||||
def __init__(self, filename, lineno, what_file):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
self.what_file = what_file
|
||||
|
||||
def eval(self, data):
|
||||
"""
|
||||
Include the file and evaluate the statements
|
||||
"""
|
||||
s = data.expand(self.what_file)
|
||||
logger.debug2("CONF %s:%s: including %s", self.filename, self.lineno, s)
|
||||
|
||||
for path in data.getVar("BBPATH").split(":"):
|
||||
bb.parse.ConfHandler.include(self.filename, os.path.join(path, s), self.lineno, data, False)
|
||||
|
||||
class ExportNode(AstNode):
|
||||
def __init__(self, filename, lineno, var):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
@@ -152,10 +137,7 @@ class DataNode(AstNode):
|
||||
|
||||
flag = None
|
||||
if 'flag' in groupd and groupd['flag'] is not None:
|
||||
if groupd["lazyques"]:
|
||||
flag = "_defaultval_flag_"+groupd['flag']
|
||||
else:
|
||||
flag = groupd['flag']
|
||||
flag = groupd['flag']
|
||||
elif groupd["lazyques"]:
|
||||
flag = "_defaultval"
|
||||
|
||||
@@ -229,12 +211,10 @@ class ExportFuncsNode(AstNode):
|
||||
|
||||
def eval(self, data):
|
||||
|
||||
sentinel = " # Export function set\n"
|
||||
for func in self.n:
|
||||
calledfunc = self.classname + "_" + func
|
||||
|
||||
basevar = data.getVar(func, False)
|
||||
if basevar and sentinel not in basevar:
|
||||
if data.getVar(func, False) and not data.getVarFlag(func, 'export_func', False):
|
||||
continue
|
||||
|
||||
if data.getVar(func, False):
|
||||
@@ -251,23 +231,22 @@ class ExportFuncsNode(AstNode):
|
||||
data.setVarFlag(func, "lineno", 1)
|
||||
|
||||
if data.getVarFlag(calledfunc, "python", False):
|
||||
data.setVar(func, sentinel + " bb.build.exec_func('" + calledfunc + "', d)\n", parsing=True)
|
||||
data.setVar(func, " bb.build.exec_func('" + calledfunc + "', d)\n", parsing=True)
|
||||
else:
|
||||
if "-" in self.classname:
|
||||
bb.fatal("The classname %s contains a dash character and is calling an sh function %s using EXPORT_FUNCTIONS. Since a dash is illegal in sh function names, this cannot work, please rename the class or don't use EXPORT_FUNCTIONS." % (self.classname, calledfunc))
|
||||
data.setVar(func, sentinel + " " + calledfunc + "\n", parsing=True)
|
||||
data.setVar(func, " " + calledfunc + "\n", parsing=True)
|
||||
data.setVarFlag(func, 'export_func', '1')
|
||||
|
||||
class AddTaskNode(AstNode):
|
||||
def __init__(self, filename, lineno, tasks, before, after):
|
||||
def __init__(self, filename, lineno, func, before, after):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
self.tasks = tasks
|
||||
self.func = func
|
||||
self.before = before
|
||||
self.after = after
|
||||
|
||||
def eval(self, data):
|
||||
tasks = self.tasks.split()
|
||||
for task in tasks:
|
||||
bb.build.addtask(task, self.before, self.after, data)
|
||||
bb.build.addtask(self.func, self.before, self.after, data)
|
||||
|
||||
class DelTaskNode(AstNode):
|
||||
def __init__(self, filename, lineno, tasks):
|
||||
@@ -334,59 +313,9 @@ class InheritNode(AstNode):
|
||||
def eval(self, data):
|
||||
bb.parse.BBHandler.inherit(self.classes, self.filename, self.lineno, data)
|
||||
|
||||
class InheritDeferredNode(AstNode):
|
||||
def __init__(self, filename, lineno, classes):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
self.inherit = (classes, filename, lineno)
|
||||
|
||||
def eval(self, data):
|
||||
inherits = data.getVar('__BBDEFINHERITS', False) or []
|
||||
inherits.append(self.inherit)
|
||||
data.setVar('__BBDEFINHERITS', inherits)
|
||||
|
||||
class AddFragmentsNode(AstNode):
|
||||
def __init__(self, filename, lineno, fragments_path_prefix, fragments_variable, flagged_variables_list_variable):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
self.fragments_path_prefix = fragments_path_prefix
|
||||
self.fragments_variable = fragments_variable
|
||||
self.flagged_variables_list_variable = flagged_variables_list_variable
|
||||
|
||||
def eval(self, data):
|
||||
# No need to use mark_dependency since we would only match a fragment
|
||||
# from a specific layer and there can only be a single layer with a
|
||||
# given namespace.
|
||||
def find_fragment(layers, layerid, full_fragment_name):
|
||||
for layerpath in layers.split():
|
||||
candidate_fragment_path = os.path.join(layerpath, full_fragment_name)
|
||||
if os.path.exists(candidate_fragment_path) and bb.utils.get_file_layer(candidate_fragment_path, data) == layerid:
|
||||
return candidate_fragment_path
|
||||
return None
|
||||
|
||||
fragments = data.getVar(self.fragments_variable)
|
||||
layers = data.getVar('BBLAYERS')
|
||||
flagged_variables = data.getVar(self.flagged_variables_list_variable).split()
|
||||
|
||||
if not fragments:
|
||||
return
|
||||
for f in fragments.split():
|
||||
layerid, fragment_name = f.split('/', 1)
|
||||
full_fragment_name = data.expand("{}/{}.conf".format(self.fragments_path_prefix, fragment_name))
|
||||
fragment_path = find_fragment(layers, layerid, full_fragment_name)
|
||||
if fragment_path:
|
||||
bb.parse.ConfHandler.include(self.filename, fragment_path, self.lineno, data, "include fragment")
|
||||
for flagged_var in flagged_variables:
|
||||
val = data.getVar(flagged_var)
|
||||
data.setVarFlag(flagged_var, f, val)
|
||||
data.setVar(flagged_var, None)
|
||||
else:
|
||||
bb.error("Could not find fragment {} in enabled layers: {}".format(f, layers))
|
||||
|
||||
def handleInclude(statements, filename, lineno, m, force):
|
||||
statements.append(IncludeNode(filename, lineno, m.group(1), force))
|
||||
|
||||
def handleIncludeAll(statements, filename, lineno, m):
|
||||
statements.append(IncludeAllNode(filename, lineno, m.group(1)))
|
||||
|
||||
def handleExport(statements, filename, lineno, m):
|
||||
statements.append(ExportNode(filename, lineno, m.group(1)))
|
||||
|
||||
@@ -408,11 +337,21 @@ def handlePythonMethod(statements, filename, lineno, funcname, modulename, body)
|
||||
def handleExportFuncs(statements, filename, lineno, m, classname):
|
||||
statements.append(ExportFuncsNode(filename, lineno, m.group(1), classname))
|
||||
|
||||
def handleAddTask(statements, filename, lineno, tasks, before, after):
|
||||
statements.append(AddTaskNode(filename, lineno, tasks, before, after))
|
||||
def handleAddTask(statements, filename, lineno, m):
|
||||
func = m.group("func")
|
||||
before = m.group("before")
|
||||
after = m.group("after")
|
||||
if func is None:
|
||||
return
|
||||
|
||||
def handleDelTask(statements, filename, lineno, tasks):
|
||||
statements.append(DelTaskNode(filename, lineno, tasks))
|
||||
statements.append(AddTaskNode(filename, lineno, func, before, after))
|
||||
|
||||
def handleDelTask(statements, filename, lineno, m):
|
||||
func = m.group(1)
|
||||
if func is None:
|
||||
return
|
||||
|
||||
statements.append(DelTaskNode(filename, lineno, func))
|
||||
|
||||
def handleBBHandlers(statements, filename, lineno, m):
|
||||
statements.append(BBHandlerNode(filename, lineno, m.group(1)))
|
||||
@@ -424,46 +363,12 @@ def handleInherit(statements, filename, lineno, m):
|
||||
classes = m.group(1)
|
||||
statements.append(InheritNode(filename, lineno, classes))
|
||||
|
||||
def handleInheritDeferred(statements, filename, lineno, m):
|
||||
classes = m.group(1)
|
||||
statements.append(InheritDeferredNode(filename, lineno, classes))
|
||||
|
||||
def handleAddFragments(statements, filename, lineno, m):
|
||||
fragments_path_prefix = m.group(1)
|
||||
fragments_variable = m.group(2)
|
||||
flagged_variables_list_variable = m.group(3)
|
||||
statements.append(AddFragmentsNode(filename, lineno, fragments_path_prefix, fragments_variable, flagged_variables_list_variable))
|
||||
|
||||
def runAnonFuncs(d):
|
||||
code = []
|
||||
for funcname in d.getVar("__BBANONFUNCS", False) or []:
|
||||
code.append("%s(d)" % funcname)
|
||||
bb.utils.better_exec("\n".join(code), {"d": d})
|
||||
|
||||
# Handle recipe level PREFERRED_PROVIDERs
|
||||
def handleVirtRecipeProviders(tasklist, d):
|
||||
depends = (d.getVar("DEPENDS") or "").split()
|
||||
virtprovs = (d.getVar("BB_RECIPE_VIRTUAL_PROVIDERS") or "").split()
|
||||
newdeps = []
|
||||
for dep in depends:
|
||||
if dep in virtprovs:
|
||||
newdep = d.getVar("PREFERRED_PROVIDER_" + dep)
|
||||
if not newdep:
|
||||
bb.fatal("Error, recipe virtual provider PREFERRED_PROVIDER_%s not set" % dep)
|
||||
newdeps.append(newdep)
|
||||
else:
|
||||
newdeps.append(dep)
|
||||
d.setVar("DEPENDS", " ".join(newdeps))
|
||||
for task in tasklist:
|
||||
taskdeps = (d.getVarFlag(task, "depends") or "").split()
|
||||
remapped = []
|
||||
for entry in taskdeps:
|
||||
r, t = entry.split(":")
|
||||
if r in virtprovs:
|
||||
r = d.getVar("PREFERRED_PROVIDER_" + r)
|
||||
remapped.append("%s:%s" % (r, t))
|
||||
d.setVarFlag(task, "depends", " ".join(remapped))
|
||||
|
||||
def finalize(fn, d, variant = None):
|
||||
saved_handlers = bb.event.get_handlers().copy()
|
||||
try:
|
||||
@@ -489,7 +394,6 @@ def finalize(fn, d, variant = None):
|
||||
|
||||
tasklist = d.getVar('__BBTASKS', False) or []
|
||||
bb.event.fire(bb.event.RecipeTaskPreProcess(fn, list(tasklist)), d)
|
||||
handleVirtRecipeProviders(tasklist, d)
|
||||
bb.build.add_tasks(tasklist, d)
|
||||
|
||||
bb.parse.siggen.finalise(fn, d, variant)
|
||||
@@ -525,14 +429,6 @@ def multi_finalize(fn, d):
|
||||
logger.debug("Appending .bbappend file %s to %s", append, fn)
|
||||
bb.parse.BBHandler.handle(append, d, True)
|
||||
|
||||
while True:
|
||||
inherits = d.getVar('__BBDEFINHERITS', False) or []
|
||||
if not inherits:
|
||||
break
|
||||
inherit, filename, lineno = inherits.pop(0)
|
||||
d.setVar('__BBDEFINHERITS', inherits)
|
||||
bb.parse.BBHandler.inherit(inherit, filename, lineno, d, deferred=True)
|
||||
|
||||
onlyfinalise = d.getVar("__ONLYFINALISE", False)
|
||||
|
||||
safe_d = d
|
||||
|
||||
@@ -21,10 +21,9 @@ from .ConfHandler import include, init
|
||||
|
||||
__func_start_regexp__ = re.compile(r"(((?P<py>python(?=(\s|\()))|(?P<fr>fakeroot(?=\s)))\s*)*(?P<func>[\w\.\-\+\{\}\$:]+)?\s*\(\s*\)\s*{$" )
|
||||
__inherit_regexp__ = re.compile(r"inherit\s+(.+)" )
|
||||
__inherit_def_regexp__ = re.compile(r"inherit_defer\s+(.+)" )
|
||||
__export_func_regexp__ = re.compile(r"EXPORT_FUNCTIONS\s+(.+)" )
|
||||
__addtask_regexp__ = re.compile(r"addtask\s+([^#\n]+)(?P<comment>#.*|.*?)")
|
||||
__deltask_regexp__ = re.compile(r"deltask\s+([^#\n]+)(?P<comment>#.*|.*?)")
|
||||
__addtask_regexp__ = re.compile(r"addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
|
||||
__deltask_regexp__ = re.compile(r"deltask\s+(.+)")
|
||||
__addhandler_regexp__ = re.compile(r"addhandler\s+(.+)" )
|
||||
__def_regexp__ = re.compile(r"def\s+(\w+).*:" )
|
||||
__python_func_regexp__ = re.compile(r"(\s+.*)|(^$)|(^#)" )
|
||||
@@ -34,7 +33,6 @@ __infunc__ = []
|
||||
__inpython__ = False
|
||||
__body__ = []
|
||||
__classname__ = ""
|
||||
__residue__ = []
|
||||
|
||||
cached_statements = {}
|
||||
|
||||
@@ -42,10 +40,8 @@ def supports(fn, d):
|
||||
"""Return True if fn has a supported extension"""
|
||||
return os.path.splitext(fn)[-1] in [".bb", ".bbclass", ".inc"]
|
||||
|
||||
def inherit(files, fn, lineno, d, deferred=False):
|
||||
def inherit(files, fn, lineno, d):
|
||||
__inherit_cache = d.getVar('__inherit_cache', False) or []
|
||||
#if "${" in files and not deferred:
|
||||
# bb.warn("%s:%s has non deferred conditional inherit" % (fn, lineno))
|
||||
files = d.expand(files).split()
|
||||
for file in files:
|
||||
classtype = d.getVar("__bbclasstype", False)
|
||||
@@ -81,7 +77,7 @@ def inherit(files, fn, lineno, d, deferred=False):
|
||||
__inherit_cache = d.getVar('__inherit_cache', False) or []
|
||||
|
||||
def get_statements(filename, absolute_filename, base_name):
|
||||
global cached_statements, __residue__, __body__
|
||||
global cached_statements
|
||||
|
||||
try:
|
||||
return cached_statements[absolute_filename]
|
||||
@@ -101,11 +97,6 @@ def get_statements(filename, absolute_filename, base_name):
|
||||
# add a blank line to close out any python definition
|
||||
feeder(lineno, "", filename, base_name, statements, eof=True)
|
||||
|
||||
if __residue__:
|
||||
raise ParseError("Unparsed lines %s: %s" % (filename, str(__residue__)), filename, lineno)
|
||||
if __body__:
|
||||
raise ParseError("Unparsed lines from unclosed function %s: %s" % (filename, str(__body__)), filename, lineno)
|
||||
|
||||
if filename.endswith(".bbclass") or filename.endswith(".inc"):
|
||||
cached_statements[absolute_filename] = statements
|
||||
return statements
|
||||
@@ -239,38 +230,29 @@ def feeder(lineno, s, fn, root, statements, eof=False):
|
||||
|
||||
m = __addtask_regexp__.match(s)
|
||||
if m:
|
||||
after = ""
|
||||
before = ""
|
||||
if len(m.group().split()) == 2:
|
||||
# Check and warn for "addtask task1 task2"
|
||||
m2 = re.match(r"addtask\s+(?P<func>\w+)(?P<ignores>.*)", s)
|
||||
if m2 and m2.group('ignores'):
|
||||
logger.warning('addtask ignored: "%s"' % m2.group('ignores'))
|
||||
|
||||
# This code splits on 'before' and 'after' instead of on whitespace so we can defer
|
||||
# evaluation to as late as possible.
|
||||
tasks = m.group(1).split(" before ")[0].split(" after ")[0]
|
||||
|
||||
for exp in m.group(1).split(" before "):
|
||||
exp2 = exp.split(" after ")
|
||||
if len(exp2) > 1:
|
||||
after = after + " ".join(exp2[1:])
|
||||
|
||||
for exp in m.group(1).split(" after "):
|
||||
exp2 = exp.split(" before ")
|
||||
if len(exp2) > 1:
|
||||
before = before + " ".join(exp2[1:])
|
||||
|
||||
# Check and warn for having task with a keyword as part of task name
|
||||
# Check and warn for "addtask task1 before task2 before task3", the
|
||||
# similar to "after"
|
||||
taskexpression = s.split()
|
||||
for word in ('before', 'after'):
|
||||
if taskexpression.count(word) > 1:
|
||||
logger.warning("addtask contained multiple '%s' keywords, only one is supported" % word)
|
||||
|
||||
# Check and warn for having task with exprssion as part of task name
|
||||
for te in taskexpression:
|
||||
if any( ( "%s_" % keyword ) in te for keyword in bb.data_smart.__setvar_keyword__ ):
|
||||
raise ParseError("Task name '%s' contains a keyword which is not recommended/supported.\nPlease rename the task not to include the keyword.\n%s" % (te, ("\n".join(map(str, bb.data_smart.__setvar_keyword__)))), fn)
|
||||
|
||||
if tasks is not None:
|
||||
ast.handleAddTask(statements, fn, lineno, tasks, before, after)
|
||||
ast.handleAddTask(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
m = __deltask_regexp__.match(s)
|
||||
if m:
|
||||
task = m.group(1)
|
||||
if task is not None:
|
||||
ast.handleDelTask(statements, fn, lineno, task)
|
||||
ast.handleDelTask(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
m = __addhandler_regexp__.match(s)
|
||||
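The newer addtask handling above defers evaluation by splitting the statement text on the literal ' before ' and ' after ' keywords instead of matching task names eagerly. A compact restatement of that split (sketch only):

def parse_addtask(arg):
    """Split 'do_foo before do_a after do_b' into (tasks, before, after)."""
    tasks = arg.split(" before ")[0].split(" after ")[0]
    after = ""
    before = ""
    for exp in arg.split(" before "):
        parts = exp.split(" after ")
        if len(parts) > 1:
            after += " ".join(parts[1:])
    for exp in arg.split(" after "):
        parts = exp.split(" before ")
        if len(parts) > 1:
            before += " ".join(parts[1:])
    return tasks, before, after

assert parse_addtask("do_foo before do_a after do_b") == ("do_foo", "do_a", "do_b")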
@@ -283,11 +265,6 @@ def feeder(lineno, s, fn, root, statements, eof=False):
|
||||
ast.handleInherit(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
m = __inherit_def_regexp__.match(s)
|
||||
if m:
|
||||
ast.handleInheritDeferred(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
return ConfHandler.feeder(lineno, s, fn, statements, conffile=False)
|
||||
|
||||
# Add us to the handlers list
|
||||
|
||||
@@ -20,8 +20,8 @@ from bb.parse import ParseError, resolve_file, ast, logger, handle
|
||||
__config_regexp__ = re.compile( r"""
|
||||
^
|
||||
(?P<exp>export\s+)?
|
||||
(?P<var>[a-zA-Z0-9\-_+.${}/~:]*?)
|
||||
(\[(?P<flag>[a-zA-Z0-9\-_+.][a-zA-Z0-9\-_+.@/]*)\])?
|
||||
(?P<var>[a-zA-Z0-9\-_+.${}/~:]+?)
|
||||
(\[(?P<flag>[a-zA-Z0-9\-_+.][a-zA-Z0-9\-_+.@]*)\])?
|
||||
|
||||
\s* (
|
||||
(?P<colon>:=) |
|
||||
@@ -43,12 +43,10 @@ __config_regexp__ = re.compile( r"""
|
||||
""", re.X)
|
||||
__include_regexp__ = re.compile( r"include\s+(.+)" )
|
||||
__require_regexp__ = re.compile( r"require\s+(.+)" )
|
||||
__includeall_regexp__ = re.compile( r"include_all\s+(.+)" )
|
||||
__export_regexp__ = re.compile( r"export\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
|
||||
__unset_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
|
||||
__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)\[([a-zA-Z0-9\-_+.][a-zA-Z0-9\-_+.@]+)\]$" )
|
||||
__addpylib_regexp__ = re.compile(r"addpylib\s+(.+)\s+(.+)" )
|
||||
__addfragments_regexp__ = re.compile(r"addfragments\s+(.+)\s+(.+)\s+(.+)" )
|
||||
|
||||
def init(data):
|
||||
return
|
||||
@@ -166,8 +164,6 @@ def feeder(lineno, s, fn, statements, baseconfig=False, conffile=True):
|
||||
m = __config_regexp__.match(s)
|
||||
if m:
|
||||
groupd = m.groupdict()
|
||||
if groupd['var'] == "":
|
||||
raise ParseError("Empty variable name in assignment: '%s'" % s, fn, lineno);
|
||||
ast.handleData(statements, fn, lineno, groupd)
|
||||
return
|
||||
|
||||
@@ -181,11 +177,6 @@ def feeder(lineno, s, fn, statements, baseconfig=False, conffile=True):
|
||||
ast.handleInclude(statements, fn, lineno, m, True)
|
||||
return
|
||||
|
||||
m = __includeall_regexp__.match(s)
|
||||
if m:
|
||||
ast.handleIncludeAll(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
m = __export_regexp__.match(s)
|
||||
if m:
|
||||
ast.handleExport(statements, fn, lineno, m)
|
||||
@@ -206,11 +197,6 @@ def feeder(lineno, s, fn, statements, baseconfig=False, conffile=True):
|
||||
ast.handlePyLib(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
m = __addfragments_regexp__.match(s)
|
||||
if m:
|
||||
ast.handleAddFragments(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
raise ParseError("unparsed line: '%s'" % s, fn, lineno);
|
||||
|
||||
# Add us to the handlers list
|
||||
|
||||
bitbake/lib/bb/persist_data.py (new file, 271 lines)
@@ -0,0 +1,271 @@
|
||||
"""BitBake Persistent Data Store
|
||||
|
||||
Used to store data in a central location such that other threads/tasks can
|
||||
access them at some future date. Acts as a convenience wrapper around sqlite,
|
||||
currently, providing a key/value store accessed by 'domain'.
|
||||
"""
|
||||
|
||||
# Copyright (C) 2007 Richard Purdie
|
||||
# Copyright (C) 2010 Chris Larson <chris_larson@mentor.com>
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import collections
|
||||
import collections.abc
|
||||
import contextlib
|
||||
import functools
|
||||
import logging
|
||||
import os.path
|
||||
import sqlite3
|
||||
import sys
|
||||
from collections.abc import Mapping
|
||||
|
||||
sqlversion = sqlite3.sqlite_version_info
|
||||
if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
|
||||
raise Exception("sqlite3 version 3.3.0 or later is required.")
|
||||
|
||||
|
||||
logger = logging.getLogger("BitBake.PersistData")
|
||||
|
||||
@functools.total_ordering
|
||||
class SQLTable(collections.abc.MutableMapping):
|
||||
class _Decorators(object):
|
||||
@staticmethod
|
||||
def retry(*, reconnect=True):
|
||||
"""
|
||||
Decorator that restarts a function if a database locked sqlite
|
||||
exception occurs. If reconnect is True, the database connection
|
||||
will be closed and reopened each time a failure occurs
|
||||
"""
|
||||
def retry_wrapper(f):
|
||||
def wrap_func(self, *args, **kwargs):
|
||||
# Reconnect if necessary
|
||||
if self.connection is None and reconnect:
|
||||
self.reconnect()
|
||||
|
||||
count = 0
|
||||
while True:
|
||||
try:
|
||||
return f(self, *args, **kwargs)
|
||||
except sqlite3.OperationalError as exc:
|
||||
if count < 500 and ('is locked' in str(exc) or 'locking protocol' in str(exc)):
|
||||
count = count + 1
|
||||
if reconnect:
|
||||
self.reconnect()
|
||||
continue
|
||||
raise
|
||||
return wrap_func
|
||||
return retry_wrapper
|
||||
|
||||
@staticmethod
|
||||
def transaction(f):
|
||||
"""
|
||||
Decorator that starts a database transaction and creates a database
|
||||
cursor for performing queries. If no exception is thrown, the
|
||||
database results are committed. If an exception occurs, the database
|
||||
is rolled back. In all cases, the cursor is closed after the
|
||||
function ends.
|
||||
|
||||
Note that the cursor is passed as an extra argument to the function
|
||||
after `self` and before any of the normal arguments
|
||||
"""
|
||||
def wrap_func(self, *args, **kwargs):
|
||||
# Context manager will COMMIT the database on success,
|
||||
# or ROLLBACK on an exception
|
||||
with self.connection:
|
||||
# Automatically close the cursor when done
|
||||
with contextlib.closing(self.connection.cursor()) as cursor:
|
||||
return f(self, cursor, *args, **kwargs)
|
||||
return wrap_func
|
||||
|
||||
"""Object representing a table/domain in the database"""
|
||||
def __init__(self, cachefile, table):
|
||||
self.cachefile = cachefile
|
||||
self.table = table
|
||||
|
||||
self.connection = None
|
||||
self._execute_single("CREATE TABLE IF NOT EXISTS %s(key TEXT PRIMARY KEY NOT NULL, value TEXT);" % table)
|
||||
|
||||
@_Decorators.retry(reconnect=False)
|
||||
@_Decorators.transaction
|
||||
def _setup_database(self, cursor):
|
||||
cursor.execute("pragma synchronous = off;")
|
||||
# Enable WAL and keep the autocheckpoint length small (the default is
|
||||
# usually 1000). Persistent caches are usually read-mostly, so keeping
|
||||
# this short will keep readers running quickly
|
||||
cursor.execute("pragma journal_mode = WAL;")
|
||||
cursor.execute("pragma wal_autocheckpoint = 100;")
|
||||
|
||||
def reconnect(self):
|
||||
if self.connection is not None:
|
||||
self.connection.close()
|
||||
self.connection = sqlite3.connect(self.cachefile, timeout=5)
|
||||
self.connection.text_factory = str
|
||||
self._setup_database()
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def _execute_single(self, cursor, *query):
|
||||
"""
|
||||
Executes a single query and discards the results. This correctly closes
|
||||
the database cursor when finished
|
||||
"""
|
||||
cursor.execute(*query)
|
||||
|
||||
@_Decorators.retry()
|
||||
def _row_iter(self, f, *query):
|
||||
"""
|
||||
Helper function that returns a row iterator. Each time __next__ is
|
||||
called on the iterator, the provided function is evaluated to determine
|
||||
the return value
|
||||
"""
|
||||
class CursorIter(object):
|
||||
def __init__(self, cursor):
|
||||
self.cursor = cursor
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
row = self.cursor.fetchone()
|
||||
if row is None:
|
||||
self.cursor.close()
|
||||
raise StopIteration
|
||||
return f(row)
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, typ, value, traceback):
|
||||
self.cursor.close()
|
||||
return False
|
||||
|
||||
cursor = self.connection.cursor()
|
||||
try:
|
||||
cursor.execute(*query)
|
||||
return CursorIter(cursor)
|
||||
except:
|
||||
cursor.close()
|
||||
|
||||
def __enter__(self):
|
||||
self.connection.__enter__()
|
||||
return self
|
||||
|
||||
def __exit__(self, *excinfo):
|
||||
self.connection.__exit__(*excinfo)
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def __getitem__(self, cursor, key):
|
||||
cursor.execute("SELECT * from %s where key=?;" % self.table, [key])
|
||||
row = cursor.fetchone()
|
||||
if row is not None:
|
||||
return row[1]
|
||||
raise KeyError(key)
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def __delitem__(self, cursor, key):
|
||||
if key not in self:
|
||||
raise KeyError(key)
|
||||
cursor.execute("DELETE from %s where key=?;" % self.table, [key])
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def __setitem__(self, cursor, key, value):
|
||||
if not isinstance(key, str):
|
||||
raise TypeError('Only string keys are supported')
|
||||
elif not isinstance(value, str):
|
||||
raise TypeError('Only string values are supported')
|
||||
|
||||
# Ensure the entire transaction (including SELECT) executes under write lock
|
||||
cursor.execute("BEGIN EXCLUSIVE")
|
||||
|
||||
cursor.execute("SELECT * from %s where key=?;" % self.table, [key])
|
||||
row = cursor.fetchone()
|
||||
if row is not None:
|
||||
cursor.execute("UPDATE %s SET value=? WHERE key=?;" % self.table, [value, key])
|
||||
else:
|
||||
cursor.execute("INSERT into %s(key, value) values (?, ?);" % self.table, [key, value])
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def __contains__(self, cursor, key):
|
||||
cursor.execute('SELECT * from %s where key=?;' % self.table, [key])
|
||||
return cursor.fetchone() is not None
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def __len__(self, cursor):
|
||||
cursor.execute("SELECT COUNT(key) FROM %s;" % self.table)
|
||||
row = cursor.fetchone()
|
||||
if row is not None:
|
||||
return row[0]
|
||||
|
||||
def __iter__(self):
|
||||
return self._row_iter(lambda row: row[0], "SELECT key from %s;" % self.table)
|
||||
|
||||
def __lt__(self, other):
|
||||
if not isinstance(other, Mapping):
|
||||
raise NotImplementedError()
|
||||
|
||||
return len(self) < len(other)
|
||||
|
||||
def get_by_pattern(self, pattern):
|
||||
return self._row_iter(lambda row: row[1], "SELECT * FROM %s WHERE key LIKE ?;" %
|
||||
self.table, [pattern])
|
||||
|
||||
def values(self):
|
||||
return list(self.itervalues())
|
||||
|
||||
def itervalues(self):
|
||||
return self._row_iter(lambda row: row[0], "SELECT value FROM %s;" %
|
||||
self.table)
|
||||
|
||||
def items(self):
|
||||
return list(self.iteritems())
|
||||
|
||||
def iteritems(self):
|
||||
return self._row_iter(lambda row: (row[0], row[1]), "SELECT * FROM %s;" %
|
||||
self.table)
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def clear(self, cursor):
|
||||
cursor.execute("DELETE FROM %s;" % self.table)
|
||||
|
||||
def has_key(self, key):
|
||||
return key in self
|
||||
|
||||
def persist(domain, d):
|
||||
"""Convenience factory for SQLTable objects based upon metadata"""
|
||||
import bb.utils
|
||||
cachedir = (d.getVar("PERSISTENT_DIR") or
|
||||
d.getVar("CACHE"))
|
||||
if not cachedir:
|
||||
logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
|
||||
sys.exit(1)
|
||||
|
||||
bb.utils.mkdirhier(cachedir)
|
||||
cachefile = os.path.join(cachedir, "bb_persist_data.sqlite3")
|
||||
|
||||
try:
|
||||
return SQLTable(cachefile, domain)
|
||||
except sqlite3.OperationalError:
|
||||
# Sqlite fails to open database when its path is too long.
|
||||
# After testing, 504 is the biggest path length that can be opened by
|
||||
# sqlite.
|
||||
# Note: This code is called before sanity.bbclass and its path length
|
||||
# check
|
||||
max_len = 504
|
||||
if len(cachefile) > max_len:
|
||||
logger.critical("The path of the cache file is too long "
|
||||
"({0} chars > {1}) to be opened by sqlite! "
|
||||
"Your cache file is \"{2}\"".format(
|
||||
len(cachefile),
|
||||
max_len,
|
||||
cachefile))
|
||||
sys.exit(1)
|
||||
else:
|
||||
raise
|
||||
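Since persist_data.py is shown here in full, a short usage sketch may help: persist() hands back an SQLTable that behaves like a string-to-string dict backed by sqlite. The path below is illustrative, and normally the table would come from persist(domain, d) with a datastore that sets PERSISTENT_DIR or CACHE:

import bb.persist_data

table = bb.persist_data.SQLTable("/tmp/bb_persist_data.sqlite3", "MyDomain")
table["last-revision"] = "abc123"        # INSERT or UPDATE inside a write transaction
for value in table.get_by_pattern("last-%"):
    print(value)                         # values whose keys match the SQL LIKE pattern
if "last-revision" in table:
    del table["last-revision"]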
File diff suppressed because it is too large.
@@ -38,13 +38,9 @@ logger = logging.getLogger('BitBake')
|
||||
class ProcessTimeout(SystemExit):
|
||||
pass
|
||||
|
||||
def currenttime():
|
||||
return datetime.datetime.now().strftime('%H:%M:%S.%f')
|
||||
|
||||
def serverlog(msg):
|
||||
print(str(os.getpid()) + " " + currenttime() + " " + msg)
|
||||
#Seems a flush here triggers filesytem sync like behaviour and long hangs in the server
|
||||
#sys.stdout.flush()
|
||||
print(str(os.getpid()) + " " + datetime.datetime.now().strftime('%H:%M:%S.%f') + " " + msg)
|
||||
sys.stdout.flush()
|
||||
|
||||
#
|
||||
# When we have lockfile issues, try and find infomation about which process is
|
||||
@@ -293,9 +289,7 @@ class ProcessServer():
|
||||
continue
|
||||
try:
|
||||
serverlog("Running command %s" % command)
|
||||
reply = self.cooker.command.runCommand(command, self)
|
||||
serverlog("Sending reply %s" % repr(reply))
|
||||
self.command_channel_reply.send(reply)
|
||||
self.command_channel_reply.send(self.cooker.command.runCommand(command, self))
|
||||
serverlog("Command Completed (socket: %s)" % os.path.exists(self.sockname))
|
||||
except Exception as e:
|
||||
stack = traceback.format_exc()
|
||||
@@ -321,22 +315,7 @@ class ProcessServer():
|
||||
bb.warn('Ignoring invalid BB_SERVER_TIMEOUT=%s, must be a float specifying seconds.' % self.timeout)
|
||||
seendata = True
|
||||
|
||||
if not self.idle:
|
||||
self.idle = threading.Thread(target=self.idle_thread)
|
||||
self.idle.start()
|
||||
elif self.idle and not self.idle.is_alive():
|
||||
serverlog("Idle thread terminated, main thread exiting too")
|
||||
bb.error("Idle thread terminated, main thread exiting too")
|
||||
self.quit = True
|
||||
|
||||
nextsleep = 1.0
|
||||
if self.xmlrpc:
|
||||
nextsleep = self.xmlrpc.get_timeout(nextsleep)
|
||||
try:
|
||||
ready = select.select(fds,[],[],nextsleep)[0]
|
||||
except InterruptedError:
|
||||
# Ignore EINTR
|
||||
ready = []
|
||||
ready = self.idle_commands(.1, fds)
|
||||
|
||||
if self.idle:
|
||||
self.idle.join()
|
||||
@@ -396,7 +375,7 @@ class ProcessServer():
|
||||
lock = bb.utils.lockfile(lockfile, shared=False, retry=False, block=False)
|
||||
if not lock:
|
||||
newlockcontents = get_lock_contents(lockfile)
|
||||
if not newlockcontents[0].startswith([f"{os.getpid()}\n", f"{os.getpid()} "]):
|
||||
if not newlockcontents[0].startswith([os.getpid() + "\n", os.getpid() + " "]):
|
||||
# A new server was started, the lockfile contents changed, we can exit
|
||||
serverlog("Lockfile now contains different contents, exiting: " + str(newlockcontents))
|
||||
return
|
||||
@@ -417,31 +396,21 @@ class ProcessServer():
|
||||
serverlog("".join(msg))
|
||||
|
||||
def idle_thread(self):
|
||||
if self.cooker.configuration.profile:
|
||||
try:
|
||||
import cProfile as profile
|
||||
except:
|
||||
import profile
|
||||
prof = profile.Profile()
|
||||
|
||||
ret = profile.Profile.runcall(prof, self.idle_thread_internal)
|
||||
|
||||
prof.dump_stats("profile-mainloop.log")
|
||||
bb.utils.process_profilelog("profile-mainloop.log")
|
||||
serverlog("Raw profiling information saved to profile-mainloop.log and processed statistics to profile-mainloop.log.processed")
|
||||
else:
|
||||
self.idle_thread_internal()
|
||||
|
||||
def idle_thread_internal(self):
|
||||
def remove_idle_func(function):
|
||||
with bb.utils.lock_timeout(self._idlefuncsLock):
|
||||
del self._idlefuns[function]
|
||||
self.idle_cond.notify_all()
|
||||
|
||||
while not self.quit:
|
||||
nextsleep = 1.0
|
||||
nextsleep = 0.1
|
||||
fds = []
|
||||
|
||||
try:
|
||||
self.cooker.process_inotify_updates()
|
||||
except Exception as exc:
|
||||
serverlog("Exception %s in inofify updates broke the idle_thread, exiting" % traceback.format_exc())
|
||||
self.quit = True
|
||||
|
||||
with bb.utils.lock_timeout(self._idlefuncsLock):
|
||||
items = list(self._idlefuns.items())
|
||||
|
||||
@@ -477,7 +446,7 @@ class ProcessServer():
|
||||
|
||||
# Create new heartbeat event?
|
||||
now = time.time()
|
||||
if items and bb.event._heartbeat_enabled and now >= self.next_heartbeat:
|
||||
if bb.event._heartbeat_enabled and now >= self.next_heartbeat:
|
||||
# We might have missed heartbeats. Just trigger once in
|
||||
# that case and continue after the usual delay.
|
||||
self.next_heartbeat += self.heartbeat_seconds
|
||||
@@ -500,24 +469,43 @@ class ProcessServer():
|
||||
if nextsleep is not None:
|
||||
select.select(fds,[],[],nextsleep)[0]
|
||||
|
||||
def idle_commands(self, delay, fds=None):
|
||||
nextsleep = delay
|
||||
if not fds:
|
||||
fds = []
|
||||
|
||||
if not self.idle:
|
||||
self.idle = threading.Thread(target=self.idle_thread)
|
||||
self.idle.start()
|
||||
elif self.idle and not self.idle.is_alive():
|
||||
serverlog("Idle thread terminated, main thread exiting too")
|
||||
bb.error("Idle thread terminated, main thread exiting too")
|
||||
self.quit = True
|
||||
|
||||
if nextsleep is not None:
|
||||
if self.xmlrpc:
|
||||
nextsleep = self.xmlrpc.get_timeout(nextsleep)
|
||||
try:
|
||||
return select.select(fds,[],[],nextsleep)[0]
|
||||
except InterruptedError:
|
||||
# Ignore EINTR
|
||||
return []
|
||||
else:
|
||||
return select.select(fds,[],[],0)[0]
|
||||
|
||||
|
||||
class ServerCommunicator():
    def __init__(self, connection, recv):
        self.connection = connection
        self.recv = recv

    def runCommand(self, command):
        try:
            self.connection.send(command)
        except BrokenPipeError as e:
            raise BrokenPipeError("bitbake-server might have died or been forcibly stopped, ie. OOM killed") from e
        self.connection.send(command)
        if not self.recv.poll(30):
            logger.info("No reply from server in 30s (for command %s at %s)" % (command[0], currenttime()))
            logger.info("No reply from server in 30s")
            if not self.recv.poll(30):
                raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server (60s at %s)" % currenttime())
        try:
            ret, exc = self.recv.get()
        except EOFError as e:
            raise EOFError("bitbake-server might have died or been forcibly stopped, ie. OOM killed") from e
            raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server (60s)")
        ret, exc = self.recv.get()
        # Should probably turn all exceptions in exc back into exceptions?
        # For now, at least handle BBHandledException
        if exc and ("BBHandledException" in exc or "SystemExit" in exc):

@@ -632,7 +620,7 @@ class BitBakeServer(object):
|
||||
os.set_inheritable(self.bitbake_lock.fileno(), True)
|
||||
os.set_inheritable(self.readypipein, True)
|
||||
serverscript = os.path.realpath(os.path.dirname(__file__) + "/../../../bin/bitbake-server")
|
||||
os.execl(sys.executable, sys.executable, serverscript, "decafbad", str(self.bitbake_lock.fileno()), str(self.readypipein), self.logfile, self.bitbake_lock.name, self.sockname, str(self.server_timeout or 0), str(int(self.profile)), str(self.xmlrpcinterface[0]), str(self.xmlrpcinterface[1]))
|
||||
os.execl(sys.executable, "bitbake-server", serverscript, "decafbad", str(self.bitbake_lock.fileno()), str(self.readypipein), self.logfile, self.bitbake_lock.name, self.sockname, str(self.server_timeout or 0), str(int(self.profile)), str(self.xmlrpcinterface[0]), str(self.xmlrpcinterface[1]))
|
||||
|
||||
def execServer(lockfd, readypipeinfd, lockname, sockname, server_timeout, xmlrpcinterface, profile):
|
||||
|
||||
@@ -872,10 +860,11 @@ class ConnectionWriter(object):
|
||||
process.queue_signals = True
|
||||
self._send(obj)
|
||||
process.queue_signals = False
|
||||
|
||||
while len(process.signal_received) > 0:
|
||||
sig = process.signal_received.pop()
|
||||
process.handle_sig(sig, None)
|
||||
try:
|
||||
for sig in process.signal_received.pop():
|
||||
process.handle_sig(sig, None)
|
||||
except IndexError:
|
||||
pass
|
||||
else:
|
||||
self._send(obj)
|
||||
|
||||
|
||||
@@ -14,8 +14,6 @@ from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
|
||||
import bb.server.xmlrpcclient
|
||||
|
||||
import bb
|
||||
import bb.cooker
|
||||
import bb.event
|
||||
|
||||
# This request handler checks if the request has a "Bitbake-token" header
|
||||
# field (this comes from the client side) and compares it with its internal
|
||||
@@ -56,7 +54,7 @@ class BitBakeXMLRPCServer(SimpleXMLRPCServer):
|
||||
|
||||
def __init__(self, interface, cooker, parent):
|
||||
# Use auto port configuration
|
||||
if interface[1] == -1:
|
||||
if (interface[1] == -1):
|
||||
interface = (interface[0], 0)
|
||||
SimpleXMLRPCServer.__init__(self, interface,
|
||||
requestHandler=BitBakeXMLRPCRequestHandler,
|
||||
@@ -89,12 +87,11 @@ class BitBakeXMLRPCServer(SimpleXMLRPCServer):
|
||||
def handle_requests(self):
|
||||
self._handle_request_noblock()
|
||||
|
||||
class BitBakeXMLRPCServerCommands:
|
||||
class BitBakeXMLRPCServerCommands():
|
||||
|
||||
def __init__(self, server):
|
||||
self.server = server
|
||||
self.has_client = False
|
||||
self.event_handle = None
|
||||
|
||||
def registerEventHandler(self, host, port):
|
||||
"""
|
||||
@@ -103,8 +100,8 @@ class BitBakeXMLRPCServerCommands:
|
||||
s, t = bb.server.xmlrpcclient._create_server(host, port)
|
||||
|
||||
# we don't allow connections if the cooker is running
|
||||
if self.server.cooker.state in [bb.cooker.State.PARSING, bb.cooker.State.RUNNING]:
|
||||
return None, f"Cooker is busy: {self.server.cooker.state.name}"
|
||||
if (self.server.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]):
|
||||
return None, "Cooker is busy: %s" % bb.cooker.state.get_name(self.server.cooker.state)
|
||||
|
||||
self.event_handle = bb.event.register_UIHhandler(s, True)
|
||||
return self.event_handle, 'OK'
|
||||
|
||||
@@ -15,7 +15,6 @@ import difflib
|
||||
import simplediff
|
||||
import json
|
||||
import types
|
||||
from contextlib import contextmanager
|
||||
import bb.compress.zstd
|
||||
from bb.checksum import FileChecksumCache
|
||||
from bb import runqueue
|
||||
@@ -25,24 +24,6 @@ import hashserv.client
|
||||
logger = logging.getLogger('BitBake.SigGen')
|
||||
hashequiv_logger = logging.getLogger('BitBake.SigGen.HashEquiv')
|
||||
|
||||
#find_siginfo and find_siginfo_version are set by the metadata siggen
# The minimum version of the find_siginfo function we need
find_siginfo_minversion = 2

HASHSERV_ENVVARS = [
    "SSL_CERT_DIR",
    "SSL_CERT_FILE",
    "NO_PROXY",
    "HTTPS_PROXY",
    "HTTP_PROXY"
]

def check_siggen_version(siggen):
    if not hasattr(siggen, "find_siginfo_version"):
        bb.fatal("Siggen from metadata (OE-Core?) is too old, please update it (no version found)")
    if siggen.find_siginfo_version < siggen.find_siginfo_minversion:
        bb.fatal("Siggen from metadata (OE-Core?) is too old, please update it (%s vs %s)" % (siggen.find_siginfo_version, siggen.find_siginfo_minversion))

class SetEncoder(json.JSONEncoder):
|
||||
def default(self, obj):
|
||||
if isinstance(obj, set) or isinstance(obj, frozenset):
|
||||
@@ -111,18 +92,9 @@ class SignatureGenerator(object):
|
||||
if flag:
|
||||
self.datacaches[mc].stamp_extrainfo[mcfn][t] = flag
|
||||
|
||||
def get_cached_unihash(self, tid):
|
||||
return None
|
||||
|
||||
def get_unihash(self, tid):
|
||||
unihash = self.get_cached_unihash(tid)
|
||||
if unihash:
|
||||
return unihash
|
||||
return self.taskhash[tid]
|
||||
|
||||
def get_unihashes(self, tids):
|
||||
return {tid: self.get_unihash(tid) for tid in tids}
|
||||
|
||||
def prep_taskhash(self, tid, deps, dataCaches):
|
||||
return
|
||||
|
||||
@@ -201,17 +173,15 @@ class SignatureGenerator(object):
|
||||
def save_unitaskhashes(self):
|
||||
return
|
||||
|
||||
def copy_unitaskhashes(self, targetdir):
|
||||
return
|
||||
|
||||
def set_setscene_tasks(self, setscene_tasks):
|
||||
return
|
||||
|
||||
def exit(self):
|
||||
return
|
||||
|
||||
def build_pnid(mc, pn, taskname):
|
||||
if mc:
|
||||
return "mc:" + mc + ":" + pn + ":" + taskname
|
||||
return pn + ":" + taskname
|
||||
|
||||
class SignatureGeneratorBasic(SignatureGenerator):
|
||||
"""
|
||||
"""
|
||||
@@ -286,6 +256,10 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
bb.warn("Error during finalise of %s" % mcfn)
|
||||
raise
|
||||
|
||||
#Slow but can be useful for debugging mismatched basehashes
|
||||
#for task in self.taskdeps[mcfn]:
|
||||
# self.dump_sigtask(mcfn, task, d.getVar("STAMP"), False)
|
||||
|
||||
basehashes = {}
|
||||
for task in taskdeps:
|
||||
basehashes[task] = self.basehash[mcfn + ":" + task]
|
||||
@@ -295,11 +269,6 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
d.setVar("__siggen_varvals", lookupcache)
|
||||
d.setVar("__siggen_taskdeps", taskdeps)
|
||||
|
||||
#Slow but can be useful for debugging mismatched basehashes
|
||||
#self.setup_datacache_from_datastore(mcfn, d)
|
||||
#for task in taskdeps:
|
||||
# self.dump_sigtask(mcfn, task, d.getVar("STAMP"), False)
|
||||
|
||||
def setup_datacache_from_datastore(self, mcfn, d):
|
||||
super().setup_datacache_from_datastore(mcfn, d)
|
||||
|
||||
@@ -340,19 +309,15 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
recipename = dataCaches[mc].pkg_fn[mcfn]
|
||||
|
||||
self.tidtopn[tid] = recipename
|
||||
# save hashfn for deps into siginfo?
|
||||
for dep in deps:
|
||||
(depmc, _, deptask, depmcfn) = bb.runqueue.split_tid_mcfn(dep)
|
||||
dep_pn = dataCaches[depmc].pkg_fn[depmcfn]
|
||||
|
||||
if not self.rundep_check(mcfn, recipename, task, dep, dep_pn, dataCaches):
|
||||
for dep in sorted(deps, key=clean_basepath):
|
||||
(depmc, _, _, depmcfn) = bb.runqueue.split_tid_mcfn(dep)
|
||||
depname = dataCaches[depmc].pkg_fn[depmcfn]
|
||||
if not self.rundep_check(mcfn, recipename, task, dep, depname, dataCaches):
|
||||
continue
|
||||
|
||||
if dep not in self.taskhash:
|
||||
bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?" % dep)
|
||||
|
||||
dep_pnid = build_pnid(depmc, dep_pn, deptask)
|
||||
self.runtaskdeps[tid].append((dep_pnid, dep))
|
||||
self.runtaskdeps[tid].append(dep)
|
||||
|
||||
if task in dataCaches[mc].file_checksums[mcfn]:
|
||||
if self.checksum_cache:
|
||||
@@ -378,15 +343,15 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
self.taints[tid] = taint
|
||||
logger.warning("%s is tainted from a forced run" % tid)
|
||||
|
||||
return set(dep for _, dep in self.runtaskdeps[tid])
|
||||
return
|
||||
|
||||
def get_taskhash(self, tid, deps, dataCaches):
|
||||
|
||||
data = self.basehash[tid]
|
||||
for dep in sorted(self.runtaskdeps[tid]):
|
||||
data += self.get_unihash(dep[1])
|
||||
for dep in self.runtaskdeps[tid]:
|
||||
data += self.get_unihash(dep)
|
||||
|
||||
for (f, cs) in sorted(self.file_checksum_values[tid], key=clean_checksum_file_path):
|
||||
for (f, cs) in self.file_checksum_values[tid]:
|
||||
if cs:
|
||||
if "/./" in f:
|
||||
data += "./" + f.split("/./")[1]
|
||||
@@ -415,6 +380,9 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
def save_unitaskhashes(self):
|
||||
self.unihash_cache.save(self.unitaskhashes)
|
||||
|
||||
def copy_unitaskhashes(self, targetdir):
|
||||
self.unihash_cache.copyfile(targetdir)
|
||||
|
||||
def dump_sigtask(self, mcfn, task, stampbase, runtime):
|
||||
tid = mcfn + ":" + task
|
||||
mc = bb.runqueue.mc_from_tid(mcfn)
|
||||
@@ -441,21 +409,21 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
data['varvals'][task] = self.datacaches[mc].siggen_varvals[mcfn][task]
|
||||
for dep in self.datacaches[mc].siggen_taskdeps[mcfn][task]:
|
||||
if dep in self.basehash_ignore_vars:
|
||||
continue
|
||||
continue
|
||||
data['gendeps'][dep] = self.datacaches[mc].siggen_gendeps[mcfn][dep]
|
||||
data['varvals'][dep] = self.datacaches[mc].siggen_varvals[mcfn][dep]
|
||||
|
||||
if runtime and tid in self.taskhash:
|
||||
data['runtaskdeps'] = [dep[0] for dep in sorted(self.runtaskdeps[tid])]
|
||||
data['runtaskdeps'] = self.runtaskdeps[tid]
|
||||
data['file_checksum_values'] = []
|
||||
for f,cs in sorted(self.file_checksum_values[tid], key=clean_checksum_file_path):
|
||||
for f,cs in self.file_checksum_values[tid]:
|
||||
if "/./" in f:
|
||||
data['file_checksum_values'].append(("./" + f.split("/./")[1], cs))
|
||||
else:
|
||||
data['file_checksum_values'].append((os.path.basename(f), cs))
|
||||
data['runtaskhashes'] = {}
|
||||
for dep in self.runtaskdeps[tid]:
|
||||
data['runtaskhashes'][dep[0]] = self.get_unihash(dep[1])
|
||||
for dep in data['runtaskdeps']:
|
||||
data['runtaskhashes'][dep] = self.get_unihash(dep)
|
||||
data['taskhash'] = self.taskhash[tid]
|
||||
data['unihash'] = self.get_unihash(tid)
|
||||
|
||||
@@ -533,79 +501,32 @@ class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
|
||||
class SignatureGeneratorUniHashMixIn(object):
|
||||
def __init__(self, data):
|
||||
self.extramethod = {}
|
||||
# NOTE: The cache only tracks hashes that exist. Hashes that don't
|
||||
# exist are always queried from the server since it is possible for
|
||||
# hashes to appear over time, but much less likely for them to
|
||||
# disappear
|
||||
self.unihash_exists_cache = set()
|
||||
self.username = None
|
||||
self.password = None
|
||||
self.env = {}
|
||||
|
||||
origenv = data.getVar("BB_ORIGENV")
|
||||
for e in HASHSERV_ENVVARS:
|
||||
value = data.getVar(e)
|
||||
if not value and origenv:
|
||||
value = origenv.getVar(e)
|
||||
if value:
|
||||
self.env[e] = value
|
||||
super().__init__(data)
|
||||
|
||||
def get_taskdata(self):
|
||||
return (self.server, self.method, self.extramethod, self.username, self.password, self.env) + super().get_taskdata()
|
||||
return (self.server, self.method, self.extramethod) + super().get_taskdata()
|
||||
|
||||
def set_taskdata(self, data):
|
||||
self.server, self.method, self.extramethod, self.username, self.password, self.env = data[:6]
|
||||
super().set_taskdata(data[6:])
|
||||
self.server, self.method, self.extramethod = data[:3]
|
||||
super().set_taskdata(data[3:])
|
||||
|
||||
def get_hashserv_creds(self):
|
||||
if self.username and self.password:
|
||||
return {
|
||||
"username": self.username,
|
||||
"password": self.password,
|
||||
}
|
||||
|
||||
return {}
|
||||
|
||||
@contextmanager
|
||||
def _client_env(self):
|
||||
orig_env = os.environ.copy()
|
||||
try:
|
||||
for k, v in self.env.items():
|
||||
os.environ[k] = v
|
||||
|
||||
yield
|
||||
finally:
|
||||
for k, v in self.env.items():
|
||||
if k in orig_env:
|
||||
os.environ[k] = orig_env[k]
|
||||
else:
|
||||
del os.environ[k]
|
||||
|
||||
@contextmanager
|
||||
def client(self):
|
||||
with self._client_env():
|
||||
if getattr(self, '_client', None) is None:
|
||||
self._client = hashserv.create_client(self.server, **self.get_hashserv_creds())
|
||||
yield self._client
|
||||
if getattr(self, '_client', None) is None:
|
||||
self._client = hashserv.create_client(self.server)
|
||||
return self._client
|
||||
|
||||
def reset(self, data):
|
||||
self.__close_clients()
|
||||
if getattr(self, '_client', None) is not None:
|
||||
self._client.close()
|
||||
self._client = None
|
||||
return super().reset(data)
|
||||
|
||||
def exit(self):
|
||||
self.__close_clients()
|
||||
if getattr(self, '_client', None) is not None:
|
||||
self._client.close()
|
||||
self._client = None
|
||||
return super().exit()
|
||||
|
||||
def __close_clients(self):
|
||||
with self._client_env():
|
||||
if getattr(self, '_client', None) is not None:
|
||||
self._client.close()
|
||||
self._client = None
|
||||
if getattr(self, '_client_pool', None) is not None:
|
||||
self._client_pool.close()
|
||||
self._client_pool = None
|
||||
|
||||
def get_stampfile_hash(self, tid):
|
||||
if tid in self.taskhash:
|
||||
# If a unique hash is reported, use it as the stampfile hash. This
|
||||
@@ -637,7 +558,7 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
return None
|
||||
return unihash
|
||||
|
||||
def get_cached_unihash(self, tid):
|
||||
def get_unihash(self, tid):
|
||||
taskhash = self.taskhash[tid]
|
||||
|
||||
# If its not a setscene task we can return
|
||||
@@ -652,96 +573,40 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
self.unihash[tid] = unihash
|
||||
return unihash
|
||||
|
||||
return None
|
||||
# In the absence of being able to discover a unique hash from the
|
||||
# server, make it be equivalent to the taskhash. The unique "hash" only
|
||||
# really needs to be a unique string (not even necessarily a hash), but
|
||||
# making it match the taskhash has a few advantages:
|
||||
#
|
||||
# 1) All of the sstate code that assumes hashes can be the same
|
||||
# 2) It provides maximal compatibility with builders that don't use
|
||||
# an equivalency server
|
||||
# 3) The value is easy for multiple independent builders to derive the
|
||||
# same unique hash from the same input. This means that if the
|
||||
# independent builders find the same taskhash, but it isn't reported
|
||||
# to the server, there is a better chance that they will agree on
|
||||
# the unique hash.
|
||||
unihash = taskhash
|
||||
|
||||
def _get_method(self, tid):
|
||||
method = self.method
|
||||
if tid in self.extramethod:
|
||||
method = method + self.extramethod[tid]
|
||||
|
||||
return method
|
||||
|
||||
def unihashes_exist(self, query):
|
||||
if len(query) == 0:
|
||||
return {}
|
||||
|
||||
query_keys = []
|
||||
result = {}
|
||||
for key, unihash in query.items():
|
||||
if unihash in self.unihash_exists_cache:
|
||||
result[key] = True
|
||||
else:
|
||||
query_keys.append(key)
|
||||
|
||||
if query_keys:
|
||||
with self.client() as client:
|
||||
query_result = client.unihash_exists_batch(query[k] for k in query_keys)
|
||||
|
||||
for idx, key in enumerate(query_keys):
|
||||
exists = query_result[idx]
|
||||
if exists:
|
||||
self.unihash_exists_cache.add(query[key])
|
||||
result[key] = exists
|
||||
|
||||
return result
|
||||
|
||||
def get_unihash(self, tid):
|
||||
return self.get_unihashes([tid])[tid]
|
||||
|
||||
def get_unihashes(self, tids):
|
||||
"""
|
||||
For a iterable of tids, returns a dictionary that maps each tid to a
|
||||
unihash
|
||||
"""
|
||||
result = {}
|
||||
query_tids = []
|
||||
|
||||
for tid in tids:
|
||||
unihash = self.get_cached_unihash(tid)
|
||||
if unihash:
|
||||
result[tid] = unihash
|
||||
else:
|
||||
query_tids.append(tid)
|
||||
|
||||
if query_tids:
|
||||
unihashes = []
|
||||
try:
|
||||
with self.client() as client:
|
||||
unihashes = client.get_unihash_batch((self._get_method(tid), self.taskhash[tid]) for tid in query_tids)
|
||||
except (ConnectionError, FileNotFoundError) as e:
|
||||
bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
|
||||
|
||||
for idx, tid in enumerate(query_tids):
|
||||
# In the absence of being able to discover a unique hash from the
|
||||
# server, make it be equivalent to the taskhash. The unique "hash" only
|
||||
# really needs to be a unique string (not even necessarily a hash), but
|
||||
# making it match the taskhash has a few advantages:
|
||||
#
|
||||
# 1) All of the sstate code that assumes hashes can be the same
|
||||
# 2) It provides maximal compatibility with builders that don't use
|
||||
# an equivalency server
|
||||
# 3) The value is easy for multiple independent builders to derive the
|
||||
# same unique hash from the same input. This means that if the
|
||||
# independent builders find the same taskhash, but it isn't reported
|
||||
# to the server, there is a better chance that they will agree on
|
||||
# the unique hash.
|
||||
taskhash = self.taskhash[tid]
|
||||
|
||||
if unihashes and unihashes[idx]:
|
||||
unihash = unihashes[idx]
|
||||
try:
|
||||
method = self.method
|
||||
if tid in self.extramethod:
|
||||
method = method + self.extramethod[tid]
|
||||
data = self.client().get_unihash(method, self.taskhash[tid])
|
||||
if data:
|
||||
unihash = data
|
||||
# A unique hash equal to the taskhash is not very interesting,
|
||||
# so it is reported it at debug level 2. If they differ, that
|
||||
# is much more interesting, so it is reported at debug level 1
|
||||
hashequiv_logger.bbdebug((1, 2)[unihash == taskhash], 'Found unihash %s in place of %s for %s from %s' % (unihash, taskhash, tid, self.server))
|
||||
else:
|
||||
hashequiv_logger.debug2('No reported unihash for %s:%s from %s' % (tid, taskhash, self.server))
|
||||
unihash = taskhash
|
||||
except ConnectionError as e:
|
||||
bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
|
||||
|
||||
self.set_unihash(tid, unihash)
|
||||
self.unihash[tid] = unihash
|
||||
result[tid] = unihash
|
||||
|
||||
return result
|
||||
self.set_unihash(tid, unihash)
|
||||
self.unihash[tid] = unihash
|
||||
return unihash
|
||||
|
||||
def report_unihash(self, path, task, d):
|
||||
import importlib
|
||||
@@ -805,9 +670,7 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
if tid in self.extramethod:
|
||||
method = method + self.extramethod[tid]
|
||||
|
||||
with self.client() as client:
|
||||
data = client.report_unihash(taskhash, method, outhash, unihash, extra_data)
|
||||
|
||||
data = self.client().report_unihash(taskhash, method, outhash, unihash, extra_data)
|
||||
new_unihash = data['unihash']
|
||||
|
||||
if new_unihash != unihash:
|
||||
@@ -817,7 +680,7 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
d.setVar('BB_UNIHASH', new_unihash)
|
||||
else:
|
||||
hashequiv_logger.debug('Reported task %s as unihash %s to %s' % (taskhash, unihash, self.server))
|
||||
except (ConnectionError, FileNotFoundError) as e:
|
||||
except ConnectionError as e:
|
||||
bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
|
||||
finally:
|
||||
if sigfile:
|
||||
@@ -838,9 +701,7 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
if tid in self.extramethod:
|
||||
method = method + self.extramethod[tid]
|
||||
|
||||
with self.client() as client:
|
||||
data = client.report_unihash_equiv(taskhash, method, wanted_unihash, extra_data)
|
||||
|
||||
data = self.client().report_unihash_equiv(taskhash, method, wanted_unihash, extra_data)
|
||||
hashequiv_logger.verbose('Reported task %s as unihash %s to %s (%s)' % (tid, wanted_unihash, self.server, str(data)))
|
||||
|
||||
if data is None:
|
||||
@@ -859,7 +720,7 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
# TODO: What to do here?
|
||||
hashequiv_logger.verbose('Task %s unihash reported as unwanted hash %s' % (tid, finalunihash))
|
||||
|
||||
except (ConnectionError, FileNotFoundError) as e:
|
||||
except ConnectionError as e:
|
||||
bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
|
||||
|
||||
return False
|
||||
@@ -874,12 +735,6 @@ class SignatureGeneratorTestEquivHash(SignatureGeneratorUniHashMixIn, SignatureG
|
||||
self.server = data.getVar('BB_HASHSERVE')
|
||||
self.method = "sstate_output_hash"
|
||||
|
||||
def clean_checksum_file_path(file_checksum_tuple):
    f, cs = file_checksum_tuple
    if "/./" in f:
        return "./" + f.split("/./")[1]
    return os.path.basename(f)

def dump_this_task(outfile, d):
|
||||
import bb.parse
|
||||
mcfn = d.getVar("BB_FILENAME")
|
||||
@@ -938,6 +793,39 @@ def list_inline_diff(oldlist, newlist, colors=None):
|
||||
ret.append(item)
|
||||
return '[%s]' % (', '.join(ret))
|
||||
|
||||
def clean_basepath(basepath):
    basepath, dir, recipe_task = basepath.rsplit("/", 2)
    cleaned = dir + '/' + recipe_task

    if basepath[0] == '/':
        return cleaned

    if basepath.startswith("mc:") and basepath.count(':') >= 2:
        mc, mc_name, basepath = basepath.split(":", 2)
        mc_suffix = ':mc:' + mc_name
    else:
        mc_suffix = ''

    # mc stuff now removed from basepath. Whatever was next, if present will be the first
    # suffix. ':/', recipe path start, marks the end of this. Something like
    # 'virtual:a[:b[:c]]:/path...' (b and c being optional)
    if basepath[0] != '/':
        cleaned += ':' + basepath.split(':/', 1)[0]

    return cleaned + mc_suffix

def clean_basepaths(a):
    b = {}
    for x in a:
        b[clean_basepath(x)] = a[x]
    return b

def clean_basepaths_list(a):
    b = []
    for x in a:
        b.append(clean_basepath(x))
    return b

# Handled renamed fields
|
||||
def handle_renames(data):
|
||||
if 'basewhitelist' in data:
|
||||
@@ -968,18 +856,10 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
formatparams.update(values)
|
||||
return formatstr.format(**formatparams)
|
||||
|
||||
try:
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
except (TypeError, OSError) as err:
|
||||
bb.error("Failed to open sigdata file '%s': %s" % (a, str(err)))
|
||||
raise err
|
||||
try:
|
||||
with bb.compress.zstd.open(b, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
b_data = json.load(f, object_hook=SetDecoder)
|
||||
except (TypeError, OSError) as err:
|
||||
bb.error("Failed to open sigdata file '%s': %s" % (b, str(err)))
|
||||
raise err
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
with bb.compress.zstd.open(b, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
b_data = json.load(f, object_hook=SetDecoder)
|
||||
|
||||
for data in [a_data, b_data]:
|
||||
handle_renames(data)
|
||||
@@ -1114,11 +994,11 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
a = a_data['runtaskdeps'][idx]
|
||||
b = b_data['runtaskdeps'][idx]
|
||||
if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b] and not collapsed:
|
||||
changed.append("%s with hash %s\n changed to\n%s with hash %s" % (a, a_data['runtaskhashes'][a], b, b_data['runtaskhashes'][b]))
|
||||
changed.append("%s with hash %s\n changed to\n%s with hash %s" % (clean_basepath(a), a_data['runtaskhashes'][a], clean_basepath(b), b_data['runtaskhashes'][b]))
|
||||
|
||||
if changed:
|
||||
clean_a = a_data['runtaskdeps']
|
||||
clean_b = b_data['runtaskdeps']
|
||||
clean_a = clean_basepaths_list(a_data['runtaskdeps'])
|
||||
clean_b = clean_basepaths_list(b_data['runtaskdeps'])
|
||||
if clean_a != clean_b:
|
||||
output.append(color_format("{color_title}runtaskdeps changed:{color_default}\n%s") % list_inline_diff(clean_a, clean_b, colors))
|
||||
else:
|
||||
@@ -1127,8 +1007,8 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
|
||||
|
||||
if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data:
|
||||
a = a_data['runtaskhashes']
|
||||
b = b_data['runtaskhashes']
|
||||
a = clean_basepaths(a_data['runtaskhashes'])
|
||||
b = clean_basepaths(b_data['runtaskhashes'])
|
||||
changed, added, removed = dict_diff(a, b)
|
||||
if added:
|
||||
for dep in sorted(added):
|
||||
@@ -1217,12 +1097,8 @@ def calc_taskhash(sigdata):
|
||||
def dump_sigfile(a):
|
||||
output = []
|
||||
|
||||
try:
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
except (TypeError, OSError) as err:
|
||||
bb.error("Failed to open sigdata file '%s': %s" % (a, str(err)))
|
||||
raise err
|
||||
with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f:
|
||||
a_data = json.load(f, object_hook=SetDecoder)
|
||||
|
||||
handle_renames(a_data)
|
||||
|
||||
|
||||
@@ -44,7 +44,6 @@ class VariableReferenceTest(ReferenceTest):
|
||||
def parseExpression(self, exp):
|
||||
parsedvar = self.d.expandWithRefs(exp, None)
|
||||
self.references = parsedvar.references
|
||||
self.execs = parsedvar.execs
|
||||
|
||||
def test_simple_reference(self):
|
||||
self.setEmptyVars(["FOO"])
|
||||
@@ -62,11 +61,6 @@ class VariableReferenceTest(ReferenceTest):
|
||||
self.parseExpression("${@d.getVar('BAR') + 'foo'}")
|
||||
self.assertReferences(set(["BAR"]))
|
||||
|
||||
def test_python_exec_reference(self):
|
||||
self.parseExpression("${@eval('3 * 5')}")
|
||||
self.assertReferences(set())
|
||||
self.assertExecs(set(["eval"]))
|
||||
|
||||
class ShellReferenceTest(ReferenceTest):
|
||||
|
||||
def parseExpression(self, exp):
|
||||
@@ -106,46 +100,6 @@ ${D}${libdir}/pkgconfig/*.pc
|
||||
self.parseExpression("foo=$(echo bar)")
|
||||
self.assertExecs(set(["echo"]))
|
||||
|
||||
def test_assign_subshell_expansion_quotes(self):
|
||||
self.parseExpression('foo="$(echo bar)"')
|
||||
self.assertExecs(set(["echo"]))
|
||||
|
||||
def test_assign_subshell_expansion_nested(self):
|
||||
self.parseExpression('foo="$(func1 "$(func2 bar$(func3))")"')
|
||||
self.assertExecs(set(["func1", "func2", "func3"]))
|
||||
|
||||
def test_assign_subshell_expansion_multiple(self):
|
||||
self.parseExpression('foo="$(func1 "$(func2)") $(func3)"')
|
||||
self.assertExecs(set(["func1", "func2", "func3"]))
|
||||
|
||||
def test_assign_subshell_expansion_escaped_quotes(self):
|
||||
self.parseExpression('foo="\\"fo\\"o$(func1)"')
|
||||
self.assertExecs(set(["func1"]))
|
||||
|
||||
def test_assign_subshell_expansion_empty(self):
|
||||
self.parseExpression('foo="bar$()foo"')
|
||||
self.assertExecs(set())
|
||||
|
||||
def test_assign_subshell_backticks(self):
|
||||
self.parseExpression("foo=`echo bar`")
|
||||
self.assertExecs(set(["echo"]))
|
||||
|
||||
def test_assign_subshell_backticks_quotes(self):
|
||||
self.parseExpression('foo="`echo bar`"')
|
||||
self.assertExecs(set(["echo"]))
|
||||
|
||||
def test_assign_subshell_backticks_multiple(self):
|
||||
self.parseExpression('foo="`func1 bar` `func2`"')
|
||||
self.assertExecs(set(["func1", "func2"]))
|
||||
|
||||
def test_assign_subshell_backticks_escaped_quotes(self):
|
||||
self.parseExpression('foo="\\"fo\\"o`func1`"')
|
||||
self.assertExecs(set(["func1"]))
|
||||
|
||||
def test_assign_subshell_backticks_empty(self):
|
||||
self.parseExpression('foo="bar``foo"')
|
||||
self.assertExecs(set())
|
||||
|
||||
def test_shell_unexpanded(self):
|
||||
self.setEmptyVars(["QT_BASE_NAME"])
|
||||
self.parseExpression('echo "${QT_BASE_NAME}"')
|
||||
@@ -476,37 +430,11 @@ esac
|
||||
self.assertEqual(deps, set(["TESTVAR2"]))
|
||||
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval3', 'anothervalue'])
|
||||
|
||||
def test_contains_vardeps_override_operators(self):
|
||||
# Check override operators handle dependencies correctly with the contains functionality
|
||||
expr_plain = 'testval'
|
||||
expr_prepend = '${@bb.utils.filter("TESTVAR1", "testval1", d)} '
|
||||
expr_append = ' ${@bb.utils.filter("TESTVAR2", "testval2", d)}'
|
||||
expr_remove = '${@bb.utils.contains("TESTVAR3", "no-testval", "testval", "", d)}'
|
||||
# Check dependencies
|
||||
self.d.setVar('ANOTHERVAR', expr_plain)
|
||||
self.d.prependVar('ANOTHERVAR', expr_prepend)
|
||||
self.d.appendVar('ANOTHERVAR', expr_append)
|
||||
self.d.setVar('ANOTHERVAR:remove', expr_remove)
|
||||
self.d.setVar('TESTVAR1', 'blah')
|
||||
self.d.setVar('TESTVAR2', 'testval2')
|
||||
self.d.setVar('TESTVAR3', 'no-testval')
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
|
||||
self.assertEqual(sorted(values.splitlines()),
|
||||
sorted([
|
||||
expr_prepend + expr_plain + expr_append,
|
||||
'_remove of ' + expr_remove,
|
||||
'TESTVAR1{testval1} = Unset',
|
||||
'TESTVAR2{testval2} = Set',
|
||||
'TESTVAR3{no-testval} = Set',
|
||||
]))
|
||||
# Check final value
|
||||
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval2'])
|
||||
|
||||
#Currently no wildcard support
|
||||
#def test_vardeps_wildcards(self):
|
||||
# self.d.setVar("oe_libinstall", "echo test")
|
||||
# self.d.setVar("FOO", "foo=oe_libinstall; eval $foo")
|
||||
# self.d.setVarFlag("FOO", "vardeps", "oe_*")
|
||||
# self.assertEqual(deps, set(["oe_libinstall"]))
|
||||
# self.assertEquals(deps, set(["oe_libinstall"]))
|
||||
|
||||
|
||||
|
||||
@@ -66,8 +66,8 @@ class CompressionTests(object):
|
||||
|
||||
class LZ4Tests(CompressionTests, unittest.TestCase):
|
||||
def setUp(self):
|
||||
if shutil.which("lz4") is None:
|
||||
self.skipTest("'lz4' not found")
|
||||
if shutil.which("lz4c") is None:
|
||||
self.skipTest("'lz4c' not found")
|
||||
super().setUp()
|
||||
|
||||
@contextlib.contextmanager
|
||||
|
||||
@@ -77,18 +77,6 @@ class DataExpansions(unittest.TestCase):
|
||||
val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
|
||||
self.assertEqual(str(val), "value_of_foo value_of_bar")
|
||||
|
||||
def test_python_snippet_function_reference(self):
|
||||
self.d.setVar("TESTVAL", "testvalue")
|
||||
self.d.setVar("testfunc", 'd.getVar("TESTVAL")')
|
||||
context = bb.utils.get_context()
|
||||
context["testfunc"] = lambda d: d.getVar("TESTVAL")
|
||||
val = self.d.expand("${@testfunc(d)}")
|
||||
self.assertEqual(str(val), "testvalue")
|
||||
|
||||
def test_python_snippet_builtin_metadata(self):
|
||||
self.d.setVar("eval", "INVALID")
|
||||
self.d.expand("${@eval('3')}")
|
||||
|
||||
def test_python_unexpanded(self):
|
||||
self.d.setVar("bar", "${unsetvar}")
|
||||
val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
|
||||
@@ -395,16 +383,6 @@ class TestOverrides(unittest.TestCase):
|
||||
self.d.setVar("OVERRIDES", "foo:bar:some_val")
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue3")
|
||||
|
||||
# Test an override with _<numeric> in it based on a real world OE issue
|
||||
def test_underscore_override_2(self):
|
||||
self.d.setVar("TARGET_ARCH", "x86_64")
|
||||
self.d.setVar("PN", "test-${TARGET_ARCH}")
|
||||
self.d.setVar("VERSION", "1")
|
||||
self.d.setVar("VERSION:pn-test-${TARGET_ARCH}", "2")
|
||||
self.d.setVar("OVERRIDES", "pn-${PN}")
|
||||
bb.data.expandKeys(self.d)
|
||||
self.assertEqual(self.d.getVar("VERSION"), "2")
|
||||
|
||||
def test_remove_with_override(self):
|
||||
self.d.setVar("TEST:bar", "testvalue2")
|
||||
self.d.setVar("TEST:some_val", "testvalue3 testvalue5")
|
||||
@@ -426,6 +404,16 @@ class TestOverrides(unittest.TestCase):
|
||||
self.d.setVar("TEST:bar:append", "testvalue2")
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue2")
|
||||
|
||||
# Test an override with _<numeric> in it based on a real world OE issue
|
||||
def test_underscore_override(self):
|
||||
self.d.setVar("TARGET_ARCH", "x86_64")
|
||||
self.d.setVar("PN", "test-${TARGET_ARCH}")
|
||||
self.d.setVar("VERSION", "1")
|
||||
self.d.setVar("VERSION:pn-test-${TARGET_ARCH}", "2")
|
||||
self.d.setVar("OVERRIDES", "pn-${PN}")
|
||||
bb.data.expandKeys(self.d)
|
||||
self.assertEqual(self.d.getVar("VERSION"), "2")
|
||||
|
||||
def test_append_and_unused_override(self):
|
||||
# Had a bug where an unused override append could return "" instead of None
|
||||
self.d.setVar("BAR:append:unusedoverride", "testvalue2")
|
||||
@@ -450,64 +438,17 @@ class TestFlags(unittest.TestCase):
|
||||
self.d = bb.data.init()
|
||||
self.d.setVar("foo", "value of foo")
|
||||
self.d.setVarFlag("foo", "flag1", "value of flag1")
|
||||
self.d.setVarFlag("foo", "_defaultval_flag_flag1", "default of flag1")
|
||||
self.d.setVarFlag("foo", "flag2", "value of flag2")
|
||||
self.d.setVarFlag("foo", "_defaultval_flag_flag2", "default of flag2")
|
||||
self.d.setVarFlag("foo", "flag3", "value of flag3")
|
||||
self.d.setVarFlag("foo", "_defaultval_flag_flagnovalue", "default of flagnovalue")
|
||||
|
||||
def test_setflag(self):
|
||||
self.assertEqual(self.d.getVarFlag("foo", "flag1", False), "value of flag1")
|
||||
self.assertEqual(self.d.getVarFlag("foo", "flag2", False), "value of flag2")
|
||||
self.assertDictEqual(
|
||||
self.d.getVarFlags("foo"),
|
||||
{
|
||||
"flag1": "value of flag1",
|
||||
"flag2": "value of flag2",
|
||||
"flag3": "value of flag3",
|
||||
"flagnovalue": "default of flagnovalue",
|
||||
}
|
||||
)
|
||||
self.assertDictEqual(
|
||||
self.d.getVarFlags("foo", internalflags=True),
|
||||
{
|
||||
"_content": "value of foo",
|
||||
"flag1": "value of flag1",
|
||||
"flag2": "value of flag2",
|
||||
"flag3": "value of flag3",
|
||||
"_defaultval_flag_flag1": "default of flag1",
|
||||
"_defaultval_flag_flag2": "default of flag2",
|
||||
"_defaultval_flag_flagnovalue": "default of flagnovalue",
|
||||
}
|
||||
)
|
||||
|
||||
def test_delflag(self):
|
||||
self.d.delVarFlag("foo", "flag2")
|
||||
self.d.delVarFlag("foo", "flag3")
|
||||
self.assertEqual(self.d.getVarFlag("foo", "flag1", False), "value of flag1")
|
||||
self.assertEqual(self.d.getVarFlag("foo", "flag2", False), None)
|
||||
self.assertDictEqual(
|
||||
self.d.getVarFlags("foo"),
|
||||
{
|
||||
"flag1": "value of flag1",
|
||||
"flagnovalue": "default of flagnovalue",
|
||||
}
|
||||
)
|
||||
self.assertDictEqual(
|
||||
self.d.getVarFlags("foo", internalflags=True),
|
||||
{
|
||||
"_content": "value of foo",
|
||||
"flag1": "value of flag1",
|
||||
"_defaultval_flag_flag1": "default of flag1",
|
||||
"_defaultval_flag_flagnovalue": "default of flagnovalue",
|
||||
}
|
||||
)
|
||||
|
||||
def test_delvar(self):
|
||||
self.d.delVar("foo")
|
||||
self.assertEqual(self.d.getVarFlag("foo", "flag1", False), None)
|
||||
self.assertEqual(self.d.getVarFlag("foo", "flag2", False), None)
|
||||
self.assertEqual(self.d.getVarFlags("foo", internalflags=True), None)
|
||||
|
||||
class Contains(unittest.TestCase):
|
||||
def setUp(self):
|
||||
|
||||
@@ -13,7 +13,6 @@ import pickle
|
||||
import threading
|
||||
import time
|
||||
import unittest
|
||||
import tempfile
|
||||
from unittest.mock import Mock
|
||||
from unittest.mock import call
|
||||
|
||||
@@ -469,8 +468,6 @@ class EventClassesTest(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
bb.event.worker_pid = EventClassesTest._worker_pid
|
||||
self.d = bb.data.init()
|
||||
bb.parse.siggen = bb.siggen.init(self.d)
|
||||
|
||||
def test_Event(self):
|
||||
""" Test the Event base class """
|
||||
@@ -953,24 +950,3 @@ class EventClassesTest(unittest.TestCase):
|
||||
event = bb.event.FindSigInfoResult(result)
|
||||
self.assertEqual(event.result, result)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_lineno_in_eventhandler(self):
    # The error lineno is 5, not 4 since the first line is '\n'
    error_line = """
# Comment line1
# Comment line2
python test_lineno_in_eventhandler() {
    This is an error line
}
addhandler test_lineno_in_eventhandler
test_lineno_in_eventhandler[eventmask] = "bb.event.ConfigParsed"
"""

    with self.assertLogs() as logs:
        f = tempfile.NamedTemporaryFile(suffix = '.bb')
        f.write(bytes(error_line, "utf-8"))
        f.flush()
        d = bb.parse.handle(f.name, self.d)['']

    output = "".join(logs.output)
    self.assertTrue(" line 5\n" in output)

File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -75,59 +75,6 @@ unset B[flag]
|
||||
self.assertEqual(d.getVarFlag("A","flag"), None)
|
||||
self.assertEqual(d.getVar("B"), "2")
|
||||
|
||||
defaulttest = """
|
||||
A = "set value"
|
||||
A ??= "default value"
|
||||
|
||||
A[flag_set_vs_question] = "set flag"
|
||||
A[flag_set_vs_question] ?= "question flag"
|
||||
|
||||
A[flag_set_vs_default] = "set flag"
|
||||
A[flag_set_vs_default] ??= "default flag"
|
||||
|
||||
A[flag_question] ?= "question flag"
|
||||
|
||||
A[flag_default] ??= "default flag"
|
||||
|
||||
A[flag_question_vs_default] ?= "question flag"
|
||||
A[flag_question_vs_default] ??= "default flag"
|
||||
|
||||
A[flag_default_vs_question] ??= "default flag"
|
||||
A[flag_default_vs_question] ?= "question flag"
|
||||
|
||||
A[flag_set_question_default] = "set flag"
|
||||
A[flag_set_question_default] ?= "question flag"
|
||||
A[flag_set_question_default] ??= "default flag"
|
||||
|
||||
A[flag_set_default_question] = "set flag"
|
||||
A[flag_set_default_question] ??= "default flag"
|
||||
A[flag_set_default_question] ?= "question flag"
|
||||
|
||||
A[flag_set_twice] = "set flag first"
|
||||
A[flag_set_twice] = "set flag second"
|
||||
|
||||
A[flag_question_twice] ?= "question flag first"
|
||||
A[flag_question_twice] ?= "question flag second"
|
||||
|
||||
A[flag_default_twice] ??= "default flag first"
|
||||
A[flag_default_twice] ??= "default flag second"
|
||||
"""
|
||||
def test_parse_defaulttest(self):
|
||||
f = self.parsehelper(self.defaulttest)
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
self.assertEqual(d.getVar("A"), "set value")
|
||||
self.assertEqual(d.getVarFlag("A","flag_set_vs_question"), "set flag")
|
||||
self.assertEqual(d.getVarFlag("A","flag_set_vs_default"), "set flag")
|
||||
self.assertEqual(d.getVarFlag("A","flag_question"), "question flag")
|
||||
self.assertEqual(d.getVarFlag("A","flag_default"), "default flag")
|
||||
self.assertEqual(d.getVarFlag("A","flag_question_vs_default"), "question flag")
|
||||
self.assertEqual(d.getVarFlag("A","flag_default_vs_question"), "question flag")
|
||||
self.assertEqual(d.getVarFlag("A","flag_set_question_default"), "set flag")
|
||||
self.assertEqual(d.getVarFlag("A","flag_set_default_question"), "set flag")
|
||||
self.assertEqual(d.getVarFlag("A","flag_set_twice"), "set flag second")
|
||||
self.assertEqual(d.getVarFlag("A","flag_question_twice"), "question flag first")
|
||||
self.assertEqual(d.getVarFlag("A","flag_default_twice"), "default flag second")
|
||||
|
||||
exporttest = """
|
||||
A = "a"
|
||||
export B = "b"
|
||||
@@ -230,19 +177,7 @@ python () {
|
||||
|
||||
addtask_deltask = """
|
||||
addtask do_patch after do_foo after do_unpack before do_configure before do_compile
|
||||
addtask do_fetch2 do_patch2
|
||||
|
||||
addtask do_myplaintask
|
||||
addtask do_myplaintask2
|
||||
deltask do_myplaintask2
|
||||
addtask do_mytask# comment
|
||||
addtask do_mytask2 # comment2
|
||||
addtask do_mytask3
|
||||
deltask do_mytask3# comment
|
||||
deltask do_mytask4 # comment2
|
||||
|
||||
# Ensure a missing task prefix on after works
|
||||
addtask do_mytask5 after mytask
|
||||
addtask do_fetch do_patch
|
||||
|
||||
MYVAR = "do_patch"
|
||||
EMPTYVAR = ""
|
||||
@@ -250,12 +185,15 @@ deltask do_fetch ${MYVAR} ${EMPTYVAR}
|
||||
deltask ${EMPTYVAR}
|
||||
"""
|
||||
def test_parse_addtask_deltask(self):
|
||||
|
||||
import sys
|
||||
f = self.parsehelper(self.addtask_deltask)
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
self.assertSequenceEqual(['do_fetch2', 'do_patch2', 'do_myplaintask', 'do_mytask', 'do_mytask2', 'do_mytask5'], bb.build.listtasks(d))
|
||||
self.assertEqual(['do_mytask'], d.getVarFlag("do_mytask5", "deps"))
|
||||
stdout = sys.stdout.getvalue()
|
||||
self.assertTrue("addtask contained multiple 'before' keywords" in stdout)
|
||||
self.assertTrue("addtask contained multiple 'after' keywords" in stdout)
|
||||
self.assertTrue('addtask ignored: " do_patch"' in stdout)
|
||||
#self.assertTrue('dependent task do_foo for do_patch does not exist' in stdout)
|
||||
|
||||
broken_multiline_comment = """
|
||||
# First line of comment \\
|
||||
@@ -303,163 +241,3 @@ unset A[flag@.service]
|
||||
with self.assertRaises(bb.parse.ParseError):
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
export_function_recipe = """
|
||||
inherit someclass
|
||||
"""
|
||||
|
||||
export_function_recipe2 = """
|
||||
inherit someclass
|
||||
|
||||
do_compile () {
|
||||
false
|
||||
}
|
||||
|
||||
python do_compilepython () {
|
||||
bb.note("Something else")
|
||||
}
|
||||
|
||||
"""
|
||||
export_function_class = """
|
||||
someclass_do_compile() {
|
||||
true
|
||||
}
|
||||
|
||||
python someclass_do_compilepython () {
|
||||
bb.note("Something")
|
||||
}
|
||||
|
||||
EXPORT_FUNCTIONS do_compile do_compilepython
|
||||
"""
|
||||
|
||||
export_function_class2 = """
|
||||
secondclass_do_compile() {
|
||||
true
|
||||
}
|
||||
|
||||
python secondclass_do_compilepython () {
|
||||
bb.note("Something")
|
||||
}
|
||||
|
||||
EXPORT_FUNCTIONS do_compile do_compilepython
|
||||
"""
|
||||
|
||||
def test_parse_export_functions(self):
|
||||
def check_function_flags(d):
|
||||
self.assertEqual(d.getVarFlag("do_compile", "func"), 1)
|
||||
self.assertEqual(d.getVarFlag("do_compilepython", "func"), 1)
|
||||
self.assertEqual(d.getVarFlag("do_compile", "python"), None)
|
||||
self.assertEqual(d.getVarFlag("do_compilepython", "python"), "1")
|
||||
|
||||
with tempfile.TemporaryDirectory() as tempdir:
|
||||
self.d.setVar("__bbclasstype", "recipe")
|
||||
recipename = tempdir + "/recipe.bb"
|
||||
os.makedirs(tempdir + "/classes")
|
||||
with open(tempdir + "/classes/someclass.bbclass", "w") as f:
|
||||
f.write(self.export_function_class)
|
||||
f.flush()
|
||||
with open(tempdir + "/classes/secondclass.bbclass", "w") as f:
|
||||
f.write(self.export_function_class2)
|
||||
f.flush()
|
||||
|
||||
with open(recipename, "w") as f:
|
||||
f.write(self.export_function_recipe)
|
||||
f.flush()
|
||||
os.chdir(tempdir)
|
||||
d = bb.parse.handle(recipename, bb.data.createCopy(self.d))['']
|
||||
self.assertIn("someclass_do_compile", d.getVar("do_compile"))
|
||||
self.assertIn("someclass_do_compilepython", d.getVar("do_compilepython"))
|
||||
check_function_flags(d)
|
||||
|
||||
recipename2 = tempdir + "/recipe2.bb"
|
||||
with open(recipename2, "w") as f:
|
||||
f.write(self.export_function_recipe2)
|
||||
f.flush()
|
||||
|
||||
d = bb.parse.handle(recipename2, bb.data.createCopy(self.d))['']
|
||||
self.assertNotIn("someclass_do_compile", d.getVar("do_compile"))
|
||||
self.assertNotIn("someclass_do_compilepython", d.getVar("do_compilepython"))
|
||||
self.assertIn("false", d.getVar("do_compile"))
|
||||
self.assertIn("else", d.getVar("do_compilepython"))
|
||||
check_function_flags(d)
|
||||
|
||||
with open(recipename, "a+") as f:
|
||||
f.write("\ninherit secondclass\n")
|
||||
f.flush()
|
||||
with open(recipename2, "a+") as f:
|
||||
f.write("\ninherit secondclass\n")
|
||||
f.flush()
|
||||
|
||||
d = bb.parse.handle(recipename, bb.data.createCopy(self.d))['']
|
||||
self.assertIn("secondclass_do_compile", d.getVar("do_compile"))
|
||||
self.assertIn("secondclass_do_compilepython", d.getVar("do_compilepython"))
|
||||
check_function_flags(d)
|
||||
|
||||
d = bb.parse.handle(recipename2, bb.data.createCopy(self.d))['']
|
||||
self.assertNotIn("someclass_do_compile", d.getVar("do_compile"))
|
||||
self.assertNotIn("someclass_do_compilepython", d.getVar("do_compilepython"))
|
||||
self.assertIn("false", d.getVar("do_compile"))
|
||||
self.assertIn("else", d.getVar("do_compilepython"))
|
||||
check_function_flags(d)
|
||||
|
||||
export_function_unclosed_tab = """
|
||||
do_compile () {
|
||||
bb.note("Something")
|
||||
\t}
|
||||
"""
|
||||
export_function_unclosed_space = """
|
||||
do_compile () {
|
||||
bb.note("Something")
|
||||
}
|
||||
"""
|
||||
export_function_residue = """
|
||||
do_compile () {
|
||||
bb.note("Something")
|
||||
}
|
||||
|
||||
include \\
|
||||
"""
|
||||
|
||||
def test_unclosed_functions(self):
|
||||
def test_helper(content, expected_error):
|
||||
with tempfile.TemporaryDirectory() as tempdir:
|
||||
recipename = tempdir + "/recipe_unclosed.bb"
|
||||
with open(recipename, "w") as f:
|
||||
f.write(content)
|
||||
f.flush()
|
||||
os.chdir(tempdir)
|
||||
with self.assertRaises(bb.parse.ParseError) as error:
|
||||
bb.parse.handle(recipename, bb.data.createCopy(self.d))
|
||||
self.assertIn(expected_error, str(error.exception))
|
||||
|
||||
with tempfile.TemporaryDirectory() as tempdir:
|
||||
test_helper(self.export_function_unclosed_tab, "Unparsed lines from unclosed function")
|
||||
test_helper(self.export_function_unclosed_space, "Unparsed lines from unclosed function")
|
||||
test_helper(self.export_function_residue, "Unparsed lines")
|
||||
|
||||
recipename_closed = tempdir + "/recipe_closed.bb"
|
||||
with open(recipename_closed, "w") as in_file:
|
||||
lines = self.export_function_unclosed_tab.split("\n")
|
||||
lines[3] = "}"
|
||||
in_file.write("\n".join(lines))
|
||||
in_file.flush()
|
||||
bb.parse.handle(recipename_closed, bb.data.createCopy(self.d))
|
||||
|
||||
special_character_assignment = """
|
||||
A+="a"
|
||||
A+ = "b"
|
||||
+ = "c"
|
||||
"""
|
||||
ambigous_assignment = """
|
||||
+= "d"
|
||||
"""
|
||||
def test_parse_special_character_assignment(self):
|
||||
f = self.parsehelper(self.special_character_assignment)
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
self.assertEqual(d.getVar("A"), " a")
|
||||
self.assertEqual(d.getVar("A+"), "b")
|
||||
self.assertEqual(d.getVar("+"), "c")
|
||||
|
||||
f = self.parsehelper(self.ambigous_assignment)
|
||||
with self.assertRaises(bb.parse.ParseError) as error:
|
||||
bb.parse.handle(f.name, self.d)
|
||||
self.assertIn("Empty variable name in assignment", str(error.exception))
|
||||
|
||||
129
bitbake/lib/bb/tests/persist_data.py
Normal file
@@ -0,0 +1,129 @@
|
||||
#
|
||||
# BitBake Test for lib/bb/persist_data/
|
||||
#
|
||||
# Copyright (C) 2018 Garmin Ltd.
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import unittest
|
||||
import bb.data
|
||||
import bb.persist_data
|
||||
import tempfile
|
||||
import threading
|
||||
|
||||
class PersistDataTest(unittest.TestCase):
|
||||
def _create_data(self):
|
||||
return bb.persist_data.persist('TEST_PERSIST_DATA', self.d)
|
||||
|
||||
def setUp(self):
|
||||
self.d = bb.data.init()
|
||||
self.tempdir = tempfile.TemporaryDirectory()
|
||||
self.d['PERSISTENT_DIR'] = self.tempdir.name
|
||||
self.data = self._create_data()
|
||||
self.items = {
|
||||
'A1': '1',
|
||||
'B1': '2',
|
||||
'C2': '3'
|
||||
}
|
||||
self.stress_count = 10000
|
||||
self.thread_count = 5
|
||||
|
||||
for k,v in self.items.items():
|
||||
self.data[k] = v
|
||||
|
||||
def tearDown(self):
|
||||
self.tempdir.cleanup()
|
||||
|
||||
def _iter_helper(self, seen, iterator):
|
||||
with iter(iterator):
|
||||
for v in iterator:
|
||||
self.assertTrue(v in seen)
|
||||
seen.remove(v)
|
||||
self.assertEqual(len(seen), 0, '%s not seen' % seen)
|
||||
|
||||
def test_get(self):
|
||||
for k, v in self.items.items():
|
||||
self.assertEqual(self.data[k], v)
|
||||
|
||||
self.assertIsNone(self.data.get('D'))
|
||||
with self.assertRaises(KeyError):
|
||||
self.data['D']
|
||||
|
||||
def test_set(self):
|
||||
for k, v in self.items.items():
|
||||
self.data[k] += '-foo'
|
||||
|
||||
for k, v in self.items.items():
|
||||
self.assertEqual(self.data[k], v + '-foo')
|
||||
|
||||
def test_delete(self):
|
||||
self.data['D'] = '4'
|
||||
self.assertEqual(self.data['D'], '4')
|
||||
del self.data['D']
|
||||
self.assertIsNone(self.data.get('D'))
|
||||
with self.assertRaises(KeyError):
|
||||
self.data['D']
|
||||
|
||||
def test_contains(self):
|
||||
for k in self.items:
|
||||
self.assertTrue(k in self.data)
|
||||
self.assertTrue(self.data.has_key(k))
|
||||
self.assertFalse('NotFound' in self.data)
|
||||
self.assertFalse(self.data.has_key('NotFound'))
|
||||
|
||||
def test_len(self):
|
||||
self.assertEqual(len(self.data), len(self.items))
|
||||
|
||||
def test_iter(self):
|
||||
self._iter_helper(set(self.items.keys()), self.data)
|
||||
|
||||
def test_itervalues(self):
|
||||
self._iter_helper(set(self.items.values()), self.data.itervalues())
|
||||
|
||||
def test_iteritems(self):
|
||||
self._iter_helper(set(self.items.items()), self.data.iteritems())
|
||||
|
||||
def test_get_by_pattern(self):
|
||||
self._iter_helper({'1', '2'}, self.data.get_by_pattern('_1'))
|
||||
|
||||
def _stress_read(self, data):
|
||||
for i in range(self.stress_count):
|
||||
for k in self.items:
|
||||
data[k]
|
||||
|
||||
def _stress_write(self, data):
|
||||
for i in range(self.stress_count):
|
||||
for k, v in self.items.items():
|
||||
data[k] = v + str(i)
|
||||
|
||||
def _validate_stress(self):
|
||||
for k, v in self.items.items():
|
||||
self.assertEqual(self.data[k], v + str(self.stress_count - 1))
|
||||
|
||||
def test_stress(self):
|
||||
self._stress_read(self.data)
|
||||
self._stress_write(self.data)
|
||||
self._validate_stress()
|
||||
|
||||
def test_stress_threads(self):
|
||||
def read_thread():
|
||||
data = self._create_data()
|
||||
self._stress_read(data)
|
||||
|
||||
def write_thread():
|
||||
data = self._create_data()
|
||||
self._stress_write(data)
|
||||
|
||||
threads = []
|
||||
for i in range(self.thread_count):
|
||||
threads.append(threading.Thread(target=read_thread))
|
||||
threads.append(threading.Thread(target=write_thread))
|
||||
|
||||
for t in threads:
|
||||
t.start()
|
||||
self._stress_read(self.data)
|
||||
for t in threads:
|
||||
t.join()
|
||||
self._validate_stress()
|
||||
|
||||
@@ -9,7 +9,7 @@ def stamptask(d):
    with open(stampname, "a+") as f:
        f.write(d.getVar("BB_UNIHASH") + "\n")

    if d.getVar("BB_CURRENT_MC") != "":
    if d.getVar("BB_CURRENT_MC") != "default":
        thistask = d.expand("${BB_CURRENT_MC}:${PN}:${BB_CURRENTTASK}")
        if thistask in d.getVar("SLOWTASKS").split():
            bb.note("Slowing task %s" % thistask)

@@ -1,2 +0,0 @@
|
||||
do_build[mcdepends] = "mc::mc-1:h1:do_invalid"
|
||||
|
||||
@@ -26,7 +26,7 @@ class RunQueueTests(unittest.TestCase):
|
||||
a1_sstatevalid = "a1:do_package a1:do_package_qa a1:do_packagedata a1:do_package_write_ipk a1:do_package_write_rpm a1:do_populate_lic a1:do_populate_sysroot"
|
||||
b1_sstatevalid = "b1:do_package b1:do_package_qa b1:do_packagedata b1:do_package_write_ipk b1:do_package_write_rpm b1:do_populate_lic b1:do_populate_sysroot"
|
||||
|
||||
def run_bitbakecmd(self, cmd, builddir, sstatevalid="", slowtasks="", extraenv=None, cleanup=False, allowfailure=False):
|
||||
def run_bitbakecmd(self, cmd, builddir, sstatevalid="", slowtasks="", extraenv=None, cleanup=False):
|
||||
env = os.environ.copy()
|
||||
env["BBPATH"] = os.path.realpath(os.path.join(os.path.dirname(__file__), "runqueue-tests"))
|
||||
env["BB_ENV_PASSTHROUGH_ADDITIONS"] = "SSTATEVALID SLOWTASKS TOPDIR"
|
||||
@@ -41,8 +41,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
output = subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT,universal_newlines=True, cwd=builddir)
|
||||
print(output)
|
||||
except subprocess.CalledProcessError as e:
|
||||
if allowfailure:
|
||||
return e.output
|
||||
self.fail("Command %s failed with %s" % (cmd, e.output))
|
||||
tasks = []
|
||||
tasklog = builddir + "/task.log"
|
||||
@@ -316,13 +314,6 @@ class RunQueueTests(unittest.TestCase):
|
||||
["mc_2:a1:%s" % t for t in rerun_tasks]
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
# Check that a multiconfig that doesn't exist rasies a correct error message
|
||||
error_output = self.run_bitbakecmd(["bitbake", "g1"], tempdir, "", extraenv=extraenv, cleanup=True, allowfailure=True)
|
||||
self.assertIn("non-existent task", error_output)
|
||||
# If the word 'Traceback' or 'KeyError' is in the output we've regressed
|
||||
self.assertNotIn("Traceback", error_output)
|
||||
self.assertNotIn("KeyError", error_output)
|
||||
|
||||
self.shutdown(tempdir)
|
||||
|
||||
def test_hashserv_single(self):
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.