Mirror of https://git.yoctoproject.org/poky (synced 2026-02-16 05:33:03 +01:00)

Compare commits: yocto-3.4. ... yocto-3.2. (94 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 943ef2fad8 | |
| | 76dac9d657 | |
| | 333f24caec | |
| | e5bd9b93b4 | |
| | a4ff9dd2dc | |
| | 2d3224bf20 | |
| | e6f6420d98 | |
| | f0b8b3a960 | |
| | fef73fcd3a | |
| | d12e2d67c9 | |
| | eeb98ec6ae | |
| | 3f2bc0a2e1 | |
| | cbd023e0db | |
| | 307146220b | |
| | d754cd3a49 | |
| | 3d5309b736 | |
| | 369b6e0192 | |
| | e03e489758 | |
| | 321e17803e | |
| | 086ed4af2a | |
| | 67ff1d9ffb | |
| | 8de9b33e14 | |
| | afe59c8e1d | |
| | f6434fde67 | |
| | e46465c718 | |
| | e4156f232b | |
| | bfa254bd1a | |
| | 4315a12330 | |
| | 9b58e1d1a8 | |
| | f4ff33fd11 | |
| | f9f50c5638 | |
| | 23eef02eff | |
| | bef1f4761e | |
| | 8b9bdf1d1e | |
| | 1a4b81a392 | |
| | c111b692cc | |
| | 701e43727a | |
| | dedca9ecb7 | |
| | d890775c90 | |
| | fd3e68b355 | |
| | 678eafa74d | |
| | c2014927f2 | |
| | c5b7872dab | |
| | 2691a54e91 | |
| | e2de476001 | |
| | 45c8a7e583 | |
| | 4d2fd8ddd3 | |
| | ea0af53e2a | |
| | 2d342da2a3 | |
| | f1b304df93 | |
| | b569f2a414 | |
| | 411f541288 | |
| | 83477f0280 | |
| | 7e7893983f | |
| | e3a67d60cc | |
| | 23a0428069 | |
| | b74901b816 | |
| | 010625f35a | |
| | 0647439a0a | |
| | 87a05c7316 | |
| | 5c33ee311c | |
| | 3ad92d4d09 | |
| | 5e5a7fd73d | |
| | 3269613984 | |
| | b955cbdcfb | |
| | 58e47e1b70 | |
| | bb0524e189 | |
| | 7d58c8bed6 | |
| | 5232b03e22 | |
| | e2312cd887 | |
| | f552970178 | |
| | d59e28ea73 | |
| | 61642ef429 | |
| | 7f6f1519b9 | |
| | 528de6bc4f | |
| | 0ccf16fab3 | |
| | 4e513e2b86 | |
| | 1272d1b8fc | |
| | 686396e3dc | |
| | 2fa7fde32f | |
| | 72050b72e2 | |
| | 2fa97151cd | |
| | e67a7af07c | |
| | 2306702899 | |
| | f652c4d1b8 | |
| | ca1ed50ab3 | |
| | 46db037b1f | |
| | 70761072f5 | |
| | efa68c6490 | |
| | 3daa976efb | |
| | 4d35e4b168 | |
| | dff89518bd | |
| | cdae385f7d | |
| | b7a7dde44a | |
3 .gitignore (vendored)

@@ -30,5 +30,4 @@ hob-image-*.bb
pull-*/
bitbake/lib/toaster/contrib/tts/backlog.txt
bitbake/lib/toaster/contrib/tts/log/*
bitbake/lib/toaster/contrib/tts/.cache/*
bitbake/lib/bb/tests/runqueue-tests/bitbake-cookerdaemon.log
bitbake/lib/toaster/contrib/tts/.cache/*
@@ -1,72 +0,0 @@
OpenEmbedded-Core and Yocto Project Maintainer Information
==========================================================

OpenEmbedded and Yocto Project work jointly together to maintain the metadata,
layers, tools and sub-projects that make up their ecosystems.

The projects operate through collaborative development. This currently takes
place on mailing lists for many components as the "pull request on github"
workflow works well for single or small numbers of maintainers but we have
a large number, all with different specialisms and benefit from the mailing
list review process. Changes therefore undergo peer review through mailing
lists in many cases.

This file aims to acknowledge people with specific skills/knowledge/interest
both to recognise their contributions but also empower them to help lead and
curate those components. Where we have people with specialist knowledge in
particular areas, during review patches/feedback from these people in these
areas would generally carry weight.

This file is maintained in OE-Core but may refer to components that are separate
to it if that makes sense in the context of maintainership. The README of specific
layers and components should ultimately be definitive about the patch process and
maintainership for the component.

Recipe Maintainers
------------------

See meta/conf/distro/include/maintainers.inc

Component/Subsystem Maintainers
-------------------------------

* Kernel (inc. linux-yocto, perf): Bruce Ashfield
* Reproducible Builds: Joshua Watt
* Toaster: David Reyna
* Hash-Equivalence: Joshua Watt
* Recipe upgrade infrastructure: Alex Kanavin
* Toolchain: Khem Raj
* ptest-runner: Aníbal Limón
* opkg: Alex Stewart
* devtool: Saul Wold
* eSDK: Saul Wold
* overlayfs: Vyacheslav Yurkov

Maintainers needed
------------------

* Pseudo
* Layer Index
* recipetool
* QA framework/automated testing
* error reporting system/web UI
* wic
* Patchwork
* Patchtest
* Prelink-cross
* Matchbox
* Sato
* Autobuilder

Layer Maintainers needed
------------------------

* meta-gplv2 (ideally new strategy but active maintainer welcome)

Shadow maintainers/development needed
--------------------------------------

* toaster
* bitbake
29 README.OE-Core (normal file)

@@ -0,0 +1,29 @@
OpenEmbedded-Core
=================

OpenEmbedded-Core is a layer containing the core metadata for current versions
of OpenEmbedded. It is distro-less (can build a functional image with
DISTRO = "nodistro") and contains only emulated machine support.

For information about OpenEmbedded, see the OpenEmbedded website:
http://www.openembedded.org/

The Yocto Project has extensive documentation about OE including a reference manual
which can be found at:
http://yoctoproject.org/documentation


Contributing
------------

Please refer to
http://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
for guidelines on how to submit patches.

Mailing list:

http://lists.openembedded.org/mailman/listinfo/openembedded-core

Source code:

http://git.openembedded.org/openembedded-core/

@@ -1,29 +0,0 @@
OpenEmbedded-Core
=================

OpenEmbedded-Core is a layer containing the core metadata for current versions
of OpenEmbedded. It is distro-less (can build a functional image with
DISTRO = "nodistro") and contains only emulated machine support.

For information about OpenEmbedded, see the OpenEmbedded website:
https://www.openembedded.org/

The Yocto Project has extensive documentation about OE including a reference manual
which can be found at:
https://docs.yoctoproject.org/


Contributing
------------

Please refer to
https://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
for guidelines on how to submit patches.

Mailing list:

https://lists.openembedded.org/g/openembedded-core

Source code:

https://git.openembedded.org/openembedded-core/
1 README.hardware (symbolic link)

@@ -0,0 +1 @@
meta-yocto-bsp/README.hardware
@@ -1 +0,0 @@
meta-yocto-bsp/README.hardware.md
1 README.poky (symbolic link)

@@ -0,0 +1 @@
meta-poky/README.poky
@@ -1 +0,0 @@
meta-poky/README.poky.md
@@ -7,17 +7,17 @@ One of BitBake's main users, OpenEmbedded, takes this core and builds embedded L
stacks using a task-oriented approach.

For information about Bitbake, see the OpenEmbedded website:
https://www.openembedded.org/
http://www.openembedded.org/

Bitbake plain documentation can be found under the doc directory or its integrated
html version at the Yocto Project website:
https://docs.yoctoproject.org
http://yoctoproject.org/documentation

Contributing
------------

Please refer to
https://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
http://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
for guidelines on how to submit patches, just note that the latter documentation is intended
for OpenEmbedded (and its core) not bitbake patches (bitbake-devel@lists.openembedded.org)
but in general main guidelines apply. Once the commit(s) have been created, the way to send

@@ -28,16 +28,8 @@ branch, type:

Mailing list:

https://lists.openembedded.org/g/bitbake-devel
http://lists.openembedded.org/mailman/listinfo/bitbake-devel

Source code:

https://git.openembedded.org/bitbake/

Testing:

Bitbake has a testsuite located in lib/bb/tests/ whichs aim to try and prevent regressions.
You can run this with "bitbake-selftest". In particular the fetcher is well covered since
it has so many corner cases. The datastore has many tests too. Testing with the testsuite is
recommended before submitting patches, particularly to the fetcher and datastore. We also
appreciate new test cases and may require them for more obscure issues.
http://git.openembedded.org/bitbake/
@@ -12,8 +12,6 @@

import os
import sys
import warnings
warnings.simplefilter("default")

sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),
'lib'))

@@ -28,7 +26,7 @@ from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException
if sys.getfilesystemencoding() != "utf-8":
sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")

__version__ = "1.52.0"
__version__ = "1.48.0"

if __name__ == "__main__":
if __version__ != bb.__version__:
@@ -11,7 +11,6 @@
import os
import sys
import warnings
warnings.simplefilter("default")
import argparse
import logging
import pickle
@@ -1,50 +0,0 @@
#! /usr/bin/env python3
#
# Copyright (C) 2021 Richard Purdie
#
# SPDX-License-Identifier: GPL-2.0-only
#

import argparse
import io
import os
import sys
import warnings
warnings.simplefilter("default")

bindir = os.path.dirname(__file__)
topdir = os.path.dirname(bindir)
sys.path[0:0] = [os.path.join(topdir, 'lib')]

import bb.tinfoil

if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Bitbake Query Variable")
parser.add_argument("variable", help="variable name to query")
parser.add_argument("-r", "--recipe", help="Recipe name to query", default=None, required=False)
parser.add_argument('-u', '--unexpand', help='Do not expand the value (with --value)', action="store_true")
parser.add_argument('-f', '--flag', help='Specify a variable flag to query (with --value)', default=None)
parser.add_argument('--value', help='Only report the value, no history and no variable name', action="store_true")
args = parser.parse_args()

if args.unexpand and not args.value:
print("--unexpand only makes sense with --value")
sys.exit(1)

if args.flag and not args.value:
print("--flag only makes sense with --value")
sys.exit(1)

with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
if args.recipe:
tinfoil.prepare(quiet=2)
d = tinfoil.parse_recipe(args.recipe)
else:
tinfoil.prepare(quiet=2, config_only=True)
d = tinfoil.config_data
if args.flag:
print(str(d.getVarFlag(args.variable, args.flag, expand=(not args.unexpand))))
elif args.value:
print(str(d.getVar(args.variable, expand=(not args.unexpand))))
else:
bb.data.emit_var(args.variable, d=d, all=True)
@@ -13,8 +13,6 @@ import pprint
import sys
import threading
import time
import warnings
warnings.simplefilter("default")

try:
import tqdm

@@ -153,6 +151,9 @@ def main():
func = getattr(args, 'func', None)
if func:
client = hashserv.create_client(args.address)
# Try to establish a connection to the server now to detect failures
# early
client.connect()

return func(args, client)
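The hunk above shows the pattern the command-line client follows: create a client for a server address, then connect immediately so a bad address fails early rather than on the first command. A minimal sketch of the same idea, assuming BitBake's ``lib/`` directory is on ``sys.path`` and using a placeholder address::

    import sys
    import hashserv

    ADDRESS = "localhost:8686"  # placeholder, not taken from the diff

    def make_client(address=ADDRESS):
        client = hashserv.create_client(address)   # same call used by bitbake-hashclient
        try:
            client.connect()                       # fail fast on connection problems
        except OSError as exc:
            sys.exit("cannot reach hash equivalence server %s: %s" % (address, exc))
        return client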
@@ -10,8 +10,6 @@ import sys
import logging
import argparse
import sqlite3
import warnings
warnings.simplefilter("default")

sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib'))

@@ -32,11 +30,9 @@ def main():
"--bind [::1]:8686"'''
)

parser.add_argument('-b', '--bind', default=DEFAULT_BIND, help='Bind address (default "%(default)s")')
parser.add_argument('-d', '--database', default='./hashserv.db', help='Database file (default "%(default)s")')
parser.add_argument('-l', '--log', default='WARNING', help='Set logging level')
parser.add_argument('-u', '--upstream', help='Upstream hashserv to pull hashes from')
parser.add_argument('-r', '--read-only', action='store_true', help='Disallow write operations from clients')
parser.add_argument('--bind', default=DEFAULT_BIND, help='Bind address (default "%(default)s")')
parser.add_argument('--database', default='./hashserv.db', help='Database file (default "%(default)s")')
parser.add_argument('--log', default='WARNING', help='Set logging level')

args = parser.parse_args()

@@ -51,7 +47,7 @@ def main():
console.setLevel(level)
logger.addHandler(console)

server = hashserv.create_server(args.bind, args.database, upstream=args.upstream, read_only=args.read_only)
server = hashserv.create_server(args.bind, args.database)
server.serve_forever()
return 0
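The server side gains the matching options: ``--upstream`` and ``--read-only`` are forwarded straight to ``hashserv.create_server()``. A minimal sketch of what that enables, a local read-only mirror that pulls unknown hashes from an upstream server, with placeholder values::

    import hashserv

    BIND = "[::1]:8686"                     # placeholder bind address
    DATABASE = "./hashserv.db"              # placeholder database path
    UPSTREAM = "hashserv.example.com:8686"  # placeholder upstream server

    server = hashserv.create_server(BIND, DATABASE,
                                    upstream=UPSTREAM,  # pull missing hashes from upstream
                                    read_only=True)     # reject client writes
    server.serve_forever()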
@@ -14,8 +14,6 @@ import logging
import os
import sys
import argparse
import warnings
warnings.simplefilter("default")

bindir = os.path.dirname(__file__)
topdir = os.path.dirname(bindir)
@@ -6,8 +6,6 @@
import os
import sys,logging
import optparse
import warnings
warnings.simplefilter("default")

sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),'lib'))

@@ -38,14 +36,12 @@ def main():
dest="host", type="string", default=PRHOST_DEFAULT)
parser.add_option("--port", help="port number(default: 8585)", action="store",
dest="port", type="int", default=PRPORT_DEFAULT)
parser.add_option("-r", "--read-only", help="open database in read-only mode",
action="store_true")

options, args = parser.parse_args(sys.argv)
prserv.init_logger(os.path.abspath(options.logfile),options.loglevel)

if options.start:
ret=prserv.serv.start_daemon(options.dbfile, options.host, options.port,os.path.abspath(options.logfile), options.read_only)
ret=prserv.serv.start_daemon(options.dbfile, options.host, options.port,os.path.abspath(options.logfile))
elif options.stop:
ret=prserv.serv.stop_daemon(options.host, options.port)
else:
@@ -7,8 +7,6 @@

import os
import sys, logging
import warnings
warnings.simplefilter("default")
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib'))

import unittest

@@ -31,7 +29,6 @@ tests = ["bb.tests.codeparser",
"bb.tests.runqueue",
"bb.tests.siggen",
"bb.tests.utils",
"bb.tests.compression",
"hashserv.tests",
"layerindexlib.tests.layerindexobj",
"layerindexlib.tests.restapi",
@@ -8,7 +8,6 @@
import os
import sys
import warnings
warnings.simplefilter("default")
import logging
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))

@@ -27,10 +26,12 @@ readypipeinfd = int(sys.argv[3])
logfile = sys.argv[4]
lockname = sys.argv[5]
sockname = sys.argv[6]
timeout = float(sys.argv[7])
timeout = sys.argv[7]
xmlrpcinterface = (sys.argv[8], int(sys.argv[9]))
if xmlrpcinterface[0] == "None":
xmlrpcinterface = (None, xmlrpcinterface[1])
if timeout == "None":
timeout = None

# Replace standard fds with our own
with open('/dev/null', 'r') as si:
@@ -6,7 +6,6 @@
import os
import sys
import warnings
warnings.simplefilter("default")
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
from bb import fetch2
import logging

@@ -17,8 +16,6 @@ import signal
import pickle
import traceback
import queue
import shlex
import subprocess
from multiprocessing import Lock
from threading import Thread

@@ -121,9 +118,7 @@ def worker_child_fire(event, d):
data = b"<event>" + pickle.dumps(event) + b"</event>"
try:
worker_pipe_lock.acquire()
while(len(data)):
written = worker_pipe.write(data)
data = data[written:]
worker_pipe.write(data)
worker_pipe_lock.release()
except IOError:
sigterm_handler(None, None)

@@ -148,27 +143,21 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskha
# a fork() or exec*() activates PSEUDO...

envbackup = {}
fakeroot = False
fakeenv = {}
umask = None

taskdep = workerdata["taskdeps"][fn]
if 'umask' in taskdep and taskname in taskdep['umask']:
umask = taskdep['umask'][taskname]
elif workerdata["umask"]:
umask = workerdata["umask"]
if umask:
# umask might come in as a number or text string..
try:
umask = int(umask, 8)
umask = int(taskdep['umask'][taskname],8)
except TypeError:
pass
umask = taskdep['umask'][taskname]

dry_run = cfg.dry_run or dry_run_exec

# We can't use the fakeroot environment in a dry run as it possibly hasn't been built
if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not dry_run:
fakeroot = True
envvars = (workerdata["fakerootenv"][fn] or "").split()
for key, value in (var.split('=') for var in envvars):
envbackup[key] = os.environ.get(key)

@@ -178,7 +167,7 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskha
fakedirs = (workerdata["fakerootdirs"][fn] or "").split()
for p in fakedirs:
bb.utils.mkdirhier(p)
logger.debug2('Running %s:%s under fakeroot, fakedirs: %s' %
logger.debug(2, 'Running %s:%s under fakeroot, fakedirs: %s' %
(fn, taskname, ', '.join(fakedirs)))
else:
envvars = (workerdata["fakerootnoenv"][fn] or "").split()

@@ -287,13 +276,7 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskha
try:
if dry_run:
return 0
try:
ret = bb.build.exec_task(fn, taskname, the_data, cfg.profile)
finally:
if fakeroot:
fakerootcmd = shlex.split(the_data.getVar("FAKEROOTCMD"))
subprocess.run(fakerootcmd + ['-S'], check=True, stdout=subprocess.PIPE)
return ret
return bb.build.exec_task(fn, taskname, the_data, cfg.profile)
except:
os._exit(1)
if not profiling:

@@ -338,9 +321,7 @@ class runQueueWorkerPipe():
end = len(self.queue)
index = self.queue.find(b"</event>")
while index != -1:
msg = self.queue[:index+8]
assert msg.startswith(b"<event>") and msg.count(b"<event>") == 1
worker_fire_prepickled(msg)
worker_fire_prepickled(self.queue[:index+8])
self.queue = self.queue[index+8:]
index = self.queue.find(b"</event>")
return (end > start)

@@ -417,11 +398,7 @@ class BitbakeWorker(object):
if self.queue.startswith(b"<" + item + b">"):
index = self.queue.find(b"</" + item + b">")
while index != -1:
try:
func(self.queue[(len(item) + 2):index])
except pickle.UnpicklingError:
workerlog_write("Unable to unpickle data: %s\n" % ":".join("{:02x}".format(c) for c in self.queue))
raise
func(self.queue[(len(item) + 2):index])
self.queue = self.queue[(index + len(item) + 3):]
index = self.queue.find(b"</" + item + b">")

@@ -528,11 +505,9 @@ except BaseException as e:
import traceback
sys.stderr.write(traceback.format_exc())
sys.stderr.write(str(e))
finally:
worker_thread_exit = True
worker_thread.join()

workerlog_write("exiting")
if not normalexit:
sys.exit(1)
worker_thread_exit = True
worker_thread.join()

workerlog_write("exitting")
sys.exit(0)
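Both ends of the worker pipe rely on the framing visible in these hunks: each pickled event is wrapped in ``<event>``/``</event>`` byte markers and the reader slices the buffer at ``index + 8``, the length of the closing marker. A standalone sketch of that framing using only the standard library::

    import pickle

    START, END = b"<event>", b"</event>"

    def frame(event):
        # Same wrapping as worker_child_fire() above.
        return START + pickle.dumps(event) + END

    def extract(buffer):
        # Split out complete frames; whatever remains is an incomplete frame
        # that has to wait for more data from the pipe.
        events = []
        index = buffer.find(END)
        while index != -1:
            msg = buffer[:index + len(END)]
            assert msg.startswith(START) and msg.count(START) == 1
            events.append(pickle.loads(msg[len(START):index]))
            buffer = buffer[index + len(END):]
            index = buffer.find(END)
        return events, buffer

    data = frame({"task": "do_compile"}) + frame({"task": "do_install"})
    print(extract(data))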
@@ -16,8 +16,6 @@ import itertools
import os
import subprocess
import sys
import warnings
warnings.simplefilter("default")

version = 1.0

@@ -19,8 +19,6 @@ import sys
import json
import pickle
import codecs
import warnings
warnings.simplefilter("default")

from collections import namedtuple
@@ -1,19 +0,0 @@
# SPDX-License-Identifier: MIT
#
# Copyright (c) 2021 Joshua Watt <JPEWhacker@gmail.com>
#
# Dockerfile to build a bitbake hash equivalence server container
#
# From the root of the bitbake repository, run:
#
# docker build -f contrib/hashserv/Dockerfile .
#

FROM alpine:3.13.1

RUN apk add --no-cache python3

COPY bin/bitbake-hashserv /opt/bbhashserv/bin/
COPY lib/hashserv /opt/bbhashserv/lib/hashserv/

ENTRYPOINT ["/opt/bbhashserv/bin/bitbake-hashserv"]
@@ -20,7 +20,7 @@ fun! NewBBAppendTemplate()
set nopaste

" New bbappend template
0 put ='FILESEXTRAPATHS:prepend := \"${THISDIR}/${PN}:\"'
0 put ='FILESEXTRAPATHS_prepend := \"${THISDIR}/${PN}:\"'
2

if paste == 1
@@ -51,9 +51,9 @@ syn region bbString matchgroup=bbQuote start=+'+ skip=+\\$+ end=+'+
syn match bbExport "^export" nextgroup=bbIdentifier skipwhite
syn keyword bbExportFlag export contained nextgroup=bbIdentifier skipwhite
syn match bbIdentifier "[a-zA-Z0-9\-_\.\/\+]\+" display contained
syn match bbVarDeref "${[a-zA-Z0-9\-_:\.\/\+]\+}" contained
syn match bbVarDeref "${[a-zA-Z0-9\-_\.\/\+]\+}" contained
syn match bbVarEq "\(:=\|+=\|=+\|\.=\|=\.\|?=\|??=\|=\)" contained nextgroup=bbVarValue
syn match bbVarDef "^\(export\s*\)\?\([a-zA-Z0-9\-_\.\/\+][${}a-zA-Z0-9\-_:\.\/\+]*\)\s*\(:=\|+=\|=+\|\.=\|=\.\|?=\|??=\|=\)\@=" contains=bbExportFlag,bbIdentifier,bbOverrideOperator,bbVarDeref nextgroup=bbVarEq
syn match bbVarDef "^\(export\s*\)\?\([a-zA-Z0-9\-_\.\/\+]\+\(_[${}a-zA-Z0-9\-_\.\/\+]\+\)\?\)\s*\(:=\|+=\|=+\|\.=\|=\.\|?=\|??=\|=\)\@=" contains=bbExportFlag,bbIdentifier,bbVarDeref nextgroup=bbVarEq
syn match bbVarValue ".*$" contained contains=bbString,bbVarDeref,bbVarPyValue
syn region bbVarPyValue start=+${@+ skip=+\\$+ end=+}+ contained contains=@python

@@ -77,15 +77,13 @@ syn keyword bbOEFunctions do_fetch do_unpack do_patch do_configure do_comp
" Generic Functions
syn match bbFunction "\h[0-9A-Za-z_\-\.]*" display contained contains=bbOEFunctions

syn keyword bbOverrideOperator append prepend remove contained

" BitBake shell metadata
syn include @shell syntax/sh.vim
if exists("b:current_syntax")
unlet b:current_syntax
endif
syn keyword bbShFakeRootFlag fakeroot contained
syn match bbShFuncDef "^\(fakeroot\s*\)\?\([\.0-9A-Za-z_:${}\-\.]\+\)\(python\)\@<!\(\s*()\s*\)\({\)\@=" contains=bbShFakeRootFlag,bbFunction,bbOverrideOperator,bbVarDeref,bbDelimiter nextgroup=bbShFuncRegion skipwhite
syn match bbShFuncDef "^\(fakeroot\s*\)\?\([\.0-9A-Za-z_${}\-\.]\+\)\(python\)\@<!\(\s*()\s*\)\({\)\@=" contains=bbShFakeRootFlag,bbFunction,bbVarDeref,bbDelimiter nextgroup=bbShFuncRegion skipwhite
syn region bbShFuncRegion matchgroup=bbDelimiter start="{\s*$" end="^}\s*$" contained contains=@shell

" Python value inside shell functions

@@ -93,7 +91,7 @@ syn region shDeref start=+${@+ skip=+\\$+ excludenl end=+}+ contained co

" BitBake python metadata
syn keyword bbPyFlag python contained
syn match bbPyFuncDef "^\(fakeroot\s*\)\?\(python\)\(\s\+[0-9A-Za-z_:${}\-\.]\+\)\?\(\s*()\s*\)\({\)\@=" contains=bbShFakeRootFlag,bbPyFlag,bbFunction,bbOverrideOperator,bbVarDeref,bbDelimiter nextgroup=bbPyFuncRegion skipwhite
syn match bbPyFuncDef "^\(fakeroot\s*\)\?\(python\)\(\s\+[0-9A-Za-z_${}\-\.]\+\)\?\(\s*()\s*\)\({\)\@=" contains=bbShFakeRootFlag,bbPyFlag,bbFunction,bbVarDeref,bbDelimiter nextgroup=bbPyFuncRegion skipwhite
syn region bbPyFuncRegion matchgroup=bbDelimiter start="{\s*$" end="^}\s*$" contained contains=@python

" BitBake 'def'd python functions

@@ -124,6 +122,5 @@ hi def link bbStatement Statement
hi def link bbStatementRest Identifier
hi def link bbOEFunctions Special
hi def link bbVarPyValue PreProc
hi def link bbOverrideOperator Operator

let b:current_syntax = "bb"
@@ -3,7 +3,7 @@

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?= -W --keep-going -j auto
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = _build
@@ -13,7 +13,7 @@ Folders exist for individual manuals as follows:
Each folder is self-contained regarding content and figures.

If you want to find HTML versions of the BitBake manuals on the web,
go to https://www.openembedded.org/wiki/Documentation.
go to http://www.openembedded.org/wiki/Documentation.

Sphinx
======
@@ -16,7 +16,7 @@ data, or simply return information about the execution environment.
|
||||
|
||||
This chapter describes BitBake's execution process from start to finish
|
||||
when you use it to create an image. The execution process is launched
|
||||
using the following command form::
|
||||
using the following command form: ::
|
||||
|
||||
$ bitbake target
|
||||
|
||||
@@ -32,7 +32,7 @@ the BitBake command and its options, see ":ref:`The BitBake Command
|
||||
your project's ``local.conf`` configuration file.
|
||||
|
||||
A common method to determine this value for your build host is to run
|
||||
the following::
|
||||
the following: ::
|
||||
|
||||
$ grep processor /proc/cpuinfo
|
||||
|
||||
@@ -40,7 +40,7 @@ the BitBake command and its options, see ":ref:`The BitBake Command
|
||||
the number of processors, which takes into account hyper-threading.
|
||||
Thus, a quad-core build host with hyper-threading most likely shows
|
||||
eight processors, which is the value you would then assign to
|
||||
:term:`BB_NUMBER_THREADS`.
|
||||
``BB_NUMBER_THREADS``.
|
||||
|
||||
A possibly simpler solution is that some Linux distributions (e.g.
|
||||
Debian and Ubuntu) provide the ``ncpus`` command.
|
||||
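For a quick cross-check of the processor count discussed above, Python's standard library reports the same number; this is an alternative to ``grep`` and ``ncpus``, not something the manual itself prescribes::

    import os

    # os.cpu_count() counts logical CPUs, i.e. hyper-threading is already
    # included, matching the "quad-core shows eight processors" example.
    print('BB_NUMBER_THREADS = "%d"' % os.cpu_count())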
@@ -65,13 +65,13 @@ data itself is of various types:
|
||||
|
||||
The ``layer.conf`` files are used to construct key variables such as
|
||||
:term:`BBPATH` and :term:`BBFILES`.
|
||||
:term:`BBPATH` is used to search for configuration and class files under the
|
||||
``conf`` and ``classes`` directories, respectively. :term:`BBFILES` is used
|
||||
``BBPATH`` is used to search for configuration and class files under the
|
||||
``conf`` and ``classes`` directories, respectively. ``BBFILES`` is used
|
||||
to locate both recipe and recipe append files (``.bb`` and
|
||||
``.bbappend``). If there is no ``bblayers.conf`` file, it is assumed the
|
||||
user has set the :term:`BBPATH` and :term:`BBFILES` directly in the environment.
|
||||
user has set the ``BBPATH`` and ``BBFILES`` directly in the environment.
|
||||
|
||||
Next, the ``bitbake.conf`` file is located using the :term:`BBPATH` variable
|
||||
Next, the ``bitbake.conf`` file is located using the ``BBPATH`` variable
|
||||
that was just constructed. The ``bitbake.conf`` file may also include
|
||||
other configuration files using the ``include`` or ``require``
|
||||
directives.
|
||||
@@ -104,7 +104,7 @@ BitBake first searches the current working directory for an optional
|
||||
contain a :term:`BBLAYERS` variable that is a
|
||||
space-delimited list of 'layer' directories. Recall that if BitBake
|
||||
cannot find a ``bblayers.conf`` file, then it is assumed the user has
|
||||
set the :term:`BBPATH` and :term:`BBFILES` variables directly in the
|
||||
set the ``BBPATH`` and ``BBFILES`` variables directly in the
|
||||
environment.
|
||||
|
||||
For each directory (layer) in this list, a ``conf/layer.conf`` file is
|
||||
@@ -114,7 +114,7 @@ files automatically set up :term:`BBPATH` and other
|
||||
variables correctly for a given build directory.
|
||||
|
||||
BitBake then expects to find the ``conf/bitbake.conf`` file somewhere in
|
||||
the user-specified :term:`BBPATH`. That configuration file generally has
|
||||
the user-specified ``BBPATH``. That configuration file generally has
|
||||
include directives to pull in any other metadata such as files specific
|
||||
to the architecture, the machine, the local environment, and so forth.
|
||||
|
||||
@@ -135,11 +135,11 @@ The ``base.bbclass`` file is always included. Other classes that are
|
||||
specified in the configuration using the
|
||||
:term:`INHERIT` variable are also included. BitBake
|
||||
searches for class files in a ``classes`` subdirectory under the paths
|
||||
in :term:`BBPATH` in the same way as configuration files.
|
||||
in ``BBPATH`` in the same way as configuration files.
|
||||
|
||||
A good way to get an idea of the configuration files and the class files
|
||||
used in your execution environment is to run the following BitBake
|
||||
command::
|
||||
command: ::
|
||||
|
||||
$ bitbake -e > mybb.log
|
||||
|
||||
@@ -155,7 +155,7 @@ execution environment.
|
||||
pair of curly braces in a shell function, the closing curly brace
|
||||
must not be located at the start of the line without leading spaces.
|
||||
|
||||
Here is an example that causes BitBake to produce a parsing error::
|
||||
Here is an example that causes BitBake to produce a parsing error: ::
|
||||
|
||||
fakeroot create_shar() {
|
||||
cat << "EOF" > ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
|
||||
@@ -184,13 +184,13 @@ Locating and Parsing Recipes
|
||||
During the configuration phase, BitBake will have set
|
||||
:term:`BBFILES`. BitBake now uses it to construct a
|
||||
list of recipes to parse, along with any append files (``.bbappend``) to
|
||||
apply. :term:`BBFILES` is a space-separated list of available files and
|
||||
supports wildcards. An example would be::
|
||||
apply. ``BBFILES`` is a space-separated list of available files and
|
||||
supports wildcards. An example would be: ::
|
||||
|
||||
BBFILES = "/path/to/bbfiles/*.bb /path/to/appends/*.bbappend"
|
||||
|
||||
BitBake parses each
|
||||
recipe and append file located with :term:`BBFILES` and stores the values of
|
||||
recipe and append file located with ``BBFILES`` and stores the values of
|
||||
various variables into the datastore.
|
||||
|
||||
.. note::
|
||||
@@ -201,18 +201,18 @@ For each file, a fresh copy of the base configuration is made, then the
|
||||
recipe is parsed line by line. Any inherit statements cause BitBake to
|
||||
find and then parse class files (``.bbclass``) using
|
||||
:term:`BBPATH` as the search path. Finally, BitBake
|
||||
parses in order any append files found in :term:`BBFILES`.
|
||||
parses in order any append files found in ``BBFILES``.
|
||||
|
||||
One common convention is to use the recipe filename to define pieces of
|
||||
metadata. For example, in ``bitbake.conf`` the recipe name and version
|
||||
are used to set the variables :term:`PN` and
|
||||
:term:`PV`::
|
||||
:term:`PV`: ::
|
||||
|
||||
PN = "${@bb.parse.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}"
|
||||
PV = "${@bb.parse.vars_from_file(d.getVar('FILE', False),d)[1] or '1.0'}"
|
||||
PN = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}"
|
||||
PV = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[1] or '1.0'}"
|
||||
|
||||
In this example, a recipe called "something_1.2.3.bb" would set
|
||||
:term:`PN` to "something" and :term:`PV` to "1.2.3".
|
||||
``PN`` to "something" and ``PV`` to "1.2.3".
|
||||
|
||||
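The underscore split is easy to reproduce outside BitBake; the helper below only illustrates the naming convention and is not the real ``bb.parse.vars_from_file()`` implementation::

    import os

    def pn_pv_from_filename(path):
        # "something_1.2.3.bb" -> ("something", "1.2.3"); files without an
        # underscore fall back to the documented defaults.
        stem = os.path.splitext(os.path.basename(path))[0]
        name, _, version = stem.partition("_")
        return name or "defaultpkgname", version or "1.0"

    print(pn_pv_from_filename("something_1.2.3.bb"))  # ('something', '1.2.3')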
By the time parsing is complete for a recipe, BitBake has a list of
|
||||
tasks that the recipe defines and a set of data consisting of keys and
|
||||
@@ -238,14 +238,13 @@ Recipe file collections exist to allow the user to have multiple
|
||||
repositories of ``.bb`` files that contain the same exact package. For
|
||||
example, one could easily use them to make one's own local copy of an
|
||||
upstream repository, but with custom modifications that one does not
|
||||
want upstream. Here is an example::
|
||||
want upstream. Here is an example: ::
|
||||
|
||||
BBFILES = "/stuff/openembedded/*/*.bb /stuff/openembedded.modified/*/*.bb"
|
||||
BBFILE_COLLECTIONS = "upstream local"
|
||||
BBFILE_PATTERN_upstream = "^/stuff/openembedded/"
|
||||
BBFILE_PATTERN_local = "^/stuff/openembedded.modified/"
|
||||
BBFILE_PRIORITY_upstream = "5"
|
||||
BBFILE_PRIORITY_local = "10"
|
||||
BBFILE_PRIORITY_upstream = "5" BBFILE_PRIORITY_local = "10"
|
||||
|
||||
.. note::
|
||||
|
||||
@@ -260,21 +259,21 @@ Providers
|
||||
|
||||
Assuming BitBake has been instructed to execute a target and that all
|
||||
the recipe files have been parsed, BitBake starts to figure out how to
|
||||
build the target. BitBake looks through the :term:`PROVIDES` list for each
|
||||
of the recipes. A :term:`PROVIDES` list is the list of names by which the
|
||||
recipe can be known. Each recipe's :term:`PROVIDES` list is created
|
||||
build the target. BitBake looks through the ``PROVIDES`` list for each
|
||||
of the recipes. A ``PROVIDES`` list is the list of names by which the
|
||||
recipe can be known. Each recipe's ``PROVIDES`` list is created
|
||||
implicitly through the recipe's :term:`PN` variable and
|
||||
explicitly through the recipe's :term:`PROVIDES`
|
||||
variable, which is optional.
|
||||
|
||||
When a recipe uses :term:`PROVIDES`, that recipe's functionality can be
|
||||
found under an alternative name or names other than the implicit :term:`PN`
|
||||
When a recipe uses ``PROVIDES``, that recipe's functionality can be
|
||||
found under an alternative name or names other than the implicit ``PN``
|
||||
name. As an example, suppose a recipe named ``keyboard_1.0.bb``
|
||||
contained the following::
|
||||
contained the following: ::
|
||||
|
||||
PROVIDES += "fullkeyboard"
|
||||
|
||||
The :term:`PROVIDES`
|
||||
The ``PROVIDES``
|
||||
list for this recipe becomes "keyboard", which is implicit, and
|
||||
"fullkeyboard", which is explicit. Consequently, the functionality found
|
||||
in ``keyboard_1.0.bb`` can be found under two different names.
|
||||
@@ -284,14 +283,14 @@ in ``keyboard_1.0.bb`` can be found under two different names.
|
||||
Preferences
|
||||
===========
|
||||
|
||||
The :term:`PROVIDES` list is only part of the solution for figuring out a
|
||||
The ``PROVIDES`` list is only part of the solution for figuring out a
|
||||
target's recipes. Because targets might have multiple providers, BitBake
|
||||
needs to prioritize providers by determining provider preferences.
|
||||
|
||||
A common example in which a target has multiple providers is
|
||||
"virtual/kernel", which is on the :term:`PROVIDES` list for each kernel
|
||||
"virtual/kernel", which is on the ``PROVIDES`` list for each kernel
|
||||
recipe. Each machine often selects the best kernel provider by using a
|
||||
line similar to the following in the machine configuration file::
|
||||
line similar to the following in the machine configuration file: ::
|
||||
|
||||
PREFERRED_PROVIDER_virtual/kernel = "linux-yocto"
|
||||
|
||||
@@ -309,10 +308,10 @@ specify a particular version. You can influence the order by using the
|
||||
:term:`DEFAULT_PREFERENCE` variable.
|
||||
|
||||
By default, files have a preference of "0". Setting
|
||||
:term:`DEFAULT_PREFERENCE` to "-1" makes the recipe unlikely to be used
|
||||
unless it is explicitly referenced. Setting :term:`DEFAULT_PREFERENCE` to
|
||||
"1" makes it likely the recipe is used. :term:`PREFERRED_VERSION` overrides
|
||||
any :term:`DEFAULT_PREFERENCE` setting. :term:`DEFAULT_PREFERENCE` is often used
|
||||
``DEFAULT_PREFERENCE`` to "-1" makes the recipe unlikely to be used
|
||||
unless it is explicitly referenced. Setting ``DEFAULT_PREFERENCE`` to
|
||||
"1" makes it likely the recipe is used. ``PREFERRED_VERSION`` overrides
|
||||
any ``DEFAULT_PREFERENCE`` setting. ``DEFAULT_PREFERENCE`` is often used
|
||||
to mark newer and more experimental recipe versions until they have
|
||||
undergone sufficient testing to be considered stable.
|
||||
|
||||
@@ -331,7 +330,7 @@ If the first recipe is named ``a_1.1.bb``, then the
|
||||
|
||||
Thus, if a recipe named ``a_1.2.bb`` exists, BitBake will choose 1.2 by
|
||||
default. However, if you define the following variable in a ``.conf``
|
||||
file that BitBake parses, you can change that preference::
|
||||
file that BitBake parses, you can change that preference: ::
|
||||
|
||||
PREFERRED_VERSION_a = "1.1"
|
||||
|
||||
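The interplay of these settings can be pictured as a small sorting problem. The sketch below is a simplification of the stated rules (an explicit ``PREFERRED_VERSION`` wins, then the higher ``DEFAULT_PREFERENCE``, then the newer version) and is not BitBake's actual provider-selection code::

    def choose_version(candidates, preferred_version=None):
        # candidates: list of (version, default_preference) pairs.
        if preferred_version is not None:
            for version, _ in candidates:
                if version == preferred_version:
                    return version                  # explicit preference wins
        # Higher DEFAULT_PREFERENCE wins; ties fall back to the "newer" version
        # (plain string comparison here, a simplification of real version sorting).
        return max(candidates, key=lambda c: (c[1], c[0]))[0]

    recipes = [("1.1", 0), ("1.2", -1)]    # 1.2 marked experimental
    print(choose_version(recipes))         # 1.1
    print(choose_version(recipes, "1.2"))  # 1.2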
@@ -394,7 +393,7 @@ ready to run, those tasks have all their dependencies met, and the
|
||||
thread threshold has not been exceeded.
|
||||
|
||||
It is worth noting that you can greatly speed up the build time by
|
||||
properly setting the :term:`BB_NUMBER_THREADS` variable.
|
||||
properly setting the ``BB_NUMBER_THREADS`` variable.
|
||||
|
||||
As each task completes, a timestamp is written to the directory
|
||||
specified by the :term:`STAMP` variable. On subsequent
|
||||
@@ -498,7 +497,7 @@ to the task.
|
||||
|
||||
Like the working directory case, situations exist where dependencies
|
||||
should be ignored. For these cases, you can instruct the build process
|
||||
to ignore a dependency by using a line like the following::
|
||||
to ignore a dependency by using a line like the following: ::
|
||||
|
||||
PACKAGE_ARCHS[vardepsexclude] = "MACHINE"
|
||||
|
||||
@@ -508,7 +507,7 @@ even if it does reference it.
|
||||
|
||||
Equally, there are cases where we need to add dependencies BitBake is
|
||||
not able to find. You can accomplish this by using a line like the
|
||||
following::
|
||||
following: ::
|
||||
|
||||
PACKAGE_ARCHS[vardeps] = "MACHINE"
|
||||
|
||||
@@ -536,7 +535,7 @@ configuration file, we can give BitBake some extra information to help
|
||||
it construct the basehash. The following statement effectively results
|
||||
in a list of global variable dependency excludes - variables never
|
||||
included in any checksum. This example uses variables from OpenEmbedded
|
||||
to help illustrate the concept::
|
||||
to help illustrate the concept: ::
|
||||
|
||||
BB_HASHBASE_WHITELIST ?= "TMPDIR FILE PATH PWD BB_TASKHASH BBPATH DL_DIR \
|
||||
SSTATE_DIR THISDIR FILESEXTRAPATHS FILE_DIRNAME HOME LOGNAME SHELL \
|
||||
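Conceptually, the basehash is a digest over the variables a task depends on, with everything on the exclusion list left out. The sketch below is a simplification to make that idea concrete; BitBake's real signature code also folds in function bodies and task dependencies::

    import hashlib

    def basehash(vardeps, excluded):
        # vardeps: {variable: expanded value}; excluded plays the role of
        # BB_HASHBASE_WHITELIST above.
        data = ""
        for name in sorted(vardeps):
            if name not in excluded:
                data += "%s=%s\n" % (name, vardeps[name])
        return hashlib.sha256(data.encode("utf-8")).hexdigest()

    print(basehash({"CFLAGS": "-O2", "PWD": "/tmp/build"}, {"PWD"}))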
@@ -557,11 +556,11 @@ OpenEmbedded-Core uses: "OEBasic" and "OEBasicHash". By default, there
|
||||
is a dummy "noop" signature handler enabled in BitBake. This means that
|
||||
behavior is unchanged from previous versions. ``OE-Core`` uses the
|
||||
"OEBasicHash" signature handler by default through this setting in the
|
||||
``bitbake.conf`` file::
|
||||
``bitbake.conf`` file: ::
|
||||
|
||||
BB_SIGNATURE_HANDLER ?= "OEBasicHash"
|
||||
|
||||
The "OEBasicHash" :term:`BB_SIGNATURE_HANDLER` is the same as the "OEBasic"
|
||||
The "OEBasicHash" ``BB_SIGNATURE_HANDLER`` is the same as the "OEBasic"
|
||||
version but adds the task hash to the stamp files. This results in any
|
||||
metadata change that changes the task hash, automatically causing the
|
||||
task to be run again. This removes the need to bump
|
||||
@@ -578,7 +577,10 @@ the build. This information includes:
|
||||
- ``BB_BASEHASH_``\ *filename:taskname*: The base hashes for each
|
||||
dependent task.
|
||||
|
||||
- :term:`BB_TASKHASH`: The hash of the currently running task.
|
||||
- ``BBHASHDEPS_``\ *filename:taskname*: The task dependencies for
|
||||
each task.
|
||||
|
||||
- ``BB_TASKHASH``: The hash of the currently running task.
|
||||
|
||||
It is worth noting that BitBake's "-S" option lets you debug BitBake's
|
||||
processing of signatures. The options passed to -S allow different
|
||||
@@ -645,6 +647,13 @@ compiled binary. To handle this, BitBake calls the
|
||||
each successful setscene task to know whether or not it needs to obtain
|
||||
the dependencies of that task.
|
||||
|
||||
Finally, after all the setscene tasks have executed, BitBake calls the
|
||||
function listed in
|
||||
:term:`BB_SETSCENE_VERIFY_FUNCTION2`
|
||||
with the list of tasks BitBake thinks has been "covered". The metadata
|
||||
can then ensure that this list is correct and can inform BitBake that it
|
||||
wants specific tasks to be run regardless of the setscene result.
|
||||
|
||||
You can find more information on setscene metadata in the
|
||||
:ref:`bitbake-user-manual/bitbake-user-manual-metadata:task checksums and setscene`
|
||||
section.
|
||||
|
||||
@@ -27,7 +27,7 @@ and unpacking the files is often optionally followed by patching.
|
||||
Patching, however, is not covered by this module.
|
||||
|
||||
The code to execute the first part of this process, a fetch, looks
|
||||
something like the following::
|
||||
something like the following: ::
|
||||
|
||||
src_uri = (d.getVar('SRC_URI') or "").split()
|
||||
fetcher = bb.fetch2.Fetch(src_uri, d)
|
||||
@@ -37,7 +37,7 @@ This code sets up an instance of the fetch class. The instance uses a
|
||||
space-separated list of URLs from the :term:`SRC_URI`
|
||||
variable and then calls the ``download`` method to download the files.
|
||||
|
||||
The instantiation of the fetch class is usually followed by::
|
||||
The instantiation of the fetch class is usually followed by: ::
|
||||
|
||||
rootdir = l.getVar('WORKDIR')
|
||||
fetcher.unpack(rootdir)
|
||||
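Putting the two fragments together, a complete fetch-and-unpack pass looks roughly like the following sketch. It assumes ``d`` is an already-prepared datastore (for example obtained through tinfoil), and the use of ``bb.fetch2.BBFetchException`` as the error base class is an assumption rather than something stated in this section::

    import bb
    import bb.fetch2

    def fetch_and_unpack(d):
        src_uri = (d.getVar('SRC_URI') or "").split()
        fetcher = bb.fetch2.Fetch(src_uri, d)
        try:
            fetcher.download()                    # tries PREMIRRORS, SRC_URI, MIRRORS in order
        except bb.fetch2.BBFetchException as exc:
            bb.fatal("fetch failed: %s" % exc)
        rootdir = d.getVar('WORKDIR')
        fetcher.unpack(rootdir)                   # unpack into the work directory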
@@ -51,7 +51,7 @@ This code unpacks the downloaded files to the specified by ``WORKDIR``.
|
||||
examine the OpenEmbedded class file ``base.bbclass``
|
||||
.
|
||||
|
||||
The :term:`SRC_URI` and ``WORKDIR`` variables are not hardcoded into the
|
||||
The ``SRC_URI`` and ``WORKDIR`` variables are not hardcoded into the
|
||||
fetcher, since those fetcher methods can be (and are) called with
|
||||
different variable names. In OpenEmbedded for example, the shared state
|
||||
(sstate) code uses the fetch module to fetch the sstate files.
|
||||
@@ -64,24 +64,24 @@ URLs by looking for source files in a specific search order:
|
||||
:term:`PREMIRRORS` variable.
|
||||
|
||||
- *Source URI:* If pre-mirrors fail, BitBake uses the original URL (e.g
|
||||
from :term:`SRC_URI`).
|
||||
from ``SRC_URI``).
|
||||
|
||||
- *Mirror Sites:* If fetch failures occur, BitBake next uses mirror
|
||||
locations as defined by the :term:`MIRRORS` variable.
|
||||
|
||||
For each URL passed to the fetcher, the fetcher calls the submodule that
|
||||
handles that particular URL type. This behavior can be the source of
|
||||
some confusion when you are providing URLs for the :term:`SRC_URI` variable.
|
||||
Consider the following two URLs::
|
||||
some confusion when you are providing URLs for the ``SRC_URI`` variable.
|
||||
Consider the following two URLs: ::
|
||||
|
||||
https://git.yoctoproject.org/git/poky;protocol=git
|
||||
http://git.yoctoproject.org/git/poky;protocol=git
|
||||
git://git.yoctoproject.org/git/poky;protocol=http
|
||||
|
||||
In the former case, the URL is passed to the ``wget`` fetcher, which does not
|
||||
understand "git". Therefore, the latter case is the correct form since the Git
|
||||
fetcher does know how to use HTTP as a transport.
|
||||
|
||||
Here are some examples that show commonly used mirror definitions::
|
||||
Here are some examples that show commonly used mirror definitions: ::
|
||||
|
||||
PREMIRRORS ?= "\
|
||||
bzr://.*/.\* http://somemirror.org/sources/ \\n \
|
||||
@@ -110,26 +110,26 @@ which is specified by the :term:`DL_DIR` variable.
|
||||
File integrity is of key importance for reproducing builds. For
|
||||
non-local archive downloads, the fetcher code can verify SHA-256 and MD5
|
||||
checksums to ensure the archives have been downloaded correctly. You can
|
||||
specify these checksums by using the :term:`SRC_URI` variable with the
|
||||
appropriate varflags as follows::
|
||||
specify these checksums by using the ``SRC_URI`` variable with the
|
||||
appropriate varflags as follows: ::
|
||||
|
||||
SRC_URI[md5sum] = "value"
|
||||
SRC_URI[sha256sum] = "value"
|
||||
|
||||
You can also specify the checksums as
|
||||
parameters on the :term:`SRC_URI` as shown below::
|
||||
parameters on the ``SRC_URI`` as shown below: ::
|
||||
|
||||
SRC_URI = "http://example.com/foobar.tar.bz2;md5sum=4a8e0f237e961fd7785d19d07fdb994d"
|
||||
|
||||
If multiple URIs exist, you can specify the checksums either directly as
|
||||
in the previous example, or you can name the URLs. The following syntax
|
||||
shows how you name the URIs::
|
||||
shows how you name the URIs: ::
|
||||
|
||||
SRC_URI = "http://example.com/foobar.tar.bz2;name=foo"
|
||||
SRC_URI[foo.md5sum] = 4a8e0f237e961fd7785d19d07fdb994d
|
||||
|
||||
After a file has been downloaded and
|
||||
has had its checksum checked, a ".done" stamp is placed in :term:`DL_DIR`.
|
||||
has had its checksum checked, a ".done" stamp is placed in ``DL_DIR``.
|
||||
BitBake uses this stamp during subsequent builds to avoid downloading or
|
||||
comparing a checksum for the file again.
|
||||
|
||||
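The verification step itself is ordinary digest checking. The following standalone sketch shows what "verify SHA-256 ... checksums" amounts to; it is independent of the fetcher's real implementation, and the path and expected value are placeholders::

    import hashlib

    def verify_sha256(path, expected):
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                digest.update(chunk)
        if digest.hexdigest() != expected:
            raise ValueError("checksum mismatch for %s" % path)

    # verify_sha256("downloads/foobar.tar.bz2", "<value from SRC_URI[sha256sum]>")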
@@ -144,10 +144,6 @@ download without a checksum triggers an error message. The
|
||||
make any attempted network access a fatal error, which is useful for
|
||||
checking that mirrors are complete as well as other things.
|
||||
|
||||
If :term:`BB_CHECK_SSL_CERTS` is set to ``0`` then SSL certificate checking will
|
||||
be disabled. This variable defaults to ``1`` so SSL certificates are normally
|
||||
checked.
|
||||
|
||||
.. _bb-the-unpack:
|
||||
|
||||
The Unpack
|
||||
@@ -167,6 +163,9 @@ govern the behavior of the unpack stage:
|
||||
- *dos:* Applies to ``.zip`` and ``.jar`` files and specifies whether
|
||||
to use DOS line ending conversion on text files.
|
||||
|
||||
- *basepath:* Instructs the unpack stage to strip the specified
|
||||
directories from the source path when unpacking.
|
||||
|
||||
- *subdir:* Unpacks the specific URL to the specified subdirectory
|
||||
within the root directory.
|
||||
|
||||
@@ -205,7 +204,7 @@ time the ``download()`` method is called.
|
||||
If you specify a directory, the entire directory is unpacked.
|
||||
|
||||
Here are a couple of example URLs, the first relative and the second
|
||||
absolute::
|
||||
absolute: ::
|
||||
|
||||
SRC_URI = "file://relativefile.patch"
|
||||
SRC_URI = "file:///Users/ich/very_important_software"
|
||||
@@ -226,7 +225,7 @@ downloaded file is useful for avoiding collisions in
|
||||
:term:`DL_DIR` when dealing with multiple files that
|
||||
have the same name.
|
||||
|
||||
Some example URLs are as follows::
|
||||
Some example URLs are as follows: ::
|
||||
|
||||
SRC_URI = "http://oe.handhelds.org/not_there.aac"
|
||||
SRC_URI = "ftp://oe.handhelds.org/not_there_as_well.aac"
|
||||
@@ -236,13 +235,15 @@ Some example URLs are as follows::
|
||||
|
||||
Because URL parameters are delimited by semi-colons, this can
|
||||
introduce ambiguity when parsing URLs that also contain semi-colons,
|
||||
for example::
|
||||
for example:
|
||||
::
|
||||
|
||||
SRC_URI = "http://abc123.org/git/?p=gcc/gcc.git;a=snapshot;h=a5dd47"
|
||||
|
||||
|
||||
Such URLs should should be modified by replacing semi-colons with '&'
|
||||
characters::
|
||||
characters:
|
||||
::
|
||||
|
||||
SRC_URI = "http://abc123.org/git/?p=gcc/gcc.git&a=snapshot&h=a5dd47"
|
||||
|
||||
@@ -250,7 +251,8 @@ Some example URLs are as follows::
|
||||
In most cases this should work. Treating semi-colons and '&' in
|
||||
queries identically is recommended by the World Wide Web Consortium
|
||||
(W3C). Note that due to the nature of the URL, you may have to
|
||||
specify the name of the downloaded file as well::
|
||||
specify the name of the downloaded file as well:
|
||||
::
|
||||
|
||||
SRC_URI = "http://abc123.org/git/?p=gcc/gcc.git&a=snapshot&h=a5dd47;downloadfilename=myfile.bz2"
|
||||
|
||||
@@ -319,7 +321,7 @@ The supported parameters are as follows:
|
||||
|
||||
- *"port":* The port to which the CVS server connects.
|
||||
|
||||
Some example URLs are as follows::
|
||||
Some example URLs are as follows: ::
|
||||
|
||||
SRC_URI = "cvs://CVSROOT;module=mymodule;tag=some-version;method=ext"
|
||||
SRC_URI = "cvs://CVSROOT;module=mymodule;date=20060126;localdir=usethat"
|
||||
@@ -361,7 +363,7 @@ The supported parameters are as follows:
|
||||
username is different than the username used in the main URL, which
|
||||
is passed to the subversion command.
|
||||
|
||||
Following are three examples using svn::
|
||||
Following are three examples using svn: ::
|
||||
|
||||
SRC_URI = "svn://myrepos/proj1;module=vip;protocol=http;rev=667"
|
||||
SRC_URI = "svn://myrepos/proj1;module=opie;protocol=svn+ssh"
|
||||
@@ -434,27 +436,11 @@ This fetcher supports the following parameters:
|
||||
parameter implies no branch and only works when the transfer protocol
|
||||
is ``file://``.
|
||||
|
||||
Here are some example URLs::
|
||||
Here are some example URLs: ::
|
||||
|
||||
SRC_URI = "git://git.oe.handhelds.org/git/vip.git;tag=version-1"
|
||||
SRC_URI = "git://git.oe.handhelds.org/git/vip.git;protocol=http"
|
||||
|
||||
.. note::
|
||||
|
||||
When using ``git`` as the fetcher of the main source code of your software,
|
||||
``S`` should be set accordingly::
|
||||
|
||||
S = "${WORKDIR}/git"
|
||||
|
||||
.. note::
|
||||
|
||||
Specifying passwords directly in ``git://`` urls is not supported.
|
||||
There are several reasons: :term:`SRC_URI` is often written out to logs and
|
||||
other places, and that could easily leak passwords; it is also all too
|
||||
easy to share metadata without removing passwords. SSH keys, ``~/.netrc``
|
||||
and ``~/.ssh/config`` files can be used as alternatives.
|
||||
|
||||
|
||||
.. _gitsm-fetcher:
|
||||
|
||||
Git Submodule Fetcher (``gitsm://``)
|
||||
@@ -489,7 +475,7 @@ repository.
|
||||
|
||||
To use this fetcher, make sure your recipe has proper
|
||||
:term:`SRC_URI`, :term:`SRCREV`, and
|
||||
:term:`PV` settings. Here is an example::
|
||||
:term:`PV` settings. Here is an example: ::
|
||||
|
||||
SRC_URI = "ccrc://cc.example.org/ccrc;vob=/example_vob;module=/example_module"
|
||||
SRCREV = "EXAMPLE_CLEARCASE_TAG"
|
||||
@@ -498,7 +484,7 @@ To use this fetcher, make sure your recipe has proper
|
||||
The fetcher uses the ``rcleartool`` or
|
||||
``cleartool`` remote client, depending on which one is available.
|
||||
|
||||
Following are options for the :term:`SRC_URI` statement:
|
||||
Following are options for the ``SRC_URI`` statement:
|
||||
|
||||
- *vob*: The name, which must include the prepending "/" character,
|
||||
of the ClearCase VOB. This option is required.
|
||||
@@ -511,7 +497,7 @@ Following are options for the :term:`SRC_URI` statement:
|
||||
The module and vob options are combined to create the load rule in the
|
||||
view config spec. As an example, consider the vob and module values from
|
||||
the SRC_URI statement at the start of this section. Combining those values
|
||||
results in the following::
|
||||
results in the following: ::
|
||||
|
||||
load /example_vob/example_module
|
||||
|
||||
@@ -560,10 +546,10 @@ password if you do not wish to keep those values in a recipe itself. If
|
||||
you choose not to use ``P4CONFIG``, or to explicitly set variables that
|
||||
``P4CONFIG`` can contain, you can specify the ``P4PORT`` value, which is
|
||||
the server's URL and port number, and you can specify a username and
|
||||
password directly in your recipe within :term:`SRC_URI`.
|
||||
password directly in your recipe within ``SRC_URI``.
|
||||
|
||||
Here is an example that relies on ``P4CONFIG`` to specify the server URL
|
||||
and port, username, and password, and fetches the Head Revision::
|
||||
and port, username, and password, and fetches the Head Revision: ::
|
||||
|
||||
SRC_URI = "p4://example-depot/main/source/..."
|
||||
SRCREV = "${AUTOREV}"
|
||||
@@ -571,7 +557,7 @@ and port, username, and password, and fetches the Head Revision::
|
||||
S = "${WORKDIR}/p4"
|
||||
|
||||
Here is an example that specifies the server URL and port, username, and
|
||||
password, and fetches a Revision based on a Label::
|
||||
password, and fetches a Revision based on a Label: ::
|
||||
|
||||
P4PORT = "tcp:p4server.example.net:1666"
|
||||
SRC_URI = "p4://user:passwd@example-depot/main/source/..."
|
||||
@@ -597,7 +583,7 @@ paths locally is desirable, the fetcher supports two parameters:
|
||||
paths locally for the specified location, even in combination with the
|
||||
``module`` parameter.
|
||||
|
||||
Here is an example use of the the ``module`` parameter::
|
||||
Here is an example use of the the ``module`` parameter: ::
|
||||
|
||||
SRC_URI = "p4://user:passwd@example-depot/main;module=source/..."
|
||||
|
||||
@@ -605,7 +591,7 @@ In this case, the content of the top-level directory ``source/`` will be fetched
|
||||
to ``${P4DIR}``, including the directory itself. The top-level directory will
|
||||
be accesible at ``${P4DIR}/source/``.
|
||||
|
||||
Here is an example use of the the ``remotepath`` parameter::
|
||||
Here is an example use of the the ``remotepath`` parameter: ::
|
||||
|
||||
SRC_URI = "p4://user:passwd@example-depot/main;module=source/...;remotepath=keep"
|
||||
|
||||
@@ -633,39 +619,11 @@ This fetcher supports the following parameters:
|
||||
|
||||
- *"manifest":* Name of the manifest file (default: ``default.xml``).
|
||||
|
||||
Here are some example URLs::
|
||||
Here are some example URLs: ::
|
||||
|
||||
SRC_URI = "repo://REPOROOT;protocol=git;branch=some_branch;manifest=my_manifest.xml"
|
||||
SRC_URI = "repo://REPOROOT;protocol=file;branch=some_branch;manifest=my_manifest.xml"
|
||||
|
||||
.. _az-fetcher:

Az Fetcher (``az://``)
--------------------------

This submodule fetches data from an
`Azure Storage account <https://docs.microsoft.com/en-us/azure/storage/>`__ ,
it inherits its functionality from the HTTP wget fetcher, but modifies its
behavior to accommodate the usage of a
`Shared Access Signature (SAS) <https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview>`__
for non-public data.

Such functionality is set by the variable:

- :term:`AZ_SAS`: The Azure Storage Shared Access Signature provides secure
delegate access to resources, if this variable is set, the Az Fetcher will
use it when fetching artifacts from the cloud.

You can specify the AZ_SAS variable as shown below::

AZ_SAS = "se=2021-01-01&sp=r&sv=2018-11-09&sr=c&skoid=<skoid>&sig=<signature>"

Here is an example URL::

SRC_URI = "az://<azure-storage-account>.blob.core.windows.net/<foo_container>/<bar_file>"

It can also be used when setting mirrors definitions using the :term:`PREMIRRORS` variable.

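For illustration only (the storage account and container names below are placeholders and are not taken from the change recorded above), a mirror definition pointing at an Azure container could look like::

    PREMIRRORS:prepend = "\
        git://.*/.* az://<azure-storage-account>.blob.core.windows.net/<mirror_container>/ \n \
        https://.*/.* az://<azure-storage-account>.blob.core.windows.net/<mirror_container>/ \n \
        http://.*/.* az://<azure-storage-account>.blob.core.windows.net/<mirror_container>/ \n"

Older BitBake releases spell the same override as ``PREMIRRORS_prepend``; with :term:`AZ_SAS` set, the fetcher authenticates against the container when these mirror entries are tried.
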
Other Fetchers
--------------

@@ -691,4 +649,4 @@ submodules. However, you might find the code helpful and readable.
Auto Revisions
==============

We need to document ``AUTOREV`` and :term:`SRCREV_FORMAT` here.
We need to document ``AUTOREV`` and ``SRCREV_FORMAT`` here.

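Until that documentation exists, here is a minimal hedged sketch of how the two variables are typically used together (the repository URLs and the entry names are invented for illustration)::

    # Track the latest revision on the branch rather than a fixed commit
    SRCREV = "${AUTOREV}"

    # With several repositories in SRC_URI, name each entry and tell BitBake
    # how to combine the individual revisions into one version string
    SRC_URI = "git://example.org/first.git;name=first \
               git://example.org/second.git;name=second"
    SRCREV_first = "${AUTOREV}"
    SRCREV_second = "${AUTOREV}"
    SRCREV_FORMAT = "first_second"
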
@@ -20,7 +20,7 @@ Obtaining BitBake
|
||||
|
||||
See the :ref:`bitbake-user-manual/bitbake-user-manual-hello:obtaining bitbake` section for
|
||||
information on how to obtain BitBake. Once you have the source code on
|
||||
your machine, the BitBake directory appears as follows::
|
||||
your machine, the BitBake directory appears as follows: ::
|
||||
|
||||
$ ls -al
|
||||
total 100
|
||||
@@ -49,7 +49,7 @@ Setting Up the BitBake Environment
|
||||
|
||||
First, you need to be sure that you can run BitBake. Set your working
|
||||
directory to where your local BitBake files are and run the following
|
||||
command::
|
||||
command: ::
|
||||
|
||||
$ ./bin/bitbake --version
|
||||
BitBake Build Tool Core version 1.23.0, bitbake version 1.23.0
|
||||
@@ -61,14 +61,14 @@ The recommended method to run BitBake is from a directory of your
|
||||
choice. To be able to run BitBake from any directory, you need to add
|
||||
the executable binary to your shell's environment
|
||||
``PATH`` variable. First, look at your current ``PATH`` variable by
|
||||
entering the following::
|
||||
entering the following: ::
|
||||
|
||||
$ echo $PATH
|
||||
|
||||
Next, add the directory location
|
||||
for the BitBake binary to the ``PATH``. Here is an example that adds the
|
||||
``/home/scott-lenovo/bitbake/bin`` directory to the front of the
|
||||
``PATH`` variable::
|
||||
``PATH`` variable: ::
|
||||
|
||||
$ export PATH=/home/scott-lenovo/bitbake/bin:$PATH
|
||||
|
||||
@@ -99,7 +99,7 @@ discussion mailing list about the BitBake build tool.
|
||||
|
||||
This example was inspired by and drew heavily from
|
||||
`Mailing List post - The BitBake equivalent of "Hello, World!"
|
||||
<https://www.mail-archive.com/yocto@yoctoproject.org/msg09379.html>`_.
|
||||
<http://www.mail-archive.com/yocto@yoctoproject.org/msg09379.html>`_.
|
||||
|
||||
As stated earlier, the goal of this example is to eventually compile
|
||||
"Hello World". However, it is unknown what BitBake needs and what you
|
||||
@@ -116,7 +116,7 @@ Following is the complete "Hello World" example.
|
||||
|
||||
#. **Create a Project Directory:** First, set up a directory for the
|
||||
"Hello World" project. Here is how you can do so in your home
|
||||
directory::
|
||||
directory: ::
|
||||
|
||||
$ mkdir ~/hello
|
||||
$ cd ~/hello
|
||||
@@ -127,7 +127,7 @@ Following is the complete "Hello World" example.
|
||||
directory is a good way to isolate your project.
|
||||
|
||||
#. **Run BitBake:** At this point, you have nothing but a project
|
||||
directory. Run the ``bitbake`` command and see what it does::
|
||||
directory. Run the ``bitbake`` command and see what it does: ::
|
||||
|
||||
$ bitbake
|
||||
The BBPATH variable is not set and bitbake did not
|
||||
@@ -145,23 +145,23 @@ Following is the complete "Hello World" example.
|
||||
|
||||
The majority of this output is specific to environment variables that
|
||||
are not directly relevant to BitBake. However, the very first
|
||||
message regarding the :term:`BBPATH` variable and the
|
||||
message regarding the ``BBPATH`` variable and the
|
||||
``conf/bblayers.conf`` file is relevant.
|
||||
|
||||
When you run BitBake, it begins looking for metadata files. The
|
||||
:term:`BBPATH` variable is what tells BitBake where
|
||||
to look for those files. :term:`BBPATH` is not set and you need to set
|
||||
it. Without :term:`BBPATH`, BitBake cannot find any configuration files
|
||||
to look for those files. ``BBPATH`` is not set and you need to set
|
||||
it. Without ``BBPATH``, BitBake cannot find any configuration files
|
||||
(``.conf``) or recipe files (``.bb``) at all. BitBake also cannot
|
||||
find the ``bitbake.conf`` file.
|
||||
|
||||
#. **Setting BBPATH:** For this example, you can set :term:`BBPATH` in
|
||||
#. **Setting BBPATH:** For this example, you can set ``BBPATH`` in
|
||||
the same manner that you set ``PATH`` earlier in the appendix. You
|
||||
should realize, though, that it is much more flexible to set the
|
||||
:term:`BBPATH` variable up in a configuration file for each project.
|
||||
``BBPATH`` variable up in a configuration file for each project.
|
||||
|
||||
From your shell, enter the following commands to set and export the
|
||||
:term:`BBPATH` variable::
|
||||
``BBPATH`` variable: ::
|
||||
|
||||
$ BBPATH="projectdirectory"
|
||||
$ export BBPATH
|
||||
@@ -175,8 +175,8 @@ Following is the complete "Hello World" example.
|
||||
("~") character as BitBake does not expand that character as the
|
||||
shell would.
|
||||
|
||||
#. **Run BitBake:** Now that you have :term:`BBPATH` defined, run the
|
||||
``bitbake`` command again::
|
||||
#. **Run BitBake:** Now that you have ``BBPATH`` defined, run the
|
||||
``bitbake`` command again: ::
|
||||
|
||||
$ bitbake
|
||||
ERROR: Traceback (most recent call last):
|
||||
@@ -205,18 +205,18 @@ Following is the complete "Hello World" example.
|
||||
recipe files. For this example, you need to create the file in your
|
||||
project directory and define some key BitBake variables. For more
|
||||
information on the ``bitbake.conf`` file, see
|
||||
https://git.openembedded.org/bitbake/tree/conf/bitbake.conf.
|
||||
http://git.openembedded.org/bitbake/tree/conf/bitbake.conf.
|
||||
|
||||
Use the following commands to create the ``conf`` directory in the
|
||||
project directory::
|
||||
project directory: ::
|
||||
|
||||
$ mkdir conf
|
||||
|
||||
From within the ``conf`` directory,
|
||||
use some editor to create the ``bitbake.conf`` so that it contains
|
||||
the following::
|
||||
the following: ::
|
||||
|
||||
PN = "${@bb.parse.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}"
|
||||
PN = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}"
|
||||
|
||||
TMPDIR = "${TOPDIR}/tmp"
|
||||
CACHE = "${TMPDIR}/cache"
|
||||
@@ -251,7 +251,7 @@ Following is the complete "Hello World" example.
|
||||
glossary.
|
||||
|
||||
#. **Run BitBake:** After making sure that the ``conf/bitbake.conf`` file
|
||||
exists, you can run the ``bitbake`` command again::
|
||||
exists, you can run the ``bitbake`` command again: ::
|
||||
|
||||
$ bitbake
|
||||
ERROR: Traceback (most recent call last):
|
||||
@@ -278,7 +278,7 @@ Following is the complete "Hello World" example.
|
||||
in the ``classes`` directory of the project (i.e ``hello/classes``
|
||||
in this example).
|
||||
|
||||
Create the ``classes`` directory as follows::
|
||||
Create the ``classes`` directory as follows: ::
|
||||
|
||||
$ cd $HOME/hello
|
||||
$ mkdir classes
|
||||
@@ -291,7 +291,7 @@ Following is the complete "Hello World" example.
|
||||
environments BitBake is supporting.
|
||||
|
||||
#. **Run BitBake:** After making sure that the ``classes/base.bbclass``
|
||||
file exists, you can run the ``bitbake`` command again::
|
||||
file exists, you can run the ``bitbake`` command again: ::
|
||||
|
||||
$ bitbake
|
||||
Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.
|
||||
@@ -314,7 +314,7 @@ Following is the complete "Hello World" example.
|
||||
Minimally, you need a recipe file and a layer configuration file in
|
||||
your layer. The configuration file needs to be in the ``conf``
|
||||
directory inside the layer. Use these commands to set up the layer
|
||||
and the ``conf`` directory::
|
||||
and the ``conf`` directory: ::
|
||||
|
||||
$ cd $HOME
|
||||
$ mkdir mylayer
|
||||
@@ -322,12 +322,12 @@ Following is the complete "Hello World" example.
|
||||
$ mkdir conf
|
||||
|
||||
Move to the ``conf`` directory and create a ``layer.conf`` file that has the
|
||||
following::
|
||||
following: ::
|
||||
|
||||
BBPATH .= ":${LAYERDIR}"
|
||||
BBFILES += "${LAYERDIR}/*.bb"
|
||||
BBFILES += "${LAYERDIR}/\*.bb"
|
||||
BBFILE_COLLECTIONS += "mylayer"
|
||||
BBFILE_PATTERN_mylayer := "^${LAYERDIR_RE}/"
|
||||
`BBFILE_PATTERN_mylayer := "^${LAYERDIR_RE}/"
|
||||
|
||||
For information on these variables, click on :term:`BBFILES`,
|
||||
:term:`LAYERDIR`, :term:`BBFILE_COLLECTIONS` or :term:`BBFILE_PATTERN_mylayer <BBFILE_PATTERN>`
|
||||
@@ -335,7 +335,7 @@ Following is the complete "Hello World" example.
|
||||
|
||||
You need to create the recipe file next. Inside your layer at the
|
||||
top-level, use an editor and create a recipe file named
|
||||
``printhello.bb`` that has the following::
|
||||
``printhello.bb`` that has the following: ::
|
||||
|
||||
DESCRIPTION = "Prints Hello World"
|
||||
PN = 'printhello'
|
||||
@@ -356,7 +356,7 @@ Following is the complete "Hello World" example.
|
||||
follow the links to the glossary.
|
||||
|
||||
#. **Run BitBake With a Target:** Now that a BitBake target exists, run
|
||||
the command and provide that target::
|
||||
the command and provide that target: ::
|
||||
|
||||
$ cd $HOME/hello
|
||||
$ bitbake printhello
|
||||
@@ -376,7 +376,7 @@ Following is the complete "Hello World" example.
|
||||
``hello/conf`` for this example).
|
||||
|
||||
Set your working directory to the ``hello/conf`` directory and then
|
||||
create the ``bblayers.conf`` file so that it contains the following::
|
||||
create the ``bblayers.conf`` file so that it contains the following: ::
|
||||
|
||||
BBLAYERS ?= " \
|
||||
/home/<you>/mylayer \
|
||||
@@ -386,7 +386,7 @@ Following is the complete "Hello World" example.
|
||||
|
||||
#. **Run BitBake With a Target:** Now that you have supplied the
|
||||
``bblayers.conf`` file, run the ``bitbake`` command and provide the
|
||||
target::
|
||||
target: ::
|
||||
|
||||
$ bitbake printhello
|
||||
Parsing recipes: 100% |##################################################################################|
|
||||
|
||||
@@ -27,7 +27,7 @@ Linux software stacks using a task-oriented approach.
|
||||
Conceptually, BitBake is similar to GNU Make in some regards but has
|
||||
significant differences:
|
||||
|
||||
- BitBake executes tasks according to the provided metadata that builds up
|
||||
- BitBake executes tasks according to provided metadata that builds up
|
||||
the tasks. Metadata is stored in recipe (``.bb``) and related recipe
|
||||
"append" (``.bbappend``) files, configuration (``.conf``) and
|
||||
underlying include (``.inc``) files, and in class (``.bbclass``)
|
||||
@@ -60,11 +60,11 @@ member Chris Larson split the project into two distinct pieces:
|
||||
- OpenEmbedded, a metadata set utilized by BitBake
|
||||
|
||||
Today, BitBake is the primary basis of the
|
||||
`OpenEmbedded <https://www.openembedded.org/>`__ project, which is being
|
||||
`OpenEmbedded <http://www.openembedded.org/>`__ project, which is being
|
||||
used to build and maintain Linux distributions such as the `Angstrom
|
||||
Distribution <http://www.angstrom-distribution.org/>`__, and which is
|
||||
also being used as the build tool for Linux projects such as the `Yocto
|
||||
Project <https://www.yoctoproject.org>`__.
|
||||
Project <http://www.yoctoproject.org>`__.
|
||||
|
||||
Prior to BitBake, no other build tool adequately met the needs of an
|
||||
aspiring embedded Linux distribution. All of the build systems used by
|
||||
@@ -248,13 +248,13 @@ underlying, similarly-named recipe files.
|
||||
|
||||
When you name an append file, you can use the "``%``" wildcard character
|
||||
to allow for matching recipe names. For example, suppose you have an
|
||||
append file named as follows::
|
||||
append file named as follows: ::
|
||||
|
||||
busybox_1.21.%.bbappend
|
||||
|
||||
That append file
|
||||
would match any ``busybox_1.21.``\ x\ ``.bb`` version of the recipe. So,
|
||||
the append file would match the following recipe names::
|
||||
the append file would match the following recipe names: ::
|
||||
|
||||
busybox_1.21.1.bb
|
||||
busybox_1.21.2.bb
|
||||
@@ -290,7 +290,7 @@ You can obtain BitBake several different ways:
|
||||
are using. The metadata is generally backwards compatible but not
|
||||
forward compatible.
|
||||
|
||||
Here is an example that clones the BitBake repository::
|
||||
Here is an example that clones the BitBake repository: ::
|
||||
|
||||
$ git clone git://git.openembedded.org/bitbake
|
||||
|
||||
@@ -298,7 +298,7 @@ You can obtain BitBake several different ways:
|
||||
Git repository into a directory called ``bitbake``. Alternatively,
|
||||
you can designate a directory after the ``git clone`` command if you
|
||||
want to call the new directory something other than ``bitbake``. Here
|
||||
is an example that names the directory ``bbdev``::
|
||||
is an example that names the directory ``bbdev``: ::
|
||||
|
||||
$ git clone git://git.openembedded.org/bitbake bbdev
|
||||
|
||||
@@ -317,9 +317,9 @@ You can obtain BitBake several different ways:
|
||||
method for getting BitBake. Cloning the repository makes it easier
|
||||
to update as patches are added to the stable branches.
|
||||
|
||||
The following example downloads a snapshot of BitBake version 1.17.0::
|
||||
The following example downloads a snapshot of BitBake version 1.17.0: ::
|
||||
|
||||
$ wget https://git.openembedded.org/bitbake/snapshot/bitbake-1.17.0.tar.gz
|
||||
$ wget http://git.openembedded.org/bitbake/snapshot/bitbake-1.17.0.tar.gz
|
||||
$ tar zxpvf bitbake-1.17.0.tar.gz
|
||||
|
||||
After extraction of the tarball using
|
||||
@@ -347,7 +347,7 @@ execution examples.
|
||||
Usage and syntax
|
||||
----------------
|
||||
|
||||
Following is the usage and syntax for BitBake::
|
||||
Following is the usage and syntax for BitBake: ::
|
||||
|
||||
$ bitbake -h
|
||||
Usage: bitbake [options] [recipename/target recipe:do_task ...]
|
||||
@@ -417,8 +417,8 @@ Following is the usage and syntax for BitBake::
|
||||
-l DEBUG_DOMAINS, --log-domains=DEBUG_DOMAINS
|
||||
Show debug logging for the specified logging domains
|
||||
-P, --profile Profile the command and save reports.
|
||||
-u UI, --ui=UI The user interface to use (knotty, ncurses, taskexp or
|
||||
teamcity - default knotty).
|
||||
-u UI, --ui=UI The user interface to use (knotty, ncurses or taskexp
|
||||
- default knotty).
|
||||
--token=XMLRPCTOKEN Specify the connection token to be used when
|
||||
connecting to a remote server.
|
||||
--revisions-changed Set the exit code depending on whether upstream
|
||||
@@ -433,9 +433,6 @@ Following is the usage and syntax for BitBake::
|
||||
Environment variable BB_SERVER_TIMEOUT.
|
||||
--no-setscene Do not run any setscene tasks. sstate will be ignored
|
||||
and everything needed, built.
|
||||
--skip-setscene Skip setscene tasks if they would be executed. Tasks
|
||||
previously restored from sstate will be kept, unlike
|
||||
--no-setscene
|
||||
--setscene-only Only run setscene tasks, don't run any real tasks.
|
||||
--remote-server=REMOTE_SERVER
|
||||
Connect to the specified server.
|
||||
@@ -472,11 +469,11 @@ default task, which is "build". BitBake obeys inter-task dependencies
|
||||
when doing so.
|
||||
|
||||
The following command runs the build task, which is the default task, on
|
||||
the ``foo_1.0.bb`` recipe file::
|
||||
the ``foo_1.0.bb`` recipe file: ::
|
||||
|
||||
$ bitbake -b foo_1.0.bb
|
||||
|
||||
The following command runs the clean task on the ``foo.bb`` recipe file::
|
||||
The following command runs the clean task on the ``foo.bb`` recipe file: ::
|
||||
|
||||
$ bitbake -b foo.bb -c clean
|
||||
|
||||
@@ -500,13 +497,13 @@ functionality, or when there are multiple versions of a recipe.
|
||||
The ``bitbake`` command, when not using "--buildfile" or "-b" only
|
||||
accepts a "PROVIDES". You cannot provide anything else. By default, a
|
||||
recipe file generally "PROVIDES" its "packagename" as shown in the
|
||||
following example::
|
||||
following example: ::
|
||||
|
||||
$ bitbake foo
|
||||
|
||||
This next example "PROVIDES" the
|
||||
package name and also uses the "-c" option to tell BitBake to just
|
||||
execute the ``do_clean`` task::
|
||||
execute the ``do_clean`` task: ::
|
||||
|
||||
$ bitbake -c clean foo
|
||||
|
||||
@@ -517,7 +514,7 @@ The BitBake command line supports specifying different tasks for
|
||||
individual targets when you specify multiple targets. For example,
|
||||
suppose you had two targets (or recipes) ``myfirstrecipe`` and
|
||||
``mysecondrecipe`` and you needed BitBake to run ``taskA`` for the first
|
||||
recipe and ``taskB`` for the second recipe::
|
||||
recipe and ``taskB`` for the second recipe: ::
|
||||
|
||||
$ bitbake myfirstrecipe:do_taskA mysecondrecipe:do_taskB
|
||||
|
||||
@@ -540,10 +537,10 @@ current working directory:
|
||||
To stop depending on common depends, use the "-I" depend option and
|
||||
BitBake omits them from the graph. Leaving this information out can
|
||||
produce more readable graphs. This way, you can remove from the graph
|
||||
:term:`DEPENDS` from inherited classes such as ``base.bbclass``.
|
||||
``DEPENDS`` from inherited classes such as ``base.bbclass``.
|
||||
|
||||
Here are two examples that create dependency graphs. The second example
|
||||
omits depends common in OpenEmbedded from the graph::
|
||||
omits depends common in OpenEmbedded from the graph: ::
|
||||
|
||||
$ bitbake -g foo
|
||||
|
||||
@@ -567,7 +564,7 @@ for two separate targets:
|
||||
.. image:: figures/bb_multiconfig_files.png
|
||||
:align: center
|
||||
|
||||
The reason for this required file hierarchy is because the :term:`BBPATH`
|
||||
The reason for this required file hierarchy is because the ``BBPATH``
|
||||
variable is not constructed until the layers are parsed. Consequently,
|
||||
using the configuration file as a pre-configuration file is not possible
|
||||
unless it is located in the current working directory.
|
||||
@@ -585,17 +582,17 @@ accomplished by setting the
|
||||
configuration files for ``target1`` and ``target2`` defined in the build
|
||||
directory. The following statement in the ``local.conf`` file both
|
||||
enables BitBake to perform multiple configuration builds and specifies
|
||||
the two extra multiconfigs::
|
||||
the two extra multiconfigs: ::
|
||||
|
||||
BBMULTICONFIG = "target1 target2"
|
||||
|
||||
Once the target configuration files are in place and BitBake has been
|
||||
enabled to perform multiple configuration builds, use the following
|
||||
command form to start the builds::
|
||||
command form to start the builds: ::
|
||||
|
||||
$ bitbake [mc:multiconfigname:]target [[[mc:multiconfigname:]target] ... ]
|
||||
|
||||
Here is an example for two extra multiconfigs: ``target1`` and ``target2``::
|
||||
Here is an example for two extra multiconfigs: ``target1`` and ``target2``: ::
|
||||
|
||||
$ bitbake mc::target mc:target1:target mc:target2:target
|
||||
|
||||
@@ -616,12 +613,12 @@ multiconfig.
|
||||
|
||||
To enable dependencies in a multiple configuration build, you must
|
||||
declare the dependencies in the recipe using the following statement
|
||||
form::
|
||||
form: ::
|
||||
|
||||
task_or_package[mcdepends] = "mc:from_multiconfig:to_multiconfig:recipe_name:task_on_which_to_depend"
|
||||
|
||||
To better show how to use this statement, consider an example with two
|
||||
multiconfigs: ``target1`` and ``target2``::
|
||||
multiconfigs: ``target1`` and ``target2``: ::
|
||||
|
||||
image_task[mcdepends] = "mc:target1:target2:image2:rootfs_task"
|
||||
|
||||
@@ -632,7 +629,7 @@ completion of the rootfs_task used to build out image2, which is
|
||||
associated with the "target2" multiconfig.
|
||||
|
||||
Once you set up this dependency, you can build the "target1" multiconfig
|
||||
using a BitBake command as follows::
|
||||
using a BitBake command as follows: ::
|
||||
|
||||
$ bitbake mc:target1:image1
|
||||
|
||||
@@ -642,7 +639,7 @@ the ``rootfs_task`` for the "target2" multiconfig build.
|
||||
|
||||
Having a recipe depend on the root filesystem of another build might not
|
||||
seem that useful. Consider this change to the statement in the image1
|
||||
recipe::
|
||||
recipe: ::
|
||||
|
||||
image_task[mcdepends] = "mc:target1:target2:image2:image_task"
|
||||
|
||||
|
||||
File diff suppressed because it is too large
@@ -23,32 +23,22 @@ overview of their function and contents.
|
||||
systems extend the functionality of the variable as it is
|
||||
described here in this glossary.
|
||||
|
||||
- Finally, there are variables mentioned in this glossary that do
|
||||
not appear in the BitBake glossary. These other variables are
|
||||
variables used in systems that use BitBake.
|
||||
|
||||
.. glossary::
|
||||
:sorted:
|
||||
|
||||
:term:`ASSUME_PROVIDED`
Lists recipe names (:term:`PN` values) BitBake does not
attempt to build. Instead, BitBake assumes these recipes have already
been built.

In OpenEmbedded-Core, :term:`ASSUME_PROVIDED` mostly specifies native
In OpenEmbedded-Core, ``ASSUME_PROVIDED`` mostly specifies native
tools that should not be built. An example is ``git-native``, which
when specified allows for the Git binary from the host to be used
rather than building ``git-native``.

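As a small illustration of the usual assignment form (not part of the change recorded above)::

    ASSUME_PROVIDED += "git-native"
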
:term:`AZ_SAS`
|
||||
Azure Storage Shared Access Signature, when using the
|
||||
:ref:`Azure Storage fetcher <bitbake-user-manual/bitbake-user-manual-fetching:fetchers>`
|
||||
This variable can be defined to be used by the fetcher to authenticate
|
||||
and gain access to non-public artifacts.
|
||||
::
|
||||
|
||||
AZ_SAS = ""se=2021-01-01&sp=r&sv=2018-11-09&sr=c&skoid=<skoid>&sig=<signature>""
|
||||
|
||||
For more information see Microsoft's Azure Storage documentation at
|
||||
https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview
|
||||
|
||||
|
||||
:term:`B`
|
||||
The directory in which BitBake executes functions during a recipe's
|
||||
build process.
|
||||
@@ -84,18 +74,14 @@ overview of their function and contents.
|
||||
|
||||
- Attempts to access networks not in the host list cause a failure.
|
||||
|
||||
Using :term:`BB_ALLOWED_NETWORKS` in conjunction with
|
||||
Using ``BB_ALLOWED_NETWORKS`` in conjunction with
|
||||
:term:`PREMIRRORS` is very useful. Adding the
|
||||
host you want to use to :term:`PREMIRRORS` results in the source code
|
||||
host you want to use to ``PREMIRRORS`` results in the source code
|
||||
being fetched from an allowed location and avoids raising an error
|
||||
when a host that is not allowed is in a
|
||||
:term:`SRC_URI` statement. This is because the
|
||||
fetcher does not attempt to use the host listed in :term:`SRC_URI` after
|
||||
a successful fetch from the :term:`PREMIRRORS` occurs.
|
||||
|
||||
:term:`BB_CHECK_SSL_CERTS`
|
||||
Specifies if SSL certificates should be checked when fetching. The default
|
||||
value is ``1`` and certificates are not checked if the value is set to ``0``.
|
||||
fetcher does not attempt to use the host listed in ``SRC_URI`` after
|
||||
a successful fetch from the ``PREMIRRORS`` occurs.
|
||||
|
||||
:term:`BB_CONSOLELOG`
|
||||
Specifies the path to a log file into which BitBake's user interface
|
||||
@@ -122,16 +108,12 @@ overview of their function and contents.
|
||||
command line option). The task name specified should not include the
|
||||
``do_`` prefix.
|
||||
|
||||
:term:`BB_DEFAULT_UMASK`
The default umask to apply to tasks if specified and no task specific
umask flag is set.

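A typical setting, shown here only as an illustration, would be::

    BB_DEFAULT_UMASK = "022"
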
:term:`BB_DISKMON_DIRS`
|
||||
Monitors disk space and available inodes during the build and allows
|
||||
you to control the build based on these parameters.
|
||||
|
||||
Disk space monitoring is disabled by default. When setting this
|
||||
variable, use the following form::
|
||||
variable, use the following form: ::
|
||||
|
||||
BB_DISKMON_DIRS = "<action>,<dir>,<threshold> [...]"
|
||||
|
||||
@@ -167,7 +149,7 @@ overview of their function and contents.
|
||||
not specify G, M, or K, Kbytes is assumed by
|
||||
default. Do not use GB, MB, or KB.
|
||||
|
||||
Here are some examples::
|
||||
Here are some examples: ::
|
||||
|
||||
BB_DISKMON_DIRS = "ABORT,${TMPDIR},1G,100K WARN,${SSTATE_DIR},1G,100K"
|
||||
BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},1G"
|
||||
@@ -182,7 +164,7 @@ overview of their function and contents.
|
||||
issues a warning when the disk space in the ``${SSTATE_DIR}``
|
||||
directory drops below 1 Gbyte or the number of free inodes drops
|
||||
below 100 Kbytes. Subsequent warnings are issued during intervals as
|
||||
defined by the :term:`BB_DISKMON_WARNINTERVAL` variable.
|
||||
defined by the ``BB_DISKMON_WARNINTERVAL`` variable.
|
||||
|
||||
The second example stops the build after all currently executing
|
||||
tasks complete when the minimum disk space in the ``${TMPDIR}``
|
||||
@@ -196,19 +178,19 @@ overview of their function and contents.
|
||||
:term:`BB_DISKMON_WARNINTERVAL`
|
||||
Defines the disk space and free inode warning intervals.
|
||||
|
||||
If you are going to use the :term:`BB_DISKMON_WARNINTERVAL` variable, you
|
||||
If you are going to use the ``BB_DISKMON_WARNINTERVAL`` variable, you
|
||||
must also use the :term:`BB_DISKMON_DIRS`
|
||||
variable and define its action as "WARN". During the build,
|
||||
subsequent warnings are issued each time disk space or number of free
|
||||
inodes further reduces by the respective interval.
|
||||
|
||||
If you do not provide a :term:`BB_DISKMON_WARNINTERVAL` variable and you
|
||||
do use :term:`BB_DISKMON_DIRS` with the "WARN" action, the disk
|
||||
If you do not provide a ``BB_DISKMON_WARNINTERVAL`` variable and you
|
||||
do use ``BB_DISKMON_DIRS`` with the "WARN" action, the disk
|
||||
monitoring interval defaults to the following:
|
||||
BB_DISKMON_WARNINTERVAL = "50M,5K"
|
||||
|
||||
When specifying the variable in your configuration file, use the
|
||||
following form::
|
||||
following form: ::
|
||||
|
||||
BB_DISKMON_WARNINTERVAL = "<disk_space_interval>,<disk_inode_interval>"
|
||||
|
||||
@@ -224,7 +206,7 @@ overview of their function and contents.
|
||||
G, M, or K for Gbytes, Mbytes, or Kbytes,
|
||||
respectively. You cannot use GB, MB, or KB.
|
||||
|
||||
Here is an example::
|
||||
Here is an example: ::
|
||||
|
||||
BB_DISKMON_DIRS = "WARN,${SSTATE_DIR},1G,100K"
|
||||
BB_DISKMON_WARNINTERVAL = "50M,5K"
|
||||
@@ -236,17 +218,6 @@ overview of their function and contents.
|
||||
based on the interval occur each time a respective interval is
|
||||
reached beyond the initial warning (i.e. 1 Gbytes and 100 Kbytes).
|
||||
|
||||
:term:`BB_ENV_EXTRAWHITE`
|
||||
Specifies an additional set of variables to allow through (whitelist)
|
||||
from the external environment into BitBake's datastore. This list of
|
||||
variables are on top of the internal list set in
|
||||
:term:`BB_ENV_WHITELIST`.
|
||||
|
||||
.. note::
|
||||
|
||||
You must set this variable in the external environment in order
|
||||
for it to work.
|
||||
|
||||
:term:`BB_ENV_WHITELIST`
|
||||
Specifies the internal whitelist of variables to allow through from
|
||||
the external environment into BitBake's datastore. If the value of
|
||||
@@ -259,6 +230,17 @@ overview of their function and contents.
|
||||
You must set this variable in the external environment in order
|
||||
for it to work.
|
||||
|
||||
:term:`BB_ENV_EXTRAWHITE`
Specifies an additional set of variables to allow through (whitelist)
from the external environment into BitBake's datastore. This list of
variables is on top of the internal list set in
:term:`BB_ENV_WHITELIST`.

.. note::

You must set this variable in the external environment in order
for it to work.

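Because it must come from the external environment, the variable is normally exported in the shell before BitBake is invoked, for example (the extra variable name below is just a placeholder)::

    $ export BB_ENV_EXTRAWHITE="$BB_ENV_EXTRAWHITE MY_CUSTOM_VAR"
    $ bitbake <target>
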
:term:`BB_FETCH_PREMIRRORONLY`
|
||||
When set to "1", causes BitBake's fetcher module to only search
|
||||
:term:`PREMIRRORS` for files. BitBake will not
|
||||
@@ -268,9 +250,48 @@ overview of their function and contents.
|
||||
:term:`BB_FILENAME`
|
||||
Contains the filename of the recipe that owns the currently running
|
||||
task. For example, if the ``do_fetch`` task that resides in the
|
||||
``my-recipe.bb`` is executing, the :term:`BB_FILENAME` variable contains
|
||||
``my-recipe.bb`` is executing, the ``BB_FILENAME`` variable contains
|
||||
"/foo/path/my-recipe.bb".
|
||||
|
||||
:term:`BBFILES_DYNAMIC`
|
||||
Activates content depending on presence of identified layers. You
|
||||
identify the layers by the collections that the layers define.
|
||||
|
||||
Use the ``BBFILES_DYNAMIC`` variable to avoid ``.bbappend`` files whose
|
||||
corresponding ``.bb`` file is in a layer that attempts to modify other
|
||||
layers through ``.bbappend`` but does not want to introduce a hard
|
||||
dependency on those other layers.
|
||||
|
||||
Additionally you can prefix the rule with "!" to add ``.bbappend`` and
|
||||
``.bb`` files in case a layer is not present. Use this to avoid a hard
|
||||
dependency on those other layers.
|
||||
|
||||
Use the following form for ``BBFILES_DYNAMIC``: ::
|
||||
|
||||
collection_name:filename_pattern
|
||||
|
||||
The following example identifies two collection names and two filename
|
||||
patterns: ::
|
||||
|
||||
BBFILES_DYNAMIC += "\
|
||||
clang-layer:${LAYERDIR}/bbappends/meta-clang/*/*/*.bbappend \
|
||||
core:${LAYERDIR}/bbappends/openembedded-core/meta/*/*/*.bbappend \
|
||||
"
|
||||
|
||||
When the collection name is prefixed with "!" it will add the file pattern in case
|
||||
the layer is absent: ::
|
||||
|
||||
BBFILES_DYNAMIC += "\
|
||||
!clang-layer:${LAYERDIR}/backfill/meta-clang/*/*/*.bb \
|
||||
"
|
||||
|
||||
This next example shows an error message that occurs because invalid
|
||||
entries are found, which cause parsing to abort: ::
|
||||
|
||||
ERROR: BBFILES_DYNAMIC entries must be of the form {!}<collection name>:<filename pattern>, not:
|
||||
/work/my-layer/bbappends/meta-security-isafw/*/*/*.bbappend
|
||||
/work/my-layer/bbappends/openembedded-core/meta/*/*/*.bbappend
|
||||
|
||||
:term:`BB_GENERATE_MIRROR_TARBALLS`
|
||||
Causes tarballs of the Git repositories, including the Git metadata,
|
||||
to be placed in the :term:`DL_DIR` directory. Anyone
|
||||
@@ -281,6 +302,18 @@ overview of their function and contents.
|
||||
|
||||
BB_GENERATE_MIRROR_TARBALLS = "1"
|
||||
|
||||
:term:`BB_HASHCONFIG_WHITELIST`
|
||||
Lists variables that are excluded from base configuration checksum,
|
||||
which is used to determine if the cache can be reused.
|
||||
|
||||
One of the ways BitBake determines whether to re-parse the main
|
||||
metadata is through checksums of the variables in the datastore of
|
||||
the base configuration data. There are variables that you typically
|
||||
want to exclude when checking whether or not to re-parse and thus
|
||||
rebuild the cache. As an example, you would usually exclude ``TIME``
|
||||
and ``DATE`` because these variables are always changing. If you did
|
||||
not exclude them, BitBake would never reuse the cache.
|
||||
|
||||
:term:`BB_HASHBASE_WHITELIST`
|
||||
Lists variables that are excluded from checksum and dependency data.
|
||||
Variables that are excluded can therefore change without affecting
|
||||
@@ -302,28 +335,6 @@ overview of their function and contents.
|
||||
However, the more accurate the data returned, the more efficient the
|
||||
build will be.
|
||||
|
||||
:term:`BB_HASHCONFIG_WHITELIST`
|
||||
Lists variables that are excluded from base configuration checksum,
|
||||
which is used to determine if the cache can be reused.
|
||||
|
||||
One of the ways BitBake determines whether to re-parse the main
|
||||
metadata is through checksums of the variables in the datastore of
|
||||
the base configuration data. There are variables that you typically
|
||||
want to exclude when checking whether or not to re-parse and thus
|
||||
rebuild the cache. As an example, you would usually exclude ``TIME``
|
||||
and ``DATE`` because these variables are always changing. If you did
|
||||
not exclude them, BitBake would never reuse the cache.
|
||||
|
||||
:term:`BB_HASHSERVE`
Specifies the Hash Equivalence server to use.

If set to ``auto``, BitBake automatically starts its own server
over a UNIX domain socket.

If set to ``host:port``, BitBake will use a remote server on the
specified host. This allows multiple clients to share the same
hash equivalence data.

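Both forms of the setting look like the following sketch (the host name and port are placeholders, not values taken from the change above)::

    BB_HASHSERVE = "auto"
    BB_HASHSERVE = "hashserv.example.com:8686"
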
:term:`BB_INVALIDCONF`
|
||||
Used in combination with the ``ConfigParsed`` event to trigger
|
||||
re-parsing the base metadata (i.e. all the recipes). The
|
||||
@@ -338,9 +349,9 @@ overview of their function and contents.
|
||||
|
||||
:term:`BB_LOGFMT`
|
||||
Specifies the name of the log files saved into
|
||||
``${``\ :term:`T`\ ``}``. By default, the :term:`BB_LOGFMT`
|
||||
variable is undefined and the log filenames get created using the
|
||||
following form::
|
||||
``${``\ :term:`T`\ ``}``. By default, the ``BB_LOGFMT``
|
||||
variable is undefined and the log file names get created using the
|
||||
following form: ::
|
||||
|
||||
log.{task}.{pid}
|
||||
|
||||
@@ -362,15 +373,15 @@ overview of their function and contents.
|
||||
running builds when not connected to the Internet, and when operating
|
||||
in certain kinds of firewall environments.
|
||||
|
||||
:term:`BB_NUMBER_PARSE_THREADS`
Sets the number of threads BitBake uses when parsing. By default, the
number of threads is equal to the number of cores on the system.

:term:`BB_NUMBER_THREADS`
The maximum number of tasks BitBake should run in parallel at any one
time. If your host development system supports multiple cores, a good
rule of thumb is to set this variable to twice the number of cores.

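On an eight-core host, for instance, that rule of thumb would translate into something like the following (the values are purely illustrative)::

    BB_NUMBER_THREADS = "16"
    BB_NUMBER_PARSE_THREADS = "8"
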
:term:`BB_NUMBER_PARSE_THREADS`
|
||||
Sets the number of threads BitBake uses when parsing. By default, the
|
||||
number of threads is equal to the number of cores on the system.
|
||||
|
||||
:term:`BB_ORIGENV`
|
||||
Contains a copy of the original external environment in which BitBake
|
||||
was run. The copy is taken before any whitelisted variable values are
|
||||
@@ -393,8 +404,8 @@ overview of their function and contents.
|
||||
:term:`BB_RUNFMT`
|
||||
Specifies the name of the executable script files (i.e. run files)
|
||||
saved into ``${``\ :term:`T`\ ``}``. By default, the
|
||||
:term:`BB_RUNFMT` variable is undefined and the run filenames get
|
||||
created using the following form::
|
||||
``BB_RUNFMT`` variable is undefined and the run file names get
|
||||
created using the following form: ::
|
||||
|
||||
run.{task}.{pid}
|
||||
|
||||
@@ -437,6 +448,17 @@ overview of their function and contents.
|
||||
The function specified by this variable returns a "True" or "False"
|
||||
depending on whether the dependency needs to be met.
|
||||
|
||||
:term:`BB_SETSCENE_VERIFY_FUNCTION2`
|
||||
Specifies a function to call that verifies the list of planned task
|
||||
execution before the main task execution happens. The function is
|
||||
called once BitBake has a list of setscene tasks that have run and
|
||||
either succeeded or failed.
|
||||
|
||||
The function allows for a task list check to see if they make sense.
|
||||
Even if BitBake was planning to skip a task, the returned value of
|
||||
the function can force BitBake to run the task, which is necessary
|
||||
under certain metadata defined circumstances.
|
||||
|
||||
:term:`BB_SIGNATURE_EXCLUDE_FLAGS`
|
||||
Lists variable flags (varflags) that can be safely excluded from
|
||||
checksum and dependency data for keys in the datastore. When
|
||||
@@ -459,7 +481,7 @@ overview of their function and contents.
|
||||
:term:`BB_SRCREV_POLICY`
|
||||
Defines the behavior of the fetcher when it interacts with source
|
||||
control systems and dynamic source revisions. The
|
||||
:term:`BB_SRCREV_POLICY` variable is useful when working without a
|
||||
``BB_SRCREV_POLICY`` variable is useful when working without a
|
||||
network.
|
||||
|
||||
The variable can be set using one of two policies:
|
||||
@@ -503,7 +525,7 @@ overview of their function and contents.
|
||||
Allows adjustment of a task's Input/Output priority. During
|
||||
Autobuilder testing, random failures can occur for tasks due to I/O
|
||||
starvation. These failures occur during various QEMU runtime
|
||||
timeouts. You can use the :term:`BB_TASK_IONICE_LEVEL` variable to adjust
|
||||
timeouts. You can use the ``BB_TASK_IONICE_LEVEL`` variable to adjust
|
||||
the I/O priority of these tasks.
|
||||
|
||||
.. note::
|
||||
@@ -511,7 +533,7 @@ overview of their function and contents.
|
||||
This variable works similarly to the :term:`BB_TASK_NICE_LEVEL`
|
||||
variable except with a task's I/O priorities.
|
||||
|
||||
Set the variable as follows::
|
||||
Set the variable as follows: ::
|
||||
|
||||
BB_TASK_IONICE_LEVEL = "class.prio"
|
||||
|
||||
@@ -529,7 +551,7 @@ overview of their function and contents.
|
||||
In order for your I/O priority settings to take effect, you need the
|
||||
Completely Fair Queuing (CFQ) Scheduler selected for the backing block
|
||||
device. To select the scheduler, use the following command form where
|
||||
device is the device (e.g. sda, sdb, and so forth)::
|
||||
device is the device (e.g. sda, sdb, and so forth): ::
|
||||
|
||||
$ sudo sh -c "echo cfq > /sys/block/device/queue/scheduler"
|
||||
|
||||
@@ -538,7 +560,7 @@ overview of their function and contents.
|
||||
|
||||
You can use this variable in combination with task overrides to raise
|
||||
or lower priorities of specific tasks. For example, on the `Yocto
|
||||
Project <https://www.yoctoproject.org>`__ autobuilder, QEMU emulation
|
||||
Project <http://www.yoctoproject.org>`__ autobuilder, QEMU emulation
|
||||
in images is given a higher priority as compared to build tasks to
|
||||
ensure that images do not suffer timeouts on loaded systems.
|
||||
|
||||
@@ -570,20 +592,20 @@ overview of their function and contents.
|
||||
To build a different variant of the recipe with a minimal amount of
|
||||
code, it usually is as simple as adding the variable to your recipe.
|
||||
Here are two examples. The "native" variants are from the
|
||||
OpenEmbedded-Core metadata::
|
||||
OpenEmbedded-Core metadata: ::
|
||||
|
||||
BBCLASSEXTEND =+ "native nativesdk"
|
||||
BBCLASSEXTEND =+ "multilib:multilib_name"
|
||||
|
||||
.. note::
|
||||
|
||||
Internally, the :term:`BBCLASSEXTEND` mechanism generates recipe
|
||||
Internally, the ``BBCLASSEXTEND`` mechanism generates recipe
|
||||
variants by rewriting variable values and applying overrides such
|
||||
as ``_class-native``. For example, to generate a native version of
|
||||
a recipe, a :term:`DEPENDS` on "foo" is
|
||||
rewritten to a :term:`DEPENDS` on "foo-native".
|
||||
rewritten to a ``DEPENDS`` on "foo-native".
|
||||
|
||||
Even when using :term:`BBCLASSEXTEND`, the recipe is only parsed once.
|
||||
Even when using ``BBCLASSEXTEND``, the recipe is only parsed once.
|
||||
Parsing once adds some limitations. For example, it is not
|
||||
possible to include a different file depending on the variant,
|
||||
since ``include`` statements are processed when the recipe is
|
||||
@@ -619,14 +641,14 @@ overview of their function and contents.
|
||||
- effectively letting you control the precedence for the multiple
|
||||
layers. The precedence established through this variable stands
|
||||
regardless of a recipe's version (:term:`PV` variable).
|
||||
For example, a layer that has a recipe with a higher :term:`PV` value but
|
||||
for which the :term:`BBFILE_PRIORITY` is set to have a lower precedence
|
||||
For example, a layer that has a recipe with a higher ``PV`` value but
|
||||
for which the ``BBFILE_PRIORITY`` is set to have a lower precedence
|
||||
still has a lower precedence.
|
||||
|
||||
A larger value for the :term:`BBFILE_PRIORITY` variable results in a
|
||||
A larger value for the ``BBFILE_PRIORITY`` variable results in a
|
||||
higher precedence. For example, the value 6 has a higher precedence
|
||||
than the value 5. If not specified, the :term:`BBFILE_PRIORITY` variable
|
||||
is set based on layer dependencies (see the :term:`LAYERDEPENDS` variable
|
||||
than the value 5. If not specified, the ``BBFILE_PRIORITY`` variable
|
||||
is set based on layer dependencies (see the ``LAYERDEPENDS`` variable
|
||||
for more information. The default priority, if unspecified for a
|
||||
layer with no dependencies, is the lowest defined priority + 1 (or 1
|
||||
if no priorities are defined).
|
||||
@@ -645,45 +667,6 @@ overview of their function and contents.
|
||||
For details on the syntax, see the documentation by following the
|
||||
previous link.
|
||||
|
||||
:term:`BBFILES_DYNAMIC`
|
||||
Activates content depending on presence of identified layers. You
|
||||
identify the layers by the collections that the layers define.
|
||||
|
||||
Use the :term:`BBFILES_DYNAMIC` variable to avoid ``.bbappend`` files whose
|
||||
corresponding ``.bb`` file is in a layer that attempts to modify other
|
||||
layers through ``.bbappend`` but does not want to introduce a hard
|
||||
dependency on those other layers.
|
||||
|
||||
Additionally you can prefix the rule with "!" to add ``.bbappend`` and
|
||||
``.bb`` files in case a layer is not present. Use this to avoid a hard
|
||||
dependency on those other layers.
|
||||
|
||||
Use the following form for :term:`BBFILES_DYNAMIC`::
|
||||
|
||||
collection_name:filename_pattern
|
||||
|
||||
The following example identifies two collection names and two filename
|
||||
patterns::
|
||||
|
||||
BBFILES_DYNAMIC += "\
|
||||
clang-layer:${LAYERDIR}/bbappends/meta-clang/*/*/*.bbappend \
|
||||
core:${LAYERDIR}/bbappends/openembedded-core/meta/*/*/*.bbappend \
|
||||
"
|
||||
|
||||
When the collection name is prefixed with "!" it will add the file pattern in case
|
||||
the layer is absent::
|
||||
|
||||
BBFILES_DYNAMIC += "\
|
||||
!clang-layer:${LAYERDIR}/backfill/meta-clang/*/*/*.bb \
|
||||
"
|
||||
|
||||
This next example shows an error message that occurs because invalid
|
||||
entries are found, which cause parsing to abort::
|
||||
|
||||
ERROR: BBFILES_DYNAMIC entries must be of the form {!}<collection name>:<filename pattern>, not:
|
||||
/work/my-layer/bbappends/meta-security-isafw/*/*/*.bbappend
|
||||
/work/my-layer/bbappends/openembedded-core/meta/*/*/*.bbappend
|
||||
|
||||
:term:`BBINCLUDED`
|
||||
Contains a space-separated list of all files that BitBake's
|
||||
parser included during parsing of the current file.
|
||||
@@ -695,13 +678,13 @@ overview of their function and contents.
|
||||
:term:`BBINCLUDELOGS_LINES`
|
||||
If :term:`BBINCLUDELOGS` is set, specifies
|
||||
the maximum number of lines from the task log file to print when
|
||||
reporting a failed task. If you do not set :term:`BBINCLUDELOGS_LINES`,
|
||||
reporting a failed task. If you do not set ``BBINCLUDELOGS_LINES``,
|
||||
the entire log is printed.
|
||||
|
||||
:term:`BBLAYERS`
|
||||
Lists the layers to enable during the build. This variable is defined
|
||||
in the ``bblayers.conf`` configuration file in the build directory.
|
||||
Here is an example::
|
||||
Here is an example: ::
|
||||
|
||||
BBLAYERS = " \
|
||||
/home/scottrif/poky/meta \
|
||||
@@ -721,7 +704,7 @@ overview of their function and contents.
|
||||
:term:`BBMASK`
|
||||
Prevents BitBake from processing recipes and recipe append files.
|
||||
|
||||
You can use the :term:`BBMASK` variable to "hide" these ``.bb`` and
|
||||
You can use the ``BBMASK`` variable to "hide" these ``.bb`` and
|
||||
``.bbappend`` files. BitBake ignores any recipe or recipe append
|
||||
files that match any of the expressions. It is as if BitBake does not
|
||||
see them at all. Consequently, matching files are not parsed or
|
||||
@@ -735,13 +718,13 @@ overview of their function and contents.
|
||||
|
||||
The following example uses a complete regular expression to tell
|
||||
BitBake to ignore all recipe and recipe append files in the
|
||||
``meta-ti/recipes-misc/`` directory::
|
||||
``meta-ti/recipes-misc/`` directory: ::
|
||||
|
||||
BBMASK = "meta-ti/recipes-misc/"
|
||||
|
||||
If you want to mask out multiple directories or recipes, you can
|
||||
specify multiple regular expression fragments. This next example
|
||||
masks out multiple directories and individual recipes::
|
||||
masks out multiple directories and individual recipes: ::
|
||||
|
||||
BBMASK += "/meta-ti/recipes-misc/ meta-ti/recipes-ti/packagegroup/"
|
||||
BBMASK += "/meta-oe/recipes-support/"
|
||||
@@ -758,11 +741,11 @@ overview of their function and contents.
|
||||
Enables BitBake to perform multiple configuration builds and lists
|
||||
each separate configuration (multiconfig). You can use this variable
|
||||
to cause BitBake to build multiple targets where each target has a
|
||||
separate configuration. Define :term:`BBMULTICONFIG` in your
|
||||
separate configuration. Define ``BBMULTICONFIG`` in your
|
||||
``conf/local.conf`` configuration file.
|
||||
|
||||
As an example, the following line specifies three multiconfigs, each
|
||||
having a separate configuration file::
|
||||
having a separate configuration file: ::
|
||||
|
||||
BBMULTICONFIG = "configA configB configC"
|
||||
|
||||
@@ -770,7 +753,7 @@ overview of their function and contents.
|
||||
build directory within a directory named ``conf/multiconfig`` (e.g.
|
||||
build_directory\ ``/conf/multiconfig/configA.conf``).
|
||||
|
||||
For information on how to use :term:`BBMULTICONFIG` in an environment
|
||||
For information on how to use ``BBMULTICONFIG`` in an environment
|
||||
that supports building targets with multiple configurations, see the
|
||||
":ref:`bitbake-user-manual/bitbake-user-manual-intro:executing a multiple configuration build`"
|
||||
section.
|
||||
@@ -781,9 +764,9 @@ overview of their function and contents.
|
||||
variable.
|
||||
|
||||
If you run BitBake from a directory outside of the build directory,
|
||||
you must be sure to set :term:`BBPATH` to point to the build directory.
|
||||
you must be sure to set ``BBPATH`` to point to the build directory.
|
||||
Set the variable as you would any environment variable and then run
|
||||
BitBake::
|
||||
BitBake: ::
|
||||
|
||||
$ BBPATH="build_directory"
|
||||
$ export BBPATH
|
||||
@@ -797,6 +780,16 @@ overview of their function and contents.
|
||||
Allows you to use a configuration file to add to the list of
|
||||
command-line target recipes you want to build.
|
||||
|
||||
:term:`BBVERSIONS`
Allows a single recipe to build multiple versions of a project from a
single recipe file. You are also able to specify conditional metadata
using the :term:`OVERRIDES` mechanism for a
single version or for an optionally named range of versions.

For more information on ``BBVERSIONS``, see the
":ref:`bitbake-user-manual/bitbake-user-manual-metadata:variants - class extension mechanism`"
section.

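A hedged sketch of the kind of usage that section covers (the version list and repository URL below are invented for illustration)::

    BBVERSIONS = "1.0 2.0 git"
    SRC_URI_git = "git://example.org/project.git"
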
:term:`BITBAKE_UI`
Used to specify the UI module to use when running BitBake. Using this
variable is equivalent to using the ``-u`` command-line option.
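
Accepted values match those of ``-u`` (``knotty`` being the default console UI); for example::

    BITBAKE_UI = "ncurses"
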
@@ -828,7 +821,7 @@ overview of their function and contents.
|
||||
The most common usage of this is variable is to set it to "-1" within
|
||||
a recipe for a development version of a piece of software. Using the
|
||||
variable in this way causes the stable version of the recipe to build
|
||||
by default in the absence of :term:`PREFERRED_VERSION` being used to
|
||||
by default in the absence of ``PREFERRED_VERSION`` being used to
|
||||
build the development version.
|
||||
|
||||
.. note::
|
||||
@@ -841,8 +834,8 @@ overview of their function and contents.
|
||||
Lists a recipe's build-time dependencies (i.e. other recipe files).
|
||||
|
||||
Consider this simple example for two recipes named "a" and "b" that
|
||||
produce similarly named packages. In this example, the :term:`DEPENDS`
|
||||
statement appears in the "a" recipe::
|
||||
produce similarly named packages. In this example, the ``DEPENDS``
|
||||
statement appears in the "a" recipe: ::
|
||||
|
||||
DEPENDS = "b"
|
||||
|
||||
@@ -859,7 +852,7 @@ overview of their function and contents.
|
||||
|
||||
:term:`DL_DIR`
|
||||
The central download directory used by the build process to store
|
||||
downloads. By default, :term:`DL_DIR` gets files suitable for mirroring for
|
||||
downloads. By default, ``DL_DIR`` gets files suitable for mirroring for
|
||||
everything except Git repositories. If you want tarballs of Git
|
||||
repositories, use the :term:`BB_GENERATE_MIRROR_TARBALLS` variable.
|
||||
|
||||
@@ -874,14 +867,14 @@ overview of their function and contents.

.. note::

Recipes added to :term:`EXCLUDE_FROM_WORLD` may still be built during a world
Recipes added to ``EXCLUDE_FROM_WORLD`` may still be built during a world
build in order to satisfy dependencies of other recipes. Adding a
recipe to :term:`EXCLUDE_FROM_WORLD` only ensures that the recipe is not
recipe to ``EXCLUDE_FROM_WORLD`` only ensures that the recipe is not
explicitly added to the list of build targets in a world build.

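Setting the flag within a recipe is all that is needed to take it out of ``world`` builds (a minimal illustration)::

    EXCLUDE_FROM_WORLD = "1"
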
:term:`FAKEROOT`
|
||||
Contains the command to use when running a shell script in a fakeroot
|
||||
environment. The :term:`FAKEROOT` variable is obsolete and has been
|
||||
environment. The ``FAKEROOT`` variable is obsolete and has been
|
||||
replaced by the other ``FAKEROOT*`` variables. See these entries in
|
||||
the glossary for more information.
|
||||
|
||||
@@ -944,9 +937,9 @@ overview of their function and contents.
|
||||
Causes the named class or classes to be inherited globally. Anonymous
|
||||
functions in the class or classes are not executed for the base
|
||||
configuration and in each individual recipe. The OpenEmbedded build
|
||||
system ignores changes to :term:`INHERIT` in individual recipes.
|
||||
system ignores changes to ``INHERIT`` in individual recipes.
|
||||
|
||||
For more information on :term:`INHERIT`, see the
|
||||
For more information on ``INHERIT``, see the
|
||||
":ref:`bitbake-user-manual/bitbake-user-manual-metadata:\`\`inherit\`\` configuration directive`"
|
||||
section.
|
||||
|
||||
@@ -994,7 +987,7 @@ overview of their function and contents.
|
||||
the build system searches for source code, it first tries the local
|
||||
download directory. If that location fails, the build system tries
|
||||
locations defined by :term:`PREMIRRORS`, the
|
||||
upstream source, and then locations specified by :term:`MIRRORS` in that
|
||||
upstream source, and then locations specified by ``MIRRORS`` in that
|
||||
order.
|
||||
|
||||
:term:`MULTI_PROVIDER_WHITELIST`
|
||||
@@ -1011,12 +1004,12 @@ overview of their function and contents.
|
||||
``virtual/kernel``, and so forth).
|
||||
|
||||
:term:`OVERRIDES`
BitBake uses :term:`OVERRIDES` to control what variables are overridden
BitBake uses ``OVERRIDES`` to control what variables are overridden
after BitBake parses recipes and configuration files.

Following is a simple example that uses an overrides list based on
machine architectures: OVERRIDES = "arm:x86:mips:powerpc" You can
find information on how to use :term:`OVERRIDES` in the
find information on how to use ``OVERRIDES`` in the
":ref:`bitbake-user-manual/bitbake-user-manual-metadata:conditional syntax
(overrides)`" section.

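In practice, an item in the ``OVERRIDES`` list selects conditional assignments, as in this hedged sketch (the variable name and values are invented)::

    OVERRIDES = "architecture:os:machine"
    TEST = "default"
    TEST_os = "osspecific"
    # After parsing, TEST expands to "osspecific" because "os" appears in OVERRIDES.
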
@@ -1030,11 +1023,11 @@ overview of their function and contents.
|
||||
:term:`PACKAGES_DYNAMIC`
|
||||
A promise that your recipe satisfies runtime dependencies for
|
||||
optional modules that are found in other recipes.
|
||||
:term:`PACKAGES_DYNAMIC` does not actually satisfy the dependencies, it
|
||||
``PACKAGES_DYNAMIC`` does not actually satisfy the dependencies, it
|
||||
only states that they should be satisfied. For example, if a hard,
|
||||
runtime dependency (:term:`RDEPENDS`) of another
|
||||
package is satisfied during the build through the
|
||||
:term:`PACKAGES_DYNAMIC` variable, but a package with the module name is
|
||||
``PACKAGES_DYNAMIC`` variable, but a package with the module name is
|
||||
never actually produced, then the other package will be broken.
|
||||
|
||||
:term:`PE`
|
||||
@@ -1064,7 +1057,7 @@ overview of their function and contents.
|
||||
recipes provide the same item. You should always suffix the variable
|
||||
with the name of the provided item, and you should set it to the
|
||||
:term:`PN` of the recipe to which you want to give
|
||||
precedence. Some examples::
|
||||
precedence. Some examples: ::
|
||||
|
||||
PREFERRED_PROVIDER_virtual/kernel ?= "linux-yocto"
|
||||
PREFERRED_PROVIDER_virtual/xserver = "xserver-xf86"
|
||||
@@ -1073,30 +1066,30 @@ overview of their function and contents.
|
||||
:term:`PREFERRED_PROVIDERS`
|
||||
Determines which recipe should be given preference for cases where
|
||||
multiple recipes provide the same item. Functionally,
|
||||
:term:`PREFERRED_PROVIDERS` is identical to
|
||||
:term:`PREFERRED_PROVIDER`. However, the :term:`PREFERRED_PROVIDERS` variable
|
||||
``PREFERRED_PROVIDERS`` is identical to
|
||||
:term:`PREFERRED_PROVIDER`. However, the ``PREFERRED_PROVIDERS`` variable
|
||||
lets you define preferences for multiple situations using the following
|
||||
form::
|
||||
form: ::
|
||||
|
||||
PREFERRED_PROVIDERS = "xxx:yyy aaa:bbb ..."
|
||||
|
||||
This form is a convenient replacement for the following::
|
||||
This form is a convenient replacement for the following: ::
|
||||
|
||||
PREFERRED_PROVIDER_xxx = "yyy"
|
||||
PREFERRED_PROVIDER_aaa = "bbb"
|
||||
|
||||
:term:`PREFERRED_VERSION`
|
||||
If there are multiple versions of a recipe available, this variable
|
||||
determines which version should be given preference. You must always
|
||||
If there are multiple versions of recipes available, this variable
|
||||
determines which recipe should be given preference. You must always
|
||||
suffix the variable with the :term:`PN` you want to
|
||||
select, and you should set :term:`PV` accordingly for
|
||||
precedence.
|
||||
|
||||
The :term:`PREFERRED_VERSION` variable supports limited wildcard use
|
||||
The ``PREFERRED_VERSION`` variable supports limited wildcard use
|
||||
through the "``%``" character. You can use the character to match any
|
||||
number of characters, which can be useful when specifying versions
|
||||
that contain long revision numbers that potentially change. Here are
|
||||
two examples::
|
||||
two examples: ::
|
||||
|
||||
PREFERRED_VERSION_python = "2.7.3"
|
||||
PREFERRED_VERSION_linux-yocto = "4.12%"
|
||||
@@ -1107,22 +1100,18 @@ overview of their function and contents.
|
||||
end of the string. You cannot use the wildcard character in any other
|
||||
location of the string.
|
||||
|
||||
If a recipe with the specified version is not available, a warning
|
||||
message will be shown. See :term:`REQUIRED_VERSION` if you want this
|
||||
to be an error instead.
|
||||
|
||||
:term:`PREMIRRORS`
|
||||
Specifies additional paths from which BitBake gets source code. When
|
||||
the build system searches for source code, it first tries the local
|
||||
download directory. If that location fails, the build system tries
|
||||
locations defined by :term:`PREMIRRORS`, the upstream source, and then
|
||||
locations defined by ``PREMIRRORS``, the upstream source, and then
|
||||
locations specified by :term:`MIRRORS` in that order.
|
||||
|
||||
Typically, you would add a specific server for the build system to
|
||||
attempt before any others by adding something like the following to
|
||||
your configuration::
|
||||
your configuration: ::
|
||||
|
||||
PREMIRRORS:prepend = "\
|
||||
PREMIRRORS_prepend = "\
|
||||
git://.*/.* http://www.yoctoproject.org/sources/ \n \
|
||||
ftp://.*/.* http://www.yoctoproject.org/sources/ \n \
|
||||
http://.*/.* http://www.yoctoproject.org/sources/ \n \
|
||||
@@ -1135,25 +1124,25 @@ overview of their function and contents.
|
||||
|
||||
:term:`PROVIDES`
|
||||
A list of aliases by which a particular recipe can be known. By
|
||||
default, a recipe's own :term:`PN` is implicitly already in its
|
||||
:term:`PROVIDES` list. If a recipe uses :term:`PROVIDES`, the additional
|
||||
default, a recipe's own ``PN`` is implicitly already in its
|
||||
``PROVIDES`` list. If a recipe uses ``PROVIDES``, the additional
|
||||
aliases are synonyms for the recipe and can be useful satisfying
|
||||
dependencies of other recipes during the build as specified by
|
||||
:term:`DEPENDS`.
|
||||
``DEPENDS``.
|
||||
|
||||
Consider the following example :term:`PROVIDES` statement from a recipe
|
||||
file ``libav_0.8.11.bb``::
|
||||
Consider the following example ``PROVIDES`` statement from a recipe
|
||||
file ``libav_0.8.11.bb``: ::
|
||||
|
||||
PROVIDES += "libpostproc"
|
||||
|
||||
The :term:`PROVIDES` statement results in the "libav" recipe also being known
|
||||
The ``PROVIDES`` statement results in the "libav" recipe also being known
|
||||
as "libpostproc".
|
||||
|
||||
In addition to providing recipes under alternate names, the
|
||||
:term:`PROVIDES` mechanism is also used to implement virtual targets. A
|
||||
``PROVIDES`` mechanism is also used to implement virtual targets. A
|
||||
virtual target is a name that corresponds to some particular
|
||||
functionality (e.g. a Linux kernel). Recipes that provide the
|
||||
functionality in question list the virtual target in :term:`PROVIDES`.
|
||||
functionality in question list the virtual target in ``PROVIDES``.
|
||||
Recipes that depend on the functionality in question can include the
|
||||
virtual target in :term:`DEPENDS` to leave the
|
||||
choice of provider open.
|
||||
@@ -1165,12 +1154,12 @@ overview of their function and contents.
|
||||
:term:`PRSERV_HOST`
|
||||
The network based :term:`PR` service host and port.
|
||||
|
||||
Following is an example of how the :term:`PRSERV_HOST` variable is set::
|
||||
Following is an example of how the ``PRSERV_HOST`` variable is set: ::
|
||||
|
||||
PRSERV_HOST = "localhost:0"
|
||||
|
||||
You must set the variable if you want to automatically start a local PR
|
||||
service. You can set :term:`PRSERV_HOST` to other values to use a remote PR
|
||||
service. You can set ``PRSERV_HOST`` to other values to use a remote PR
|
||||
service.
|
||||
|
||||
:term:`PV`
|
||||
@@ -1182,26 +1171,26 @@ overview of their function and contents.
|
||||
a package in this list cannot be found during the build, you will get
|
||||
a build error.
|
||||
|
||||
Because the :term:`RDEPENDS` variable applies to packages being built,
|
||||
Because the ``RDEPENDS`` variable applies to packages being built,
|
||||
you should always use the variable in a form with an attached package
|
||||
name. For example, suppose you are building a development package
|
||||
that depends on the ``perl`` package. In this case, you would use the
|
||||
following :term:`RDEPENDS` statement::
|
||||
following ``RDEPENDS`` statement: ::
|
||||
|
||||
RDEPENDS:${PN}-dev += "perl"
|
||||
RDEPENDS_${PN}-dev += "perl"
|
||||
|
||||
In the example, the development package depends on the ``perl`` package.
|
||||
Thus, the :term:`RDEPENDS` variable has the ``${PN}-dev`` package name as part
|
||||
Thus, the ``RDEPENDS`` variable has the ``${PN}-dev`` package name as part
|
||||
of the variable.
|
||||
|
||||
BitBake supports specifying versioned dependencies. Although the
|
||||
syntax varies depending on the packaging format, BitBake hides these
|
||||
differences from you. Here is the general syntax to specify versions
|
||||
with the :term:`RDEPENDS` variable::
|
||||
with the ``RDEPENDS`` variable: ::
|
||||
|
||||
RDEPENDS:${PN} = "package (operator version)"
|
||||
RDEPENDS_${PN} = "package (operator version)"
|
||||
|
||||
For ``operator``, you can specify the following::
|
||||
For ``operator``, you can specify the following: ::
|
||||
|
||||
=
|
||||
<
|
||||
@@ -1210,9 +1199,9 @@ overview of their function and contents.
|
||||
>=
|
||||
|
||||
For example, the following sets up a dependency on version 1.2 or
|
||||
greater of the package ``foo``::
|
||||
greater of the package ``foo``: ::
|
||||
|
||||
RDEPENDS:${PN} = "foo (>= 1.2)"
|
||||
RDEPENDS_${PN} = "foo (>= 1.2)"
|
||||
|
||||
For information on build-time dependencies, see the :term:`DEPENDS`
|
||||
variable.
|
||||
@@ -1221,43 +1210,33 @@ overview of their function and contents.
|
||||
The directory in which a local copy of a ``google-repo`` directory is
|
||||
stored when it is synced.
|
||||
|
||||
:term:`REQUIRED_VERSION`
|
||||
If there are multiple versions of a recipe available, this variable
|
||||
determines which version should be given preference. :term:`REQUIRED_VERSION`
|
||||
works in exactly the same manner as :term:`PREFERRED_VERSION`, except
|
||||
that if the specified version is not available then an error message
|
||||
is shown and the build fails immediately.
|
||||
|
||||
If both :term:`REQUIRED_VERSION` and :term:`PREFERRED_VERSION` are set for
|
||||
the same recipe, the :term:`REQUIRED_VERSION` value applies.
|
||||
|
||||
:term:`RPROVIDES`
|
||||
A list of package name aliases that a package also provides. These
|
||||
aliases are useful for satisfying runtime dependencies of other
|
||||
packages both during the build and on the target (as specified by
|
||||
:term:`RDEPENDS`).
|
||||
``RDEPENDS``).
|
||||
|
||||
As with all package-controlling variables, you must always use the
|
||||
variable in conjunction with a package name override. Here is an
|
||||
example::
|
||||
example: ::
|
||||
|
||||
RPROVIDES:${PN} = "widget-abi-2"
|
||||
RPROVIDES_${PN} = "widget-abi-2"
|
||||
|
||||
:term:`RRECOMMENDS`
|
||||
A list of packages that extends the usability of a package being
|
||||
built. The package being built does not depend on this list of
|
||||
packages in order to successfully build, but needs them for the
|
||||
extended usability. To specify runtime dependencies for packages, see
|
||||
the :term:`RDEPENDS` variable.
|
||||
the ``RDEPENDS`` variable.
|
||||
|
||||
BitBake supports specifying versioned recommends. Although the syntax
|
||||
varies depending on the packaging format, BitBake hides these
|
||||
differences from you. Here is the general syntax to specify versions
|
||||
with the :term:`RRECOMMENDS` variable::
|
||||
with the ``RRECOMMENDS`` variable: ::
|
||||
|
||||
RRECOMMENDS:${PN} = "package (operator version)"
|
||||
RRECOMMENDS_${PN} = "package (operator version)"
|
||||
|
||||
For ``operator``, you can specify the following::
|
||||
For ``operator``, you can specify the following: ::
|
||||
|
||||
=
|
||||
<
|
||||
@@ -1266,9 +1245,9 @@ overview of their function and contents.
|
||||
>=
|
||||
|
||||
For example, the following sets up a recommend on version
|
||||
1.2 or greater of the package ``foo``::
|
||||
1.2 or greater of the package ``foo``: ::
|
||||
|
||||
RRECOMMENDS:${PN} = "foo (>= 1.2)"
|
||||
RRECOMMENDS_${PN} = "foo (>= 1.2)"
|
||||
|
||||
:term:`SECTION`
|
||||
The section in which packages should be categorized.
|
||||
@@ -1277,10 +1256,10 @@ overview of their function and contents.
|
||||
The list of source files - local or remote. This variable tells
|
||||
BitBake which bits to pull for the build and how to pull them. For
|
||||
example, if the recipe or append file needs to fetch a single tarball
|
||||
from the Internet, the recipe or append file uses a :term:`SRC_URI` entry
|
||||
from the Internet, the recipe or append file uses a ``SRC_URI`` entry
|
||||
that specifies that tarball. On the other hand, if the recipe or
|
||||
append file needs to fetch a tarball and include a custom file, the
|
||||
recipe or append file needs an :term:`SRC_URI` variable that specifies
|
||||
recipe or append file needs an ``SRC_URI`` variable that specifies
|
||||
all those sources.
|
||||
|
||||
The following list explains the available URI protocols:
|
||||
@@ -1320,8 +1299,6 @@ overview of their function and contents.
|
||||
- ``svn://`` : Fetches files from a Subversion (``svn``) revision
|
||||
control repository.
|
||||
|
||||
- ``az://`` : Fetches files from an Azure Storage account using HTTPS.
|
||||
|
||||
Here are some additional options worth mentioning:
|
||||
|
||||
- ``unpack`` : Controls whether or not to unpack the file if it is
|
||||
@@ -1333,8 +1310,8 @@ overview of their function and contents.
|
||||
subdirectory within the archive.
|
||||
|
||||
- ``name`` : Specifies a name to be used for association with
|
||||
:term:`SRC_URI` checksums when you have more than one file specified
|
||||
in :term:`SRC_URI`.
|
||||
``SRC_URI`` checksums when you have more than one file specified
|
||||
in ``SRC_URI``.
|
||||
|
||||
- ``downloadfilename`` : Specifies the filename used when storing
|
||||
the downloaded file.
|
||||
@@ -1349,7 +1326,7 @@ overview of their function and contents.
|
||||
variable applies only when using Subversion, Git, Mercurial and
|
||||
Bazaar. If you want to build a fixed revision and you want to avoid
|
||||
performing a query on the remote repository every time BitBake parses
|
||||
your recipe, you should specify a :term:`SRCREV` that is a full revision
|
||||
your recipe, you should specify a ``SRCREV`` that is a full revision
|
||||
identifier and not just a tag.
|
||||
|
||||
:term:`SRCREV_FORMAT`
|
||||
@@ -1358,10 +1335,10 @@ overview of their function and contents.
|
||||
:term:`SRC_URI`.
|
||||
|
||||
The system needs help constructing these values under these
|
||||
circumstances. Each component in the :term:`SRC_URI` is assigned a name
|
||||
and these are referenced in the :term:`SRCREV_FORMAT` variable. Consider
|
||||
circumstances. Each component in the ``SRC_URI`` is assigned a name
|
||||
and these are referenced in the ``SRCREV_FORMAT`` variable. Consider
|
||||
an example with URLs named "machine" and "meta". In this case,
|
||||
:term:`SRCREV_FORMAT` could look like "machine_meta" and those names
|
||||
``SRCREV_FORMAT`` could look like "machine_meta" and those names
|
||||
would have the SCM versions substituted into each position. Only one
|
||||
``AUTOINC`` placeholder is added and if needed. And, this placeholder
|
||||
is placed at the start of the returned string.
|
||||
@@ -1373,7 +1350,7 @@ overview of their function and contents.
|
||||
|
||||
:term:`STAMPCLEAN`
|
||||
Specifies the base path used to create recipe stamp files. Unlike the
|
||||
:term:`STAMP` variable, :term:`STAMPCLEAN` can contain
|
||||
:term:`STAMP` variable, ``STAMPCLEAN`` can contain
|
||||
wildcards to match the range of files a clean operation should
|
||||
remove. BitBake uses a clean operation to remove any other stamps it
|
||||
should be removing when creating a new stamp.
|
||||
|
||||
@@ -14,7 +14,6 @@
# import sys
# sys.path.insert(0, os.path.abspath('.'))

import sys
import datetime

current_version = "dev"

@@ -9,11 +9,11 @@
# SPDX-License-Identifier: GPL-2.0-only
#

__version__ = "1.52.0"
__version__ = "1.48.0"

import sys
if sys.version_info < (3, 6, 0):
    raise RuntimeError("Sorry, python 3.6.0 or later is required for this version of bitbake")
if sys.version_info < (3, 5, 0):
    raise RuntimeError("Sorry, python 3.5.0 or later is required for this version of bitbake")


class BBHandledException(Exception):
|
||||
@@ -21,8 +21,8 @@ class BBHandledException(Exception):
|
||||
The big dilemma for generic bitbake code is what information to give the user
|
||||
when an exception occurs. Any exception inheriting this base exception class
|
||||
has already provided information to the user via some 'fired' message type such as
|
||||
an explicitly fired event using bb.fire, or a bb.error message. If bitbake
|
||||
encounters an exception derived from this class, no backtrace or other information
|
||||
an explicitly fired event using bb.fire, or a bb.error message. If bitbake
|
||||
encounters an exception derived from this class, no backtrace or other information
|
||||
will be given to the user, its assumed the earlier event provided the relevant information.
|
||||
"""
|
||||
pass
|
||||
@@ -42,23 +42,14 @@ class BBLoggerMixin(object):
|
||||
|
||||
def setup_bblogger(self, name):
|
||||
if name.split(".")[0] == "BitBake":
|
||||
self.debug = self._debug_helper
|
||||
|
||||
def _debug_helper(self, *args, **kwargs):
|
||||
return self.bbdebug(1, *args, **kwargs)
|
||||
|
||||
def debug2(self, *args, **kwargs):
|
||||
return self.bbdebug(2, *args, **kwargs)
|
||||
|
||||
def debug3(self, *args, **kwargs):
|
||||
return self.bbdebug(3, *args, **kwargs)
|
||||
self.debug = self.bbdebug
|
||||
|
||||
def bbdebug(self, level, msg, *args, **kwargs):
|
||||
loglevel = logging.DEBUG - level + 1
|
||||
if not bb.event.worker_pid:
|
||||
if self.name in bb.msg.loggerDefaultDomains and loglevel > (bb.msg.loggerDefaultDomains[self.name]):
|
||||
return
|
||||
if loglevel < bb.msg.loggerDefaultLogLevel:
|
||||
if loglevel > bb.msg.loggerDefaultLogLevel:
|
||||
return
|
||||
return self.log(loglevel, msg, *args, **kwargs)
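The hunk above is where BitBake maps its own numeric debug levels onto the standard logging module: bbdebug() computes loglevel = logging.DEBUG - level + 1, so BitBake debug level 1 lands on logging.DEBUG and each higher level sits one step below it. The standalone sketch below is not part of the diff; the helper name is invented purely to spell out that arithmetic.

import logging

def bb_debug_to_loglevel(level):
    # Same arithmetic as BBLoggerMixin.bbdebug() in the hunk above.
    return logging.DEBUG - level + 1

assert bb_debug_to_loglevel(1) == logging.DEBUG      # logger.debug() / bb.debug(1, ...)
assert bb_debug_to_loglevel(2) == logging.DEBUG - 1  # logger.debug2()
assert bb_debug_to_loglevel(3) == logging.DEBUG - 2  # logger.debug3()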
|
||||
|
||||
@@ -137,7 +128,7 @@ def debug(lvl, *args):
|
||||
mainlogger.warning("Passed invalid debug level '%s' to bb.debug", lvl)
|
||||
args = (lvl,) + args
|
||||
lvl = 1
|
||||
mainlogger.bbdebug(lvl, ''.join(args))
|
||||
mainlogger.debug(lvl, ''.join(args))
|
||||
|
||||
def note(*args):
|
||||
mainlogger.info(''.join(args))
|
||||
|
||||
@@ -1,31 +0,0 @@
#
# SPDX-License-Identifier: GPL-2.0-only
#

import itertools
import json

# The Python async server defaults to a 64K receive buffer, so we hardcode our
# maximum chunk size. It would be better if the client and server reported to
# each other what the maximum chunk sizes were, but that will slow down the
# connection setup with a round trip delay so I'd rather not do that unless it
# is necessary
DEFAULT_MAX_CHUNK = 32 * 1024


def chunkify(msg, max_chunk):
    if len(msg) < max_chunk - 1:
        yield ''.join((msg, "\n"))
    else:
        yield ''.join((json.dumps({
                'chunk-stream': None
            }), "\n"))

        args = [iter(msg)] * (max_chunk - 1)
        for m in map(''.join, itertools.zip_longest(*args, fillvalue='')):
            yield ''.join(itertools.chain(m, "\n"))
        yield "\n"


from .client import AsyncClient, Client
from .serv import AsyncServer, AsyncServerConnection
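As the comment block above explains, messages larger than the receive buffer are framed as a "chunk-stream" marker line, a run of fixed-size chunk lines, and a blank terminator line. The sketch below is not part of the diff: it round-trips a message through chunkify() to illustrate that framing. It assumes the bb.asyncrpc package shown above (present on the yocto-3.4 side of this compare) is importable, and the reassemble() helper is invented here, simply mirroring what the client and server code later in this listing do.

import json
from bb.asyncrpc import chunkify, DEFAULT_MAX_CHUNK

def reassemble(lines):
    # Inverse of chunkify(): small messages arrive as a single line, large ones
    # as a chunk-stream marker followed by chunk lines and a blank terminator.
    first = json.loads(lines[0])
    if not (isinstance(first, dict) and "chunk-stream" in first):
        return first
    body = []
    for line in lines[1:]:
        line = line.rstrip("\n")
        if not line:
            break
        body.append(line)
    return json.loads("".join(body))

msg = json.dumps({"ping": {}, "payload": "x" * (64 * 1024)})
framed = list(chunkify(msg, DEFAULT_MAX_CHUNK))
assert reassemble(framed) == json.loads(msg)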
|
||||
@@ -1,172 +0,0 @@
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import abc
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
from . import chunkify, DEFAULT_MAX_CHUNK
|
||||
|
||||
|
||||
class AsyncClient(object):
|
||||
def __init__(self, proto_name, proto_version, logger, timeout=30):
|
||||
self.reader = None
|
||||
self.writer = None
|
||||
self.max_chunk = DEFAULT_MAX_CHUNK
|
||||
self.proto_name = proto_name
|
||||
self.proto_version = proto_version
|
||||
self.logger = logger
|
||||
self.timeout = timeout
|
||||
|
||||
async def connect_tcp(self, address, port):
|
||||
async def connect_sock():
|
||||
return await asyncio.open_connection(address, port)
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
async def connect_unix(self, path):
|
||||
async def connect_sock():
|
||||
return await asyncio.open_unix_connection(path)
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
async def setup_connection(self):
|
||||
s = '%s %s\n\n' % (self.proto_name, self.proto_version)
|
||||
self.writer.write(s.encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
|
||||
async def connect(self):
|
||||
if self.reader is None or self.writer is None:
|
||||
(self.reader, self.writer) = await self._connect_sock()
|
||||
await self.setup_connection()
|
||||
|
||||
async def close(self):
|
||||
self.reader = None
|
||||
|
||||
if self.writer is not None:
|
||||
self.writer.close()
|
||||
self.writer = None
|
||||
|
||||
async def _send_wrapper(self, proc):
|
||||
count = 0
|
||||
while True:
|
||||
try:
|
||||
await self.connect()
|
||||
return await proc()
|
||||
except (
|
||||
OSError,
|
||||
ConnectionError,
|
||||
json.JSONDecodeError,
|
||||
UnicodeDecodeError,
|
||||
) as e:
|
||||
self.logger.warning("Error talking to server: %s" % e)
|
||||
if count >= 3:
|
||||
if not isinstance(e, ConnectionError):
|
||||
raise ConnectionError(str(e))
|
||||
raise e
|
||||
await self.close()
|
||||
count += 1
|
||||
|
||||
async def send_message(self, msg):
|
||||
async def get_line():
|
||||
try:
|
||||
line = await asyncio.wait_for(self.reader.readline(), self.timeout)
|
||||
except asyncio.TimeoutError:
|
||||
raise ConnectionError("Timed out waiting for server")
|
||||
|
||||
if not line:
|
||||
raise ConnectionError("Connection closed")
|
||||
|
||||
line = line.decode("utf-8")
|
||||
|
||||
if not line.endswith("\n"):
|
||||
raise ConnectionError("Bad message %r" % (line))
|
||||
|
||||
return line
|
||||
|
||||
async def proc():
|
||||
for c in chunkify(json.dumps(msg), self.max_chunk):
|
||||
self.writer.write(c.encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
|
||||
l = await get_line()
|
||||
|
||||
m = json.loads(l)
|
||||
if m and "chunk-stream" in m:
|
||||
lines = []
|
||||
while True:
|
||||
l = (await get_line()).rstrip("\n")
|
||||
if not l:
|
||||
break
|
||||
lines.append(l)
|
||||
|
||||
m = json.loads("".join(lines))
|
||||
|
||||
return m
|
||||
|
||||
return await self._send_wrapper(proc)
|
||||
|
||||
async def ping(self):
|
||||
return await self.send_message(
|
||||
{'ping': {}}
|
||||
)
|
||||
|
||||
|
||||
class Client(object):
|
||||
def __init__(self):
|
||||
self.client = self._get_async_client()
|
||||
self.loop = asyncio.new_event_loop()
|
||||
|
||||
# Override any pre-existing loop.
|
||||
# Without this, the PR server export selftest triggers a hang
|
||||
# when running with Python 3.7. The drawback is that there is
|
||||
# potential for issues if the PR and hash equiv (or some new)
|
||||
# clients need to both be instantiated in the same process.
|
||||
# This should be revisited if/when Python 3.9 becomes the
|
||||
# minimum required version for BitBake, as it seems not
|
||||
# required (but harmless) with it.
|
||||
asyncio.set_event_loop(self.loop)
|
||||
|
||||
self._add_methods('connect_tcp', 'ping')
|
||||
|
||||
@abc.abstractmethod
|
||||
def _get_async_client(self):
|
||||
pass
|
||||
|
||||
def _get_downcall_wrapper(self, downcall):
|
||||
def wrapper(*args, **kwargs):
|
||||
return self.loop.run_until_complete(downcall(*args, **kwargs))
|
||||
|
||||
return wrapper
|
||||
|
||||
def _add_methods(self, *methods):
|
||||
for m in methods:
|
||||
downcall = getattr(self.client, m)
|
||||
setattr(self, m, self._get_downcall_wrapper(downcall))
|
||||
|
||||
def connect_unix(self, path):
|
||||
# AF_UNIX has path length issues so chdir here to workaround
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
os.chdir(os.path.dirname(path))
|
||||
self.loop.run_until_complete(self.client.connect_unix(os.path.basename(path)))
|
||||
self.loop.run_until_complete(self.client.connect())
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
|
||||
@property
|
||||
def max_chunk(self):
|
||||
return self.client.max_chunk
|
||||
|
||||
@max_chunk.setter
|
||||
def max_chunk(self, value):
|
||||
self.client.max_chunk = value
|
||||
|
||||
def close(self):
|
||||
self.loop.run_until_complete(self.client.close())
|
||||
if sys.version_info >= (3, 6):
|
||||
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
|
||||
self.loop.close()
|
||||
@@ -1,286 +0,0 @@
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import abc
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
import signal
|
||||
import socket
|
||||
import sys
|
||||
import multiprocessing
|
||||
from . import chunkify, DEFAULT_MAX_CHUNK
|
||||
|
||||
|
||||
class ClientError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ServerError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class AsyncServerConnection(object):
|
||||
def __init__(self, reader, writer, proto_name, logger):
|
||||
self.reader = reader
|
||||
self.writer = writer
|
||||
self.proto_name = proto_name
|
||||
self.max_chunk = DEFAULT_MAX_CHUNK
|
||||
self.handlers = {
|
||||
'chunk-stream': self.handle_chunk,
|
||||
'ping': self.handle_ping,
|
||||
}
|
||||
self.logger = logger
|
||||
|
||||
async def process_requests(self):
|
||||
try:
|
||||
self.addr = self.writer.get_extra_info('peername')
|
||||
self.logger.debug('Client %r connected' % (self.addr,))
|
||||
|
||||
# Read protocol and version
|
||||
client_protocol = await self.reader.readline()
|
||||
if client_protocol is None:
|
||||
return
|
||||
|
||||
(client_proto_name, client_proto_version) = client_protocol.decode('utf-8').rstrip().split()
|
||||
if client_proto_name != self.proto_name:
|
||||
self.logger.debug('Rejecting invalid protocol %s' % (self.proto_name))
|
||||
return
|
||||
|
||||
self.proto_version = tuple(int(v) for v in client_proto_version.split('.'))
|
||||
if not self.validate_proto_version():
|
||||
self.logger.debug('Rejecting invalid protocol version %s' % (client_proto_version))
|
||||
return
|
||||
|
||||
# Read headers. Currently, no headers are implemented, so look for
|
||||
# an empty line to signal the end of the headers
|
||||
while True:
|
||||
line = await self.reader.readline()
|
||||
if line is None:
|
||||
return
|
||||
|
||||
line = line.decode('utf-8').rstrip()
|
||||
if not line:
|
||||
break
|
||||
|
||||
# Handle messages
|
||||
while True:
|
||||
d = await self.read_message()
|
||||
if d is None:
|
||||
break
|
||||
await self.dispatch_message(d)
|
||||
await self.writer.drain()
|
||||
except ClientError as e:
|
||||
self.logger.error(str(e))
|
||||
finally:
|
||||
self.writer.close()
|
||||
|
||||
async def dispatch_message(self, msg):
|
||||
for k in self.handlers.keys():
|
||||
if k in msg:
|
||||
self.logger.debug('Handling %s' % k)
|
||||
await self.handlers[k](msg[k])
|
||||
return
|
||||
|
||||
raise ClientError("Unrecognized command %r" % msg)
|
||||
|
||||
def write_message(self, msg):
|
||||
for c in chunkify(json.dumps(msg), self.max_chunk):
|
||||
self.writer.write(c.encode('utf-8'))
|
||||
|
||||
async def read_message(self):
|
||||
l = await self.reader.readline()
|
||||
if not l:
|
||||
return None
|
||||
|
||||
try:
|
||||
message = l.decode('utf-8')
|
||||
|
||||
if not message.endswith('\n'):
|
||||
return None
|
||||
|
||||
return json.loads(message)
|
||||
except (json.JSONDecodeError, UnicodeDecodeError) as e:
|
||||
self.logger.error('Bad message from client: %r' % message)
|
||||
raise e
|
||||
|
||||
async def handle_chunk(self, request):
|
||||
lines = []
|
||||
try:
|
||||
while True:
|
||||
l = await self.reader.readline()
|
||||
l = l.rstrip(b"\n").decode("utf-8")
|
||||
if not l:
|
||||
break
|
||||
lines.append(l)
|
||||
|
||||
msg = json.loads(''.join(lines))
|
||||
except (json.JSONDecodeError, UnicodeDecodeError) as e:
|
||||
self.logger.error('Bad message from client: %r' % lines)
|
||||
raise e
|
||||
|
||||
if 'chunk-stream' in msg:
|
||||
raise ClientError("Nested chunks are not allowed")
|
||||
|
||||
await self.dispatch_message(msg)
|
||||
|
||||
async def handle_ping(self, request):
|
||||
response = {'alive': True}
|
||||
self.write_message(response)
|
||||
|
||||
|
||||
class AsyncServer(object):
|
||||
def __init__(self, logger):
|
||||
self._cleanup_socket = None
|
||||
self.logger = logger
|
||||
self.start = None
|
||||
self.address = None
|
||||
self.loop = None
|
||||
|
||||
def start_tcp_server(self, host, port):
|
||||
def start_tcp():
|
||||
self.server = self.loop.run_until_complete(
|
||||
asyncio.start_server(self.handle_client, host, port)
|
||||
)
|
||||
|
||||
for s in self.server.sockets:
|
||||
self.logger.debug('Listening on %r' % (s.getsockname(),))
|
||||
# Newer python does this automatically. Do it manually here for
|
||||
# maximum compatibility
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_QUICKACK, 1)
|
||||
|
||||
name = self.server.sockets[0].getsockname()
|
||||
if self.server.sockets[0].family == socket.AF_INET6:
|
||||
self.address = "[%s]:%d" % (name[0], name[1])
|
||||
else:
|
||||
self.address = "%s:%d" % (name[0], name[1])
|
||||
|
||||
self.start = start_tcp
|
||||
|
||||
def start_unix_server(self, path):
|
||||
def cleanup():
|
||||
os.unlink(path)
|
||||
|
||||
def start_unix():
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
# Work around path length limits in AF_UNIX
|
||||
os.chdir(os.path.dirname(path))
|
||||
self.server = self.loop.run_until_complete(
|
||||
asyncio.start_unix_server(self.handle_client, os.path.basename(path))
|
||||
)
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
|
||||
self.logger.debug('Listening on %r' % path)
|
||||
|
||||
self._cleanup_socket = cleanup
|
||||
self.address = "unix://%s" % os.path.abspath(path)
|
||||
|
||||
self.start = start_unix
|
||||
|
||||
@abc.abstractmethod
|
||||
def accept_client(self, reader, writer):
|
||||
pass
|
||||
|
||||
async def handle_client(self, reader, writer):
|
||||
# writer.transport.set_write_buffer_limits(0)
|
||||
try:
|
||||
client = self.accept_client(reader, writer)
|
||||
await client.process_requests()
|
||||
except Exception as e:
|
||||
import traceback
|
||||
self.logger.error('Error from client: %s' % str(e), exc_info=True)
|
||||
traceback.print_exc()
|
||||
writer.close()
|
||||
self.logger.debug('Client disconnected')
|
||||
|
||||
def run_loop_forever(self):
|
||||
try:
|
||||
self.loop.run_forever()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
def signal_handler(self):
|
||||
self.logger.debug("Got exit signal")
|
||||
self.loop.stop()
|
||||
|
||||
def _serve_forever(self):
|
||||
try:
|
||||
self.loop.add_signal_handler(signal.SIGTERM, self.signal_handler)
|
||||
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGTERM])
|
||||
|
||||
self.run_loop_forever()
|
||||
self.server.close()
|
||||
|
||||
self.loop.run_until_complete(self.server.wait_closed())
|
||||
self.logger.debug('Server shutting down')
|
||||
finally:
|
||||
if self._cleanup_socket is not None:
|
||||
self._cleanup_socket()
|
||||
|
||||
def serve_forever(self):
|
||||
"""
|
||||
Serve requests in the current process
|
||||
"""
|
||||
# Create loop and override any loop that may have existed in
|
||||
# a parent process. It is possible that the usecases of
|
||||
# serve_forever might be constrained enough to allow using
|
||||
# get_event_loop here, but better safe than sorry for now.
|
||||
self.loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(self.loop)
|
||||
self.start()
|
||||
self._serve_forever()
|
||||
|
||||
def serve_as_process(self, *, prefunc=None, args=()):
|
||||
"""
|
||||
Serve requests in a child process
|
||||
"""
|
||||
def run(queue):
|
||||
# Create loop and override any loop that may have existed
|
||||
# in a parent process. Without doing this and instead
|
||||
# using get_event_loop, at the very minimum the hashserv
|
||||
# unit tests will hang when running the second test.
|
||||
# This happens since get_event_loop in the spawned server
|
||||
# process for the second testcase ends up with the loop
|
||||
# from the hashserv client created in the unit test process
|
||||
# when running the first testcase. The problem is somewhat
|
||||
# more general, though, as any potential use of asyncio in
|
||||
# Cooker could create a loop that needs to replaced in this
|
||||
# new process.
|
||||
self.loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(self.loop)
|
||||
try:
|
||||
self.start()
|
||||
finally:
|
||||
queue.put(self.address)
|
||||
queue.close()
|
||||
|
||||
if prefunc is not None:
|
||||
prefunc(self, *args)
|
||||
|
||||
self._serve_forever()
|
||||
|
||||
if sys.version_info >= (3, 6):
|
||||
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
|
||||
self.loop.close()
|
||||
|
||||
queue = multiprocessing.Queue()
|
||||
|
||||
# Temporarily block SIGTERM. The server process will inherit this
|
||||
# block which will ensure it doesn't receive the SIGTERM until the
|
||||
# handler is ready for it
|
||||
mask = signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGTERM])
|
||||
try:
|
||||
self.process = multiprocessing.Process(target=run, args=(queue,))
|
||||
self.process.start()
|
||||
|
||||
self.address = queue.get()
|
||||
queue.close()
|
||||
queue.join_thread()
|
||||
|
||||
return self.process
|
||||
finally:
|
||||
signal.pthread_sigmask(signal.SIG_SETMASK, mask)
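Taken together, the removed client.py and serv.py above form a small RPC framework: a concrete service subclasses AsyncServerConnection (adding message handlers), AsyncServer (returning that connection from accept_client()), and Client/AsyncClient on the calling side. The sketch below is not part of the diff and every name in it is invented for illustration; it assumes the bb.asyncrpc package from the yocto-3.4 side of this compare is importable. Real users in this tree are the hash equivalence and PR service code.

import logging
from bb.asyncrpc import AsyncClient, AsyncServer, AsyncServerConnection, Client

logger = logging.getLogger("Example")

class ExampleConnection(AsyncServerConnection):
    def __init__(self, reader, writer):
        super().__init__(reader, writer, "EXAMPLE-PROTOCOL", logger)
        # 'chunk-stream' and 'ping' handlers are inherited; add protocol commands here.
        self.handlers["echo"] = self.handle_echo

    def validate_proto_version(self):
        return self.proto_version == (1, 0)

    async def handle_echo(self, request):
        self.write_message({"echo": request})

class ExampleServer(AsyncServer):
    def accept_client(self, reader, writer):
        return ExampleConnection(reader, writer)

class ExampleAsyncClient(AsyncClient):
    def __init__(self):
        # Protocol name/version must match what the connection class expects.
        super().__init__("EXAMPLE-PROTOCOL", "1.0", logger)

class ExampleClient(Client):
    def __init__(self):
        super().__init__()
        # Client only wraps connect_tcp/ping by default; expose send_message too.
        self._add_methods("send_message")

    def _get_async_client(self):
        return ExampleAsyncClient()

server = ExampleServer(logger)
server.start_tcp_server("127.0.0.1", 5555)
server.serve_as_process()                    # runs the asyncio loop in a child process

client = ExampleClient()
client.connect_tcp("127.0.0.1", 5555)
print(client.ping())                         # {'alive': True}, via the inherited handler
print(client.send_message({"echo": "hi"}))   # {'echo': 'hi'}, via handle_echo() above
client.close()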
|
||||
@@ -295,13 +295,9 @@ def exec_func_python(func, d, runfile, cwd=None):
|
||||
lineno = int(d.getVarFlag(func, "lineno", False))
|
||||
bb.methodpool.insert_method(func, text, fn, lineno - 1)
|
||||
|
||||
comp = utils.better_compile(code, func, "exec_func_python() autogenerated")
|
||||
utils.better_exec(comp, {"d": d}, code, "exec_func_python() autogenerated")
|
||||
comp = utils.better_compile(code, func, "exec_python_func() autogenerated")
|
||||
utils.better_exec(comp, {"d": d}, code, "exec_python_func() autogenerated")
|
||||
finally:
|
||||
# We want any stdout/stderr to be printed before any other log messages to make debugging
|
||||
# more accurate. In some cases we seem to lose stdout/stderr entirely in logging tests without this.
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
bb.debug(2, "Python function %s finished" % func)
|
||||
|
||||
if cwd and olddir:
|
||||
@@ -587,7 +583,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
logger.error("No such task: %s" % task)
|
||||
return 1
|
||||
|
||||
logger.debug("Executing task %s", task)
|
||||
logger.debug(1, "Executing task %s", task)
|
||||
|
||||
localdata = _task_data(fn, task, d)
|
||||
tempdir = localdata.getVar('T')
|
||||
@@ -600,7 +596,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
curnice = os.nice(0)
|
||||
nice = int(nice) - curnice
|
||||
newnice = os.nice(nice)
|
||||
logger.debug("Renice to %s " % newnice)
|
||||
logger.debug(1, "Renice to %s " % newnice)
|
||||
ionice = localdata.getVar("BB_TASK_IONICE_LEVEL")
|
||||
if ionice:
|
||||
try:
|
||||
@@ -686,51 +682,47 @@ def _exec_task(fn, task, d, quieterr):
|
||||
try:
|
||||
try:
|
||||
event.fire(TaskStarted(task, fn, logfn, flags, localdata), localdata)
|
||||
except (bb.BBHandledException, SystemExit):
|
||||
return 1
|
||||
|
||||
try:
|
||||
for func in (prefuncs or '').split():
|
||||
exec_func(func, localdata)
|
||||
exec_func(task, localdata)
|
||||
for func in (postfuncs or '').split():
|
||||
exec_func(func, localdata)
|
||||
finally:
|
||||
# Need to flush and close the logs before sending events where the
|
||||
# UI may try to look at the logs.
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
except bb.BBHandledException:
|
||||
event.fire(TaskFailed(task, fn, logfn, localdata, True), localdata)
|
||||
return 1
|
||||
except Exception as exc:
|
||||
if quieterr:
|
||||
event.fire(TaskFailedSilent(task, fn, logfn, localdata), localdata)
|
||||
else:
|
||||
errprinted = errchk.triggered
|
||||
logger.error(str(exc))
|
||||
event.fire(TaskFailed(task, fn, logfn, localdata, errprinted), localdata)
|
||||
return 1
|
||||
finally:
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
|
||||
bblogger.removeHandler(handler)
|
||||
bblogger.removeHandler(handler)
|
||||
|
||||
# Restore the backup fds
|
||||
os.dup2(osi[0], osi[1])
|
||||
os.dup2(oso[0], oso[1])
|
||||
os.dup2(ose[0], ose[1])
|
||||
# Restore the backup fds
|
||||
os.dup2(osi[0], osi[1])
|
||||
os.dup2(oso[0], oso[1])
|
||||
os.dup2(ose[0], ose[1])
|
||||
|
||||
# Close the backup fds
|
||||
os.close(osi[0])
|
||||
os.close(oso[0])
|
||||
os.close(ose[0])
|
||||
|
||||
logfile.close()
|
||||
if os.path.exists(logfn) and os.path.getsize(logfn) == 0:
|
||||
logger.debug2("Zero size logfn %s, removing", logfn)
|
||||
bb.utils.remove(logfn)
|
||||
bb.utils.remove(loglink)
|
||||
except bb.BBHandledException:
|
||||
event.fire(TaskFailed(task, fn, logfn, localdata, True), localdata)
|
||||
return 1
|
||||
except (Exception, SystemExit) as exc:
|
||||
if quieterr:
|
||||
event.fire(TaskFailedSilent(task, fn, logfn, localdata), localdata)
|
||||
else:
|
||||
errprinted = errchk.triggered
|
||||
# If the output is already on stdout, we've printed the information in the
|
||||
# logs once already so don't duplicate
|
||||
if verboseStdoutLogging:
|
||||
errprinted = True
|
||||
logger.error(repr(exc))
|
||||
event.fire(TaskFailed(task, fn, logfn, localdata, errprinted), localdata)
|
||||
return 1
|
||||
# Close the backup fds
|
||||
os.close(osi[0])
|
||||
os.close(oso[0])
|
||||
os.close(ose[0])
|
||||
|
||||
logfile.close()
|
||||
if os.path.exists(logfn) and os.path.getsize(logfn) == 0:
|
||||
logger.debug(2, "Zero size logfn %s, removing", logfn)
|
||||
bb.utils.remove(logfn)
|
||||
bb.utils.remove(loglink)
|
||||
event.fire(TaskSucceeded(task, fn, logfn, localdata), localdata)
|
||||
|
||||
if not localdata.getVarFlag(task, 'nostamp', False) and not localdata.getVarFlag(task, 'selfstamp', False):
|
||||
@@ -862,23 +854,6 @@ def make_stamp(task, d, file_name = None):
|
||||
file_name = d.getVar('BB_FILENAME')
|
||||
bb.parse.siggen.dump_sigtask(file_name, task, stampbase, True)
|
||||
|
||||
def find_stale_stamps(task, d, file_name=None):
|
||||
current = stamp_internal(task, d, file_name)
|
||||
current2 = stamp_internal(task + "_setscene", d, file_name)
|
||||
cleanmask = stamp_cleanmask_internal(task, d, file_name)
|
||||
found = []
|
||||
for mask in cleanmask:
|
||||
for name in glob.glob(mask):
|
||||
if "sigdata" in name or "sigbasedata" in name:
|
||||
continue
|
||||
if name.endswith('.taint'):
|
||||
continue
|
||||
if name == current or name == current2:
|
||||
continue
|
||||
logger.debug2("Stampfile %s does not match %s or %s" % (name, current, current2))
|
||||
found.append(name)
|
||||
return found
|
||||
|
||||
def del_stamp(task, d, file_name = None):
|
||||
"""
|
||||
Removes a stamp for a given task
|
||||
@@ -935,11 +910,6 @@ def add_tasks(tasklist, d):
|
||||
task_deps[name] = {}
|
||||
if name in flags:
|
||||
deptask = d.expand(flags[name])
|
||||
if name in ['noexec', 'fakeroot', 'nostamp']:
|
||||
if deptask != '1':
|
||||
bb.warn("In a future version of BitBake, setting the '{}' flag to something other than '1' "
|
||||
"will result in the flag not being set. See YP bug #13808.".format(name))
|
||||
|
||||
task_deps[name][task] = deptask
|
||||
getTask('mcdepends')
|
||||
getTask('depends')
|
||||
@@ -1038,8 +1008,6 @@ def tasksbetween(task_start, task_end, d):
|
||||
def follow_chain(task, endtask, chain=None):
|
||||
if not chain:
|
||||
chain = []
|
||||
if task in chain:
|
||||
bb.fatal("Circular task dependencies as %s depends on itself via the chain %s" % (task, " -> ".join(chain)))
|
||||
chain.append(task)
|
||||
for othertask in tasks:
|
||||
if othertask == task:
|
||||
|
||||
@@ -19,15 +19,14 @@
|
||||
import os
|
||||
import logging
|
||||
import pickle
|
||||
from collections import defaultdict
|
||||
from collections.abc import Mapping
|
||||
from collections import defaultdict, Mapping
|
||||
import bb.utils
|
||||
from bb import PrefixLoggerAdapter
|
||||
import re
|
||||
|
||||
logger = logging.getLogger("BitBake.Cache")
|
||||
|
||||
__cache_version__ = "154"
|
||||
__cache_version__ = "153"
|
||||
|
||||
def getCacheFile(path, filename, mc, data_hash):
|
||||
mcspec = ''
|
||||
@@ -54,12 +53,12 @@ class RecipeInfoCommon(object):
|
||||
|
||||
@classmethod
|
||||
def pkgvar(cls, var, packages, metadata):
|
||||
return dict((pkg, cls.depvar("%s:%s" % (var, pkg), metadata))
|
||||
return dict((pkg, cls.depvar("%s_%s" % (var, pkg), metadata))
|
||||
for pkg in packages)
|
||||
|
||||
@classmethod
|
||||
def taskvar(cls, var, tasks, metadata):
|
||||
return dict((task, cls.getvar("%s:task-%s" % (var, task), metadata))
|
||||
return dict((task, cls.getvar("%s_task-%s" % (var, task), metadata))
|
||||
for task in tasks)
|
||||
|
||||
@classmethod
|
||||
@@ -95,7 +94,6 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
if not self.packages:
|
||||
self.packages.append(self.pn)
|
||||
self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata)
|
||||
self.rprovides_pkg = self.pkgvar('RPROVIDES', self.packages, metadata)
|
||||
|
||||
self.skipreason = self.getvar('__SKIPPED', metadata)
|
||||
if self.skipreason:
|
||||
@@ -122,12 +120,12 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
self.depends = self.depvar('DEPENDS', metadata)
|
||||
self.rdepends = self.depvar('RDEPENDS', metadata)
|
||||
self.rrecommends = self.depvar('RRECOMMENDS', metadata)
|
||||
self.rprovides_pkg = self.pkgvar('RPROVIDES', self.packages, metadata)
|
||||
self.rdepends_pkg = self.pkgvar('RDEPENDS', self.packages, metadata)
|
||||
self.rrecommends_pkg = self.pkgvar('RRECOMMENDS', self.packages, metadata)
|
||||
self.inherits = self.getvar('__inherit_cache', metadata, expand=False)
|
||||
self.fakerootenv = self.getvar('FAKEROOTENV', metadata)
|
||||
self.fakerootdirs = self.getvar('FAKEROOTDIRS', metadata)
|
||||
self.fakerootlogs = self.getvar('FAKEROOTLOGS', metadata)
|
||||
self.fakerootnoenv = self.getvar('FAKEROOTNOENV', metadata)
|
||||
self.extradepsfunc = self.getvar('calculate_extra_depends', metadata)
|
||||
|
||||
@@ -165,7 +163,6 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
cachedata.fakerootenv = {}
|
||||
cachedata.fakerootnoenv = {}
|
||||
cachedata.fakerootdirs = {}
|
||||
cachedata.fakerootlogs = {}
|
||||
cachedata.extradepsfunc = {}
|
||||
|
||||
def add_cacheData(self, cachedata, fn):
|
||||
@@ -218,7 +215,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
if not self.not_world:
|
||||
cachedata.possible_world.append(fn)
|
||||
#else:
|
||||
# logger.debug2("EXCLUDE FROM WORLD: %s", fn)
|
||||
# logger.debug(2, "EXCLUDE FROM WORLD: %s", fn)
|
||||
|
||||
# create a collection of all targets for sanity checking
|
||||
# tasks, such as upstream versions, license, and tools for
|
||||
@@ -234,7 +231,6 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
cachedata.fakerootenv[fn] = self.fakerootenv
|
||||
cachedata.fakerootnoenv[fn] = self.fakerootnoenv
|
||||
cachedata.fakerootdirs[fn] = self.fakerootdirs
|
||||
cachedata.fakerootlogs[fn] = self.fakerootlogs
|
||||
cachedata.extradepsfunc[fn] = self.extradepsfunc
|
||||
|
||||
def virtualfn2realfn(virtualfn):
|
||||
@@ -242,7 +238,7 @@ def virtualfn2realfn(virtualfn):
|
||||
Convert a virtual file name to a real one + the associated subclass keyword
|
||||
"""
|
||||
mc = ""
|
||||
if virtualfn.startswith('mc:') and virtualfn.count(':') >= 2:
|
||||
if virtualfn.startswith('mc:'):
|
||||
elems = virtualfn.split(':')
|
||||
mc = elems[1]
|
||||
virtualfn = ":".join(elems[2:])
|
||||
@@ -272,7 +268,7 @@ def variant2virtual(realfn, variant):
|
||||
"""
|
||||
if variant == "":
|
||||
return realfn
|
||||
if variant.startswith("mc:") and variant.count(':') >= 2:
|
||||
if variant.startswith("mc:"):
|
||||
elems = variant.split(":")
|
||||
if elems[2]:
|
||||
return "mc:" + elems[1] + ":virtual:" + ":".join(elems[2:]) + ":" + realfn
|
||||
@@ -327,7 +323,7 @@ class NoCache(object):
|
||||
Return a complete set of data for fn.
|
||||
To do this, we need to parse the file.
|
||||
"""
|
||||
logger.debug("Parsing %s (full)" % virtualfn)
|
||||
logger.debug(1, "Parsing %s (full)" % virtualfn)
|
||||
(fn, virtual, mc) = virtualfn2realfn(virtualfn)
|
||||
bb_data = self.load_bbfile(virtualfn, appends, virtonly=True)
|
||||
return bb_data[virtual]
|
||||
@@ -404,7 +400,7 @@ class Cache(NoCache):
|
||||
|
||||
self.cachefile = self.getCacheFile("bb_cache.dat")
|
||||
|
||||
self.logger.debug("Cache dir: %s", self.cachedir)
|
||||
self.logger.debug(1, "Cache dir: %s", self.cachedir)
|
||||
bb.utils.mkdirhier(self.cachedir)
|
||||
|
||||
cache_ok = True
|
||||
@@ -412,7 +408,7 @@ class Cache(NoCache):
|
||||
for cache_class in self.caches_array:
|
||||
cachefile = self.getCacheFile(cache_class.cachefile)
|
||||
cache_exists = os.path.exists(cachefile)
|
||||
self.logger.debug2("Checking if %s exists: %r", cachefile, cache_exists)
|
||||
self.logger.debug(2, "Checking if %s exists: %r", cachefile, cache_exists)
|
||||
cache_ok = cache_ok and cache_exists
|
||||
cache_class.init_cacheData(self)
|
||||
if cache_ok:
|
||||
@@ -420,7 +416,7 @@ class Cache(NoCache):
|
||||
elif os.path.isfile(self.cachefile):
|
||||
self.logger.info("Out of date cache found, rebuilding...")
|
||||
else:
|
||||
self.logger.debug("Cache file %s not found, building..." % self.cachefile)
|
||||
self.logger.debug(1, "Cache file %s not found, building..." % self.cachefile)
|
||||
|
||||
# We don't use the symlink, its just for debugging convinience
|
||||
if self.mc:
|
||||
@@ -453,11 +449,13 @@ class Cache(NoCache):
|
||||
return cachesize
|
||||
|
||||
def load_cachefile(self, progress):
|
||||
cachesize = self.cachesize()
|
||||
previous_progress = 0
|
||||
previous_percent = 0
|
||||
|
||||
for cache_class in self.caches_array:
|
||||
cachefile = self.getCacheFile(cache_class.cachefile)
|
||||
self.logger.debug('Loading cache file: %s' % cachefile)
|
||||
self.logger.debug(1, 'Loading cache file: %s' % cachefile)
|
||||
with open(cachefile, "rb") as cachefile:
|
||||
pickled = pickle.Unpickler(cachefile)
|
||||
# Check cache version information
|
||||
@@ -504,7 +502,7 @@ class Cache(NoCache):
|
||||
|
||||
def parse(self, filename, appends):
|
||||
"""Parse the specified filename, returning the recipe information"""
|
||||
self.logger.debug("Parsing %s", filename)
|
||||
self.logger.debug(1, "Parsing %s", filename)
|
||||
infos = []
|
||||
datastores = self.load_bbfile(filename, appends, mc=self.mc)
|
||||
depends = []
|
||||
@@ -558,7 +556,7 @@ class Cache(NoCache):
|
||||
cached, infos = self.load(fn, appends)
|
||||
for virtualfn, info_array in infos:
|
||||
if info_array[0].skipped:
|
||||
self.logger.debug("Skipping %s: %s", virtualfn, info_array[0].skipreason)
|
||||
self.logger.debug(1, "Skipping %s: %s", virtualfn, info_array[0].skipreason)
|
||||
skipped += 1
|
||||
else:
|
||||
self.add_info(virtualfn, info_array, cacheData, not cached)
|
||||
@@ -594,21 +592,21 @@ class Cache(NoCache):
|
||||
|
||||
# File isn't in depends_cache
|
||||
if not fn in self.depends_cache:
|
||||
self.logger.debug2("%s is not cached", fn)
|
||||
self.logger.debug(2, "%s is not cached", fn)
|
||||
return False
|
||||
|
||||
mtime = bb.parse.cached_mtime_noerror(fn)
|
||||
|
||||
# Check file still exists
|
||||
if mtime == 0:
|
||||
self.logger.debug2("%s no longer exists", fn)
|
||||
self.logger.debug(2, "%s no longer exists", fn)
|
||||
self.remove(fn)
|
||||
return False
|
||||
|
||||
info_array = self.depends_cache[fn]
|
||||
# Check the file's timestamp
|
||||
if mtime != info_array[0].timestamp:
|
||||
self.logger.debug2("%s changed", fn)
|
||||
self.logger.debug(2, "%s changed", fn)
|
||||
self.remove(fn)
|
||||
return False
|
||||
|
||||
@@ -619,13 +617,13 @@ class Cache(NoCache):
|
||||
fmtime = bb.parse.cached_mtime_noerror(f)
|
||||
# Check if file still exists
|
||||
if old_mtime != 0 and fmtime == 0:
|
||||
self.logger.debug2("%s's dependency %s was removed",
|
||||
self.logger.debug(2, "%s's dependency %s was removed",
|
||||
fn, f)
|
||||
self.remove(fn)
|
||||
return False
|
||||
|
||||
if (fmtime != old_mtime):
|
||||
self.logger.debug2("%s's dependency %s changed",
|
||||
self.logger.debug(2, "%s's dependency %s changed",
|
||||
fn, f)
|
||||
self.remove(fn)
|
||||
return False
|
||||
@@ -642,14 +640,14 @@ class Cache(NoCache):
|
||||
continue
|
||||
f, exist = f.split(":")
|
||||
if (exist == "True" and not os.path.exists(f)) or (exist == "False" and os.path.exists(f)):
|
||||
self.logger.debug2("%s's file checksum list file %s changed",
|
||||
self.logger.debug(2, "%s's file checksum list file %s changed",
|
||||
fn, f)
|
||||
self.remove(fn)
|
||||
return False
|
||||
|
||||
if tuple(appends) != tuple(info_array[0].appends):
|
||||
self.logger.debug2("appends for %s changed", fn)
|
||||
self.logger.debug2("%s to %s" % (str(appends), str(info_array[0].appends)))
|
||||
self.logger.debug(2, "appends for %s changed", fn)
|
||||
self.logger.debug(2, "%s to %s" % (str(appends), str(info_array[0].appends)))
|
||||
self.remove(fn)
|
||||
return False
|
||||
|
||||
@@ -658,10 +656,10 @@ class Cache(NoCache):
|
||||
virtualfn = variant2virtual(fn, cls)
|
||||
self.clean.add(virtualfn)
|
||||
if virtualfn not in self.depends_cache:
|
||||
self.logger.debug2("%s is not cached", virtualfn)
|
||||
self.logger.debug(2, "%s is not cached", virtualfn)
|
||||
invalid = True
|
||||
elif len(self.depends_cache[virtualfn]) != len(self.caches_array):
|
||||
self.logger.debug2("Extra caches missing for %s?" % virtualfn)
|
||||
self.logger.debug(2, "Extra caches missing for %s?" % virtualfn)
|
||||
invalid = True
|
||||
|
||||
# If any one of the variants is not present, mark as invalid for all
|
||||
@@ -669,10 +667,10 @@ class Cache(NoCache):
|
||||
for cls in info_array[0].variants:
|
||||
virtualfn = variant2virtual(fn, cls)
|
||||
if virtualfn in self.clean:
|
||||
self.logger.debug2("Removing %s from cache", virtualfn)
|
||||
self.logger.debug(2, "Removing %s from cache", virtualfn)
|
||||
self.clean.remove(virtualfn)
|
||||
if fn in self.clean:
|
||||
self.logger.debug2("Marking %s as not clean", fn)
|
||||
self.logger.debug(2, "Marking %s as not clean", fn)
|
||||
self.clean.remove(fn)
|
||||
return False
|
||||
|
||||
@@ -685,10 +683,10 @@ class Cache(NoCache):
|
||||
Called from the parser in error cases
|
||||
"""
|
||||
if fn in self.depends_cache:
|
||||
self.logger.debug("Removing %s from cache", fn)
|
||||
self.logger.debug(1, "Removing %s from cache", fn)
|
||||
del self.depends_cache[fn]
|
||||
if fn in self.clean:
|
||||
self.logger.debug("Marking %s as unclean", fn)
|
||||
self.logger.debug(1, "Marking %s as unclean", fn)
|
||||
self.clean.remove(fn)
|
||||
|
||||
def sync(self):
|
||||
@@ -701,13 +699,13 @@ class Cache(NoCache):
|
||||
return
|
||||
|
||||
if self.cacheclean:
|
||||
self.logger.debug2("Cache is clean, not saving.")
|
||||
self.logger.debug(2, "Cache is clean, not saving.")
|
||||
return
|
||||
|
||||
for cache_class in self.caches_array:
|
||||
cache_class_name = cache_class.__name__
|
||||
cachefile = self.getCacheFile(cache_class.cachefile)
|
||||
self.logger.debug2("Writing %s", cachefile)
|
||||
self.logger.debug(2, "Writing %s", cachefile)
|
||||
with open(cachefile, "wb") as f:
|
||||
p = pickle.Pickler(f, pickle.HIGHEST_PROTOCOL)
|
||||
p.dump(__cache_version__)
|
||||
@@ -818,6 +816,10 @@ class MulticonfigCache(Mapping):
|
||||
for k in self.__caches:
|
||||
yield k
|
||||
|
||||
def keys(self):
    return self.__caches.keys()
|
||||
|
||||
|
||||
def init(cooker):
|
||||
"""
|
||||
The Objective: Cache the minimum amount of data possible yet get to the
|
||||
@@ -883,7 +885,7 @@ class MultiProcessCache(object):
|
||||
bb.utils.mkdirhier(cachedir)
|
||||
self.cachefile = os.path.join(cachedir,
|
||||
cache_file_name or self.__class__.cache_file_name)
|
||||
logger.debug("Using cache in '%s'", self.cachefile)
|
||||
logger.debug(1, "Using cache in '%s'", self.cachefile)
|
||||
|
||||
glf = bb.utils.lockfile(self.cachefile + ".lock")
|
||||
|
||||
@@ -989,7 +991,7 @@ class SimpleCache(object):
|
||||
bb.utils.mkdirhier(cachedir)
|
||||
self.cachefile = os.path.join(cachedir,
|
||||
cache_file_name or self.__class__.cache_file_name)
|
||||
logger.debug("Using cache in '%s'", self.cachefile)
|
||||
logger.debug(1, "Using cache in '%s'", self.cachefile)
|
||||
|
||||
glf = bb.utils.lockfile(self.cachefile + ".lock")
|
||||
|
||||
|
||||
@@ -212,9 +212,9 @@ class PythonParser():
|
||||
funcstr = codegen.to_source(func)
|
||||
argstr = codegen.to_source(arg)
|
||||
except TypeError:
|
||||
self.log.debug2('Failed to convert function and argument to source form')
|
||||
self.log.debug(2, 'Failed to convert function and argument to source form')
|
||||
else:
|
||||
self.log.debug(self.unhandled_message % (funcstr, argstr))
|
||||
self.log.debug(1, self.unhandled_message % (funcstr, argstr))
|
||||
|
||||
def visit_Call(self, node):
|
||||
name = self.called_node_name(node.func)
|
||||
@@ -450,7 +450,7 @@ class ShellParser():
|
||||
|
||||
cmd = word[1]
|
||||
if cmd.startswith("$"):
|
||||
self.log.debug(self.unhandled_template % cmd)
|
||||
self.log.debug(1, self.unhandled_template % cmd)
|
||||
elif cmd == "eval":
|
||||
command = " ".join(word for _, word in words[1:])
|
||||
self._parse_shell(command)
|
||||
|
||||
@@ -20,7 +20,6 @@ Commands are queued in a CommandQueue
|
||||
|
||||
from collections import OrderedDict, defaultdict
|
||||
|
||||
import io
|
||||
import bb.event
|
||||
import bb.cooker
|
||||
import bb.remotedata
|
||||
@@ -65,17 +64,9 @@ class Command:
|
||||
|
||||
# Ensure cooker is ready for commands
|
||||
if command != "updateConfig" and command != "setFeatures":
|
||||
try:
|
||||
self.cooker.init_configdata()
|
||||
if not self.remotedatastores:
|
||||
self.remotedatastores = bb.remotedata.RemoteDatastores(self.cooker)
|
||||
except (Exception, SystemExit) as exc:
|
||||
import traceback
|
||||
if isinstance(exc, bb.BBHandledException):
|
||||
# We need to start returning real exceptions here. Until we do, we can't
|
||||
# tell if an exception is an instance of bb.BBHandledException
|
||||
return None, "bb.BBHandledException()\n" + traceback.format_exc()
|
||||
return None, traceback.format_exc()
|
||||
self.cooker.init_configdata()
|
||||
if not self.remotedatastores:
|
||||
self.remotedatastores = bb.remotedata.RemoteDatastores(self.cooker)
|
||||
|
||||
if hasattr(CommandsSync, command):
|
||||
# Can run synchronous commands straight away
|
||||
@@ -509,17 +500,6 @@ class CommandsSync:
|
||||
d = command.remotedatastores[dsindex].varhistory
|
||||
return getattr(d, method)(*args, **kwargs)
|
||||
|
||||
def dataStoreConnectorVarHistCmdEmit(self, command, params):
|
||||
dsindex = params[0]
|
||||
var = params[1]
|
||||
oval = params[2]
|
||||
val = params[3]
|
||||
d = command.remotedatastores[params[4]]
|
||||
|
||||
o = io.StringIO()
|
||||
command.remotedatastores[dsindex].varhistory.emit(var, oval, val, o, d)
|
||||
return o.getvalue()
|
||||
|
||||
def dataStoreConnectorIncHistCmd(self, command, params):
|
||||
dsindex = params[0]
|
||||
method = params[1]
|
||||
@@ -667,16 +647,6 @@ class CommandsAsync:
|
||||
command.finishAsyncCommand()
|
||||
findFilesMatchingInDir.needcache = False
|
||||
|
||||
def testCookerCommandEvent(self, command, params):
|
||||
"""
|
||||
Dummy command used by OEQA selftest to test tinfoil without IO
|
||||
"""
|
||||
pattern = params[0]
|
||||
|
||||
command.cooker.testCookerCommandEvent(pattern)
|
||||
command.finishAsyncCommand()
|
||||
testCookerCommandEvent.needcache = False
|
||||
|
||||
def findConfigFilePath(self, command, params):
|
||||
"""
|
||||
Find the path of the requested configuration file
|
||||
|
||||
@@ -1,194 +0,0 @@
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
# Helper library to implement streaming compression and decompression using an
|
||||
# external process
|
||||
#
|
||||
# This library should be used directly by end users; a wrapper library for the
|
||||
# specific compression tool should be created
|
||||
|
||||
import builtins
|
||||
import io
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
|
||||
def open_wrap(
|
||||
cls, filename, mode="rb", *, encoding=None, errors=None, newline=None, **kwargs
|
||||
):
|
||||
"""
|
||||
Open a compressed file in binary or text mode.
|
||||
|
||||
Users should not call this directly. A specific compression library can use
|
||||
this helper to provide it's own "open" command
|
||||
|
||||
The filename argument can be an actual filename (a str or bytes object), or
|
||||
an existing file object to read from or write to.
|
||||
|
||||
The mode argument can be "r", "rb", "w", "wb", "x", "xb", "a" or "ab" for
|
||||
binary mode, or "rt", "wt", "xt" or "at" for text mode. The default mode is
|
||||
"rb".
|
||||
|
||||
For binary mode, this function is equivalent to the cls constructor:
|
||||
cls(filename, mode). In this case, the encoding, errors and newline
|
||||
arguments must not be provided.
|
||||
|
||||
For text mode, a cls object is created, and wrapped in an
|
||||
io.TextIOWrapper instance with the specified encoding, error handling
|
||||
behavior, and line ending(s).
|
||||
"""
|
||||
if "t" in mode:
|
||||
if "b" in mode:
|
||||
raise ValueError("Invalid mode: %r" % (mode,))
|
||||
else:
|
||||
if encoding is not None:
|
||||
raise ValueError("Argument 'encoding' not supported in binary mode")
|
||||
if errors is not None:
|
||||
raise ValueError("Argument 'errors' not supported in binary mode")
|
||||
if newline is not None:
|
||||
raise ValueError("Argument 'newline' not supported in binary mode")
|
||||
|
||||
file_mode = mode.replace("t", "")
|
||||
if isinstance(filename, (str, bytes, os.PathLike)):
|
||||
binary_file = cls(filename, file_mode, **kwargs)
|
||||
elif hasattr(filename, "read") or hasattr(filename, "write"):
|
||||
binary_file = cls(None, file_mode, fileobj=filename, **kwargs)
|
||||
else:
|
||||
raise TypeError("filename must be a str or bytes object, or a file")
|
||||
|
||||
if "t" in mode:
|
||||
return io.TextIOWrapper(
|
||||
binary_file, encoding, errors, newline, write_through=True
|
||||
)
|
||||
else:
|
||||
return binary_file
|
||||
|
||||
|
||||
class CompressionError(OSError):
|
||||
pass
|
||||
|
||||
|
||||
class PipeFile(io.RawIOBase):
|
||||
"""
|
||||
Class that implements generically piping to/from a compression program
|
||||
|
||||
Derived classes should add the function get_compress() and get_decompress()
|
||||
that return the required commands. Input will be piped into stdin and the
|
||||
(de)compressed output should be written to stdout, e.g.:
|
||||
|
||||
class FooFile(PipeCompressionFile):
|
||||
def get_decompress(self):
|
||||
return ["fooc", "--decompress", "--stdout"]
|
||||
|
||||
def get_compress(self):
|
||||
return ["fooc", "--compress", "--stdout"]
|
||||
|
||||
"""
|
||||
|
||||
READ = 0
|
||||
WRITE = 1
|
||||
|
||||
def __init__(self, filename=None, mode="rb", *, stderr=None, fileobj=None):
|
||||
if "t" in mode or "U" in mode:
|
||||
raise ValueError("Invalid mode: {!r}".format(mode))
|
||||
|
||||
if not "b" in mode:
|
||||
mode += "b"
|
||||
|
||||
if mode.startswith("r"):
|
||||
self.mode = self.READ
|
||||
elif mode.startswith("w"):
|
||||
self.mode = self.WRITE
|
||||
else:
|
||||
raise ValueError("Invalid mode %r" % mode)
|
||||
|
||||
if fileobj is not None:
|
||||
self.fileobj = fileobj
|
||||
else:
|
||||
self.fileobj = builtins.open(filename, mode or "rb")
|
||||
|
||||
if self.mode == self.READ:
|
||||
self.p = subprocess.Popen(
|
||||
self.get_decompress(),
|
||||
stdin=self.fileobj,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=stderr,
|
||||
close_fds=True,
|
||||
)
|
||||
self.pipe = self.p.stdout
|
||||
else:
|
||||
self.p = subprocess.Popen(
|
||||
self.get_compress(),
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=self.fileobj,
|
||||
stderr=stderr,
|
||||
close_fds=True,
|
||||
)
|
||||
self.pipe = self.p.stdin
|
||||
|
||||
self.__closed = False
|
||||
|
||||
def _check_process(self):
|
||||
if self.p is None:
|
||||
return
|
||||
|
||||
returncode = self.p.wait()
|
||||
if returncode:
|
||||
raise CompressionError("Process died with %d" % returncode)
|
||||
self.p = None
|
||||
|
||||
def close(self):
|
||||
if self.closed:
|
||||
return
|
||||
|
||||
self.pipe.close()
|
||||
if self.p is not None:
|
||||
self._check_process()
|
||||
self.fileobj.close()
|
||||
|
||||
self.__closed = True
|
||||
|
||||
@property
|
||||
def closed(self):
|
||||
return self.__closed
|
||||
|
||||
def fileno(self):
|
||||
return self.pipe.fileno()
|
||||
|
||||
def flush(self):
|
||||
self.pipe.flush()
|
||||
|
||||
def isatty(self):
|
||||
return self.pipe.isatty()
|
||||
|
||||
def readable(self):
|
||||
return self.mode == self.READ
|
||||
|
||||
def writable(self):
|
||||
return self.mode == self.WRITE
|
||||
|
||||
def readinto(self, b):
|
||||
if self.mode != self.READ:
|
||||
import errno
|
||||
|
||||
raise OSError(
|
||||
errno.EBADF, "read() on write-only %s object" % self.__class__.__name__
|
||||
)
|
||||
size = self.pipe.readinto(b)
|
||||
if size == 0:
|
||||
self._check_process()
|
||||
return size
|
||||
|
||||
def write(self, data):
|
||||
if self.mode != self.WRITE:
|
||||
import errno
|
||||
|
||||
raise OSError(
|
||||
errno.EBADF, "write() on read-only %s object" % self.__class__.__name__
|
||||
)
|
||||
data = self.pipe.write(data)
|
||||
|
||||
if not data:
|
||||
self._check_process()
|
||||
|
||||
return data
|
||||
@@ -1,17 +0,0 @@
#
# SPDX-License-Identifier: GPL-2.0-only
#

import bb.compress._pipecompress


def open(*args, **kwargs):
    return bb.compress._pipecompress.open_wrap(LZ4File, *args, **kwargs)


class LZ4File(bb.compress._pipecompress.PipeFile):
    def get_compress(self):
        return ["lz4c", "-z", "-c"]

    def get_decompress(self):
        return ["lz4c", "-d", "-c"]
|
||||
@@ -1,28 +0,0 @@
#
# SPDX-License-Identifier: GPL-2.0-only
#

import bb.compress._pipecompress
import shutil


def open(*args, **kwargs):
    return bb.compress._pipecompress.open_wrap(ZstdFile, *args, **kwargs)


class ZstdFile(bb.compress._pipecompress.PipeFile):
    def __init__(self, *args, num_threads=1, compresslevel=3, **kwargs):
        self.num_threads = num_threads
        self.compresslevel = compresslevel
        super().__init__(*args, **kwargs)

    def _get_zstd(self):
        if self.num_threads == 1 or not shutil.which("pzstd"):
            return ["zstd"]
        return ["pzstd", "-p", "%d" % self.num_threads]

    def get_compress(self):
        return self._get_zstd() + ["-c", "-%d" % self.compresslevel]

    def get_decompress(self):
        return self._get_zstd() + ["-d", "-c"]
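A quick way to exercise these classes is through the module-level open() wrappers shown above. A minimal sketch, assuming the zstd binary is installed and that open_wrap() passes binary modes straight through to PipeFile:

import bb.compress.zstd

# Round-trip a small payload through the external zstd process.
with bb.compress.zstd.open("/tmp/example.zst", "wb") as f:
    f.write(b"hello from the pipe compressor")

with bb.compress.zstd.open("/tmp/example.zst", "rb") as f:
    assert f.read() == b"hello from the pipe compressor"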
@@ -73,9 +73,7 @@ class SkippedPackage:
            self.pn = info.pn
            self.skipreason = info.skipreason
            self.provides = info.provides
            self.rprovides = info.packages + info.rprovides
            for package in info.packages:
                self.rprovides += info.rprovides_pkg[package]
            self.rprovides = info.rprovides
        elif reason:
            self.skipreason = reason

@@ -382,30 +380,16 @@ class BBCooker:
        try:
            self.prhost = prserv.serv.auto_start(self.data)
        except prserv.serv.PRServiceConfigError as e:
            bb.fatal("Unable to start PR Server, exiting, check the bitbake-cookerdaemon.log")
            bb.fatal("Unable to start PR Server, exitting")

        if self.data.getVar("BB_HASHSERVE") == "auto":
            # Create a new hash server bound to a unix domain socket
            if not self.hashserv:
                dbfile = (self.data.getVar("PERSISTENT_DIR") or self.data.getVar("CACHE")) + "/hashserv.db"
                upstream = self.data.getVar("BB_HASHSERVE_UPSTREAM") or None
                if upstream:
                    import socket
                    try:
                        sock = socket.create_connection(upstream.split(":"), 5)
                        sock.close()
                    except socket.error as e:
                        bb.warn("BB_HASHSERVE_UPSTREAM is not valid, unable to connect hash equivalence server at '%s': %s"
                                 % (upstream, repr(e)))

                self.hashservaddr = "unix://%s/hashserve.sock" % self.data.getVar("TOPDIR")
                self.hashserv = hashserv.create_server(
                    self.hashservaddr,
                    dbfile,
                    sync=False,
                    upstream=upstream,
                )
                self.hashserv.serve_as_process()
                self.hashserv = hashserv.create_server(self.hashservaddr, dbfile, sync=False)
                self.hashserv.process = multiprocessing.Process(target=self.hashserv.serve_forever)
                self.hashserv.process.start()
            self.data.setVar("BB_HASHSERVE", self.hashservaddr)
            self.databuilder.origdata.setVar("BB_HASHSERVE", self.hashservaddr)
            self.databuilder.data.setVar("BB_HASHSERVE", self.hashservaddr)
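The upstream check above amounts to a plain TCP probe of the BB_HASHSERVE_UPSTREAM address. The same test in isolation, assuming the usual host:port form:

import socket

def upstream_reachable(upstream, timeout=5):
    # upstream is "host:port", as read from BB_HASHSERVE_UPSTREAM
    host, port = upstream.rsplit(":", 1)
    try:
        with socket.create_connection((host, int(port)), timeout):
            return True
    except OSError:
        return False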
@@ -425,8 +409,6 @@ class BBCooker:
        self.data.disableTracking()

    def parseConfiguration(self):
        self.updateCacheSync()

        # Change nice level if we're asked to
        nice = self.data.getVar("BB_NICE_LEVEL")
        if nice:
@@ -457,7 +439,7 @@ class BBCooker:
|
||||
continue
|
||||
except AttributeError:
|
||||
pass
|
||||
logger.debug("Marking as dirty due to '%s' option change to '%s'" % (o, options[o]))
|
||||
logger.debug(1, "Marking as dirty due to '%s' option change to '%s'" % (o, options[o]))
|
||||
print("Marking as dirty due to '%s' option change to '%s'" % (o, options[o]))
|
||||
clean = False
|
||||
if hasattr(self.configuration, o):
|
||||
@@ -484,17 +466,17 @@ class BBCooker:
|
||||
|
||||
for k in bb.utils.approved_variables():
|
||||
if k in environment and k not in self.configuration.env:
|
||||
logger.debug("Updating new environment variable %s to %s" % (k, environment[k]))
|
||||
logger.debug(1, "Updating new environment variable %s to %s" % (k, environment[k]))
|
||||
self.configuration.env[k] = environment[k]
|
||||
clean = False
|
||||
if k in self.configuration.env and k not in environment:
|
||||
logger.debug("Updating environment variable %s (deleted)" % (k))
|
||||
logger.debug(1, "Updating environment variable %s (deleted)" % (k))
|
||||
del self.configuration.env[k]
|
||||
clean = False
|
||||
if k not in self.configuration.env and k not in environment:
|
||||
continue
|
||||
if environment[k] != self.configuration.env[k]:
|
||||
logger.debug("Updating environment variable %s from %s to %s" % (k, self.configuration.env[k], environment[k]))
|
||||
logger.debug(1, "Updating environment variable %s from %s to %s" % (k, self.configuration.env[k], environment[k]))
|
||||
self.configuration.env[k] = environment[k]
|
||||
clean = False
|
||||
|
||||
@@ -502,7 +484,7 @@ class BBCooker:
|
||||
self.configuration.env = environment
|
||||
|
||||
if not clean:
|
||||
logger.debug("Base environment change, triggering reparse")
|
||||
logger.debug(1, "Base environment change, triggering reparse")
|
||||
self.reset()
|
||||
|
||||
def runCommands(self, server, data, abort):
|
||||
@@ -516,30 +498,22 @@ class BBCooker:
|
||||
|
||||
def showVersions(self):
|
||||
|
||||
(latest_versions, preferred_versions, required) = self.findProviders()
|
||||
(latest_versions, preferred_versions) = self.findProviders()
|
||||
|
||||
logger.plain("%-35s %25s %25s %25s", "Recipe Name", "Latest Version", "Preferred Version", "Required Version")
|
||||
logger.plain("%-35s %25s %25s %25s\n", "===========", "==============", "=================", "================")
|
||||
logger.plain("%-35s %25s %25s", "Recipe Name", "Latest Version", "Preferred Version")
|
||||
logger.plain("%-35s %25s %25s\n", "===========", "==============", "=================")
|
||||
|
||||
for p in sorted(self.recipecaches[''].pkg_pn):
|
||||
preferred = preferred_versions[p]
|
||||
pref = preferred_versions[p]
|
||||
latest = latest_versions[p]
|
||||
requiredstr = ""
|
||||
preferredstr = ""
|
||||
if required[p]:
|
||||
if preferred[0] is not None:
|
||||
requiredstr = preferred[0][0] + ":" + preferred[0][1] + '-' + preferred[0][2]
|
||||
else:
|
||||
bb.fatal("REQUIRED_VERSION of package %s not available" % p)
|
||||
else:
|
||||
preferredstr = preferred[0][0] + ":" + preferred[0][1] + '-' + preferred[0][2]
|
||||
|
||||
prefstr = pref[0][0] + ":" + pref[0][1] + '-' + pref[0][2]
|
||||
lateststr = latest[0][0] + ":" + latest[0][1] + "-" + latest[0][2]
|
||||
|
||||
if preferred == latest:
|
||||
preferredstr = ""
|
||||
if pref == latest:
|
||||
prefstr = ""
|
||||
|
||||
logger.plain("%-35s %25s %25s %25s", p, lateststr, preferredstr, requiredstr)
|
||||
logger.plain("%-35s %25s %25s", p, lateststr, prefstr)
|
||||
|
||||
def showEnvironment(self, buildfile=None, pkgs_to_build=None):
|
||||
"""
|
||||
@@ -638,7 +612,7 @@ class BBCooker:
|
||||
# Replace string such as "mc:*:bash"
|
||||
# into "mc:A:bash mc:B:bash bash"
|
||||
for k in targetlist:
|
||||
if k.startswith("mc:") and k.count(':') >= 2:
|
||||
if k.startswith("mc:"):
|
||||
if wildcard:
|
||||
bb.fatal('multiconfig conflict')
|
||||
if k.split(":")[1] == "*":
|
||||
@@ -672,7 +646,7 @@ class BBCooker:
|
||||
for k in fulltargetlist:
|
||||
origk = k
|
||||
mc = ""
|
||||
if k.startswith("mc:") and k.count(':') >= 2:
|
||||
if k.startswith("mc:"):
|
||||
mc = k.split(":")[1]
|
||||
k = ":".join(k.split(":")[2:])
|
||||
ktask = task
|
||||
@@ -721,7 +695,7 @@ class BBCooker:
|
||||
if depmc not in self.multiconfigs:
|
||||
bb.fatal("Multiconfig dependency %s depends on nonexistent multiconfig configuration named configuration %s" % (k,depmc))
|
||||
else:
|
||||
logger.debug("Adding providers for multiconfig dependency %s" % l[3])
|
||||
logger.debug(1, "Adding providers for multiconfig dependency %s" % l[3])
|
||||
taskdata[depmc].add_provider(localdata[depmc], self.recipecaches[depmc], l[3])
|
||||
seen.add(k)
|
||||
new = True
|
||||
@@ -814,9 +788,7 @@ class BBCooker:
|
||||
for dep in rq.rqdata.runtaskentries[tid].depends:
|
||||
(depmc, depfn, _, deptaskfn) = bb.runqueue.split_tid_mcfn(dep)
|
||||
deppn = self.recipecaches[depmc].pkg_fn[deptaskfn]
|
||||
if depmc:
|
||||
depmc = "mc:" + depmc + ":"
|
||||
depend_tree["tdepends"][dotname].append("%s%s.%s" % (depmc, deppn, bb.runqueue.taskname_from_tid(dep)))
|
||||
depend_tree["tdepends"][dotname].append("%s.%s" % (deppn, bb.runqueue.taskname_from_tid(dep)))
|
||||
if taskfn not in seen_fns:
|
||||
seen_fns.append(taskfn)
|
||||
packages = []
|
||||
@@ -1080,11 +1052,6 @@ class BBCooker:
|
||||
if matches:
|
||||
bb.event.fire(bb.event.FilesMatchingFound(filepattern, matches), self.data)
|
||||
|
||||
def testCookerCommandEvent(self, filepattern):
|
||||
# Dummy command used by OEQA selftest to test tinfoil without IO
|
||||
matches = ["A", "B"]
|
||||
bb.event.fire(bb.event.FilesMatchingFound(filepattern, matches), self.data)
|
||||
|
||||
def findProviders(self, mc=''):
|
||||
return bb.providers.findProviders(self.databuilder.mcdata[mc], self.recipecaches[mc], self.recipecaches[mc].pkg_pn)
|
||||
|
||||
@@ -1092,16 +1059,10 @@ class BBCooker:
|
||||
if pn in self.recipecaches[mc].providers:
|
||||
filenames = self.recipecaches[mc].providers[pn]
|
||||
eligible, foundUnique = bb.providers.filterProviders(filenames, pn, self.databuilder.mcdata[mc], self.recipecaches[mc])
|
||||
if eligible is not None:
|
||||
filename = eligible[0]
|
||||
else:
|
||||
filename = None
|
||||
filename = eligible[0]
|
||||
return None, None, None, filename
|
||||
elif pn in self.recipecaches[mc].pkg_pn:
|
||||
(latest, latest_f, preferred_ver, preferred_file, required) = bb.providers.findBestProvider(pn, self.databuilder.mcdata[mc], self.recipecaches[mc], self.recipecaches[mc].pkg_pn)
|
||||
if required and preferred_file is None:
|
||||
return None, None, None, None
|
||||
return (latest, latest_f, preferred_ver, preferred_file)
|
||||
return bb.providers.findBestProvider(pn, self.databuilder.mcdata[mc], self.recipecaches[mc], self.recipecaches[mc].pkg_pn)
|
||||
else:
|
||||
return None, None, None, None
|
||||
|
||||
@@ -1590,7 +1551,7 @@ class BBCooker:
|
||||
self.inotify_modified_files = []
|
||||
|
||||
if not self.baseconfig_valid:
|
||||
logger.debug("Reloading base configuration data")
|
||||
logger.debug(1, "Reloading base configuration data")
|
||||
self.initConfigurationData()
|
||||
self.handlePRServ()
|
||||
|
||||
@@ -2220,33 +2181,21 @@ class CookerParser(object):
|
||||
yield not cached, mc, infos
|
||||
|
||||
def parse_generator(self):
|
||||
empty = False
|
||||
while self.processes or not empty:
|
||||
for process in self.processes.copy():
|
||||
if not process.is_alive():
|
||||
process.join()
|
||||
self.processes.remove(process)
|
||||
|
||||
while True:
|
||||
if self.parsed >= self.toparse:
|
||||
break
|
||||
|
||||
try:
|
||||
result = self.result_queue.get(timeout=0.25)
|
||||
except queue.Empty:
|
||||
empty = True
|
||||
pass
|
||||
else:
|
||||
empty = False
|
||||
value = result[1]
|
||||
if isinstance(value, BaseException):
|
||||
raise value
|
||||
else:
|
||||
yield result
|
||||
|
||||
if not (self.parsed >= self.toparse):
|
||||
raise bb.parse.ParseError("Not all recipes parsed, parser thread killed/died? Exiting.", None)
|
||||
|
||||
|
||||
def parse_next(self):
|
||||
result = []
|
||||
parsed = None
|
||||
@@ -2258,18 +2207,18 @@ class CookerParser(object):
|
||||
except bb.BBHandledException as exc:
|
||||
self.error += 1
|
||||
logger.error('Failed to parse recipe: %s' % exc.recipe)
|
||||
self.shutdown(clean=False, force=True)
|
||||
self.shutdown(clean=False)
|
||||
return False
|
||||
except ParsingFailure as exc:
|
||||
self.error += 1
|
||||
logger.error('Unable to parse %s: %s' %
|
||||
(exc.recipe, bb.exceptions.to_string(exc.realexception)))
|
||||
self.shutdown(clean=False, force=True)
|
||||
self.shutdown(clean=False)
|
||||
return False
|
||||
except bb.parse.ParseError as exc:
|
||||
self.error += 1
|
||||
logger.error(str(exc))
|
||||
self.shutdown(clean=False, force=True)
|
||||
self.shutdown(clean=False)
|
||||
return False
|
||||
except bb.data_smart.ExpansionError as exc:
|
||||
self.error += 1
|
||||
@@ -2278,7 +2227,7 @@ class CookerParser(object):
|
||||
tb = list(itertools.dropwhile(lambda e: e.filename.startswith(bbdir), exc.traceback))
|
||||
logger.error('ExpansionError during parsing %s', value.recipe,
|
||||
exc_info=(etype, value, tb))
|
||||
self.shutdown(clean=False, force=True)
|
||||
self.shutdown(clean=False)
|
||||
return False
|
||||
except Exception as exc:
|
||||
self.error += 1
|
||||
@@ -2290,7 +2239,7 @@ class CookerParser(object):
|
||||
# Most likely, an exception occurred during raising an exception
|
||||
import traceback
|
||||
logger.error('Exception during parse: %s' % traceback.format_exc())
|
||||
self.shutdown(clean=False, force=True)
|
||||
self.shutdown(clean=False)
|
||||
return False
|
||||
|
||||
self.current += 1
|
||||
|
||||
@@ -23,8 +23,8 @@ logger = logging.getLogger("BitBake")
parselog = logging.getLogger("BitBake.Parsing")

class ConfigParameters(object):
    def __init__(self, argv=None):
        self.options, targets = self.parseCommandLine(argv or sys.argv)
    def __init__(self, argv=sys.argv):
        self.options, targets = self.parseCommandLine(argv)
        self.environment = self.parseEnvironment()

        self.options.pkgs_to_build = targets or []
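The signature change above replaces a default argument that captured sys.argv at import time with one resolved at call time. The same pattern in isolation (the function name is illustrative):

import sys

def parse_args(argv=None):
    # Fall back to the live sys.argv when the call is made,
    # not to whatever it was when the module was imported.
    argv = argv or sys.argv
    return list(argv)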
@@ -209,7 +209,7 @@ def findConfigFile(configfile, data):
|
||||
return None
|
||||
|
||||
#
|
||||
# We search for a conf/bblayers.conf under an entry in BBPATH or in cwd working
|
||||
# We search for a conf/bblayers.conf under an entry in BBPATH or in cwd working
|
||||
# up to /. If that fails, we search for a conf/bitbake.conf in BBPATH.
|
||||
#
|
||||
|
||||
@@ -291,8 +291,6 @@ class CookerDataBuilder(object):
|
||||
|
||||
multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
|
||||
for config in multiconfig:
|
||||
if config[0].isdigit():
|
||||
bb.fatal("Multiconfig name '%s' is invalid as multiconfigs cannot start with a digit" % config)
|
||||
mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
|
||||
bb.event.fire(bb.event.ConfigParsed(), mcdata)
|
||||
self.mcdata[config] = mcdata
|
||||
@@ -344,9 +342,6 @@ class CookerDataBuilder(object):
|
||||
layers = (data.getVar('BBLAYERS') or "").split()
|
||||
broken_layers = []
|
||||
|
||||
if not layers:
|
||||
bb.fatal("The bblayers.conf file doesn't contain any BBLAYERS definition")
|
||||
|
||||
data = bb.data.createCopy(data)
|
||||
approved = bb.utils.approved_variables()
|
||||
|
||||
@@ -401,8 +396,6 @@ class CookerDataBuilder(object):
|
||||
if c in collections_tmp:
|
||||
bb.fatal("Found duplicated BBFILE_COLLECTIONS '%s', check bblayers.conf or layer.conf to fix it." % c)
|
||||
compat = set((data.getVar("LAYERSERIES_COMPAT_%s" % c) or "").split())
|
||||
if compat and not layerseries:
|
||||
bb.fatal("No core layer found to work with layer '%s'. Missing entry in bblayers.conf?" % c)
|
||||
if compat and not (compat & layerseries):
|
||||
bb.fatal("Layer %s is not compatible with the core layer which only supports these series: %s (layer is compatible with %s)"
|
||||
% (c, " ".join(layerseries), " ".join(compat)))
|
||||
@@ -436,7 +429,7 @@ class CookerDataBuilder(object):
|
||||
parselog.critical("Undefined event handler function '%s'" % var)
|
||||
raise bb.BBHandledException()
|
||||
handlerln = int(data.getVarFlag(var, "lineno", False))
|
||||
bb.event.register(var, data.getVar(var, False), (data.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln, data)
|
||||
bb.event.register(var, data.getVar(var, False), (data.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)
|
||||
|
||||
data.setVar('BBINCLUDED',bb.parse.get_file_depends(data))
|
||||
|
||||
|
||||
@@ -226,7 +226,7 @@ def emit_func(func, o=sys.__stdout__, d = init()):
|
||||
deps = newdeps
|
||||
seen |= deps
|
||||
newdeps = set()
|
||||
for dep in sorted(deps):
|
||||
for dep in deps:
|
||||
if d.getVarFlag(dep, "func", False) and not d.getVarFlag(dep, "python", False):
|
||||
emit_var(dep, o, d, False) and o.write('\n')
|
||||
newdeps |= bb.codeparser.ShellParser(dep, logger).parse_shell(d.getVar(dep))
|
||||
|
||||
@@ -17,7 +17,7 @@ BitBake build tools.
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import copy, re, sys, traceback
from collections.abc import MutableMapping
from collections import MutableMapping
import logging
import hashlib
import bb, bb.codeparser
@@ -26,9 +26,9 @@ from bb.COW import COWDictBase

logger = logging.getLogger("BitBake.Data")

__setvar_keyword__ = [":append", ":prepend", ":remove"]
__setvar_regexp__ = re.compile(r'(?P<base>.*?)(?P<keyword>:append|:prepend|:remove)(:(?P<add>[^A-Z]*))?$')
__expand_var_regexp__ = re.compile(r"\${[a-zA-Z0-9\-_+./~:]+?}")
__setvar_keyword__ = ["_append", "_prepend", "_remove"]
__setvar_regexp__ = re.compile(r'(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>[^A-Z]*))?$')
__expand_var_regexp__ = re.compile(r"\${[a-zA-Z0-9\-_+./~]+?}")
__expand_python_regexp__ = re.compile(r"\${@.+?}")
__whitespace_split__ = re.compile(r'(\s)')
__override_regexp__ = re.compile(r'[a-z0-9]+')
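The regexp hunk above is the switch of the override separator from '_' to ':'. A minimal sketch of the newer colon syntax through the datastore API, assuming bitbake/lib is importable (the variable names are illustrative):

from bb.data_smart import DataSmart

d = DataSmart()
d.setVar("GREETING", "hello")
d.setVar("GREETING:append", " world")   # previously spelled GREETING_append
print(d.getVar("GREETING"))             # -> "hello world"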
@@ -151,7 +151,6 @@ class ExpansionError(Exception):
|
||||
self.expression = expression
|
||||
self.variablename = varname
|
||||
self.exception = exception
|
||||
self.varlist = [varname or expression or ""]
|
||||
if varname:
|
||||
if expression:
|
||||
self.msg = "Failure expanding variable %s, expression was %s which triggered exception %s: %s" % (varname, expression, type(exception).__name__, exception)
|
||||
@@ -161,14 +160,8 @@ class ExpansionError(Exception):
|
||||
self.msg = "Failure expanding expression %s which triggered exception %s: %s" % (expression, type(exception).__name__, exception)
|
||||
Exception.__init__(self, self.msg)
|
||||
self.args = (varname, expression, exception)
|
||||
|
||||
def addVar(self, varname):
|
||||
if varname:
|
||||
self.varlist.append(varname)
|
||||
|
||||
def __str__(self):
|
||||
chain = "\nThe variable dependency chain for the failure is: " + " -> ".join(self.varlist)
|
||||
return self.msg + chain
|
||||
return self.msg
|
||||
|
||||
class IncludeHistory(object):
|
||||
def __init__(self, parent = None, filename = '[TOP LEVEL]'):
|
||||
@@ -284,7 +277,7 @@ class VariableHistory(object):
|
||||
for (r, override) in d.overridedata[var]:
|
||||
for event in self.variable(r):
|
||||
loginfo = event.copy()
|
||||
if 'flag' in loginfo and not loginfo['flag'].startswith(("_", ":")):
|
||||
if 'flag' in loginfo and not loginfo['flag'].startswith("_"):
|
||||
continue
|
||||
loginfo['variable'] = var
|
||||
loginfo['op'] = 'override[%s]:%s' % (override, loginfo['op'])
|
||||
@@ -349,7 +342,7 @@ class VariableHistory(object):
|
||||
for event in history:
|
||||
if 'flag' in event:
|
||||
continue
|
||||
if event['op'] == ':remove':
|
||||
if event['op'] == '_remove':
|
||||
continue
|
||||
if isset and event['op'] == 'set?':
|
||||
continue
|
||||
@@ -410,17 +403,14 @@ class DataSmart(MutableMapping):
|
||||
s = __expand_python_regexp__.sub(varparse.python_sub, s)
|
||||
except SyntaxError as e:
|
||||
# Likely unmatched brackets, just don't expand the expression
|
||||
if e.msg != "EOL while scanning string literal" and not e.msg.startswith("unterminated string literal"):
|
||||
if e.msg != "EOL while scanning string literal":
|
||||
raise
|
||||
if s == olds:
|
||||
break
|
||||
except ExpansionError as e:
|
||||
e.addVar(varname)
|
||||
except ExpansionError:
|
||||
raise
|
||||
except bb.parse.SkipRecipe:
|
||||
raise
|
||||
except bb.BBHandledException:
|
||||
raise
|
||||
except Exception as exc:
|
||||
tb = sys.exc_info()[2]
|
||||
raise ExpansionError(varname, s, exc).with_traceback(tb) from exc
|
||||
@@ -491,15 +481,6 @@ class DataSmart(MutableMapping):
|
||||
|
||||
def setVar(self, var, value, **loginfo):
|
||||
#print("var=" + str(var) + " val=" + str(value))
|
||||
|
||||
if not var.startswith("__anon_") and ("_append" in var or "_prepend" in var or "_remove" in var):
|
||||
info = "%s" % var
|
||||
if "filename" in loginfo:
|
||||
info += " file: %s" % loginfo[filename]
|
||||
if "lineno" in loginfo:
|
||||
info += " line: %s" % loginfo[lineno]
|
||||
bb.fatal("Variable %s contains an operation using the old override syntax. Please convert this layer/metadata before attempting to use with a newer bitbake." % info)
|
||||
|
||||
self.expand_cache = {}
|
||||
parsing=False
|
||||
if 'parsing' in loginfo:
|
||||
@@ -528,7 +509,7 @@ class DataSmart(MutableMapping):
|
||||
# pay the cookie monster
|
||||
|
||||
# more cookies for the cookie monster
|
||||
if ':' in var:
|
||||
if '_' in var:
|
||||
self._setvar_update_overrides(base, **loginfo)
|
||||
|
||||
if base in self.overridevars:
|
||||
@@ -539,27 +520,27 @@ class DataSmart(MutableMapping):
|
||||
self._makeShadowCopy(var)
|
||||
|
||||
if not parsing:
|
||||
if ":append" in self.dict[var]:
|
||||
del self.dict[var][":append"]
|
||||
if ":prepend" in self.dict[var]:
|
||||
del self.dict[var][":prepend"]
|
||||
if ":remove" in self.dict[var]:
|
||||
del self.dict[var][":remove"]
|
||||
if "_append" in self.dict[var]:
|
||||
del self.dict[var]["_append"]
|
||||
if "_prepend" in self.dict[var]:
|
||||
del self.dict[var]["_prepend"]
|
||||
if "_remove" in self.dict[var]:
|
||||
del self.dict[var]["_remove"]
|
||||
if var in self.overridedata:
|
||||
active = []
|
||||
self.need_overrides()
|
||||
for (r, o) in self.overridedata[var]:
|
||||
if o in self.overridesset:
|
||||
active.append(r)
|
||||
elif ":" in o:
|
||||
if set(o.split(":")).issubset(self.overridesset):
|
||||
elif "_" in o:
|
||||
if set(o.split("_")).issubset(self.overridesset):
|
||||
active.append(r)
|
||||
for a in active:
|
||||
self.delVar(a)
|
||||
del self.overridedata[var]
|
||||
|
||||
# more cookies for the cookie monster
|
||||
if ':' in var:
|
||||
if '_' in var:
|
||||
self._setvar_update_overrides(var, **loginfo)
|
||||
|
||||
# setting var
|
||||
@@ -585,8 +566,8 @@ class DataSmart(MutableMapping):
|
||||
|
||||
def _setvar_update_overrides(self, var, **loginfo):
|
||||
# aka pay the cookie monster
|
||||
override = var[var.rfind(':')+1:]
|
||||
shortvar = var[:var.rfind(':')]
|
||||
override = var[var.rfind('_')+1:]
|
||||
shortvar = var[:var.rfind('_')]
|
||||
while override and __override_regexp__.match(override):
|
||||
if shortvar not in self.overridedata:
|
||||
self.overridedata[shortvar] = []
|
||||
@@ -595,9 +576,9 @@ class DataSmart(MutableMapping):
|
||||
self.overridedata[shortvar] = list(self.overridedata[shortvar])
|
||||
self.overridedata[shortvar].append([var, override])
|
||||
override = None
|
||||
if ":" in shortvar:
|
||||
override = var[shortvar.rfind(':')+1:]
|
||||
shortvar = var[:shortvar.rfind(':')]
|
||||
if "_" in shortvar:
|
||||
override = var[shortvar.rfind('_')+1:]
|
||||
shortvar = var[:shortvar.rfind('_')]
|
||||
if len(shortvar) == 0:
|
||||
override = None
|
||||
|
||||
@@ -636,7 +617,7 @@ class DataSmart(MutableMapping):
|
||||
self.overridedata[newkey].append([v.replace(key, newkey), o])
|
||||
self.renameVar(v, v.replace(key, newkey))
|
||||
|
||||
if ':' in newkey and val is None:
|
||||
if '_' in newkey and val is None:
|
||||
self._setvar_update_overrides(newkey, **loginfo)
|
||||
|
||||
loginfo['variable'] = key
|
||||
@@ -648,12 +629,12 @@ class DataSmart(MutableMapping):
|
||||
def appendVar(self, var, value, **loginfo):
|
||||
loginfo['op'] = 'append'
|
||||
self.varhistory.record(**loginfo)
|
||||
self.setVar(var + ":append", value, ignore=True, parsing=True)
|
||||
self.setVar(var + "_append", value, ignore=True, parsing=True)
|
||||
|
||||
def prependVar(self, var, value, **loginfo):
|
||||
loginfo['op'] = 'prepend'
|
||||
self.varhistory.record(**loginfo)
|
||||
self.setVar(var + ":prepend", value, ignore=True, parsing=True)
|
||||
self.setVar(var + "_prepend", value, ignore=True, parsing=True)
|
||||
|
||||
def delVar(self, var, **loginfo):
|
||||
self.expand_cache = {}
|
||||
@@ -664,9 +645,9 @@ class DataSmart(MutableMapping):
|
||||
self.dict[var] = {}
|
||||
if var in self.overridedata:
|
||||
del self.overridedata[var]
|
||||
if ':' in var:
|
||||
override = var[var.rfind(':')+1:]
|
||||
shortvar = var[:var.rfind(':')]
|
||||
if '_' in var:
|
||||
override = var[var.rfind('_')+1:]
|
||||
shortvar = var[:var.rfind('_')]
|
||||
while override and override.islower():
|
||||
try:
|
||||
if shortvar in self.overridedata:
|
||||
@@ -676,9 +657,9 @@ class DataSmart(MutableMapping):
|
||||
except ValueError as e:
|
||||
pass
|
||||
override = None
|
||||
if ":" in shortvar:
|
||||
override = var[shortvar.rfind(':')+1:]
|
||||
shortvar = var[:shortvar.rfind(':')]
|
||||
if "_" in shortvar:
|
||||
override = var[shortvar.rfind('_')+1:]
|
||||
shortvar = var[:shortvar.rfind('_')]
|
||||
if len(shortvar) == 0:
|
||||
override = None
|
||||
|
||||
@@ -693,7 +674,7 @@ class DataSmart(MutableMapping):
|
||||
self._makeShadowCopy(var)
|
||||
self.dict[var][flag] = value
|
||||
|
||||
if flag == "_defaultval" and ':' in var:
|
||||
if flag == "_defaultval" and '_' in var:
|
||||
self._setvar_update_overrides(var, **loginfo)
|
||||
if flag == "_defaultval" and var in self.overridevars:
|
||||
self._setvar_update_overridevars(var, value)
|
||||
@@ -725,11 +706,11 @@ class DataSmart(MutableMapping):
|
||||
active = {}
|
||||
self.need_overrides()
|
||||
for (r, o) in overridedata:
|
||||
# FIXME What about double overrides both with "_" in the name?
|
||||
# What about double overrides both with "_" in the name?
|
||||
if o in self.overridesset:
|
||||
active[o] = r
|
||||
elif ":" in o:
|
||||
if set(o.split(":")).issubset(self.overridesset):
|
||||
elif "_" in o:
|
||||
if set(o.split("_")).issubset(self.overridesset):
|
||||
active[o] = r
|
||||
|
||||
mod = True
|
||||
@@ -737,10 +718,10 @@ class DataSmart(MutableMapping):
|
||||
mod = False
|
||||
for o in self.overrides:
|
||||
for a in active.copy():
|
||||
if a.endswith(":" + o):
|
||||
if a.endswith("_" + o):
|
||||
t = active[a]
|
||||
del active[a]
|
||||
active[a.replace(":" + o, "")] = t
|
||||
active[a.replace("_" + o, "")] = t
|
||||
mod = True
|
||||
elif a == o:
|
||||
match = active[a]
|
||||
@@ -759,31 +740,31 @@ class DataSmart(MutableMapping):
|
||||
value = copy.copy(local_var["_defaultval"])
|
||||
|
||||
|
||||
if flag == "_content" and local_var is not None and ":append" in local_var and not parsing:
|
||||
if flag == "_content" and local_var is not None and "_append" in local_var and not parsing:
|
||||
if not value:
|
||||
value = ""
|
||||
self.need_overrides()
|
||||
for (r, o) in local_var[":append"]:
|
||||
for (r, o) in local_var["_append"]:
|
||||
match = True
|
||||
if o:
|
||||
for o2 in o.split(":"):
|
||||
for o2 in o.split("_"):
|
||||
if not o2 in self.overrides:
|
||||
match = False
|
||||
if match:
|
||||
if value is None:
|
||||
value = ""
|
||||
value = value + r
|
||||
|
||||
if flag == "_content" and local_var is not None and ":prepend" in local_var and not parsing:
|
||||
if flag == "_content" and local_var is not None and "_prepend" in local_var and not parsing:
|
||||
if not value:
|
||||
value = ""
|
||||
self.need_overrides()
|
||||
for (r, o) in local_var[":prepend"]:
|
||||
for (r, o) in local_var["_prepend"]:
|
||||
|
||||
match = True
|
||||
if o:
|
||||
for o2 in o.split(":"):
|
||||
for o2 in o.split("_"):
|
||||
if not o2 in self.overrides:
|
||||
match = False
|
||||
if match:
|
||||
if value is None:
|
||||
value = ""
|
||||
value = r + value
|
||||
|
||||
parser = None
|
||||
@@ -792,12 +773,12 @@ class DataSmart(MutableMapping):
|
||||
if expand:
|
||||
value = parser.value
|
||||
|
||||
if value and flag == "_content" and local_var is not None and ":remove" in local_var and not parsing:
|
||||
if value and flag == "_content" and local_var is not None and "_remove" in local_var and not parsing:
|
||||
self.need_overrides()
|
||||
for (r, o) in local_var[":remove"]:
|
||||
for (r, o) in local_var["_remove"]:
|
||||
match = True
|
||||
if o:
|
||||
for o2 in o.split(":"):
|
||||
for o2 in o.split("_"):
|
||||
if not o2 in self.overrides:
|
||||
match = False
|
||||
if match:
|
||||
@@ -883,7 +864,7 @@ class DataSmart(MutableMapping):
|
||||
|
||||
if local_var:
|
||||
for i in local_var:
|
||||
if i.startswith(("_", ":")) and not internalflags:
|
||||
if i.startswith("_") and not internalflags:
|
||||
continue
|
||||
flags[i] = local_var[i]
|
||||
if expand and i in expand:
|
||||
@@ -981,8 +962,8 @@ class DataSmart(MutableMapping):
|
||||
for (r, o) in self.overridedata[var]:
|
||||
if o in self.overridesset:
|
||||
overrides.add(var)
|
||||
elif ":" in o:
|
||||
if set(o.split(":")).issubset(self.overridesset):
|
||||
elif "_" in o:
|
||||
if set(o.split("_")).issubset(self.overridesset):
|
||||
overrides.add(var)
|
||||
|
||||
for k in keylist(self.dict):
|
||||
@@ -1024,7 +1005,7 @@ class DataSmart(MutableMapping):
|
||||
else:
|
||||
data.update({key:value})
|
||||
|
||||
varflags = d.getVarFlags(key, internalflags = True, expand=["vardepvalue"])
|
||||
varflags = d.getVarFlags(key, internalflags = True)
|
||||
if not varflags:
|
||||
continue
|
||||
for f in varflags:
|
||||
|
||||
@@ -118,8 +118,6 @@ def fire_class_handlers(event, d):
|
||||
if _eventfilter:
|
||||
if not _eventfilter(name, handler, event, d):
|
||||
continue
|
||||
if d is not None and not name in (d.getVar("__BBHANDLERS_MC") or set()):
|
||||
continue
|
||||
execute_handler(name, handler, event, d)
|
||||
|
||||
ui_queue = []
|
||||
@@ -229,19 +227,11 @@ def fire_from_worker(event, d):
|
||||
fire_ui_handlers(event, d)
|
||||
|
||||
noop = lambda _: None
|
||||
def register(name, handler, mask=None, filename=None, lineno=None, data=None):
|
||||
def register(name, handler, mask=None, filename=None, lineno=None):
|
||||
"""Register an Event handler"""
|
||||
|
||||
if data is not None and data.getVar("BB_CURRENT_MC"):
|
||||
mc = data.getVar("BB_CURRENT_MC")
|
||||
name = '%s%s' % (mc.replace('-', '_'), name)
|
||||
|
||||
# already registered
|
||||
if name in _handlers:
|
||||
if data is not None:
|
||||
bbhands_mc = (data.getVar("__BBHANDLERS_MC") or set())
|
||||
bbhands_mc.add(name)
|
||||
data.setVar("__BBHANDLERS_MC", bbhands_mc)
|
||||
return AlreadyRegistered
|
||||
|
||||
if handler is not None:
|
||||
@@ -278,20 +268,10 @@ def register(name, handler, mask=None, filename=None, lineno=None, data=None):
|
||||
_event_handler_map[m] = {}
|
||||
_event_handler_map[m][name] = True
|
||||
|
||||
if data is not None:
|
||||
bbhands_mc = (data.getVar("__BBHANDLERS_MC") or set())
|
||||
bbhands_mc.add(name)
|
||||
data.setVar("__BBHANDLERS_MC", bbhands_mc)
|
||||
|
||||
return Registered
|
||||
|
||||
def remove(name, handler, data=None):
|
||||
def remove(name, handler):
|
||||
"""Remove an Event handler"""
|
||||
if data is not None:
|
||||
if data.getVar("BB_CURRENT_MC"):
|
||||
mc = data.getVar("BB_CURRENT_MC")
|
||||
name = '%s%s' % (mc.replace('-', '_'), name)
|
||||
|
||||
_handlers.pop(name)
|
||||
if name in _catchall_handlers:
|
||||
_catchall_handlers.pop(name)
|
||||
@@ -299,12 +279,6 @@ def remove(name, handler, data=None):
|
||||
if name in _event_handler_map[event]:
|
||||
_event_handler_map[event].pop(name)
|
||||
|
||||
if data is not None:
|
||||
bbhands_mc = (data.getVar("__BBHANDLERS_MC") or set())
|
||||
if name in bbhands_mc:
|
||||
bbhands_mc.remove(name)
|
||||
data.setVar("__BBHANDLERS_MC", bbhands_mc)
|
||||
|
||||
def get_handlers():
|
||||
return _handlers
|
||||
|
||||
@@ -670,17 +644,6 @@ class ReachableStamps(Event):
|
||||
Event.__init__(self)
|
||||
self.stamps = stamps
|
||||
|
||||
class StaleSetSceneTasks(Event):
|
||||
"""
|
||||
An event listing setscene tasks which are 'stale' and will
|
||||
be rerun. The metadata may use to clean up stale data.
|
||||
tasks is a mapping of tasks and matching stale stamps.
|
||||
"""
|
||||
|
||||
def __init__(self, tasks):
|
||||
Event.__init__(self)
|
||||
self.tasks = tasks
|
||||
|
||||
class FilesMatchingFound(Event):
|
||||
"""
|
||||
Event when a list of files matching the supplied pattern has
|
||||
|
||||
@@ -290,7 +290,7 @@ class URI(object):

    def _param_str_split(self, string, elmdelim, kvdelim="="):
        ret = collections.OrderedDict()
        for k, v in [x.split(kvdelim, 1) for x in string.split(elmdelim) if x]:
        for k, v in [x.split(kvdelim, 1) for x in string.split(elmdelim)]:
            ret[k] = v
        return ret

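The one-character change above ('if x') makes empty segments, such as a trailing ';', harmless when URL parameters are split. The same parsing step as a standalone sketch:

import collections

def param_str_split(string, elmdelim=";", kvdelim="="):
    ret = collections.OrderedDict()
    for k, v in [x.split(kvdelim, 1) for x in string.split(elmdelim) if x]:
        ret[k] = v
    return ret

print(param_str_split("protocol=https;branch=main;"))  # trailing ';' no longer raises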
@@ -428,9 +428,8 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
|
||||
uri_decoded = list(decodeurl(ud.url))
|
||||
uri_find_decoded = list(decodeurl(uri_find))
|
||||
uri_replace_decoded = list(decodeurl(uri_replace))
|
||||
logger.debug2("For url %s comparing %s to %s" % (uri_decoded, uri_find_decoded, uri_replace_decoded))
|
||||
logger.debug(2, "For url %s comparing %s to %s" % (uri_decoded, uri_find_decoded, uri_replace_decoded))
|
||||
result_decoded = ['', '', '', '', '', {}]
|
||||
# 0 - type, 1 - host, 2 - path, 3 - user, 4- pswd, 5 - params
|
||||
for loc, i in enumerate(uri_find_decoded):
|
||||
result_decoded[loc] = uri_decoded[loc]
|
||||
regexp = i
|
||||
@@ -450,9 +449,6 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
|
||||
for l in replacements:
|
||||
uri_replace_decoded[loc][k] = uri_replace_decoded[loc][k].replace(l, replacements[l])
|
||||
result_decoded[loc][k] = uri_replace_decoded[loc][k]
|
||||
elif (loc == 3 or loc == 4) and uri_replace_decoded[loc]:
|
||||
# User/password in the replacement is just a straight replacement
|
||||
result_decoded[loc] = uri_replace_decoded[loc]
|
||||
elif (re.match(regexp, uri_decoded[loc])):
|
||||
if not uri_replace_decoded[loc]:
|
||||
result_decoded[loc] = ""
|
||||
@@ -471,18 +467,14 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
|
||||
uri_decoded[5] = {}
|
||||
elif ud.localpath and ud.method.supports_checksum(ud):
|
||||
basename = os.path.basename(ud.localpath)
|
||||
if basename:
|
||||
uri_basename = os.path.basename(uri_decoded[loc])
|
||||
if uri_basename and basename != uri_basename and result_decoded[loc].endswith(uri_basename):
|
||||
result_decoded[loc] = result_decoded[loc].replace(uri_basename, basename)
|
||||
elif not result_decoded[loc].endswith(basename):
|
||||
result_decoded[loc] = os.path.join(result_decoded[loc], basename)
|
||||
if basename and not result_decoded[loc].endswith(basename):
|
||||
result_decoded[loc] = os.path.join(result_decoded[loc], basename)
|
||||
else:
|
||||
return None
|
||||
result = encodeurl(result_decoded)
|
||||
if result == ud.url:
|
||||
return None
|
||||
logger.debug2("For url %s returning %s" % (ud.url, result))
|
||||
logger.debug(2, "For url %s returning %s" % (ud.url, result))
|
||||
return result
|
||||
|
||||
methods = []
|
||||
@@ -507,9 +499,9 @@ def fetcher_init(d):
|
||||
# When to drop SCM head revisions controlled by user policy
|
||||
srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear"
|
||||
if srcrev_policy == "cache":
|
||||
logger.debug("Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
elif srcrev_policy == "clear":
|
||||
logger.debug("Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
logger.debug(1, "Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
revs.clear()
|
||||
else:
|
||||
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
|
||||
@@ -570,9 +562,6 @@ def verify_checksum(ud, d, precomputed={}):
|
||||
|
||||
checksum_expected = getattr(ud, "%s_expected" % checksum_id)
|
||||
|
||||
if checksum_expected == '':
|
||||
checksum_expected = None
|
||||
|
||||
return {
|
||||
"id": checksum_id,
|
||||
"name": checksum_name,
|
||||
@@ -623,7 +612,7 @@ def verify_checksum(ud, d, precomputed={}):
|
||||
|
||||
for ci in checksum_infos:
|
||||
if ci["expected"] and ci["expected"] != ci["data"]:
|
||||
messages.append("File: '%s' has %s checksum '%s' when '%s' was " \
|
||||
messages.append("File: '%s' has %s checksum %s when %s was " \
|
||||
"expected" % (ud.localpath, ci["id"], ci["data"], ci["expected"]))
|
||||
bad_checksum = ci["data"]
|
||||
|
||||
@@ -762,11 +751,6 @@ def get_srcrev(d, method_name='sortable_revision'):
|
||||
that fetcher provides a method with the given name and the same signature as sortable_revision.
|
||||
"""
|
||||
|
||||
recursion = d.getVar("__BBINSRCREV")
|
||||
if recursion:
|
||||
raise FetchError("There are recursive references in fetcher variables, likely through SRC_URI")
|
||||
d.setVar("__BBINSRCREV", True)
|
||||
|
||||
scms = []
|
||||
fetcher = Fetch(d.getVar('SRC_URI').split(), d)
|
||||
urldata = fetcher.ud
|
||||
@@ -781,7 +765,6 @@ def get_srcrev(d, method_name='sortable_revision'):
|
||||
autoinc, rev = getattr(urldata[scms[0]].method, method_name)(urldata[scms[0]], d, urldata[scms[0]].names[0])
|
||||
if len(rev) > 10:
|
||||
rev = rev[:10]
|
||||
d.delVar("__BBINSRCREV")
|
||||
if autoinc:
|
||||
return "AUTOINC+" + rev
|
||||
return rev
|
||||
@@ -816,37 +799,12 @@ def get_srcrev(d, method_name='sortable_revision'):
|
||||
if seenautoinc:
|
||||
format = "AUTOINC+" + format
|
||||
|
||||
d.delVar("__BBINSRCREV")
|
||||
return format
|
||||
|
||||
def localpath(url, d):
|
||||
fetcher = bb.fetch2.Fetch([url], d)
|
||||
return fetcher.localpath(url)
|
||||
|
||||
# Need to export PATH as binary could be in metadata paths
|
||||
# rather than host provided
|
||||
# Also include some other variables.
|
||||
FETCH_EXPORT_VARS = ['HOME', 'PATH',
|
||||
'HTTP_PROXY', 'http_proxy',
|
||||
'HTTPS_PROXY', 'https_proxy',
|
||||
'FTP_PROXY', 'ftp_proxy',
|
||||
'FTPS_PROXY', 'ftps_proxy',
|
||||
'NO_PROXY', 'no_proxy',
|
||||
'ALL_PROXY', 'all_proxy',
|
||||
'GIT_PROXY_COMMAND',
|
||||
'GIT_SSH',
|
||||
'GIT_SSL_CAINFO',
|
||||
'GIT_SMART_HTTP',
|
||||
'SSH_AUTH_SOCK', 'SSH_AGENT_PID',
|
||||
'SOCKS5_USER', 'SOCKS5_PASSWD',
|
||||
'DBUS_SESSION_BUS_ADDRESS',
|
||||
'P4CONFIG',
|
||||
'SSL_CERT_FILE',
|
||||
'AWS_PROFILE',
|
||||
'AWS_ACCESS_KEY_ID',
|
||||
'AWS_SECRET_ACCESS_KEY',
|
||||
'AWS_DEFAULT_REGION']
|
||||
|
||||
def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
|
||||
"""
|
||||
Run cmd returning the command output
|
||||
@@ -855,7 +813,25 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
|
||||
Optionally remove the files/directories listed in cleanup upon failure
|
||||
"""
|
||||
|
||||
exportvars = FETCH_EXPORT_VARS
|
||||
# Need to export PATH as binary could be in metadata paths
|
||||
# rather than host provided
|
||||
# Also include some other variables.
|
||||
# FIXME: Should really include all export varaiables?
|
||||
exportvars = ['HOME', 'PATH',
|
||||
'HTTP_PROXY', 'http_proxy',
|
||||
'HTTPS_PROXY', 'https_proxy',
|
||||
'FTP_PROXY', 'ftp_proxy',
|
||||
'FTPS_PROXY', 'ftps_proxy',
|
||||
'NO_PROXY', 'no_proxy',
|
||||
'ALL_PROXY', 'all_proxy',
|
||||
'GIT_PROXY_COMMAND',
|
||||
'GIT_SSH',
|
||||
'GIT_SSL_CAINFO',
|
||||
'GIT_SMART_HTTP',
|
||||
'SSH_AUTH_SOCK', 'SSH_AGENT_PID',
|
||||
'SOCKS5_USER', 'SOCKS5_PASSWD',
|
||||
'DBUS_SESSION_BUS_ADDRESS',
|
||||
'P4CONFIG']
|
||||
|
||||
if not cleanup:
|
||||
cleanup = []
|
||||
@@ -877,13 +853,18 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
        if val:
            cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)

    # Ensure that a _PYTHON_SYSCONFIGDATA_NAME value set by a recipe
    # (for example via python3native.bbclass since warrior) is not set for
    # host Python (otherwise tools like git-make-shallow will fail)
    cmd = 'unset _PYTHON_SYSCONFIGDATA_NAME; ' + cmd

    # Disable pseudo as it may affect ssh, potentially causing it to hang.
    cmd = 'export PSEUDO_DISABLED=1; ' + cmd

    if workdir:
        logger.debug("Running '%s' in %s" % (cmd, workdir))
        logger.debug(1, "Running '%s' in %s" % (cmd, workdir))
    else:
        logger.debug("Running %s", cmd)
        logger.debug(1, "Running %s", cmd)

    success = False
    error_message = ""
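The hunk above shows how each allowed variable is prefixed onto the shell command as an export before the fetch command runs. In isolation the transformation looks like this (the helper name is illustrative):

def export_prefix(cmd, exports):
    # exports maps variable names to the values to expose to the fetch command
    for var, val in exports.items():
        if val:
            cmd = 'export %s="%s"; %s' % (var, val, cmd)
    return cmd

print(export_prefix("git clone https://example.com/repo.git", {"HOME": "/home/builder"}))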
@@ -892,7 +873,7 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
|
||||
(output, errors) = bb.process.run(cmd, log=log, shell=True, stderr=subprocess.PIPE, cwd=workdir)
|
||||
success = True
|
||||
except bb.process.NotFoundError as e:
|
||||
error_message = "Fetch command %s not found" % (e.command)
|
||||
error_message = "Fetch command %s" % (e.command)
|
||||
except bb.process.ExecutionError as e:
|
||||
if e.stdout:
|
||||
output = "output:\n%s\n%s" % (e.stdout, e.stderr)
|
||||
@@ -924,7 +905,7 @@ def check_network_access(d, info, url):
|
||||
elif not trusted_network(d, url):
|
||||
raise UntrustedUrl(url, info)
|
||||
else:
|
||||
logger.debug("Fetcher accessed the network with the command %s" % info)
|
||||
logger.debug(1, "Fetcher accessed the network with the command %s" % info)
|
||||
|
||||
def build_mirroruris(origud, mirrors, ld):
|
||||
uris = []
|
||||
@@ -950,7 +931,7 @@ def build_mirroruris(origud, mirrors, ld):
|
||||
continue
|
||||
|
||||
if not trusted_network(ld, newuri):
|
||||
logger.debug("Mirror %s not in the list of trusted networks, skipping" % (newuri))
|
||||
logger.debug(1, "Mirror %s not in the list of trusted networks, skipping" % (newuri))
|
||||
continue
|
||||
|
||||
# Create a local copy of the mirrors minus the current line
|
||||
@@ -963,8 +944,8 @@ def build_mirroruris(origud, mirrors, ld):
|
||||
newud = FetchData(newuri, ld)
|
||||
newud.setup_localpath(ld)
|
||||
except bb.fetch2.BBFetchException as e:
|
||||
logger.debug("Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
|
||||
logger.debug(str(e))
|
||||
logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
|
||||
logger.debug(1, str(e))
|
||||
try:
|
||||
# setup_localpath of file:// urls may fail, we should still see
|
||||
# if mirrors of the url exist
|
||||
@@ -1067,8 +1048,8 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):
|
||||
elif isinstance(e, NoChecksumError):
|
||||
raise
|
||||
else:
|
||||
logger.debug("Mirror fetch failure for url %s (original url: %s)" % (ud.url, origud.url))
|
||||
logger.debug(str(e))
|
||||
logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (ud.url, origud.url))
|
||||
logger.debug(1, str(e))
|
||||
try:
|
||||
ud.method.clean(ud, ld)
|
||||
except UnboundLocalError:
|
||||
@@ -1164,11 +1145,11 @@ def srcrev_internal_helper(ud, d, name):
    pn = d.getVar("PN")
    attempts = []
    if name != '' and pn:
        attempts.append("SRCREV_%s:pn-%s" % (name, pn))
        attempts.append("SRCREV_%s_pn-%s" % (name, pn))
    if name != '':
        attempts.append("SRCREV_%s" % name)
    if pn:
        attempts.append("SRCREV:pn-%s" % pn)
        attempts.append("SRCREV_pn-%s" % pn)
    attempts.append("SRCREV")

    for a in attempts:
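The lookup above builds a list of progressively less specific variable names, now using the ':' override separator. A sketch of the same ordering (the names passed in are illustrative):

def srcrev_attempts(name, pn):
    attempts = []
    if name and pn:
        attempts.append("SRCREV_%s:pn-%s" % (name, pn))
    if name:
        attempts.append("SRCREV_%s" % name)
    if pn:
        attempts.append("SRCREV:pn-%s" % pn)
    attempts.append("SRCREV")
    return attempts

print(srcrev_attempts("meta", "busybox"))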
@@ -1267,7 +1248,7 @@ class FetchData(object):
|
||||
|
||||
if checksum_name in self.parm:
|
||||
checksum_expected = self.parm[checksum_name]
|
||||
elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3", "az"]:
|
||||
elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]:
|
||||
checksum_expected = None
|
||||
else:
|
||||
checksum_expected = d.getVarFlag("SRC_URI", checksum_name)
|
||||
@@ -1480,10 +1461,6 @@ class FetchMethod(object):
|
||||
cmd = '7z x -so %s | tar x --no-same-owner -f -' % file
|
||||
elif file.endswith('.7z'):
|
||||
cmd = '7za x -y %s 1>/dev/null' % file
|
||||
elif file.endswith('.tzst') or file.endswith('.tar.zst'):
|
||||
cmd = 'zstd --decompress --stdout %s | tar x --no-same-owner -f -' % file
|
||||
elif file.endswith('.zst'):
|
||||
cmd = 'zstd --decompress --stdout %s > %s' % (file, efile)
|
||||
elif file.endswith('.zip') or file.endswith('.jar'):
|
||||
try:
|
||||
dos = bb.utils.to_boolean(urldata.parm.get('dos'), False)
|
||||
@@ -1712,7 +1689,7 @@ class Fetch(object):
|
||||
if m.verify_donestamp(ud, self.d) and not m.need_update(ud, self.d):
|
||||
done = True
|
||||
elif m.try_premirror(ud, self.d):
|
||||
logger.debug("Trying PREMIRRORS")
|
||||
logger.debug(1, "Trying PREMIRRORS")
|
||||
mirrors = mirror_from_string(self.d.getVar('PREMIRRORS'))
|
||||
done = m.try_mirrors(self, ud, self.d, mirrors)
|
||||
if done:
|
||||
@@ -1722,7 +1699,7 @@ class Fetch(object):
|
||||
m.update_donestamp(ud, self.d)
|
||||
except ChecksumError as e:
|
||||
logger.warning("Checksum failure encountered with premirror download of %s - will attempt other sources." % u)
|
||||
logger.debug(str(e))
|
||||
logger.debug(1, str(e))
|
||||
done = False
|
||||
|
||||
if premirroronly:
|
||||
@@ -1734,7 +1711,7 @@ class Fetch(object):
|
||||
try:
|
||||
if not trusted_network(self.d, ud.url):
|
||||
raise UntrustedUrl(ud.url)
|
||||
logger.debug("Trying Upstream")
|
||||
logger.debug(1, "Trying Upstream")
|
||||
m.download(ud, self.d)
|
||||
if hasattr(m, "build_mirror_data"):
|
||||
m.build_mirror_data(ud, self.d)
|
||||
@@ -1749,19 +1726,19 @@ class Fetch(object):
|
||||
except BBFetchException as e:
|
||||
if isinstance(e, ChecksumError):
|
||||
logger.warning("Checksum failure encountered with download of %s - will attempt other sources if available" % u)
|
||||
logger.debug(str(e))
|
||||
logger.debug(1, str(e))
|
||||
if os.path.exists(ud.localpath):
|
||||
rename_bad_checksum(ud, e.checksum)
|
||||
elif isinstance(e, NoChecksumError):
|
||||
raise
|
||||
else:
|
||||
logger.warning('Failed to fetch URL %s, attempting MIRRORS if available' % u)
|
||||
logger.debug(str(e))
|
||||
logger.debug(1, str(e))
|
||||
firsterr = e
|
||||
# Remove any incomplete fetch
|
||||
if not verified_stamp:
|
||||
m.clean(ud, self.d)
|
||||
logger.debug("Trying MIRRORS")
|
||||
logger.debug(1, "Trying MIRRORS")
|
||||
mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
|
||||
done = m.try_mirrors(self, ud, self.d, mirrors)
|
||||
|
||||
@@ -1798,7 +1775,7 @@ class Fetch(object):
|
||||
ud = self.ud[u]
|
||||
ud.setup_localpath(self.d)
|
||||
m = ud.method
|
||||
logger.debug("Testing URL %s", u)
|
||||
logger.debug(1, "Testing URL %s", u)
|
||||
# First try checking uri, u, from PREMIRRORS
|
||||
mirrors = mirror_from_string(self.d.getVar('PREMIRRORS'))
|
||||
ret = m.try_mirrors(self, ud, self.d, mirrors, True)
|
||||
@@ -1932,7 +1909,6 @@ from . import repo
from . import clearcase
from . import npm
from . import npmsw
from . import az

methods.append(local.Local())
methods.append(wget.Wget())
@@ -1952,4 +1928,3 @@ methods.append(repo.Repo())
methods.append(clearcase.ClearCase())
methods.append(npm.Npm())
methods.append(npmsw.NpmShrinkWrap())
methods.append(az.Az())

@@ -1,93 +0,0 @@
"""
BitBake 'Fetch' Azure Storage implementation

"""

# Copyright (C) 2021 Alejandro Hernandez Samaniego
#
# Based on bb.fetch2.wget:
# Copyright (C) 2003, 2004 Chris Larson
#
# SPDX-License-Identifier: GPL-2.0-only
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import shlex
import os
import bb
from bb.fetch2 import FetchError
from bb.fetch2 import logger
from bb.fetch2.wget import Wget


class Az(Wget):

    def supports(self, ud, d):
        """
        Check to see if a given url can be fetched from Azure Storage
        """
        return ud.type in ['az']


    def checkstatus(self, fetch, ud, d, try_again=True):

        # checkstatus discards parameters either way, we need to do this before adding the SAS
        ud.url = ud.url.replace('az://','https://').split(';')[0]

        az_sas = d.getVar('AZ_SAS')
        if az_sas and az_sas not in ud.url:
            ud.url += az_sas

        return Wget.checkstatus(self, fetch, ud, d, try_again)

    # Override download method, include retries
    def download(self, ud, d, retries=3):
        """Fetch urls"""

        # If were reaching the account transaction limit we might be refused a connection,
        # retrying allows us to avoid false negatives since the limit changes over time
        fetchcmd = self.basecmd + ' --retry-connrefused --waitretry=5'

        # We need to provide a localpath to avoid wget using the SAS
        # ud.localfile either has the downloadfilename or ud.path
        localpath = os.path.join(d.getVar("DL_DIR"), ud.localfile)
        bb.utils.mkdirhier(os.path.dirname(localpath))
        fetchcmd += " -O %s" % shlex.quote(localpath)


        if ud.user and ud.pswd:
            fetchcmd += " --user=%s --password=%s --auth-no-challenge" % (ud.user, ud.pswd)

        # Check if a Shared Access Signature was given and use it
        az_sas = d.getVar('AZ_SAS')

        if az_sas:
            azuri = '%s%s%s%s' % ('https://', ud.host, ud.path, az_sas)
        else:
            azuri = '%s%s%s' % ('https://', ud.host, ud.path)

        if os.path.exists(ud.localpath):
            # file exists, but we didnt complete it.. trying again.
            fetchcmd += d.expand(" -c -P ${DL_DIR} '%s'" % azuri)
        else:
            fetchcmd += d.expand(" -P ${DL_DIR} '%s'" % azuri)

        try:
            self._runwget(ud, d, fetchcmd, False)
        except FetchError as e:
            # Azure fails on handshake sometimes when using wget after some stress, producing a
            # FetchError from the fetcher, if the artifact exists retyring should succeed
            if 'Unable to establish SSL connection' in str(e):
                logger.debug2('Unable to establish SSL connection: Retries remaining: %s, Retrying...' % retries)
                self.download(ud, d, retries -1)

        # Sanity check since wget can pretend it succeed when it didn't
        # Also, this used to happen if sourceforge sent us to the mirror page
        if not os.path.exists(ud.localpath):
            raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (azuri, ud.localpath), azuri)

        if os.path.getsize(ud.localpath) == 0:
            os.remove(ud.localpath)
            raise FetchError("The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right." % (azuri), azuri)

        return True
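checkstatus() above rewrites an az:// URL onto plain HTTPS and appends a shared access signature when one is configured. A standalone sketch of that rewrite (the example URL and SAS value are made up):

def az_to_https(url, az_sas=None):
    # Drop any ';name=...' style parameters before appending the SAS
    url = url.replace("az://", "https://").split(";")[0]
    if az_sas and az_sas not in url:
        url += az_sas
    return url

print(az_to_https("az://example.blob.core.windows.net/container/file.tar.gz;name=file",
                  "?sv=2020-08-04&sig=abc"))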
@@ -74,16 +74,16 @@ class Bzr(FetchMethod):
|
||||
|
||||
if os.access(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir), '.bzr'), os.R_OK):
|
||||
bzrcmd = self._buildbzrcommand(ud, d, "update")
|
||||
logger.debug("BZR Update %s", ud.url)
|
||||
logger.debug(1, "BZR Update %s", ud.url)
|
||||
bb.fetch2.check_network_access(d, bzrcmd, ud.url)
|
||||
runfetchcmd(bzrcmd, d, workdir=os.path.join(ud.pkgdir, os.path.basename(ud.path)))
|
||||
else:
|
||||
bb.utils.remove(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir)), True)
|
||||
bzrcmd = self._buildbzrcommand(ud, d, "fetch")
|
||||
bb.fetch2.check_network_access(d, bzrcmd, ud.url)
|
||||
logger.debug("BZR Checkout %s", ud.url)
|
||||
logger.debug(1, "BZR Checkout %s", ud.url)
|
||||
bb.utils.mkdirhier(ud.pkgdir)
|
||||
logger.debug("Running %s", bzrcmd)
|
||||
logger.debug(1, "Running %s", bzrcmd)
|
||||
runfetchcmd(bzrcmd, d, workdir=ud.pkgdir)
|
||||
|
||||
scmdata = ud.parm.get("scmdata", "")
|
||||
@@ -109,7 +109,7 @@ class Bzr(FetchMethod):
|
||||
"""
|
||||
Return the latest upstream revision number
|
||||
"""
|
||||
logger.debug2("BZR fetcher hitting network for %s", ud.url)
|
||||
logger.debug(2, "BZR fetcher hitting network for %s", ud.url)
|
||||
|
||||
bb.fetch2.check_network_access(d, self._buildbzrcommand(ud, d, "revno"), ud.url)
|
||||
|
||||
|
||||
@@ -70,7 +70,7 @@ class ClearCase(FetchMethod):
|
||||
return ud.type in ['ccrc']
|
||||
|
||||
def debug(self, msg):
|
||||
logger.debug("ClearCase: %s", msg)
|
||||
logger.debug(1, "ClearCase: %s", msg)
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
"""
|
||||
|
||||
@@ -109,7 +109,7 @@ class Cvs(FetchMethod):
|
||||
cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd)
|
||||
|
||||
# create module directory
|
||||
logger.debug2("Fetch: checking for module directory")
|
||||
logger.debug(2, "Fetch: checking for module directory")
|
||||
moddir = os.path.join(ud.pkgdir, localdir)
|
||||
workdir = None
|
||||
if os.access(os.path.join(moddir, 'CVS'), os.R_OK):
|
||||
@@ -123,7 +123,7 @@ class Cvs(FetchMethod):
|
||||
# check out sources there
|
||||
bb.utils.mkdirhier(ud.pkgdir)
|
||||
workdir = ud.pkgdir
|
||||
logger.debug("Running %s", cvscmd)
|
||||
logger.debug(1, "Running %s", cvscmd)
|
||||
bb.fetch2.check_network_access(d, cvscmd, ud.url)
|
||||
cmd = cvscmd
|
||||
|
||||
|
||||
@@ -68,7 +68,6 @@ import subprocess
|
||||
import tempfile
|
||||
import bb
|
||||
import bb.progress
|
||||
from contextlib import contextmanager
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import logger
|
||||
@@ -142,10 +141,6 @@ class Git(FetchMethod):
                ud.proto = 'file'
            else:
                ud.proto = "git"
        if ud.host == "github.com" and ud.proto == "git":
            # github stopped supporting git protocol
            # https://github.blog/2021-09-01-improving-git-protocol-security-github/#no-more-unauthenticated-git
            ud.proto = "https"

        if not ud.proto in ('git', 'file', 'ssh', 'http', 'https', 'rsync'):
            raise bb.fetch2.ParameterError("Invalid protocol type", ud.url)
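The hunk above forces HTTPS for github.com because unauthenticated git:// access was switched off there (see the linked blog post). Reduced to its essentials, the rewrite is:

def default_proto(host, proto):
    # github stopped accepting the plain git protocol
    if host == "github.com" and proto == "git":
        return "https"
    return proto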
@@ -173,11 +168,7 @@ class Git(FetchMethod):
|
||||
if len(branches) != len(ud.names):
|
||||
raise bb.fetch2.ParameterError("The number of name and branch parameters is not balanced", ud.url)
|
||||
|
||||
ud.noshared = d.getVar("BB_GIT_NOSHARED") == "1"
|
||||
|
||||
ud.cloneflags = "-n"
|
||||
if not ud.noshared:
|
||||
ud.cloneflags += " -s"
|
||||
ud.cloneflags = "-s -n"
|
||||
if ud.bareclone:
|
||||
ud.cloneflags += " --mirror"
|
||||
|
||||
@@ -229,14 +220,9 @@ class Git(FetchMethod):
|
||||
ud.shallow = False
|
||||
|
||||
if ud.usehead:
|
||||
# When usehead is set let's associate 'HEAD' with the unresolved
|
||||
# rev of this repository. This will get resolved into a revision
|
||||
# later. If an actual revision happens to have also been provided
|
||||
# then this setting will be overridden.
|
||||
for name in ud.names:
|
||||
ud.unresolvedrev[name] = 'HEAD'
|
||||
ud.unresolvedrev['default'] = 'HEAD'
|
||||
|
||||
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c core.fsyncobjectfiles=0 -c gc.autoDetach=false"
|
||||
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c core.fsyncobjectfiles=0"
|
||||
|
||||
write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0"
|
||||
ud.write_tarballs = write_tarballs != "0" or ud.rebaseable
|
||||
@@ -393,50 +379,7 @@ class Git(FetchMethod):
|
||||
if missing_rev:
|
||||
raise bb.fetch2.FetchError("Unable to find revision %s even from upstream" % missing_rev)
|
||||
|
||||
if self._contains_lfs(ud, d, ud.clonedir) and self._need_lfs(ud):
|
||||
# Unpack temporary working copy, use it to run 'git checkout' to force pre-fetching
|
||||
# of all LFS blobs needed at the the srcrev.
|
||||
#
|
||||
# It would be nice to just do this inline here by running 'git-lfs fetch'
|
||||
# on the bare clonedir, but that operation requires a working copy on some
|
||||
# releases of Git LFS.
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
|
||||
try:
|
||||
# Do the checkout. This implicitly involves a Git LFS fetch.
|
||||
Git.unpack(self, ud, tmpdir, d)
|
||||
|
||||
# Scoop up a copy of any stuff that Git LFS downloaded. Merge them into
|
||||
# the bare clonedir.
|
||||
#
|
||||
# As this procedure is invoked repeatedly on incremental fetches as
|
||||
# a recipe's SRCREV is bumped throughout its lifetime, this will
|
||||
# result in a gradual accumulation of LFS blobs in <ud.clonedir>/lfs
|
||||
# corresponding to all the blobs reachable from the different revs
|
||||
# fetched across time.
|
||||
#
|
||||
# Only do this if the unpack resulted in a .git/lfs directory being
|
||||
# created; this only happens if at least one blob needed to be
|
||||
# downloaded.
|
||||
if os.path.exists(os.path.join(tmpdir, "git", ".git", "lfs")):
|
||||
runfetchcmd("tar -cf - lfs | tar -xf - -C %s" % ud.clonedir, d, workdir="%s/git/.git" % tmpdir)
|
||||
finally:
|
||||
bb.utils.remove(tmpdir, recurse=True)
def build_mirror_data(self, ud, d):
# Create as a temp file and move atomically into position to avoid races
|
||||
@contextmanager
|
||||
def create_atomic(filename):
|
||||
fd, tfile = tempfile.mkstemp(dir=os.path.dirname(filename))
|
||||
try:
|
||||
yield tfile
|
||||
umask = os.umask(0o666)
|
||||
os.umask(umask)
|
||||
os.chmod(tfile, (0o666 & ~umask))
|
||||
os.rename(tfile, filename)
|
||||
finally:
|
||||
os.close(fd)
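The create_atomic() helper above writes the tarball to a temporary file and renames it into place, so concurrent readers never see a half-written mirror. A self-contained sketch of the same pattern (the file name below is hypothetical); rename() within one filesystem is atomic, and the umask dance restores the default permissions that mkstemp would otherwise tighten.

import os
import tempfile
from contextlib import contextmanager

@contextmanager
def create_atomic(filename):
    # Write to a temp file in the target directory, then rename into place.
    fd, tfile = tempfile.mkstemp(dir=os.path.dirname(filename))
    try:
        yield tfile
        umask = os.umask(0o666)           # read the current umask...
        os.umask(umask)                   # ...and restore it immediately
        os.chmod(tfile, (0o666 & ~umask))
        os.rename(tfile, filename)        # atomic on the same filesystem
    finally:
        os.close(fd)

with create_atomic("/tmp/mirror.tar.gz") as tmpname:
    with open(tmpname, "wb") as f:
        f.write(b"archive contents")      # only visible under the final name once complete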
if ud.shallow and ud.write_shallow_tarballs:
|
||||
if not os.path.exists(ud.fullshallow):
|
||||
if os.path.islink(ud.fullshallow):
|
||||
@@ -447,8 +390,7 @@ class Git(FetchMethod):
|
||||
self.clone_shallow_local(ud, shallowclone, d)
logger.info("Creating tarball of git repository")
|
||||
with create_atomic(ud.fullshallow) as tfile:
|
||||
runfetchcmd("tar -czf %s ." % tfile, d, workdir=shallowclone)
|
||||
runfetchcmd("tar -czf %s ." % ud.fullshallow, d, workdir=shallowclone)
|
||||
runfetchcmd("touch %s.done" % ud.fullshallow, d)
|
||||
finally:
|
||||
bb.utils.remove(tempdir, recurse=True)
|
||||
@@ -457,8 +399,7 @@ class Git(FetchMethod):
|
||||
os.unlink(ud.fullmirror)
logger.info("Creating tarball of git repository")
|
||||
with create_atomic(ud.fullmirror) as tfile:
|
||||
runfetchcmd("tar -czf %s ." % tfile, d, workdir=ud.clonedir)
|
||||
runfetchcmd("tar -czf %s ." % ud.fullmirror, d, workdir=ud.clonedir)
|
||||
runfetchcmd("touch %s.done" % ud.fullmirror, d)
def clone_shallow_local(self, ud, dest, d):
|
||||
@@ -533,7 +474,7 @@ class Git(FetchMethod):
|
||||
if os.path.exists(destdir):
|
||||
bb.utils.prunedir(destdir)
|
||||
|
||||
need_lfs = self._need_lfs(ud)
|
||||
need_lfs = ud.parm.get("lfs", "1") == "1"
|
||||
|
||||
if not need_lfs:
|
||||
ud.basecmd = "GIT_LFS_SKIP_SMUDGE=1 " + ud.basecmd
|
||||
@@ -622,9 +563,6 @@ class Git(FetchMethod):
|
||||
raise bb.fetch2.FetchError("The command '%s' gave output with more then 1 line unexpectedly, output: '%s'" % (cmd, output))
|
||||
return output.split()[0] != "0"
def _need_lfs(self, ud):
|
||||
return ud.parm.get("lfs", "1") == "1"
def _contains_lfs(self, ud, d, wd):
|
||||
"""
|
||||
Check if the repository has 'lfs' (large file) content
|
||||
@@ -635,14 +573,8 @@ class Git(FetchMethod):
|
||||
else:
|
||||
branchname = "master"
|
||||
|
||||
# The bare clonedir doesn't use the remote names; it has the branch immediately.
|
||||
if wd == ud.clonedir:
|
||||
refname = ud.branches[ud.names[0]]
|
||||
else:
|
||||
refname = "origin/%s" % ud.branches[ud.names[0]]
|
||||
|
||||
cmd = "%s grep lfs %s:.gitattributes | wc -l" % (
|
||||
ud.basecmd, refname)
|
||||
cmd = "%s grep lfs origin/%s:.gitattributes | wc -l" % (
|
||||
ud.basecmd, ud.branches[ud.names[0]])
|
||||
|
||||
try:
|
||||
output = runfetchcmd(cmd, d, quiet=True, workdir=wd)
|
||||
@@ -663,11 +595,6 @@ class Git(FetchMethod):
|
||||
"""
|
||||
Return the repository URL
|
||||
"""
|
||||
# Note that we do not support passwords directly in the git urls. There are several
|
||||
# reasons. SRC_URI can be written out to things like buildhistory and people don't
|
||||
# want to leak passwords like that. It's also all too easy to share metadata without
|
||||
# removing the password. ssh keys, ~/.netrc and ~/.ssh/config files can be used as
|
||||
# alternatives so we will not take patches adding password support here.
|
||||
if ud.user:
|
||||
username = ud.user + '@'
|
||||
else:
@@ -78,7 +78,7 @@ class GitSM(Git):
|
||||
module_hash = ""
|
||||
|
||||
if not module_hash:
|
||||
logger.debug("submodule %s is defined, but is not initialized in the repository. Skipping", m)
|
||||
logger.debug(1, "submodule %s is defined, but is not initialized in the repository. Skipping", m)
|
||||
continue
|
||||
|
||||
submodules.append(m)
|
||||
@@ -140,6 +140,16 @@ class GitSM(Git):
|
||||
if Git.need_update(self, ud, d):
|
||||
return True
|
||||
|
||||
try:
|
||||
# Check for the nugget dropped by the download operation
|
||||
known_srcrevs = runfetchcmd("%s config --get-all bitbake.srcrev" % \
|
||||
(ud.basecmd), d, workdir=ud.clonedir)
|
||||
|
||||
if ud.revisions[ud.names[0]] in known_srcrevs.split():
|
||||
return False
|
||||
except bb.fetch2.FetchError:
|
||||
pass
|
||||
|
||||
need_update_list = []
|
||||
def need_update_submodule(ud, url, module, modpath, workdir, d):
|
||||
url += ";bareclone=1;nobranch=1"
|
||||
@@ -162,9 +172,14 @@ class GitSM(Git):
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, need_update_submodule, d)
|
||||
if len(need_update_list) == 0:
|
||||
# We already have the required commits of all submodules. Drop
|
||||
# a nugget so we don't need to check again.
|
||||
runfetchcmd("%s config --add bitbake.srcrev %s" % \
|
||||
(ud.basecmd, ud.revisions[ud.names[0]]), d, workdir=ud.clonedir)
|
||||
|
||||
if len(need_update_list) > 0:
|
||||
logger.debug('gitsm: Submodules requiring update: %s' % (' '.join(need_update_list)))
|
||||
logger.debug(1, 'gitsm: Submodules requiring update: %s' % (' '.join(need_update_list)))
|
||||
return True
|
||||
|
||||
return False
|
||||
@@ -194,6 +209,9 @@ class GitSM(Git):
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, download_submodule, d)
|
||||
# Drop a nugget for the srcrev we've fetched (used by need_update)
|
||||
runfetchcmd("%s config --add bitbake.srcrev %s" % \
|
||||
(ud.basecmd, ud.revisions[ud.names[0]]), d, workdir=ud.clonedir)
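Sketch of the "nugget" bookkeeping used here: the fetched revision is recorded in the bare clone's git config so a later need_update() can return early without re-walking every submodule (paths and revision below are hypothetical).

import subprocess

def record_srcrev(clonedir, srcrev):
    # Mark this revision as fully fetched, including its submodules.
    subprocess.run(["git", "config", "--add", "bitbake.srcrev", srcrev],
                   cwd=clonedir, check=True)

def srcrev_already_fetched(clonedir, srcrev):
    # "git config --get-all" exits non-zero when no value has been recorded yet.
    result = subprocess.run(["git", "config", "--get-all", "bitbake.srcrev"],
                            cwd=clonedir, capture_output=True, text=True)
    return result.returncode == 0 and srcrev in result.stdout.split()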
def unpack(self, ud, destdir, d):
|
||||
def unpack_submodules(ud, url, module, modpath, workdir, d):
|
||||
|
||||
@@ -150,7 +150,7 @@ class Hg(FetchMethod):
|
||||
def download(self, ud, d):
|
||||
"""Fetch url"""
|
||||
|
||||
logger.debug2("Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
|
||||
# If the checkout doesn't exist and the mirror tarball does, extract it
|
||||
if not os.path.exists(ud.pkgdir) and os.path.exists(ud.fullmirror):
|
||||
@@ -160,7 +160,7 @@ class Hg(FetchMethod):
|
||||
if os.access(os.path.join(ud.moddir, '.hg'), os.R_OK):
|
||||
# Found the source, check whether need pull
|
||||
updatecmd = self._buildhgcommand(ud, d, "update")
|
||||
logger.debug("Running %s", updatecmd)
|
||||
logger.debug(1, "Running %s", updatecmd)
|
||||
try:
|
||||
runfetchcmd(updatecmd, d, workdir=ud.moddir)
|
||||
except bb.fetch2.FetchError:
|
||||
@@ -168,7 +168,7 @@ class Hg(FetchMethod):
|
||||
pullcmd = self._buildhgcommand(ud, d, "pull")
|
||||
logger.info("Pulling " + ud.url)
|
||||
# update sources there
|
||||
logger.debug("Running %s", pullcmd)
|
||||
logger.debug(1, "Running %s", pullcmd)
|
||||
bb.fetch2.check_network_access(d, pullcmd, ud.url)
|
||||
runfetchcmd(pullcmd, d, workdir=ud.moddir)
|
||||
try:
|
||||
@@ -183,14 +183,14 @@ class Hg(FetchMethod):
|
||||
logger.info("Fetch " + ud.url)
|
||||
# check out sources there
|
||||
bb.utils.mkdirhier(ud.pkgdir)
|
||||
logger.debug("Running %s", fetchcmd)
|
||||
logger.debug(1, "Running %s", fetchcmd)
|
||||
bb.fetch2.check_network_access(d, fetchcmd, ud.url)
|
||||
runfetchcmd(fetchcmd, d, workdir=ud.pkgdir)
|
||||
|
||||
# Even when we clone (fetch), we still need to update as hg's clone
|
||||
# won't check out the specified revision if it's on a branch
|
||||
updatecmd = self._buildhgcommand(ud, d, "update")
|
||||
logger.debug("Running %s", updatecmd)
|
||||
logger.debug(1, "Running %s", updatecmd)
|
||||
runfetchcmd(updatecmd, d, workdir=ud.moddir)
|
||||
|
||||
def clean(self, ud, d):
|
||||
@@ -247,9 +247,9 @@ class Hg(FetchMethod):
|
||||
if scmdata != "nokeep":
|
||||
proto = ud.parm.get('protocol', 'http')
|
||||
if not os.access(os.path.join(codir, '.hg'), os.R_OK):
|
||||
logger.debug2("Unpack: creating new hg repository in '" + codir + "'")
|
||||
logger.debug(2, "Unpack: creating new hg repository in '" + codir + "'")
|
||||
runfetchcmd("%s init %s" % (ud.basecmd, codir), d)
|
||||
logger.debug2("Unpack: updating source in '" + codir + "'")
|
||||
logger.debug(2, "Unpack: updating source in '" + codir + "'")
|
||||
if ud.user and ud.pswd:
|
||||
runfetchcmd("%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" pull %s" % (ud.basecmd, ud.user, ud.pswd, proto, ud.moddir), d, workdir=codir)
|
||||
else:
|
||||
@@ -259,5 +259,5 @@ class Hg(FetchMethod):
|
||||
else:
|
||||
runfetchcmd("%s up -C %s" % (ud.basecmd, revflag), d, workdir=codir)
|
||||
else:
|
||||
logger.debug2("Unpack: extracting source to '" + codir + "'")
|
||||
logger.debug(2, "Unpack: extracting source to '" + codir + "'")
|
||||
runfetchcmd("%s archive -t files %s %s" % (ud.basecmd, revflag, codir), d, workdir=ud.moddir)
|
||||
|
||||
@@ -54,12 +54,12 @@ class Local(FetchMethod):
|
||||
return [path]
|
||||
filespath = d.getVar('FILESPATH')
|
||||
if filespath:
|
||||
logger.debug2("Searching for %s in paths:\n %s" % (path, "\n ".join(filespath.split(":"))))
|
||||
logger.debug(2, "Searching for %s in paths:\n %s" % (path, "\n ".join(filespath.split(":"))))
|
||||
newpath, hist = bb.utils.which(filespath, path, history=True)
|
||||
searched.extend(hist)
|
||||
if not os.path.exists(newpath):
|
||||
dldirfile = os.path.join(d.getVar("DL_DIR"), path)
|
||||
logger.debug2("Defaulting to %s for %s" % (dldirfile, path))
|
||||
logger.debug(2, "Defaulting to %s for %s" % (dldirfile, path))
|
||||
bb.utils.mkdirhier(os.path.dirname(dldirfile))
|
||||
searched.append(dldirfile)
|
||||
return searched
|
||||
|
||||
@@ -69,31 +69,17 @@ def npm_unpack(tarball, destdir, d):
|
||||
bb.utils.mkdirhier(destdir)
|
||||
cmd = "tar --extract --gzip --file=%s" % shlex.quote(tarball)
|
||||
cmd += " --no-same-owner"
|
||||
cmd += " --delay-directory-restore"
|
||||
cmd += " --strip-components=1"
|
||||
runfetchcmd(cmd, d, workdir=destdir)
|
||||
runfetchcmd("chmod -R +X '%s'" % (destdir), d, quiet=True, workdir=destdir)
|
||||
|
||||
class NpmEnvironment(object):
|
||||
"""
|
||||
Using an npm config file seems more reliable than using cli arguments.
This class allows creating a controlled environment for npm commands.
|
||||
"""
|
||||
def __init__(self, d, configs=[], npmrc=None):
|
||||
def __init__(self, d, configs=None):
|
||||
self.d = d
|
||||
|
||||
self.user_config = tempfile.NamedTemporaryFile(mode="w", buffering=1)
|
||||
for key, value in configs:
|
||||
self.user_config.write("%s=%s\n" % (key, value))
|
||||
|
||||
if npmrc:
|
||||
self.global_config_name = npmrc
|
||||
else:
|
||||
self.global_config_name = "/dev/null"
|
||||
|
||||
def __del__(self):
|
||||
if self.user_config:
|
||||
self.user_config.close()
|
||||
self.configs = configs
|
||||
|
||||
def run(self, cmd, args=None, configs=None, workdir=None):
|
||||
"""Run npm command in a controlled environment"""
|
||||
@@ -101,19 +87,23 @@ class NpmEnvironment(object):
|
||||
d = bb.data.createCopy(self.d)
|
||||
d.setVar("HOME", tmpdir)
|
||||
|
||||
cfgfile = os.path.join(tmpdir, "npmrc")
|
||||
|
||||
if not workdir:
|
||||
workdir = tmpdir
|
||||
|
||||
def _run(cmd):
|
||||
cmd = "NPM_CONFIG_USERCONFIG=%s " % (self.user_config.name) + cmd
|
||||
cmd = "NPM_CONFIG_GLOBALCONFIG=%s " % (self.global_config_name) + cmd
|
||||
cmd = "NPM_CONFIG_USERCONFIG=%s " % cfgfile + cmd
|
||||
cmd = "NPM_CONFIG_GLOBALCONFIG=%s " % cfgfile + cmd
|
||||
return runfetchcmd(cmd, d, workdir=workdir)
|
||||
|
||||
if self.configs:
|
||||
for key, value in self.configs:
|
||||
_run("npm config set %s %s" % (key, shlex.quote(value)))
|
||||
|
||||
if configs:
|
||||
bb.warn("Use of configs argument of NpmEnvironment.run() function"
|
||||
" is deprecated. Please use args argument instead.")
|
||||
for key, value in configs:
|
||||
cmd += " --%s=%s" % (key, shlex.quote(value))
|
||||
_run("npm config set %s %s" % (key, shlex.quote(value)))
|
||||
|
||||
if args:
|
||||
for key, value in args:
|
||||
@@ -175,14 +165,14 @@ class Npm(FetchMethod):
|
||||
|
||||
def _resolve_proxy_url(self, ud, d):
|
||||
def _npm_view():
|
||||
args = []
|
||||
args.append(("json", "true"))
|
||||
args.append(("registry", ud.registry))
|
||||
configs = []
|
||||
configs.append(("json", "true"))
|
||||
configs.append(("registry", ud.registry))
|
||||
pkgver = shlex.quote(ud.package + "@" + ud.version)
|
||||
cmd = ud.basecmd + " view %s" % pkgver
|
||||
env = NpmEnvironment(d)
|
||||
check_network_access(d, cmd, ud.registry)
|
||||
view_string = env.run(cmd, args=args)
|
||||
view_string = env.run(cmd, configs=configs)
|
||||
|
||||
if not view_string:
|
||||
raise FetchError("Unavailable package %s" % pkgver, ud.url)
|
||||
@@ -24,7 +24,6 @@ import bb
|
||||
from bb.fetch2 import Fetch
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import ParameterError
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import URI
|
||||
from bb.fetch2.npm import npm_integrity
|
||||
from bb.fetch2.npm import npm_localfile
|
||||
@@ -79,7 +78,6 @@ class NpmShrinkWrap(FetchMethod):
|
||||
extrapaths = []
|
||||
destsubdirs = [os.path.join("node_modules", dep) for dep in deptree]
|
||||
destsuffix = os.path.join(*destsubdirs)
|
||||
unpack = True
|
||||
|
||||
integrity = params.get("integrity", None)
|
||||
resolved = params.get("resolved", None)
|
||||
@@ -150,12 +148,7 @@ class NpmShrinkWrap(FetchMethod):
|
||||
|
||||
url = str(uri)
|
||||
|
||||
# Handle local tarball and link sources
|
||||
elif version.startswith("file"):
|
||||
localpath = version[5:]
|
||||
if not version.endswith(".tgz"):
|
||||
unpack = False
|
||||
|
||||
# local tarball sources and local link sources are unsupported
|
||||
else:
|
||||
raise ParameterError("Unsupported dependency: %s" % name, ud.url)
|
||||
|
||||
@@ -164,7 +157,6 @@ class NpmShrinkWrap(FetchMethod):
|
||||
"localpath": localpath,
|
||||
"extrapaths": extrapaths,
|
||||
"destsuffix": destsuffix,
|
||||
"unpack": unpack,
|
||||
})
|
||||
|
||||
try:
|
||||
@@ -185,7 +177,7 @@ class NpmShrinkWrap(FetchMethod):
|
||||
# This fetcher resolves multiple URIs from a shrinkwrap file and then
|
||||
# forwards it to a proxy fetcher. The management of the donestamp file,
|
||||
# the lockfile and the checksums are forwarded to the proxy fetcher.
|
||||
ud.proxy = Fetch([dep["url"] for dep in ud.deps if dep["url"]], data)
|
||||
ud.proxy = Fetch([dep["url"] for dep in ud.deps], data)
|
||||
ud.needdonestamp = False
|
||||
|
||||
@staticmethod
|
||||
@@ -245,16 +237,7 @@ class NpmShrinkWrap(FetchMethod):
|
||||
|
||||
for dep in manual:
|
||||
depdestdir = os.path.join(destdir, dep["destsuffix"])
|
||||
if dep["url"]:
|
||||
npm_unpack(dep["localpath"], depdestdir, d)
|
||||
else:
|
||||
depsrcdir = os.path.join(destdir, dep["localpath"])
|
||||
if dep["unpack"]:
|
||||
npm_unpack(depsrcdir, depdestdir, d)
|
||||
else:
|
||||
bb.utils.mkdirhier(depdestdir)
|
||||
cmd = 'cp -fpPRH "%s/." .' % (depsrcdir)
|
||||
runfetchcmd(cmd, d, workdir=depdestdir)
|
||||
npm_unpack(dep["localpath"], depdestdir, d)
|
||||
|
||||
def clean(self, ud, d):
|
||||
"""Clean any existing full or partial download"""
|
||||
|
||||
@@ -84,13 +84,13 @@ class Osc(FetchMethod):
|
||||
Fetch url
|
||||
"""
|
||||
|
||||
logger.debug2("Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
|
||||
if os.access(os.path.join(d.getVar('OSCDIR'), ud.path, ud.module), os.R_OK):
|
||||
oscupdatecmd = self._buildosccommand(ud, d, "update")
|
||||
logger.info("Update "+ ud.url)
|
||||
# update sources there
|
||||
logger.debug("Running %s", oscupdatecmd)
|
||||
logger.debug(1, "Running %s", oscupdatecmd)
|
||||
bb.fetch2.check_network_access(d, oscupdatecmd, ud.url)
|
||||
runfetchcmd(oscupdatecmd, d, workdir=ud.moddir)
|
||||
else:
|
||||
@@ -98,7 +98,7 @@ class Osc(FetchMethod):
|
||||
logger.info("Fetch " + ud.url)
|
||||
# check out sources there
|
||||
bb.utils.mkdirhier(ud.pkgdir)
|
||||
logger.debug("Running %s", oscfetchcmd)
|
||||
logger.debug(1, "Running %s", oscfetchcmd)
|
||||
bb.fetch2.check_network_access(d, oscfetchcmd, ud.url)
|
||||
runfetchcmd(oscfetchcmd, d, workdir=ud.pkgdir)
|
||||
|
||||
|
||||
@@ -90,16 +90,16 @@ class Perforce(FetchMethod):
|
||||
p4port = d.getVar('P4PORT')
|
||||
|
||||
if p4port:
|
||||
logger.debug('Using recipe provided P4PORT: %s' % p4port)
|
||||
logger.debug(1, 'Using recipe provided P4PORT: %s' % p4port)
|
||||
ud.host = p4port
|
||||
else:
|
||||
logger.debug('Trying to use P4CONFIG to automatically set P4PORT...')
|
||||
logger.debug(1, 'Trying to use P4CONFIG to automatically set P4PORT...')
|
||||
ud.usingp4config = True
|
||||
p4cmd = '%s info | grep "Server address"' % ud.basecmd
|
||||
bb.fetch2.check_network_access(d, p4cmd, ud.url)
|
||||
ud.host = runfetchcmd(p4cmd, d, True)
|
||||
ud.host = ud.host.split(': ')[1].strip()
|
||||
logger.debug('Determined P4PORT to be: %s' % ud.host)
|
||||
logger.debug(1, 'Determined P4PORT to be: %s' % ud.host)
|
||||
if not ud.host:
|
||||
raise FetchError('Could not determine P4PORT from P4CONFIG')
|
||||
|
||||
@@ -119,7 +119,6 @@ class Perforce(FetchMethod):
|
||||
cleanedpath = ud.path.replace('/...', '').replace('/', '.')
|
||||
cleanedhost = ud.host.replace(':', '.')
|
||||
|
||||
cleanedmodule = ""
|
||||
# Merge the path and module into the final depot location
|
||||
if ud.module:
|
||||
if ud.module.find('/') == 0:
|
||||
@@ -134,7 +133,7 @@ class Perforce(FetchMethod):
|
||||
|
||||
ud.setup_revisions(d)
|
||||
|
||||
ud.localfile = d.expand('%s_%s_%s_%s.tar.gz' % (cleanedhost, cleanedpath, cleanedmodule, ud.revision))
|
||||
ud.localfile = d.expand('%s_%s_%s.tar.gz' % (cleanedhost, cleanedpath, ud.revision))
|
||||
|
||||
def _buildp4command(self, ud, d, command, depot_filename=None):
|
||||
"""
|
||||
@@ -208,7 +207,7 @@ class Perforce(FetchMethod):
|
||||
for filename in p4fileslist:
|
||||
item = filename.split(' - ')
|
||||
lastaction = item[1].split()
|
||||
logger.debug('File: %s Last Action: %s' % (item[0], lastaction[0]))
|
||||
logger.debug(1, 'File: %s Last Action: %s' % (item[0], lastaction[0]))
|
||||
if lastaction[0] == 'delete':
|
||||
continue
|
||||
filelist.append(item[0])
|
||||
@@ -255,7 +254,7 @@ class Perforce(FetchMethod):
|
||||
raise FetchError('Could not determine the latest perforce changelist')
|
||||
|
||||
tipcset = tip.split(' ')[1]
|
||||
logger.debug('p4 tip found to be changelist %s' % tipcset)
|
||||
logger.debug(1, 'p4 tip found to be changelist %s' % tipcset)
|
||||
return tipcset
|
||||
|
||||
def sortable_revision(self, ud, d, name):
|
||||
|
||||
@@ -47,7 +47,7 @@ class Repo(FetchMethod):
|
||||
"""Fetch url"""
|
||||
|
||||
if os.access(os.path.join(d.getVar("DL_DIR"), ud.localfile), os.R_OK):
|
||||
logger.debug("%s already exists (or was stashed). Skipping repo init / sync.", ud.localpath)
|
||||
logger.debug(1, "%s already exists (or was stashed). Skipping repo init / sync.", ud.localpath)
|
||||
return
|
||||
|
||||
repodir = d.getVar("REPODIR") or (d.getVar("DL_DIR") + "/repo")
|
||||
|
||||
@@ -18,47 +18,10 @@ The aws tool must be correctly installed and configured prior to use.
|
||||
import os
|
||||
import bb
|
||||
import urllib.request, urllib.parse, urllib.error
|
||||
import re
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import runfetchcmd
|
||||
|
||||
def convertToBytes(value, unit):
|
||||
value = float(value)
|
||||
if (unit == "KiB"):
|
||||
value = value*1024.0;
|
||||
elif (unit == "MiB"):
|
||||
value = value*1024.0*1024.0;
|
||||
elif (unit == "GiB"):
|
||||
value = value*1024.0*1024.0*1024.0;
|
||||
return value
|
||||
|
||||
class S3ProgressHandler(bb.progress.LineFilterProgressHandler):
|
||||
"""
|
||||
Extract progress information from s3 cp output, e.g.:
|
||||
Completed 5.1 KiB/8.8 GiB (12.0 MiB/s) with 1 file(s) remaining
|
||||
"""
|
||||
def __init__(self, d):
|
||||
super(S3ProgressHandler, self).__init__(d)
|
||||
# Send an initial progress event so the bar gets shown
|
||||
self._fire_progress(0)
|
||||
|
||||
def writeline(self, line):
|
||||
percs = re.findall(r'^Completed (\d+.{0,1}\d*) (\w+)\/(\d+.{0,1}\d*) (\w+) (\(.+\)) with\s+', line)
|
||||
if percs:
|
||||
completed = (percs[-1][0])
|
||||
completedUnit = (percs[-1][1])
|
||||
total = (percs[-1][2])
|
||||
totalUnit = (percs[-1][3])
|
||||
completed = convertToBytes(completed, completedUnit)
|
||||
total = convertToBytes(total, totalUnit)
|
||||
progress = (completed/total)*100.0
|
||||
rate = percs[-1][4]
|
||||
self.update(progress, rate)
|
||||
return False
|
||||
return True
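Worked example of the conversion feeding the progress figure above, using the sample line from the docstring (5.1 KiB completed of 8.8 GiB total):

def convert_to_bytes(value, unit):
    factors = {"KiB": 1024.0, "MiB": 1024.0 ** 2, "GiB": 1024.0 ** 3}
    return float(value) * factors.get(unit, 1.0)

completed = convert_to_bytes("5.1", "KiB")   # 5222.4 bytes
total = convert_to_bytes("8.8", "GiB")       # 9448928051.2 bytes
progress = (completed / total) * 100.0       # roughly 0.000055 percent, reported alongside the "(12.0 MiB/s)" rate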
|
||||
class S3(FetchMethod):
|
||||
"""Class to fetch urls via 'aws s3'"""
|
||||
|
||||
@@ -89,9 +52,7 @@ class S3(FetchMethod):
|
||||
|
||||
cmd = '%s cp s3://%s%s %s' % (ud.basecmd, ud.host, ud.path, ud.localpath)
|
||||
bb.fetch2.check_network_access(d, cmd, ud.url)
|
||||
|
||||
progresshandler = S3ProgressHandler(d)
|
||||
runfetchcmd(cmd, d, False, log=progresshandler)
|
||||
runfetchcmd(cmd, d)
|
||||
|
||||
# Additional sanity checks copied from the wget class (although there
|
||||
# are no known issues which mean these are required, treat the aws cli
|
||||
|
||||
@@ -57,12 +57,7 @@ class Svn(FetchMethod):
|
||||
if 'rev' in ud.parm:
|
||||
ud.revision = ud.parm['rev']
|
||||
|
||||
# Whether to use the @REV peg-revision syntax in the svn command or not
|
||||
ud.pegrevision = True
|
||||
if 'nopegrevision' in ud.parm:
|
||||
ud.pegrevision = False
|
||||
|
||||
ud.localfile = d.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ["0", "1"][ud.pegrevision]))
|
||||
ud.localfile = d.expand('%s_%s_%s_%s_.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision))
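In the first localfile expansion above, indexing the two-element list with the boolean pegrevision flag turns it into a "0"/"1" marker in the tarball name; a minimal illustration (names are made up):

pegrevision = True
suffix = ["0", "1"][pegrevision]    # False selects "0", True selects "1"
localfile = "module_host_path_%s_%s.tar.gz" % ("12345", suffix)
print(localfile)                    # module_host_path_12345_1.tar.gz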
def _buildsvncommand(self, ud, d, command):
|
||||
"""
|
||||
@@ -91,7 +86,7 @@ class Svn(FetchMethod):
|
||||
if command == "info":
|
||||
svncmd = "%s info %s %s://%s/%s/" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module)
|
||||
elif command == "log1":
|
||||
svncmd = "%s log --limit 1 --quiet %s %s://%s/%s/" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module)
|
||||
svncmd = "%s log --limit 1 %s %s://%s/%s/" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module)
|
||||
else:
|
||||
suffix = ""
|
||||
|
||||
@@ -103,8 +98,7 @@ class Svn(FetchMethod):
|
||||
|
||||
if ud.revision:
|
||||
options.append("-r %s" % ud.revision)
|
||||
if ud.pegrevision:
|
||||
suffix = "@%s" % (ud.revision)
|
||||
suffix = "@%s" % (ud.revision)
|
||||
|
||||
if command == "fetch":
|
||||
transportuser = ud.parm.get("transportuser", "")
|
||||
@@ -122,7 +116,7 @@ class Svn(FetchMethod):
|
||||
def download(self, ud, d):
|
||||
"""Fetch url"""
|
||||
|
||||
logger.debug2("Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
|
||||
lf = bb.utils.lockfile(ud.svnlock)
|
||||
|
||||
@@ -135,7 +129,7 @@ class Svn(FetchMethod):
|
||||
runfetchcmd(ud.basecmd + " upgrade", d, workdir=ud.moddir)
|
||||
except FetchError:
|
||||
pass
|
||||
logger.debug("Running %s", svncmd)
|
||||
logger.debug(1, "Running %s", svncmd)
|
||||
bb.fetch2.check_network_access(d, svncmd, ud.url)
|
||||
runfetchcmd(svncmd, d, workdir=ud.moddir)
|
||||
else:
|
||||
@@ -143,7 +137,7 @@ class Svn(FetchMethod):
|
||||
logger.info("Fetch " + ud.url)
|
||||
# check out sources there
|
||||
bb.utils.mkdirhier(ud.pkgdir)
|
||||
logger.debug("Running %s", svncmd)
|
||||
logger.debug(1, "Running %s", svncmd)
|
||||
bb.fetch2.check_network_access(d, svncmd, ud.url)
|
||||
runfetchcmd(svncmd, d, workdir=ud.pkgdir)
|
||||
|
||||
|
||||
@@ -53,23 +53,11 @@ class WgetProgressHandler(bb.progress.LineFilterProgressHandler):
|
||||
|
||||
class Wget(FetchMethod):
|
||||
"""Class to fetch urls via 'wget'"""
|
||||
|
||||
# CDNs like CloudFlare may do a 'browser integrity test' which can fail
|
||||
# with the standard wget/urllib User-Agent, so pretend to be a modern
|
||||
# browser.
|
||||
user_agent = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:84.0) Gecko/20100101 Firefox/84.0"
|
||||
|
||||
def check_certs(self, d):
|
||||
"""
|
||||
Should certificates be checked?
|
||||
"""
|
||||
return (d.getVar("BB_CHECK_SSL_CERTS") or "1") != "0"
def supports(self, ud, d):
|
||||
"""
|
||||
Check to see if a given url can be fetched with wget.
|
||||
"""
|
||||
return ud.type in ['http', 'https', 'ftp', 'ftps']
|
||||
return ud.type in ['http', 'https', 'ftp']
|
||||
|
||||
def recommends_checksum(self, urldata):
|
||||
return True
|
||||
@@ -88,16 +76,13 @@ class Wget(FetchMethod):
|
||||
if not ud.localfile:
|
||||
ud.localfile = d.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", "."))
|
||||
|
||||
self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30 --passive-ftp"
|
||||
|
||||
if not self.check_certs(d):
|
||||
self.basecmd += " --no-check-certificate"
|
||||
self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30 --passive-ftp --no-check-certificate"
|
||||
|
||||
def _runwget(self, ud, d, command, quiet, workdir=None):
|
||||
|
||||
progresshandler = WgetProgressHandler(d)
|
||||
|
||||
logger.debug2("Fetching %s using command '%s'" % (ud.url, command))
|
||||
logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command))
|
||||
bb.fetch2.check_network_access(d, command, ud.url)
|
||||
runfetchcmd(command + ' --progress=dot -v', d, quiet, log=progresshandler, workdir=workdir)
|
||||
|
||||
@@ -291,90 +276,56 @@ class Wget(FetchMethod):
|
||||
newreq = urllib.request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, newurl)
|
||||
newreq.get_method = req.get_method
|
||||
return newreq
|
||||
exported_proxies = export_proxies(d)
|
||||
|
||||
# We need to update the environment here as both the proxy and HTTPS
|
||||
# handlers need variables set. The proxy needs http_proxy and friends to
|
||||
# be set, and HTTPSHandler ends up calling into openssl to load the
|
||||
# certificates. In buildtools configurations this will be looking at the
|
||||
# wrong place for certificates by default: we set SSL_CERT_FILE to the
|
||||
# right location in the buildtools environment script but as BitBake
|
||||
# prunes the environment this is lost. When binaries are executed
|
||||
# runfetchcmd ensures these values are in the environment, but this is
|
||||
# pure Python so we need to update the environment.
|
||||
#
|
||||
# Avoid trampling the environment too much by using bb.utils.environment
|
||||
# to scope the changes to the build_opener request, which is when the
|
||||
# environment lookups happen.
|
||||
newenv = {}
|
||||
for name in bb.fetch2.FETCH_EXPORT_VARS:
|
||||
value = d.getVar(name)
|
||||
if not value:
|
||||
origenv = d.getVar("BB_ORIGENV")
|
||||
if origenv:
|
||||
value = origenv.getVar(name)
|
||||
if value:
|
||||
newenv[name] = value
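bb.utils.environment() is then used to scope these exports to the opener call below; a plausible minimal equivalent is sketched here as an assumption about its behaviour, not its actual implementation.

import os
from contextlib import contextmanager

@contextmanager
def scoped_environment(**overrides):
    # Export the given variables (e.g. http_proxy, SSL_CERT_FILE) for the
    # duration of the block, then restore whatever was set before.
    saved = {name: os.environ.get(name) for name in overrides}
    os.environ.update(overrides)
    try:
        yield
    finally:
        for name, value in saved.items():
            if value is None:
                os.environ.pop(name, None)
            else:
                os.environ[name] = value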
handlers = [FixedHTTPRedirectHandler, HTTPMethodFallback]
|
||||
if exported_proxies:
|
||||
handlers.append(urllib.request.ProxyHandler())
|
||||
handlers.append(CacheHTTPHandler())
|
||||
# Since Python 2.7.9 ssl cert validation is enabled by default
|
||||
# see PEP-0476, this causes verification errors on some https servers
|
||||
# so disable by default.
|
||||
import ssl
|
||||
if hasattr(ssl, '_create_unverified_context'):
|
||||
handlers.append(urllib.request.HTTPSHandler(context=ssl._create_unverified_context()))
|
||||
opener = urllib.request.build_opener(*handlers)
|
||||
|
||||
with bb.utils.environment(**newenv):
|
||||
import ssl
|
||||
try:
|
||||
uri = ud.url.split(";")[0]
|
||||
r = urllib.request.Request(uri)
|
||||
r.get_method = lambda: "HEAD"
|
||||
# Some servers (FusionForge, as used on Alioth) require that the
|
||||
# optional Accept header is set.
|
||||
r.add_header("Accept", "*/*")
|
||||
r.add_header("User-Agent", "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12")
|
||||
def add_basic_auth(login_str, request):
|
||||
'''Adds Basic auth to http request, pass in login:password as string'''
|
||||
import base64
|
||||
encodeuser = base64.b64encode(login_str.encode('utf-8')).decode("utf-8")
|
||||
authheader = "Basic %s" % encodeuser
|
||||
r.add_header("Authorization", authheader)
|
||||
|
||||
if self.check_certs(d):
|
||||
context = ssl.create_default_context()
|
||||
else:
|
||||
context = ssl._create_unverified_context()
|
||||
|
||||
handlers = [FixedHTTPRedirectHandler,
|
||||
HTTPMethodFallback,
|
||||
urllib.request.ProxyHandler(),
|
||||
CacheHTTPHandler(),
|
||||
urllib.request.HTTPSHandler(context=context)]
|
||||
opener = urllib.request.build_opener(*handlers)
|
||||
if ud.user and ud.pswd:
|
||||
add_basic_auth(ud.user + ':' + ud.pswd, r)
|
||||
|
||||
try:
|
||||
uri = ud.url.split(";")[0]
|
||||
r = urllib.request.Request(uri)
|
||||
r.get_method = lambda: "HEAD"
|
||||
# Some servers (FusionForge, as used on Alioth) require that the
|
||||
# optional Accept header is set.
|
||||
r.add_header("Accept", "*/*")
|
||||
r.add_header("User-Agent", self.user_agent)
|
||||
def add_basic_auth(login_str, request):
|
||||
'''Adds Basic auth to http request, pass in login:password as string'''
|
||||
import base64
|
||||
encodeuser = base64.b64encode(login_str.encode('utf-8')).decode("utf-8")
|
||||
authheader = "Basic %s" % encodeuser
|
||||
r.add_header("Authorization", authheader)
|
||||
|
||||
if ud.user and ud.pswd:
|
||||
add_basic_auth(ud.user + ':' + ud.pswd, r)
|
||||
|
||||
try:
|
||||
import netrc
|
||||
n = netrc.netrc()
|
||||
login, unused, password = n.authenticators(urllib.parse.urlparse(uri).hostname)
|
||||
add_basic_auth("%s:%s" % (login, password), r)
|
||||
except (TypeError, ImportError, IOError, netrc.NetrcParseError):
|
||||
pass
|
||||
|
||||
with opener.open(r, timeout=30) as response:
|
||||
pass
|
||||
except urllib.error.URLError as e:
|
||||
if try_again:
|
||||
logger.debug2("checkstatus: trying again")
|
||||
return self.checkstatus(fetch, ud, d, False)
|
||||
else:
|
||||
# debug for now to avoid spamming the logs in e.g. remote sstate searches
|
||||
logger.debug2("checkstatus() urlopen failed: %s" % e)
|
||||
return False
|
||||
except ConnectionResetError as e:
|
||||
if try_again:
|
||||
logger.debug2("checkstatus: trying again")
|
||||
return self.checkstatus(fetch, ud, d, False)
|
||||
else:
|
||||
# debug for now to avoid spamming the logs in e.g. remote sstate searches
|
||||
logger.debug2("checkstatus() urlopen failed: %s" % e)
|
||||
return False
|
||||
import netrc
|
||||
n = netrc.netrc()
|
||||
login, unused, password = n.authenticators(urllib.parse.urlparse(uri).hostname)
|
||||
add_basic_auth("%s:%s" % (login, password), r)
|
||||
except (TypeError, ImportError, IOError, netrc.NetrcParseError):
|
||||
pass
|
||||
|
||||
with opener.open(r) as response:
|
||||
pass
|
||||
except urllib.error.URLError as e:
|
||||
if try_again:
|
||||
logger.debug(2, "checkstatus: trying again")
|
||||
return self.checkstatus(fetch, ud, d, False)
|
||||
else:
|
||||
# debug for now to avoid spamming the logs in e.g. remote sstate searches
|
||||
logger.debug(2, "checkstatus() urlopen failed: %s" % e)
|
||||
return False
|
||||
return True
|
||||
|
||||
def _parse_path(self, regex, s):
|
||||
@@ -450,8 +401,9 @@ class Wget(FetchMethod):
|
||||
"""
|
||||
f = tempfile.NamedTemporaryFile()
|
||||
with tempfile.TemporaryDirectory(prefix="wget-index-") as workdir, tempfile.NamedTemporaryFile(dir=workdir, prefix="wget-listing-") as f:
|
||||
agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12"
|
||||
fetchcmd = self.basecmd
|
||||
fetchcmd += " -O " + f.name + " --user-agent='" + self.user_agent + "' '" + uri + "'"
|
||||
fetchcmd += " -O " + f.name + " --user-agent='" + agent + "' '" + uri + "'"
|
||||
try:
|
||||
self._runwget(ud, d, fetchcmd, True, workdir=workdir)
|
||||
fetchresult = f.read()
|
||||
@@ -507,7 +459,7 @@ class Wget(FetchMethod):
|
||||
version_dir = ['', '', '']
|
||||
version = ['', '', '']
|
||||
|
||||
dirver_regex = re.compile(r"(?P<pfx>\D*)(?P<ver>(\d+[\.\-_])*(\d+))")
|
||||
dirver_regex = re.compile(r"(?P<pfx>\D*)(?P<ver>(\d+[\.\-_])+(\d+))")
|
||||
s = dirver_regex.search(dirver)
|
||||
if s:
|
||||
version_dir[1] = s.group('ver')
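Of the two dirver_regex variants above, the one using * on the dotted group also accepts a single-component directory version, while the + form needs at least two components; a quick check:

import re

star_regex = re.compile(r"(?P<pfx>\D*)(?P<ver>(\d+[\.\-_])*(\d+))")
plus_regex = re.compile(r"(?P<pfx>\D*)(?P<ver>(\d+[\.\-_])+(\d+))")

print(star_regex.search("v2.1").group('ver'))   # 2.1  (both variants match this)
print(star_regex.search("v2").group('ver'))     # 2    (only the * variant matches)
print(plus_regex.search("v2"))                  # None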
@@ -112,181 +112,185 @@ def _showwarning(message, category, filename, lineno, file=None, line=None):
|
||||
warnlog.warning(s)
|
||||
|
||||
warnings.showwarning = _showwarning
|
||||
warnings.filterwarnings("ignore")
|
||||
warnings.filterwarnings("default", module="(<string>$|(oe|bb)\.)")
|
||||
warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
|
||||
warnings.filterwarnings("ignore", category=ImportWarning)
|
||||
warnings.filterwarnings("ignore", category=DeprecationWarning, module="<string>$")
|
||||
warnings.filterwarnings("ignore", message="With-statements now directly support multiple context managers")
|
||||
|
||||
def create_bitbake_parser():
|
||||
parser = optparse.OptionParser(
|
||||
formatter=BitbakeHelpFormatter(),
|
||||
version="BitBake Build Tool Core version %s" % bb.__version__,
|
||||
usage="""%prog [options] [recipename/target recipe:do_task ...]
|
||||
class BitBakeConfigParameters(cookerdata.ConfigParameters):
|
||||
|
||||
def parseCommandLine(self, argv=sys.argv):
|
||||
parser = optparse.OptionParser(
|
||||
formatter=BitbakeHelpFormatter(),
|
||||
version="BitBake Build Tool Core version %s" % bb.__version__,
|
||||
usage="""%prog [options] [recipename/target recipe:do_task ...]
|
||||
|
||||
Executes the specified task (default is 'build') for a given set of target recipes (.bb files).
|
||||
It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH which
|
||||
will provide the layer, BBFILES and other configuration information.""")
|
||||
|
||||
parser.add_option("-b", "--buildfile", action="store", dest="buildfile", default=None,
|
||||
help="Execute tasks from a specific .bb recipe directly. WARNING: Does "
|
||||
"not handle any dependencies from other recipes.")
|
||||
parser.add_option("-b", "--buildfile", action="store", dest="buildfile", default=None,
|
||||
help="Execute tasks from a specific .bb recipe directly. WARNING: Does "
|
||||
"not handle any dependencies from other recipes.")
|
||||
|
||||
parser.add_option("-k", "--continue", action="store_false", dest="abort", default=True,
|
||||
help="Continue as much as possible after an error. While the target that "
|
||||
"failed and anything depending on it cannot be built, as much as "
|
||||
"possible will be built before stopping.")
|
||||
parser.add_option("-k", "--continue", action="store_false", dest="abort", default=True,
|
||||
help="Continue as much as possible after an error. While the target that "
|
||||
"failed and anything depending on it cannot be built, as much as "
|
||||
"possible will be built before stopping.")
|
||||
|
||||
parser.add_option("-f", "--force", action="store_true", dest="force", default=False,
|
||||
help="Force the specified targets/task to run (invalidating any "
|
||||
"existing stamp file).")
|
||||
parser.add_option("-f", "--force", action="store_true", dest="force", default=False,
|
||||
help="Force the specified targets/task to run (invalidating any "
|
||||
"existing stamp file).")
|
||||
|
||||
parser.add_option("-c", "--cmd", action="store", dest="cmd",
|
||||
help="Specify the task to execute. The exact options available "
|
||||
"depend on the metadata. Some examples might be 'compile'"
|
||||
" or 'populate_sysroot' or 'listtasks' may give a list of "
|
||||
"the tasks available.")
|
||||
parser.add_option("-c", "--cmd", action="store", dest="cmd",
|
||||
help="Specify the task to execute. The exact options available "
|
||||
"depend on the metadata. Some examples might be 'compile'"
|
||||
" or 'populate_sysroot' or 'listtasks' may give a list of "
|
||||
"the tasks available.")
|
||||
|
||||
parser.add_option("-C", "--clear-stamp", action="store", dest="invalidate_stamp",
|
||||
help="Invalidate the stamp for the specified task such as 'compile' "
|
||||
"and then run the default task for the specified target(s).")
|
||||
parser.add_option("-C", "--clear-stamp", action="store", dest="invalidate_stamp",
|
||||
help="Invalidate the stamp for the specified task such as 'compile' "
|
||||
"and then run the default task for the specified target(s).")
|
||||
|
||||
parser.add_option("-r", "--read", action="append", dest="prefile", default=[],
|
||||
help="Read the specified file before bitbake.conf.")
|
||||
parser.add_option("-r", "--read", action="append", dest="prefile", default=[],
|
||||
help="Read the specified file before bitbake.conf.")
|
||||
|
||||
parser.add_option("-R", "--postread", action="append", dest="postfile", default=[],
|
||||
help="Read the specified file after bitbake.conf.")
|
||||
parser.add_option("-R", "--postread", action="append", dest="postfile", default=[],
|
||||
help="Read the specified file after bitbake.conf.")
|
||||
|
||||
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
|
||||
help="Enable tracing of shell tasks (with 'set -x'). "
|
||||
"Also print bb.note(...) messages to stdout (in "
|
||||
"addition to writing them to ${T}/log.do_<task>).")
|
||||
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
|
||||
help="Enable tracing of shell tasks (with 'set -x'). "
|
||||
"Also print bb.note(...) messages to stdout (in "
|
||||
"addition to writing them to ${T}/log.do_<task>).")
|
||||
|
||||
parser.add_option("-D", "--debug", action="count", dest="debug", default=0,
|
||||
help="Increase the debug level. You can specify this "
|
||||
"more than once. -D sets the debug level to 1, "
|
||||
"where only bb.debug(1, ...) messages are printed "
|
||||
"to stdout; -DD sets the debug level to 2, where "
|
||||
"both bb.debug(1, ...) and bb.debug(2, ...) "
|
||||
"messages are printed; etc. Without -D, no debug "
|
||||
"messages are printed. Note that -D only affects "
|
||||
"output to stdout. All debug messages are written "
|
||||
"to ${T}/log.do_taskname, regardless of the debug "
|
||||
"level.")
|
||||
parser.add_option("-D", "--debug", action="count", dest="debug", default=0,
|
||||
help="Increase the debug level. You can specify this "
|
||||
"more than once. -D sets the debug level to 1, "
|
||||
"where only bb.debug(1, ...) messages are printed "
|
||||
"to stdout; -DD sets the debug level to 2, where "
|
||||
"both bb.debug(1, ...) and bb.debug(2, ...) "
|
||||
"messages are printed; etc. Without -D, no debug "
|
||||
"messages are printed. Note that -D only affects "
|
||||
"output to stdout. All debug messages are written "
|
||||
"to ${T}/log.do_taskname, regardless of the debug "
|
||||
"level.")
|
||||
|
||||
parser.add_option("-q", "--quiet", action="count", dest="quiet", default=0,
|
||||
help="Output less log message data to the terminal. You can specify this more than once.")
|
||||
parser.add_option("-q", "--quiet", action="count", dest="quiet", default=0,
|
||||
help="Output less log message data to the terminal. You can specify this more than once.")
|
||||
|
||||
parser.add_option("-n", "--dry-run", action="store_true", dest="dry_run", default=False,
|
||||
help="Don't execute, just go through the motions.")
|
||||
parser.add_option("-n", "--dry-run", action="store_true", dest="dry_run", default=False,
|
||||
help="Don't execute, just go through the motions.")
|
||||
|
||||
parser.add_option("-S", "--dump-signatures", action="append", dest="dump_signatures",
|
||||
default=[], metavar="SIGNATURE_HANDLER",
|
||||
help="Dump out the signature construction information, with no task "
|
||||
"execution. The SIGNATURE_HANDLER parameter is passed to the "
|
||||
"handler. Two common values are none and printdiff but the handler "
|
||||
"may define more/less. none means only dump the signature, printdiff"
|
||||
" means compare the dumped signature with the cached one.")
|
||||
parser.add_option("-S", "--dump-signatures", action="append", dest="dump_signatures",
|
||||
default=[], metavar="SIGNATURE_HANDLER",
|
||||
help="Dump out the signature construction information, with no task "
|
||||
"execution. The SIGNATURE_HANDLER parameter is passed to the "
|
||||
"handler. Two common values are none and printdiff but the handler "
|
||||
"may define more/less. none means only dump the signature, printdiff"
|
||||
" means compare the dumped signature with the cached one.")
|
||||
|
||||
parser.add_option("-p", "--parse-only", action="store_true",
|
||||
dest="parse_only", default=False,
|
||||
help="Quit after parsing the BB recipes.")
|
||||
parser.add_option("-p", "--parse-only", action="store_true",
|
||||
dest="parse_only", default=False,
|
||||
help="Quit after parsing the BB recipes.")
|
||||
|
||||
parser.add_option("-s", "--show-versions", action="store_true",
|
||||
dest="show_versions", default=False,
|
||||
help="Show current and preferred versions of all recipes.")
|
||||
parser.add_option("-s", "--show-versions", action="store_true",
|
||||
dest="show_versions", default=False,
|
||||
help="Show current and preferred versions of all recipes.")
|
||||
|
||||
parser.add_option("-e", "--environment", action="store_true",
|
||||
dest="show_environment", default=False,
|
||||
help="Show the global or per-recipe environment complete with information"
|
||||
" about where variables were set/changed.")
|
||||
parser.add_option("-e", "--environment", action="store_true",
|
||||
dest="show_environment", default=False,
|
||||
help="Show the global or per-recipe environment complete with information"
|
||||
" about where variables were set/changed.")
|
||||
|
||||
parser.add_option("-g", "--graphviz", action="store_true", dest="dot_graph", default=False,
|
||||
help="Save dependency tree information for the specified "
|
||||
"targets in the dot syntax.")
|
||||
parser.add_option("-g", "--graphviz", action="store_true", dest="dot_graph", default=False,
|
||||
help="Save dependency tree information for the specified "
|
||||
"targets in the dot syntax.")
|
||||
|
||||
parser.add_option("-I", "--ignore-deps", action="append",
|
||||
dest="extra_assume_provided", default=[],
|
||||
help="Assume these dependencies don't exist and are already provided "
|
||||
"(equivalent to ASSUME_PROVIDED). Useful to make dependency "
|
||||
"graphs more appealing")
|
||||
parser.add_option("-I", "--ignore-deps", action="append",
|
||||
dest="extra_assume_provided", default=[],
|
||||
help="Assume these dependencies don't exist and are already provided "
|
||||
"(equivalent to ASSUME_PROVIDED). Useful to make dependency "
|
||||
"graphs more appealing")
|
||||
|
||||
parser.add_option("-l", "--log-domains", action="append", dest="debug_domains", default=[],
|
||||
help="Show debug logging for the specified logging domains")
|
||||
parser.add_option("-l", "--log-domains", action="append", dest="debug_domains", default=[],
|
||||
help="Show debug logging for the specified logging domains")
|
||||
|
||||
parser.add_option("-P", "--profile", action="store_true", dest="profile", default=False,
|
||||
help="Profile the command and save reports.")
|
||||
parser.add_option("-P", "--profile", action="store_true", dest="profile", default=False,
|
||||
help="Profile the command and save reports.")
|
||||
|
||||
# @CHOICES@ is substituted out by BitbakeHelpFormatter above
|
||||
parser.add_option("-u", "--ui", action="store", dest="ui",
|
||||
default=os.environ.get('BITBAKE_UI', 'knotty'),
|
||||
help="The user interface to use (@CHOICES@ - default %default).")
|
||||
# @CHOICES@ is substituted out by BitbakeHelpFormatter above
|
||||
parser.add_option("-u", "--ui", action="store", dest="ui",
|
||||
default=os.environ.get('BITBAKE_UI', 'knotty'),
|
||||
help="The user interface to use (@CHOICES@ - default %default).")
|
||||
|
||||
parser.add_option("", "--token", action="store", dest="xmlrpctoken",
|
||||
default=os.environ.get("BBTOKEN"),
|
||||
help="Specify the connection token to be used when connecting "
|
||||
"to a remote server.")
|
||||
parser.add_option("", "--token", action="store", dest="xmlrpctoken",
|
||||
default=os.environ.get("BBTOKEN"),
|
||||
help="Specify the connection token to be used when connecting "
|
||||
"to a remote server.")
|
||||
|
||||
parser.add_option("", "--revisions-changed", action="store_true",
|
||||
dest="revisions_changed", default=False,
|
||||
help="Set the exit code depending on whether upstream floating "
|
||||
"revisions have changed or not.")
|
||||
parser.add_option("", "--revisions-changed", action="store_true",
|
||||
dest="revisions_changed", default=False,
|
||||
help="Set the exit code depending on whether upstream floating "
|
||||
"revisions have changed or not.")
|
||||
|
||||
parser.add_option("", "--server-only", action="store_true",
|
||||
dest="server_only", default=False,
|
||||
help="Run bitbake without a UI, only starting a server "
|
||||
"(cooker) process.")
|
||||
parser.add_option("", "--server-only", action="store_true",
|
||||
dest="server_only", default=False,
|
||||
help="Run bitbake without a UI, only starting a server "
|
||||
"(cooker) process.")
|
||||
|
||||
parser.add_option("-B", "--bind", action="store", dest="bind", default=False,
|
||||
help="The name/address for the bitbake xmlrpc server to bind to.")
|
||||
parser.add_option("-B", "--bind", action="store", dest="bind", default=False,
|
||||
help="The name/address for the bitbake xmlrpc server to bind to.")
|
||||
|
||||
parser.add_option("-T", "--idle-timeout", type=float, dest="server_timeout",
|
||||
default=os.getenv("BB_SERVER_TIMEOUT"),
|
||||
help="Set timeout to unload bitbake server due to inactivity, "
|
||||
"set to -1 means no unload, "
|
||||
"default: Environment variable BB_SERVER_TIMEOUT.")
|
||||
parser.add_option("-T", "--idle-timeout", type=float, dest="server_timeout",
|
||||
default=os.getenv("BB_SERVER_TIMEOUT"),
|
||||
help="Set timeout to unload bitbake server due to inactivity, "
|
||||
"set to -1 means no unload, "
|
||||
"default: Environment variable BB_SERVER_TIMEOUT.")
|
||||
|
||||
parser.add_option("", "--no-setscene", action="store_true",
|
||||
dest="nosetscene", default=False,
|
||||
help="Do not run any setscene tasks. sstate will be ignored and "
|
||||
"everything needed, built.")
|
||||
parser.add_option("", "--no-setscene", action="store_true",
|
||||
dest="nosetscene", default=False,
|
||||
help="Do not run any setscene tasks. sstate will be ignored and "
|
||||
"everything needed, built.")
|
||||
|
||||
parser.add_option("", "--skip-setscene", action="store_true",
|
||||
dest="skipsetscene", default=False,
|
||||
help="Skip setscene tasks if they would be executed. Tasks previously "
|
||||
"restored from sstate will be kept, unlike --no-setscene")
|
||||
parser.add_option("", "--skip-setscene", action="store_true",
|
||||
dest="skipsetscene", default=False,
|
||||
help="Skip setscene tasks if they would be executed. Tasks previously "
|
||||
"restored from sstate will be kept, unlike --no-setscene")
|
||||
|
||||
parser.add_option("", "--setscene-only", action="store_true",
|
||||
dest="setsceneonly", default=False,
|
||||
help="Only run setscene tasks, don't run any real tasks.")
|
||||
parser.add_option("", "--setscene-only", action="store_true",
|
||||
dest="setsceneonly", default=False,
|
||||
help="Only run setscene tasks, don't run any real tasks.")
|
||||
|
||||
parser.add_option("", "--remote-server", action="store", dest="remote_server",
|
||||
default=os.environ.get("BBSERVER"),
|
||||
help="Connect to the specified server.")
|
||||
parser.add_option("", "--remote-server", action="store", dest="remote_server",
|
||||
default=os.environ.get("BBSERVER"),
|
||||
help="Connect to the specified server.")
|
||||
|
||||
parser.add_option("-m", "--kill-server", action="store_true",
|
||||
dest="kill_server", default=False,
|
||||
help="Terminate any running bitbake server.")
|
||||
parser.add_option("-m", "--kill-server", action="store_true",
|
||||
dest="kill_server", default=False,
|
||||
help="Terminate any running bitbake server.")
|
||||
|
||||
parser.add_option("", "--observe-only", action="store_true",
|
||||
dest="observe_only", default=False,
|
||||
help="Connect to a server as an observing-only client.")
|
||||
parser.add_option("", "--observe-only", action="store_true",
|
||||
dest="observe_only", default=False,
|
||||
help="Connect to a server as an observing-only client.")
|
||||
|
||||
parser.add_option("", "--status-only", action="store_true",
|
||||
dest="status_only", default=False,
|
||||
help="Check the status of the remote bitbake server.")
|
||||
parser.add_option("", "--status-only", action="store_true",
|
||||
dest="status_only", default=False,
|
||||
help="Check the status of the remote bitbake server.")
|
||||
|
||||
parser.add_option("-w", "--write-log", action="store", dest="writeeventlog",
|
||||
default=os.environ.get("BBEVENTLOG"),
|
||||
help="Writes the event log of the build to a bitbake event json file. "
|
||||
"Use '' (empty string) to assign the name automatically.")
|
||||
parser.add_option("-w", "--write-log", action="store", dest="writeeventlog",
|
||||
default=os.environ.get("BBEVENTLOG"),
|
||||
help="Writes the event log of the build to a bitbake event json file. "
|
||||
"Use '' (empty string) to assign the name automatically.")
|
||||
|
||||
parser.add_option("", "--runall", action="append", dest="runall",
|
||||
help="Run the specified task for any recipe in the taskgraph of the specified target (even if it wouldn't otherwise have run).")
|
||||
parser.add_option("", "--runall", action="append", dest="runall",
|
||||
help="Run the specified task for any recipe in the taskgraph of the specified target (even if it wouldn't otherwise have run).")
|
||||
|
||||
parser.add_option("", "--runonly", action="append", dest="runonly",
|
||||
help="Run only the specified task within the taskgraph of the specified targets (and any task dependencies those tasks may have).")
|
||||
return parser
|
||||
parser.add_option("", "--runonly", action="append", dest="runonly",
|
||||
help="Run only the specified task within the taskgraph of the specified targets (and any task dependencies those tasks may have).")
|
||||
|
||||
|
||||
class BitBakeConfigParameters(cookerdata.ConfigParameters):
|
||||
def parseCommandLine(self, argv=sys.argv):
|
||||
parser = create_bitbake_parser()
|
||||
options, targets = parser.parse_args(argv)
|
||||
|
||||
if options.quiet and options.verbose:
|
||||
@@ -462,7 +466,7 @@ def setup_bitbake(configParams, extrafeatures=None):
|
||||
logger.info("Retrying server connection (#%d)..." % tryno)
|
||||
else:
|
||||
logger.info("Retrying server connection (#%d)... (%s)" % (tryno, traceback.format_exc()))
|
||||
|
||||
|
||||
if not retries:
|
||||
bb.fatal("Unable to connect to bitbake server, or start one (server startup failures would be in bitbake-cookerdaemon.log).")
|
||||
bb.event.print_ui_queue()
|
||||
|
||||
@@ -59,7 +59,7 @@ def getMountedDev(path):
|
||||
pass
|
||||
return None
|
||||
|
||||
def getDiskData(BBDirs):
|
||||
def getDiskData(BBDirs, configuration):
|
||||
|
||||
"""Prepare disk data for disk space monitor"""
|
||||
|
||||
@@ -168,7 +168,7 @@ class diskMonitor:
|
||||
|
||||
BBDirs = configuration.getVar("BB_DISKMON_DIRS") or None
|
||||
if BBDirs:
|
||||
self.devDict = getDiskData(BBDirs)
|
||||
self.devDict = getDiskData(BBDirs, configuration)
|
||||
if self.devDict:
|
||||
self.spaceInterval, self.inodeInterval = getInterval(configuration)
|
||||
if self.spaceInterval and self.inodeInterval:
|
||||
|
||||
@@ -278,7 +278,7 @@ def setLoggingConfig(defaultconfig, userconfigfile=None):
|
||||
with open(os.path.normpath(userconfigfile), 'r') as f:
|
||||
if userconfigfile.endswith('.yml') or userconfigfile.endswith('.yaml'):
|
||||
import yaml
|
||||
userconfig = yaml.safe_load(f)
|
||||
userconfig = yaml.load(f)
|
||||
elif userconfigfile.endswith('.json') or userconfigfile.endswith('.cfg'):
|
||||
import json
|
||||
userconfig = json.load(f)
|
||||
|
||||
@@ -71,7 +71,7 @@ def update_mtime(f):
|
||||
|
||||
def update_cache(f):
|
||||
if f in __mtime_cache:
|
||||
logger.debug("Updating mtime cache for %s" % f)
|
||||
logger.debug(1, "Updating mtime cache for %s" % f)
|
||||
update_mtime(f)
|
||||
|
||||
def clear_cache():
|
||||
|
||||
@@ -34,7 +34,7 @@ class IncludeNode(AstNode):
|
||||
Include the file and evaluate the statements
|
||||
"""
|
||||
s = data.expand(self.what_file)
|
||||
logger.debug2("CONF %s:%s: including %s", self.filename, self.lineno, s)
|
||||
logger.debug(2, "CONF %s:%s: including %s", self.filename, self.lineno, s)
|
||||
|
||||
# TODO: Cache those includes... maybe not here though
|
||||
if self.force:
|
||||
@@ -145,7 +145,7 @@ class DataNode(AstNode):
|
||||
data.setVar(key, val, parsing=True, **loginfo)
|
||||
|
||||
class MethodNode(AstNode):
|
||||
tr_tbl = str.maketrans('/.+-@%&~', '________')
|
||||
tr_tbl = str.maketrans('/.+-@%&', '_______')
|
||||
|
||||
def __init__(self, filename, lineno, func_name, body, python, fakeroot):
|
||||
AstNode.__init__(self, filename, lineno)
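The tr_tbl tables above map characters that are legal in recipe and override syntax but not in Python identifiers to underscores before the function body is compiled; for example (the function name is hypothetical):

tr_tbl = str.maketrans('/.+-@%&~', '________')
print("frob-1.0_do_compile".translate(tr_tbl))   # frob_1_0_do_compile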
@@ -335,7 +335,7 @@ def finalize(fn, d, variant = None):
|
||||
if not handlerfn:
|
||||
bb.fatal("Undefined event handler function '%s'" % var)
|
||||
handlerln = int(d.getVarFlag(var, "lineno", False))
|
||||
bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln, data=d)
|
||||
bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)
|
||||
|
||||
bb.event.fire(bb.event.RecipePreFinalise(fn), d)
|
||||
|
||||
@@ -376,7 +376,7 @@ def _create_variants(datastores, names, function, onlyfinalise):
|
||||
def multi_finalize(fn, d):
|
||||
appends = (d.getVar("__BBAPPEND") or "").split()
|
||||
for append in appends:
|
||||
logger.debug("Appending .bbappend file %s to %s", append, fn)
|
||||
logger.debug(1, "Appending .bbappend file %s to %s", append, fn)
|
||||
bb.parse.BBHandler.handle(append, d, True)
|
||||
|
||||
onlyfinalise = d.getVar("__ONLYFINALISE", False)
|
||||
|
||||
@@ -13,13 +13,16 @@
|
||||
#
|
||||
|
||||
import re, bb, os
|
||||
import bb.build, bb.utils, bb.data_smart
|
||||
import bb.build, bb.utils
|
||||
|
||||
from . import ConfHandler
|
||||
from .. import resolve_file, ast, logger, ParseError
|
||||
from .ConfHandler import include, init
|
||||
|
||||
__func_start_regexp__ = re.compile(r"(((?P<py>python(?=(\s|\()))|(?P<fr>fakeroot(?=\s)))\s*)*(?P<func>[\w\.\-\+\{\}\$:]+)?\s*\(\s*\)\s*{$" )
|
||||
# For compatibility
|
||||
bb.deprecate_import(__name__, "bb.parse", ["vars_from_file"])
|
||||
|
||||
__func_start_regexp__ = re.compile(r"(((?P<py>python)|(?P<fr>fakeroot))\s*)*(?P<func>[\w\.\-\+\{\}\$]+)?\s*\(\s*\)\s*{$" )
|
||||
__inherit_regexp__ = re.compile(r"inherit\s+(.+)" )
|
||||
__export_func_regexp__ = re.compile(r"EXPORT_FUNCTIONS\s+(.+)" )
|
||||
__addtask_regexp__ = re.compile(r"addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
|
||||
@@ -57,7 +60,7 @@ def inherit(files, fn, lineno, d):
|
||||
file = abs_fn
|
||||
|
||||
if not file in __inherit_cache:
|
||||
logger.debug("Inheriting %s (from %s:%d)" % (file, fn, lineno))
|
||||
logger.debug(1, "Inheriting %s (from %s:%d)" % (file, fn, lineno))
|
||||
__inherit_cache.append( file )
|
||||
d.setVar('__inherit_cache', __inherit_cache)
|
||||
include(fn, file, lineno, d, "inherit")
|
||||
@@ -230,10 +233,6 @@ def feeder(lineno, s, fn, root, statements, eof=False):
|
||||
if taskexpression.count(word) > 1:
|
||||
logger.warning("addtask contained multiple '%s' keywords, only one is supported" % word)
|
||||
|
||||
# Check and warn for a task with an expression as part of the task name
|
||||
for te in taskexpression:
|
||||
if any( ( "%s_" % keyword ) in te for keyword in bb.data_smart.__setvar_keyword__ ):
|
||||
raise ParseError("Task name '%s' contains a keyword which is not recommended/supported.\nPlease rename the task not to include the keyword.\n%s" % (te, ("\n".join(map(str, bb.data_smart.__setvar_keyword__)))), fn)
|
||||
ast.handleAddTask(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ from bb.parse import ParseError, resolve_file, ast, logger, handle
|
||||
__config_regexp__ = re.compile( r"""
|
||||
^
|
||||
(?P<exp>export\s+)?
|
||||
(?P<var>[a-zA-Z0-9\-_+.${}/~:]+?)
|
||||
(?P<var>[a-zA-Z0-9\-_+.${}/~]+?)
|
||||
(\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?
|
||||
|
||||
\s* (
|
||||
@@ -95,7 +95,7 @@ def include_single_file(parentfn, fn, lineno, data, error_out):
|
||||
if exc.errno == errno.ENOENT:
|
||||
if error_out:
|
||||
raise ParseError("Could not %s file %s" % (error_out, fn), parentfn, lineno)
|
||||
logger.debug2("CONF file '%s' not found", fn)
|
||||
logger.debug(2, "CONF file '%s' not found", fn)
|
||||
else:
|
||||
if error_out:
|
||||
raise ParseError("Could not %s file %s: %s" % (error_out, fn, exc.strerror), parentfn, lineno)
|
||||
|
||||
@@ -12,14 +12,14 @@ currently, providing a key/value store accessed by 'domain'.
|
||||
#
|
||||
|
||||
import collections
|
||||
import collections.abc
|
||||
import contextlib
|
||||
import functools
|
||||
import logging
|
||||
import os.path
|
||||
import sqlite3
|
||||
import sys
|
||||
from collections.abc import Mapping
|
||||
import warnings
|
||||
from collections import Mapping
|
||||
|
||||
sqlversion = sqlite3.sqlite_version_info
|
||||
if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
|
||||
@@ -29,7 +29,7 @@ if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
|
||||
logger = logging.getLogger("BitBake.PersistData")
|
||||
|
||||
@functools.total_ordering
|
||||
class SQLTable(collections.abc.MutableMapping):
|
||||
class SQLTable(collections.MutableMapping):
|
||||
class _Decorators(object):
|
||||
@staticmethod
|
||||
def retry(*, reconnect=True):
|
||||
@@ -238,6 +238,55 @@ class SQLTable(collections.abc.MutableMapping):
|
||||
def has_key(self, key):
|
||||
return key in self
|
||||
|
||||
|
||||
class PersistData(object):
|
||||
"""Deprecated representation of the bitbake persistent data store"""
|
||||
def __init__(self, d):
|
||||
warnings.warn("Use of PersistData is deprecated. Please use "
|
||||
"persist(domain, d) instead.",
|
||||
category=DeprecationWarning,
|
||||
stacklevel=2)
|
||||
|
||||
self.data = persist(d)
|
||||
logger.debug(1, "Using '%s' as the persistent data cache",
|
||||
self.data.filename)
|
||||
|
||||
def addDomain(self, domain):
|
||||
"""
|
||||
Add a domain (pending deprecation)
|
||||
"""
|
||||
return self.data[domain]
|
||||
|
||||
def delDomain(self, domain):
|
||||
"""
|
||||
Removes a domain and all the data it contains
|
||||
"""
|
||||
del self.data[domain]
|
||||
|
||||
def getKeyValues(self, domain):
|
||||
"""
|
||||
Return a list of key + value pairs for a domain
|
||||
"""
|
||||
return list(self.data[domain].items())
|
||||
|
||||
def getValue(self, domain, key):
|
||||
"""
|
||||
Return the value of a key for a domain
|
||||
"""
|
||||
return self.data[domain][key]
|
||||
|
||||
def setValue(self, domain, key, value):
|
||||
"""
|
||||
Sets the value of a key for a domain
|
||||
"""
|
||||
self.data[domain][key] = value
|
||||
|
||||
def delValue(self, domain, key):
|
||||
"""
|
||||
Deletes a key/value pair
|
||||
"""
|
||||
del self.data[domain][key]
|
||||
|
||||
def persist(domain, d):
|
||||
"""Convenience factory for SQLTable objects based upon metadata"""
|
||||
import bb.utils
|
||||
|
||||
@@ -60,7 +60,7 @@ class Popen(subprocess.Popen):
|
||||
"close_fds": True,
|
||||
"preexec_fn": subprocess_setup,
|
||||
"stdout": subprocess.PIPE,
|
||||
"stderr": subprocess.PIPE,
|
||||
"stderr": subprocess.STDOUT,
|
||||
"stdin": subprocess.PIPE,
|
||||
"shell": False,
|
||||
}
|
||||
@@ -181,8 +181,5 @@ def run(cmd, input=None, log=None, extrafiles=None, **options):
|
||||
stderr = stderr.decode("utf-8")
|
||||
|
||||
if pipe.returncode != 0:
|
||||
if log:
|
||||
# Don't duplicate the output in the exception if logging it
|
||||
raise ExecutionError(cmd, pipe.returncode, None, None)
|
||||
raise ExecutionError(cmd, pipe.returncode, stdout, stderr)
|
||||
return stdout, stderr
|
||||
|
||||
@@ -94,15 +94,12 @@ class LineFilterProgressHandler(ProgressHandler):
|
||||
while True:
|
||||
breakpos = self._linebuffer.find('\n') + 1
|
||||
if breakpos == 0:
|
||||
# for the case when the line with progress ends with only '\r'
|
||||
breakpos = self._linebuffer.find('\r') + 1
|
||||
if breakpos == 0:
|
||||
break
|
||||
break
|
||||
line = self._linebuffer[:breakpos]
|
||||
self._linebuffer = self._linebuffer[breakpos:]
|
||||
# Drop any line feeds and anything that precedes them
|
||||
lbreakpos = line.rfind('\r') + 1
|
||||
if lbreakpos and lbreakpos != breakpos:
|
||||
if lbreakpos:
|
||||
line = line[lbreakpos:]
|
||||
if self.writeline(filter_color(line)):
|
||||
super().write(line)
|
||||
|
||||
@@ -38,17 +38,16 @@ def findProviders(cfgData, dataCache, pkg_pn = None):
|
||||
localdata = data.createCopy(cfgData)
|
||||
bb.data.expandKeys(localdata)
|
||||
|
||||
required = {}
|
||||
preferred_versions = {}
|
||||
latest_versions = {}
|
||||
|
||||
for pn in pkg_pn:
|
||||
(last_ver, last_file, pref_ver, pref_file, req) = findBestProvider(pn, localdata, dataCache, pkg_pn)
|
||||
(last_ver, last_file, pref_ver, pref_file) = findBestProvider(pn, localdata, dataCache, pkg_pn)
|
||||
preferred_versions[pn] = (pref_ver, pref_file)
|
||||
latest_versions[pn] = (last_ver, last_file)
|
||||
required[pn] = req
|
||||
|
||||
return (latest_versions, preferred_versions, required)
|
||||
return (latest_versions, preferred_versions)
|
||||
|
||||
|
||||
def allProviders(dataCache):
|
||||
"""
|
||||
@@ -60,6 +59,7 @@ def allProviders(dataCache):
|
||||
all_providers[pn].append((ver, fn))
|
||||
return all_providers
|
||||
|
||||
|
||||
def sortPriorities(pn, dataCache, pkg_pn = None):
|
||||
"""
|
||||
Reorder pkg_pn by file priority and default preference
|
||||
@@ -87,21 +87,6 @@ def sortPriorities(pn, dataCache, pkg_pn = None):
|
||||
|
||||
return tmp_pn
|
||||
|
||||
def versionVariableMatch(cfgData, keyword, pn):
|
||||
"""
|
||||
Return the value of the <keyword>_VERSION variable if set.
|
||||
"""
|
||||
|
||||
# pn can contain '_', e.g. gcc-cross-x86_64 and an override cannot
|
||||
# hence we do this manually rather than use OVERRIDES
|
||||
ver = cfgData.getVar("%s_VERSION:pn-%s" % (keyword, pn))
|
||||
if not ver:
|
||||
ver = cfgData.getVar("%s_VERSION_%s" % (keyword, pn))
|
||||
if not ver:
|
||||
ver = cfgData.getVar("%s_VERSION" % keyword)
|
||||
|
||||
return ver
|
||||
|
||||
def preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r):
|
||||
"""
|
||||
Check if the version pe,pv,pr is the preferred one.
|
||||
@@ -117,28 +102,19 @@ def preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r):
|
||||
|
||||
def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
|
||||
"""
|
||||
Find the first provider in pkg_pn with REQUIRED_VERSION or PREFERRED_VERSION set.
|
||||
Find the first provider in pkg_pn with a PREFERRED_VERSION set.
|
||||
"""
|
||||
|
||||
preferred_file = None
|
||||
preferred_ver = None
|
||||
required = False
|
||||
|
||||
required_v = versionVariableMatch(cfgData, "REQUIRED", pn)
|
||||
preferred_v = versionVariableMatch(cfgData, "PREFERRED", pn)
|
||||
|
||||
itemstr = ""
|
||||
if item:
|
||||
itemstr = " (for item %s)" % item
|
||||
|
||||
if required_v is not None:
|
||||
if preferred_v is not None:
|
||||
logger.warning("REQUIRED_VERSION and PREFERRED_VERSION for package %s%s are both set using REQUIRED_VERSION %s", pn, itemstr, required_v)
|
||||
else:
|
||||
logger.debug("REQUIRED_VERSION is set for package %s%s", pn, itemstr)
|
||||
# REQUIRED_VERSION always takes precedence over PREFERRED_VERSION
|
||||
preferred_v = required_v
|
||||
required = True
|
||||
# pn can contain '_', e.g. gcc-cross-x86_64 and an override cannot
|
||||
# hence we do this manually rather than use OVERRIDES
|
||||
preferred_v = cfgData.getVar("PREFERRED_VERSION_pn-%s" % pn)
|
||||
if not preferred_v:
|
||||
preferred_v = cfgData.getVar("PREFERRED_VERSION_%s" % pn)
|
||||
if not preferred_v:
|
||||
preferred_v = cfgData.getVar("PREFERRED_VERSION")
|
||||
|
||||
if preferred_v:
|
||||
m = re.match(r'(\d+:)*(.*)(_.*)*', preferred_v)
|
||||
@@ -171,9 +147,11 @@ def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
|
||||
pv_str = preferred_v
|
||||
if not (preferred_e is None):
|
||||
pv_str = '%s:%s' % (preferred_e, pv_str)
|
||||
itemstr = ""
|
||||
if item:
|
||||
itemstr = " (for item %s)" % item
|
||||
if preferred_file is None:
|
||||
if not required:
|
||||
logger.warning("preferred version %s of %s not available%s", pv_str, pn, itemstr)
|
||||
logger.info("preferred version %s of %s not available%s", pv_str, pn, itemstr)
|
||||
available_vers = []
|
||||
for file_set in pkg_pn:
|
||||
for f in file_set:
|
||||
@@ -185,16 +163,12 @@ def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
|
||||
available_vers.append(ver_str)
|
||||
if available_vers:
|
||||
available_vers.sort()
|
||||
logger.warning("versions of %s available: %s", pn, ' '.join(available_vers))
|
||||
if required:
|
||||
logger.error("required version %s of %s not available%s", pv_str, pn, itemstr)
|
||||
logger.info("versions of %s available: %s", pn, ' '.join(available_vers))
|
||||
else:
|
||||
if required:
|
||||
logger.debug("selecting %s as REQUIRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr)
|
||||
else:
|
||||
logger.debug("selecting %s as PREFERRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr)
|
||||
logger.debug(1, "selecting %s as PREFERRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr)
|
||||
|
||||
return (preferred_ver, preferred_file)
|
||||
|
||||
return (preferred_ver, preferred_file, required)
|
||||
|
||||
def findLatestProvider(pn, cfgData, dataCache, file_set):
|
||||
"""
|
||||
@@ -215,6 +189,7 @@ def findLatestProvider(pn, cfgData, dataCache, file_set):
|
||||
|
||||
return (latest, latest_f)
|
||||
|
||||
|
||||
def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
|
||||
"""
|
||||
If there is a PREFERRED_VERSION, find the highest-priority bbfile
|
||||
@@ -223,16 +198,17 @@ def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
|
||||
"""
|
||||
|
||||
sortpkg_pn = sortPriorities(pn, dataCache, pkg_pn)
|
||||
# Find the highest priority provider with a REQUIRED_VERSION or PREFERRED_VERSION set
|
||||
(preferred_ver, preferred_file, required) = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn, item)
|
||||
# Find the highest priority provider with a PREFERRED_VERSION set
|
||||
(preferred_ver, preferred_file) = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn, item)
|
||||
# Find the latest version of the highest priority provider
|
||||
(latest, latest_f) = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[0])
|
||||
|
||||
if not required and preferred_file is None:
|
||||
if preferred_file is None:
|
||||
preferred_file = latest_f
|
||||
preferred_ver = latest
|
||||
|
||||
return (latest, latest_f, preferred_ver, preferred_file, required)
|
||||
return (latest, latest_f, preferred_ver, preferred_file)
|
||||
|
||||
|
||||
def _filterProviders(providers, item, cfgData, dataCache):
|
||||
"""
|
||||
@@ -256,15 +232,12 @@ def _filterProviders(providers, item, cfgData, dataCache):
|
||||
pkg_pn[pn] = []
|
||||
pkg_pn[pn].append(p)
|
||||
|
||||
logger.debug("providers for %s are: %s", item, list(sorted(pkg_pn.keys())))
|
||||
logger.debug(1, "providers for %s are: %s", item, list(sorted(pkg_pn.keys())))
|
||||
|
||||
# First add REQUIRED_VERSIONS or PREFERRED_VERSIONS
|
||||
# First add PREFERRED_VERSIONS
|
||||
for pn in sorted(pkg_pn):
|
||||
sortpkg_pn[pn] = sortPriorities(pn, dataCache, pkg_pn)
|
||||
preferred_ver, preferred_file, required = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item)
|
||||
if required and preferred_file is None:
|
||||
return eligible
|
||||
preferred_versions[pn] = (preferred_ver, preferred_file)
|
||||
preferred_versions[pn] = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item)
|
||||
if preferred_versions[pn][1]:
|
||||
eligible.append(preferred_versions[pn][1])
|
||||
|
||||
@@ -275,8 +248,9 @@ def _filterProviders(providers, item, cfgData, dataCache):
|
||||
preferred_versions[pn] = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[pn][0])
|
||||
eligible.append(preferred_versions[pn][1])
|
||||
|
||||
if not eligible:
|
||||
return eligible
|
||||
if len(eligible) == 0:
|
||||
logger.error("no eligible providers for %s", item)
|
||||
return 0
|
||||
|
||||
# If pn == item, give it a slight default preference
|
||||
# This means PREFERRED_PROVIDER_foobar defaults to foobar if available
|
||||
@@ -292,6 +266,7 @@ def _filterProviders(providers, item, cfgData, dataCache):
|
||||
|
||||
return eligible
|
||||
|
||||
|
||||
def filterProviders(providers, item, cfgData, dataCache):
|
||||
"""
|
||||
Take a list of providers and filter/reorder according to the
|
||||
@@ -316,7 +291,7 @@ def filterProviders(providers, item, cfgData, dataCache):
|
||||
foundUnique = True
|
||||
break
|
||||
|
||||
logger.debug("sorted providers for %s are: %s", item, eligible)
|
||||
logger.debug(1, "sorted providers for %s are: %s", item, eligible)
|
||||
|
||||
return eligible, foundUnique
|
||||
|
||||
@@ -358,7 +333,7 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache):
|
||||
provides = dataCache.pn_provides[pn]
|
||||
for provide in provides:
|
||||
prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % provide)
|
||||
#logger.debug("checking PREFERRED_PROVIDER_%s (value %s) against %s", provide, prefervar, pns.keys())
|
||||
#logger.debug(1, "checking PREFERRED_PROVIDER_%s (value %s) against %s", provide, prefervar, pns.keys())
|
||||
if prefervar in pns and pns[prefervar] not in preferred:
|
||||
var = "PREFERRED_PROVIDER_%s = %s" % (provide, prefervar)
|
||||
logger.verbose("selecting %s to satisfy runtime %s due to %s", prefervar, item, var)
|
||||
@@ -374,7 +349,7 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache):
|
||||
if numberPreferred > 1:
|
||||
logger.error("Trying to resolve runtime dependency %s resulted in conflicting PREFERRED_PROVIDER entries being found.\nThe providers found were: %s\nThe PREFERRED_PROVIDER entries resulting in this conflict were: %s. You could set PREFERRED_RPROVIDER_%s" % (item, preferred, preferred_vars, item))
|
||||
|
||||
logger.debug("sorted runtime providers for %s are: %s", item, eligible)
|
||||
logger.debug(1, "sorted runtime providers for %s are: %s", item, eligible)
|
||||
|
||||
return eligible, numberPreferred
|
||||
|
||||
@@ -409,10 +384,11 @@ def getRuntimeProviders(dataCache, rdepend):
|
||||
regexp_cache[pattern] = regexp
|
||||
if regexp.match(rdepend):
|
||||
rproviders += dataCache.packages_dynamic[pattern]
|
||||
logger.debug("Assuming %s is a dynamic package, but it may not exist" % rdepend)
|
||||
logger.debug(1, "Assuming %s is a dynamic package, but it may not exist" % rdepend)
|
||||
|
||||
return rproviders
|
||||
|
||||
|
||||
def buildWorldTargetList(dataCache, task=None):
|
||||
"""
|
||||
Build package list for "bitbake world"
|
||||
@@ -420,22 +396,22 @@ def buildWorldTargetList(dataCache, task=None):
|
||||
if dataCache.world_target:
|
||||
return
|
||||
|
||||
logger.debug("collating packages for \"world\"")
|
||||
logger.debug(1, "collating packages for \"world\"")
|
||||
for f in dataCache.possible_world:
|
||||
terminal = True
|
||||
pn = dataCache.pkg_fn[f]
|
||||
if task and task not in dataCache.task_deps[f]['tasks']:
|
||||
logger.debug2("World build skipping %s as task %s doesn't exist", f, task)
|
||||
logger.debug(2, "World build skipping %s as task %s doesn't exist", f, task)
|
||||
terminal = False
|
||||
|
||||
for p in dataCache.pn_provides[pn]:
|
||||
if p.startswith('virtual/'):
|
||||
logger.debug2("World build skipping %s due to %s provider starting with virtual/", f, p)
|
||||
logger.debug(2, "World build skipping %s due to %s provider starting with virtual/", f, p)
|
||||
terminal = False
|
||||
break
|
||||
for pf in dataCache.providers[p]:
|
||||
if dataCache.pkg_fn[pf] != pn:
|
||||
logger.debug2("World build skipping %s due to both us and %s providing %s", f, pf, p)
|
||||
logger.debug(2, "World build skipping %s due to both us and %s providing %s", f, pf, p)
|
||||
terminal = False
|
||||
break
|
||||
if terminal:
|
||||
|
||||
@@ -38,7 +38,7 @@ def taskname_from_tid(tid):
|
||||
return tid.rsplit(":", 1)[1]
|
||||
|
||||
def mc_from_tid(tid):
|
||||
if tid.startswith('mc:') and tid.count(':') >= 2:
|
||||
if tid.startswith('mc:'):
|
||||
return tid.split(':')[1]
|
||||
return ""
|
||||
|
||||
@@ -47,13 +47,13 @@ def split_tid(tid):
|
||||
return (mc, fn, taskname)
|
||||
|
||||
def split_mc(n):
|
||||
if n.startswith("mc:") and n.count(':') >= 2:
|
||||
if n.startswith("mc:"):
|
||||
_, mc, n = n.split(":", 2)
|
||||
return (mc, n)
|
||||
return ('', n)
|
||||
|
||||
def split_tid_mcfn(tid):
|
||||
if tid.startswith('mc:') and tid.count(':') >= 2:
|
||||
if tid.startswith('mc:'):
|
||||
elems = tid.split(':')
|
||||
mc = elems[1]
|
||||
fn = ":".join(elems[2:-1])
|
||||
@@ -85,19 +85,15 @@ class RunQueueStats:
|
||||
"""
|
||||
Holds statistics on the tasks handled by the associated runQueue
|
||||
"""
|
||||
def __init__(self, total, setscene_total):
|
||||
def __init__(self, total):
|
||||
self.completed = 0
|
||||
self.skipped = 0
|
||||
self.failed = 0
|
||||
self.active = 0
|
||||
self.setscene_active = 0
|
||||
self.setscene_covered = 0
|
||||
self.setscene_notcovered = 0
|
||||
self.setscene_total = setscene_total
|
||||
self.total = total
|
||||
|
||||
def copy(self):
|
||||
obj = self.__class__(self.total, self.setscene_total)
|
||||
obj = self.__class__(self.total)
|
||||
obj.__dict__.update(self.__dict__)
|
||||
return obj
|
||||
|
||||
@@ -116,13 +112,6 @@ class RunQueueStats:
|
||||
def taskActive(self):
|
||||
self.active = self.active + 1
|
||||
|
||||
def updateCovered(self, covered, notcovered):
|
||||
self.setscene_covered = covered
|
||||
self.setscene_notcovered = notcovered
|
||||
|
||||
def updateActiveSetscene(self, active):
|
||||
self.setscene_active = active
|
||||
|
||||
# These values indicate the next step due to be run in the
|
||||
# runQueue state machine
|
||||
runQueuePrepare = 2
|
||||
@@ -555,8 +544,8 @@ class RunQueueData:
|
||||
for tid in self.runtaskentries:
|
||||
if task_done[tid] is False or deps_left[tid] != 0:
|
||||
problem_tasks.append(tid)
|
||||
logger.debug2("Task %s is not buildable", tid)
|
||||
logger.debug2("(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
|
||||
logger.debug(2, "Task %s is not buildable", tid)
|
||||
logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
|
||||
self.runtaskentries[tid].weight = weight[tid]
|
||||
|
||||
if problem_tasks:
|
||||
@@ -654,7 +643,7 @@ class RunQueueData:
|
||||
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
|
||||
#runtid = build_tid(mc, fn, taskname)
|
||||
|
||||
#logger.debug2("Processing %s,%s:%s", mc, fn, taskname)
|
||||
#logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
|
||||
|
||||
depends = set()
|
||||
task_deps = self.dataCaches[mc].task_deps[taskfn]
|
||||
@@ -926,36 +915,38 @@ class RunQueueData:
|
||||
#
|
||||
# Once all active tasks are marked, prune the ones we don't need.
|
||||
|
||||
delcount = {}
|
||||
for tid in list(self.runtaskentries.keys()):
|
||||
if tid not in runq_build:
|
||||
delcount[tid] = self.runtaskentries[tid]
|
||||
del self.runtaskentries[tid]
|
||||
|
||||
# Handle --runall
|
||||
if self.cooker.configuration.runall:
|
||||
# re-run the mark_active and then drop unused tasks from new list
|
||||
reduced_tasklist = set(self.runtaskentries.keys())
|
||||
for tid in list(self.runtaskentries.keys()):
|
||||
if tid not in runq_build:
|
||||
reduced_tasklist.remove(tid)
|
||||
runq_build = {}
|
||||
|
||||
for task in self.cooker.configuration.runall:
|
||||
if not task.startswith("do_"):
|
||||
task = "do_{0}".format(task)
|
||||
runall_tids = set()
|
||||
for tid in reduced_tasklist:
|
||||
for tid in list(self.runtaskentries):
|
||||
wanttid = "{0}:{1}".format(fn_from_tid(tid), task)
|
||||
if wanttid in delcount:
|
||||
self.runtaskentries[wanttid] = delcount[wanttid]
|
||||
if wanttid in self.runtaskentries:
|
||||
runall_tids.add(wanttid)
|
||||
|
||||
for tid in list(runall_tids):
|
||||
mark_active(tid, 1)
|
||||
mark_active(tid,1)
|
||||
if self.cooker.configuration.force:
|
||||
invalidate_task(tid, False)
|
||||
|
||||
delcount = set()
|
||||
for tid in list(self.runtaskentries.keys()):
|
||||
if tid not in runq_build:
|
||||
delcount.add(tid)
|
||||
del self.runtaskentries[tid]
|
||||
for tid in list(self.runtaskentries.keys()):
|
||||
if tid not in runq_build:
|
||||
delcount[tid] = self.runtaskentries[tid]
|
||||
del self.runtaskentries[tid]
|
||||
|
||||
if self.cooker.configuration.runall:
|
||||
if len(self.runtaskentries) == 0:
|
||||
bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
|
||||
|
||||
@@ -969,16 +960,16 @@ class RunQueueData:
|
||||
for task in self.cooker.configuration.runonly:
|
||||
if not task.startswith("do_"):
|
||||
task = "do_{0}".format(task)
|
||||
runonly_tids = [k for k in self.runtaskentries.keys() if taskname_from_tid(k) == task]
|
||||
runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == task }
|
||||
|
||||
for tid in runonly_tids:
|
||||
mark_active(tid, 1)
|
||||
for tid in list(runonly_tids):
|
||||
mark_active(tid,1)
|
||||
if self.cooker.configuration.force:
|
||||
invalidate_task(tid, False)
|
||||
|
||||
for tid in list(self.runtaskentries.keys()):
|
||||
if tid not in runq_build:
|
||||
delcount.add(tid)
|
||||
delcount[tid] = self.runtaskentries[tid]
|
||||
del self.runtaskentries[tid]
|
||||
|
||||
if len(self.runtaskentries) == 0:
|
||||
@@ -1208,9 +1199,9 @@ class RunQueueData:
|
||||
"""
|
||||
Dump some debug information on the internal data structures
|
||||
"""
|
||||
logger.debug3("run_tasks:")
|
||||
logger.debug(3, "run_tasks:")
|
||||
for tid in self.runtaskentries:
|
||||
logger.debug3(" %s: %s Deps %s RevDeps %s", tid,
|
||||
logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
|
||||
self.runtaskentries[tid].weight,
|
||||
self.runtaskentries[tid].depends,
|
||||
self.runtaskentries[tid].revdeps)
|
||||
@@ -1247,11 +1238,10 @@ class RunQueue:
|
||||
self.fakeworker = {}
|
||||
|
||||
def _start_worker(self, mc, fakeroot = False, rqexec = None):
|
||||
logger.debug("Starting bitbake-worker")
|
||||
logger.debug(1, "Starting bitbake-worker")
|
||||
magic = "decafbad"
|
||||
if self.cooker.configuration.profile:
|
||||
magic = "decafbadbad"
|
||||
fakerootlogs = None
|
||||
if fakeroot:
|
||||
magic = magic + "beef"
|
||||
mcdata = self.cooker.databuilder.mcdata[mc]
|
||||
@@ -1261,11 +1251,10 @@ class RunQueue:
|
||||
for key, value in (var.split('=') for var in fakerootenv):
|
||||
env[key] = value
|
||||
worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
|
||||
fakerootlogs = self.rqdata.dataCaches[mc].fakerootlogs
|
||||
else:
|
||||
worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
|
||||
bb.utils.nonblockingfd(worker.stdout)
|
||||
workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec, fakerootlogs=fakerootlogs)
|
||||
workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
|
||||
|
||||
workerdata = {
|
||||
"taskdeps" : self.rqdata.dataCaches[mc].task_deps,
|
||||
@@ -1282,7 +1271,6 @@ class RunQueue:
|
||||
"date" : self.cfgData.getVar("DATE"),
|
||||
"time" : self.cfgData.getVar("TIME"),
|
||||
"hashservaddr" : self.cooker.hashservaddr,
|
||||
"umask" : self.cfgData.getVar("BB_DEFAULT_UMASK"),
|
||||
}
|
||||
|
||||
worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
|
||||
@@ -1295,7 +1283,7 @@ class RunQueue:
|
||||
def _teardown_worker(self, worker):
|
||||
if not worker:
|
||||
return
|
||||
logger.debug("Teardown for bitbake-worker")
|
||||
logger.debug(1, "Teardown for bitbake-worker")
|
||||
try:
|
||||
worker.process.stdin.write(b"<quit></quit>")
|
||||
worker.process.stdin.flush()
|
||||
@@ -1368,12 +1356,12 @@ class RunQueue:
|
||||
|
||||
# If the stamp is missing, it's not current
|
||||
if not os.access(stampfile, os.F_OK):
|
||||
logger.debug2("Stampfile %s not available", stampfile)
|
||||
logger.debug(2, "Stampfile %s not available", stampfile)
|
||||
return False
|
||||
# If it's a 'nostamp' task, it's not current
|
||||
taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
|
||||
if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
|
||||
logger.debug2("%s.%s is nostamp\n", fn, taskname)
|
||||
logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
|
||||
return False
|
||||
|
||||
if taskname != "do_setscene" and taskname.endswith("_setscene"):
|
||||
@@ -1397,18 +1385,18 @@ class RunQueue:
|
||||
continue
|
||||
if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
|
||||
if not t2:
|
||||
logger.debug2('Stampfile %s does not exist', stampfile2)
|
||||
logger.debug(2, 'Stampfile %s does not exist', stampfile2)
|
||||
iscurrent = False
|
||||
break
|
||||
if t1 < t2:
|
||||
logger.debug2('Stampfile %s < %s', stampfile, stampfile2)
|
||||
logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
|
||||
iscurrent = False
|
||||
break
|
||||
if recurse and iscurrent:
|
||||
if dep in cache:
|
||||
iscurrent = cache[dep]
|
||||
if not iscurrent:
|
||||
logger.debug2('Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
|
||||
logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
|
||||
else:
|
||||
iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
|
||||
cache[dep] = iscurrent
|
||||
@@ -1478,7 +1466,7 @@ class RunQueue:
|
||||
if not self.dm_event_handler_registered:
|
||||
res = bb.event.register(self.dm_event_handler_name,
|
||||
lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False,
|
||||
('bb.event.HeartbeatEvent',), data=self.cfgData)
|
||||
('bb.event.HeartbeatEvent',))
|
||||
self.dm_event_handler_registered = True
|
||||
|
||||
dump = self.cooker.configuration.dump_signatures
|
||||
@@ -1517,7 +1505,7 @@ class RunQueue:
|
||||
build_done = self.state is runQueueComplete or self.state is runQueueFailed
|
||||
|
||||
if build_done and self.dm_event_handler_registered:
|
||||
bb.event.remove(self.dm_event_handler_name, None, data=self.cfgData)
|
||||
bb.event.remove(self.dm_event_handler_name, None)
|
||||
self.dm_event_handler_registered = False
|
||||
|
||||
if build_done and self.rqexe:
|
||||
@@ -1744,7 +1732,8 @@ class RunQueueExecute:
|
||||
self.holdoff_need_update = True
|
||||
self.sqdone = False
|
||||
|
||||
self.stats = RunQueueStats(len(self.rqdata.runtaskentries), len(self.rqdata.runq_setscene_tids))
|
||||
self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
|
||||
self.sq_stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))
|
||||
|
||||
for mc in rq.worker:
|
||||
rq.worker[mc].pipe.setrunqueueexec(self)
|
||||
@@ -1772,7 +1761,7 @@ class RunQueueExecute:
|
||||
for scheduler in schedulers:
|
||||
if self.scheduler == scheduler.name:
|
||||
self.sched = scheduler(self, self.rqdata)
|
||||
logger.debug("Using runqueue scheduler '%s'", scheduler.name)
|
||||
logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
|
||||
break
|
||||
else:
|
||||
bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
|
||||
@@ -1782,7 +1771,7 @@ class RunQueueExecute:
|
||||
self.sqdata = SQData()
|
||||
build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
|
||||
|
||||
def runqueue_process_waitpid(self, task, status, fakerootlog=None):
|
||||
def runqueue_process_waitpid(self, task, status):
|
||||
|
||||
# self.build_stamps[pid] may not exist when use shared work directory.
|
||||
if task in self.build_stamps:
|
||||
@@ -1795,10 +1784,9 @@ class RunQueueExecute:
|
||||
else:
|
||||
self.sq_task_complete(task)
|
||||
self.sq_live.remove(task)
|
||||
self.stats.updateActiveSetscene(len(self.sq_live))
|
||||
else:
|
||||
if status != 0:
|
||||
self.task_fail(task, status, fakerootlog=fakerootlog)
|
||||
self.task_fail(task, status)
|
||||
else:
|
||||
self.task_complete(task)
|
||||
return True
|
||||
@@ -1829,7 +1817,7 @@ class RunQueueExecute:
|
||||
def finish(self):
|
||||
self.rq.state = runQueueCleanUp
|
||||
|
||||
active = self.stats.active + len(self.sq_live)
|
||||
active = self.stats.active + self.sq_stats.active
|
||||
if active > 0:
|
||||
bb.event.fire(runQueueExitWait(active), self.cfgData)
|
||||
self.rq.read_workers()
|
||||
@@ -1862,7 +1850,7 @@ class RunQueueExecute:
|
||||
return valid
|
||||
|
||||
def can_start_task(self):
|
||||
active = self.stats.active + len(self.sq_live)
|
||||
active = self.stats.active + self.sq_stats.active
|
||||
can_start = active < self.number_tasks
|
||||
return can_start
|
||||
|
||||
@@ -1911,13 +1899,7 @@ class RunQueueExecute:
|
||||
break
|
||||
if alldeps:
|
||||
self.setbuildable(revdep)
|
||||
logger.debug("Marking task %s as buildable", revdep)
|
||||
|
||||
for t in self.sq_deferred.copy():
|
||||
if self.sq_deferred[t] == task:
|
||||
logger.debug2("Deferred task %s now buildable" % t)
|
||||
del self.sq_deferred[t]
|
||||
update_scenequeue_data([t], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
|
||||
logger.debug(1, "Marking task %s as buildable", revdep)
|
||||
|
||||
def task_complete(self, task):
|
||||
self.stats.taskCompleted()
|
||||
@@ -1925,31 +1907,14 @@ class RunQueueExecute:
|
||||
self.task_completeoutright(task)
|
||||
self.runq_tasksrun.add(task)
|
||||
|
||||
def task_fail(self, task, exitcode, fakerootlog=None):
|
||||
def task_fail(self, task, exitcode):
|
||||
"""
|
||||
Called when a task has failed
|
||||
Updates the state engine with the failure
|
||||
"""
|
||||
self.stats.taskFailed()
|
||||
self.failed_tids.append(task)
|
||||
|
||||
fakeroot_log = ""
|
||||
if fakerootlog and os.path.exists(fakerootlog):
|
||||
with open(fakerootlog) as fakeroot_log_file:
|
||||
fakeroot_failed = False
|
||||
for line in reversed(fakeroot_log_file.readlines()):
|
||||
for fakeroot_error in ['mismatch', 'error', 'fatal']:
|
||||
if fakeroot_error in line.lower():
|
||||
fakeroot_failed = True
|
||||
if 'doing new pid setup and server start' in line:
|
||||
break
|
||||
fakeroot_log = line + fakeroot_log
|
||||
|
||||
if not fakeroot_failed:
|
||||
fakeroot_log = None
|
||||
|
||||
bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq, fakeroot_log=fakeroot_log), self.cfgData)
|
||||
|
||||
bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
|
||||
if self.rqdata.taskData[''].abort:
|
||||
self.rq.state = runQueueCleanUp
|
||||
|
||||
@@ -1964,8 +1929,8 @@ class RunQueueExecute:
|
||||
def summarise_scenequeue_errors(self):
|
||||
err = False
|
||||
if not self.sqdone:
|
||||
logger.debug('We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
|
||||
completeevent = sceneQueueComplete(self.stats, self.rq)
|
||||
logger.debug(1, 'We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
|
||||
completeevent = sceneQueueComplete(self.sq_stats, self.rq)
|
||||
bb.event.fire(completeevent, self.cfgData)
|
||||
if self.sq_deferred:
|
||||
logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred))
|
||||
@@ -1977,10 +1942,6 @@ class RunQueueExecute:
|
||||
logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks))
|
||||
err = True
|
||||
|
||||
for tid in self.scenequeue_covered.intersection(self.scenequeue_notcovered):
|
||||
# No task should end up in both covered and uncovered, that is a bug.
|
||||
logger.error("Setscene task %s in both covered and notcovered." % tid)
|
||||
|
||||
for tid in self.rqdata.runq_setscene_tids:
|
||||
if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
|
||||
err = True
|
||||
@@ -2025,7 +1986,7 @@ class RunQueueExecute:
|
||||
if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
|
||||
if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
|
||||
if nexttask not in self.rqdata.target_tids:
|
||||
logger.debug2("Skipping setscene for task %s" % nexttask)
|
||||
logger.debug(2, "Skipping setscene for task %s" % nexttask)
|
||||
self.sq_task_skip(nexttask)
|
||||
self.scenequeue_notneeded.add(nexttask)
|
||||
if nexttask in self.sq_deferred:
|
||||
@@ -2038,26 +1999,28 @@ class RunQueueExecute:
|
||||
if nexttask in self.sq_deferred:
|
||||
if self.sq_deferred[nexttask] not in self.runq_complete:
|
||||
continue
|
||||
logger.debug("Task %s no longer deferred" % nexttask)
|
||||
logger.debug(1, "Task %s no longer deferred" % nexttask)
|
||||
del self.sq_deferred[nexttask]
|
||||
valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False, summary=False)
|
||||
if not valid:
|
||||
logger.debug("%s didn't become valid, skipping setscene" % nexttask)
|
||||
logger.debug(1, "%s didn't become valid, skipping setscene" % nexttask)
|
||||
self.sq_task_failoutright(nexttask)
|
||||
return True
|
||||
else:
|
||||
self.sqdata.outrightfail.remove(nexttask)
|
||||
if nexttask in self.sqdata.outrightfail:
|
||||
logger.debug2('No package found, so skipping setscene task %s', nexttask)
|
||||
logger.debug(2, 'No package found, so skipping setscene task %s', nexttask)
|
||||
self.sq_task_failoutright(nexttask)
|
||||
return True
|
||||
if nexttask in self.sqdata.unskippable:
|
||||
logger.debug2("Setscene task %s is unskippable" % nexttask)
|
||||
logger.debug(2, "Setscene task %s is unskippable" % nexttask)
|
||||
task = nexttask
|
||||
break
|
||||
if task is not None:
|
||||
(mc, fn, taskname, taskfn) = split_tid_mcfn(task)
|
||||
taskname = taskname + "_setscene"
|
||||
if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
|
||||
logger.debug2('Stamp for underlying task %s is current, so skipping setscene variant', task)
|
||||
logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
|
||||
self.sq_task_failoutright(task)
|
||||
return True
|
||||
|
||||
@@ -2067,16 +2030,16 @@ class RunQueueExecute:
|
||||
return True
|
||||
|
||||
if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
|
||||
logger.debug2('Setscene stamp current task %s, so skip it and its dependencies', task)
|
||||
logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
|
||||
self.sq_task_skip(task)
|
||||
return True
|
||||
|
||||
if self.cooker.configuration.skipsetscene:
|
||||
logger.debug2('No setscene tasks should be executed. Skipping %s', task)
|
||||
logger.debug(2, 'No setscene tasks should be executed. Skipping %s', task)
|
||||
self.sq_task_failoutright(task)
|
||||
return True
|
||||
|
||||
startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
|
||||
startevent = sceneQueueTaskStarted(task, self.sq_stats, self.rq)
|
||||
bb.event.fire(startevent, self.cfgData)
|
||||
|
||||
taskdepdata = self.sq_build_taskdepdata(task)
|
||||
@@ -2097,7 +2060,7 @@ class RunQueueExecute:
|
||||
self.build_stamps2.append(self.build_stamps[task])
|
||||
self.sq_running.add(task)
|
||||
self.sq_live.add(task)
|
||||
self.stats.updateActiveSetscene(len(self.sq_live))
|
||||
self.sq_stats.taskActive()
|
||||
if self.can_start_task():
|
||||
return True
|
||||
|
||||
@@ -2134,12 +2097,12 @@ class RunQueueExecute:
|
||||
return True
|
||||
|
||||
if task in self.tasks_covered:
|
||||
logger.debug2("Setscene covered task %s", task)
|
||||
logger.debug(2, "Setscene covered task %s", task)
|
||||
self.task_skip(task, "covered")
|
||||
return True
|
||||
|
||||
if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
|
||||
logger.debug2("Stamp current task %s", task)
|
||||
logger.debug(2, "Stamp current task %s", task)
|
||||
|
||||
self.task_skip(task, "existing")
|
||||
self.runq_tasksrun.add(task)
|
||||
@@ -2187,7 +2150,7 @@ class RunQueueExecute:
|
||||
if self.can_start_task():
|
||||
return True
|
||||
|
||||
if self.stats.active > 0 or len(self.sq_live) > 0:
|
||||
if self.stats.active > 0 or self.sq_stats.active > 0:
|
||||
self.rq.read_workers()
|
||||
return self.rq.active_fds()
|
||||
|
||||
@@ -2195,8 +2158,7 @@ class RunQueueExecute:
|
||||
if self.sq_deferred:
|
||||
tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
|
||||
logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid)
|
||||
if tid not in self.runq_complete:
|
||||
self.sq_task_failoutright(tid)
|
||||
self.sq_task_failoutright(tid)
|
||||
return True
|
||||
|
||||
if len(self.failed_tids) != 0:
|
||||
@@ -2310,16 +2272,10 @@ class RunQueueExecute:
|
||||
self.updated_taskhash_queue.remove((tid, unihash))
|
||||
|
||||
if unihash != self.rqdata.runtaskentries[tid].unihash:
|
||||
# Make sure we rehash any other tasks with the same task hash that we're deferred against.
|
||||
torehash = [tid]
|
||||
for deftid in self.sq_deferred:
|
||||
if self.sq_deferred[deftid] == tid:
|
||||
torehash.append(deftid)
|
||||
for hashtid in torehash:
|
||||
hashequiv_logger.verbose("Task %s unihash changed to %s" % (hashtid, unihash))
|
||||
self.rqdata.runtaskentries[hashtid].unihash = unihash
|
||||
bb.parse.siggen.set_unihash(hashtid, unihash)
|
||||
toprocess.add(hashtid)
|
||||
hashequiv_logger.verbose("Task %s unihash changed to %s" % (tid, unihash))
|
||||
self.rqdata.runtaskentries[tid].unihash = unihash
|
||||
bb.parse.siggen.set_unihash(tid, unihash)
|
||||
toprocess.add(tid)
|
||||
|
||||
# Work out all tasks which depend upon these
|
||||
total = set()
|
||||
@@ -2366,7 +2322,7 @@ class RunQueueExecute:
|
||||
remapped = True
|
||||
|
||||
if not remapped:
|
||||
#logger.debug("Task %s hash changes: %s->%s %s->%s" % (tid, orighash, newhash, origuni, newuni))
|
||||
#logger.debug(1, "Task %s hash changes: %s->%s %s->%s" % (tid, orighash, newhash, origuni, newuni))
|
||||
self.rqdata.runtaskentries[tid].hash = newhash
|
||||
self.rqdata.runtaskentries[tid].unihash = newuni
|
||||
changed.add(tid)
|
||||
@@ -2381,7 +2337,7 @@ class RunQueueExecute:
|
||||
for mc in self.rq.fakeworker:
|
||||
self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
|
||||
|
||||
hashequiv_logger.debug(pprint.pformat("Tasks changed:\n%s" % (changed)))
|
||||
hashequiv_logger.debug(1, pprint.pformat("Tasks changed:\n%s" % (changed)))
|
||||
|
||||
for tid in changed:
|
||||
if tid not in self.rqdata.runq_setscene_tids:
|
||||
@@ -2400,7 +2356,7 @@ class RunQueueExecute:
|
||||
# Check no tasks this covers are running
|
||||
for dep in self.sqdata.sq_covered_tasks[tid]:
|
||||
if dep in self.runq_running and dep not in self.runq_complete:
|
||||
hashequiv_logger.debug2("Task %s is running which blocks setscene for %s from running" % (dep, tid))
|
||||
hashequiv_logger.debug(2, "Task %s is running which blocks setscene for %s from running" % (dep, tid))
|
||||
valid = False
|
||||
break
|
||||
if not valid:
|
||||
@@ -2459,11 +2415,6 @@ class RunQueueExecute:
|
||||
|
||||
if update_tasks:
|
||||
self.sqdone = False
|
||||
for tid in [t[0] for t in update_tasks]:
|
||||
h = pending_hash_index(tid, self.rqdata)
|
||||
if h in self.sqdata.hashes and tid != self.sqdata.hashes[h]:
|
||||
self.sq_deferred[tid] = self.sqdata.hashes[h]
|
||||
bb.note("Deferring %s after %s" % (tid, self.sqdata.hashes[h]))
|
||||
update_scenequeue_data([t[0] for t in update_tasks], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
|
||||
|
||||
for (tid, harddepfail, origvalid) in update_tasks:
|
||||
@@ -2473,20 +2424,13 @@ class RunQueueExecute:
|
||||
self.sq_task_failoutright(tid)
|
||||
|
||||
if changed:
|
||||
self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered))
|
||||
self.holdoff_need_update = True
|
||||
|
||||
def scenequeue_updatecounters(self, task, fail=False):
|
||||
|
||||
for dep in sorted(self.sqdata.sq_deps[task]):
|
||||
if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]:
|
||||
if dep in self.scenequeue_covered or dep in self.scenequeue_notcovered:
|
||||
# dependency could be already processed, e.g. noexec setscene task
|
||||
continue
|
||||
noexec, stamppresent = check_setscene_stamps(dep, self.rqdata, self.rq, self.stampcache)
|
||||
if noexec or stamppresent:
|
||||
continue
|
||||
logger.debug2("%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
|
||||
logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
|
||||
self.sq_task_failoutright(dep)
|
||||
continue
|
||||
if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
|
||||
@@ -2507,7 +2451,6 @@ class RunQueueExecute:
|
||||
new.add(dep)
|
||||
next = new
|
||||
|
||||
self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered))
|
||||
self.holdoff_need_update = True
|
||||
|
||||
def sq_task_completeoutright(self, task):
|
||||
@@ -2517,7 +2460,7 @@ class RunQueueExecute:
|
||||
completed dependencies as buildable
|
||||
"""
|
||||
|
||||
logger.debug('Found task %s which could be accelerated', task)
|
||||
logger.debug(1, 'Found task %s which could be accelerated', task)
|
||||
self.scenequeue_covered.add(task)
|
||||
self.scenequeue_updatecounters(task)
|
||||
|
||||
@@ -2531,11 +2474,13 @@ class RunQueueExecute:
|
||||
self.rq.state = runQueueCleanUp
|
||||
|
||||
def sq_task_complete(self, task):
|
||||
bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
|
||||
self.sq_stats.taskCompleted()
|
||||
bb.event.fire(sceneQueueTaskCompleted(task, self.sq_stats, self.rq), self.cfgData)
|
||||
self.sq_task_completeoutright(task)
|
||||
|
||||
def sq_task_fail(self, task, result):
|
||||
bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
|
||||
self.sq_stats.taskFailed()
|
||||
bb.event.fire(sceneQueueTaskFailed(task, self.sq_stats, result, self), self.cfgData)
|
||||
self.scenequeue_notcovered.add(task)
|
||||
self.scenequeue_updatecounters(task, True)
|
||||
self.sq_check_taskfail(task)
|
||||
@@ -2543,6 +2488,8 @@ class RunQueueExecute:
|
||||
def sq_task_failoutright(self, task):
|
||||
self.sq_running.add(task)
|
||||
self.sq_buildable.add(task)
|
||||
self.sq_stats.taskSkipped()
|
||||
self.sq_stats.taskCompleted()
|
||||
self.scenequeue_notcovered.add(task)
|
||||
self.scenequeue_updatecounters(task, True)
|
||||
|
||||
@@ -2550,6 +2497,8 @@ class RunQueueExecute:
|
||||
self.sq_running.add(task)
|
||||
self.sq_buildable.add(task)
|
||||
self.sq_task_completeoutright(task)
|
||||
self.sq_stats.taskSkipped()
|
||||
self.sq_stats.taskCompleted()
|
||||
|
||||
def sq_build_taskdepdata(self, task):
|
||||
def getsetscenedeps(tid):
|
||||
@@ -2803,55 +2752,8 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
|
||||
sqdata.stamppresent = set()
|
||||
sqdata.valid = set()
|
||||
|
||||
sqdata.hashes = {}
|
||||
sqrq.sq_deferred = {}
|
||||
for mc in sorted(sqdata.multiconfigs):
|
||||
for tid in sorted(sqdata.sq_revdeps):
|
||||
if mc_from_tid(tid) != mc:
|
||||
continue
|
||||
h = pending_hash_index(tid, rqdata)
|
||||
if h not in sqdata.hashes:
|
||||
sqdata.hashes[h] = tid
|
||||
else:
|
||||
sqrq.sq_deferred[tid] = sqdata.hashes[h]
|
||||
bb.note("Deferring %s after %s" % (tid, sqdata.hashes[h]))
|
||||
|
||||
update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True)
|
||||
|
||||
# Compute a list of 'stale' sstate tasks where the current hash does not match the one
|
||||
# in any stamp files. Pass the list out to metadata as an event.
|
||||
found = {}
|
||||
for tid in rqdata.runq_setscene_tids:
|
||||
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
|
||||
stamps = bb.build.find_stale_stamps(taskname, rqdata.dataCaches[mc], taskfn)
|
||||
if stamps:
|
||||
if mc not in found:
|
||||
found[mc] = {}
|
||||
found[mc][tid] = stamps
|
||||
for mc in found:
|
||||
event = bb.event.StaleSetSceneTasks(found[mc])
|
||||
bb.event.fire(event, cooker.databuilder.mcdata[mc])
|
||||
|
||||
def check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=False):
|
||||
|
||||
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
|
||||
|
||||
taskdep = rqdata.dataCaches[mc].task_deps[taskfn]
|
||||
|
||||
if 'noexec' in taskdep and taskname in taskdep['noexec']:
|
||||
bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn)
|
||||
return True, False
|
||||
|
||||
if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache):
|
||||
logger.debug2('Setscene stamp current for task %s', tid)
|
||||
return False, True
|
||||
|
||||
if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache):
|
||||
logger.debug2('Normal stamp current for task %s', tid)
|
||||
return False, True
|
||||
|
||||
return False, False
|
||||
|
||||
def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True):
|
||||
|
||||
tocheck = set()
|
||||
@@ -2861,17 +2763,25 @@ def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, s
|
||||
sqdata.stamppresent.remove(tid)
|
||||
if tid in sqdata.valid:
|
||||
sqdata.valid.remove(tid)
|
||||
if tid in sqdata.outrightfail:
|
||||
sqdata.outrightfail.remove(tid)
|
||||
|
||||
noexec, stamppresent = check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=True)
|
||||
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
|
||||
|
||||
if noexec:
|
||||
taskdep = rqdata.dataCaches[mc].task_deps[taskfn]
|
||||
|
||||
if 'noexec' in taskdep and taskname in taskdep['noexec']:
|
||||
sqdata.noexec.add(tid)
|
||||
sqrq.sq_task_skip(tid)
|
||||
bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn)
|
||||
continue
|
||||
|
||||
if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache):
|
||||
logger.debug(2, 'Setscene stamp current for task %s', tid)
|
||||
sqdata.stamppresent.add(tid)
|
||||
sqrq.sq_task_skip(tid)
|
||||
continue
|
||||
|
||||
if stamppresent:
|
||||
if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache):
|
||||
logger.debug(2, 'Normal stamp current for task %s', tid)
|
||||
sqdata.stamppresent.add(tid)
|
||||
sqrq.sq_task_skip(tid)
|
||||
continue
|
||||
@@ -2880,20 +2790,28 @@ def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, s
|
||||
|
||||
sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False, summary=summary)
|
||||
|
||||
for tid in tids:
|
||||
if tid in sqdata.stamppresent:
|
||||
continue
|
||||
if tid in sqdata.valid:
|
||||
continue
|
||||
if tid in sqdata.noexec:
|
||||
continue
|
||||
if tid in sqrq.scenequeue_covered:
|
||||
continue
|
||||
if tid in sqrq.scenequeue_notcovered:
|
||||
continue
|
||||
if tid in sqrq.sq_deferred:
|
||||
continue
|
||||
sqdata.outrightfail.add(tid)
|
||||
sqdata.hashes = {}
|
||||
for mc in sorted(sqdata.multiconfigs):
|
||||
for tid in sorted(sqdata.sq_revdeps):
|
||||
if mc_from_tid(tid) != mc:
|
||||
continue
|
||||
if tid in sqdata.stamppresent:
|
||||
continue
|
||||
if tid in sqdata.valid:
|
||||
continue
|
||||
if tid in sqdata.noexec:
|
||||
continue
|
||||
if tid in sqrq.scenequeue_notcovered:
|
||||
continue
|
||||
sqdata.outrightfail.add(tid)
|
||||
|
||||
h = pending_hash_index(tid, rqdata)
|
||||
if h not in sqdata.hashes:
|
||||
sqdata.hashes[h] = tid
|
||||
else:
|
||||
sqrq.sq_deferred[tid] = sqdata.hashes[h]
|
||||
bb.note("Deferring %s after %s" % (tid, sqdata.hashes[h]))
|
||||
|
||||
|
||||
class TaskFailure(Exception):
|
||||
"""
|
||||
@@ -2957,16 +2875,12 @@ class runQueueTaskFailed(runQueueEvent):
|
||||
"""
|
||||
Event notifying a task failed
|
||||
"""
|
||||
def __init__(self, task, stats, exitcode, rq, fakeroot_log=None):
|
||||
def __init__(self, task, stats, exitcode, rq):
|
||||
runQueueEvent.__init__(self, task, stats, rq)
|
||||
self.exitcode = exitcode
|
||||
self.fakeroot_log = fakeroot_log
|
||||
|
||||
def __str__(self):
|
||||
if self.fakeroot_log:
|
||||
return "Task (%s) failed with exit code '%s' \nPseudo log:\n%s" % (self.taskstring, self.exitcode, self.fakeroot_log)
|
||||
else:
|
||||
return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
|
||||
return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
|
||||
|
||||
class sceneQueueTaskFailed(sceneQueueEvent):
|
||||
"""
|
||||
@@ -3018,7 +2932,7 @@ class runQueuePipe():
|
||||
"""
|
||||
Abstraction for a pipe between a worker thread and the server
|
||||
"""
|
||||
def __init__(self, pipein, pipeout, d, rq, rqexec, fakerootlogs=None):
|
||||
def __init__(self, pipein, pipeout, d, rq, rqexec):
|
||||
self.input = pipein
|
||||
if pipeout:
|
||||
pipeout.close()
|
||||
@@ -3027,7 +2941,6 @@ class runQueuePipe():
|
||||
self.d = d
|
||||
self.rq = rq
|
||||
self.rqexec = rqexec
|
||||
self.fakerootlogs = fakerootlogs
|
||||
|
||||
def setrunqueueexec(self, rqexec):
|
||||
self.rqexec = rqexec
|
||||
@@ -3073,11 +2986,7 @@ class runQueuePipe():
|
||||
task, status = pickle.loads(self.queue[10:index])
|
||||
except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e:
|
||||
bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
|
||||
(_, _, _, taskfn) = split_tid_mcfn(task)
|
||||
fakerootlog = None
|
||||
if self.fakerootlogs and taskfn and taskfn in self.fakerootlogs:
|
||||
fakerootlog = self.fakerootlogs[taskfn]
|
||||
self.rqexec.runqueue_process_waitpid(task, status, fakerootlog=fakerootlog)
|
||||
self.rqexec.runqueue_process_waitpid(task, status)
|
||||
found = True
|
||||
self.queue = self.queue[index+11:]
|
||||
index = self.queue.find(b"</exitcode>")
|
||||
|
||||
@@ -26,7 +26,6 @@ import errno
|
||||
import re
|
||||
import datetime
|
||||
import pickle
|
||||
import traceback
|
||||
import bb.server.xmlrpcserver
|
||||
from bb import daemonize
|
||||
from multiprocessing import queues
|
||||
@@ -148,7 +147,7 @@ class ProcessServer():
|
||||
conn = newconnections.pop(-1)
|
||||
fds.append(conn)
|
||||
self.controllersock = conn
|
||||
elif not self.timeout and not ready:
|
||||
elif self.timeout is None and not ready:
|
||||
serverlog("No timeout, exiting.")
|
||||
self.quit = True
|
||||
|
||||
@@ -218,9 +217,8 @@ class ProcessServer():
|
||||
self.command_channel_reply.send(self.cooker.command.runCommand(command))
|
||||
serverlog("Command Completed")
|
||||
except Exception as e:
|
||||
stack = traceback.format_exc()
|
||||
serverlog('Exception in server main event loop running command %s (%s)' % (command, stack))
|
||||
logger.exception('Exception in server main event loop running command %s (%s)' % (command, stack))
|
||||
serverlog('Exception in server main event loop running command %s (%s)' % (command, str(e)))
|
||||
logger.exception('Exception in server main event loop running command %s (%s)' % (command, str(e)))
|
||||
|
||||
if self.xmlrpc in ready:
|
||||
self.xmlrpc.handle_requests()
|
||||
@@ -369,12 +367,7 @@ class ProcessServer():
|
||||
self.next_heartbeat = now + self.heartbeat_seconds
|
||||
if hasattr(self.cooker, "data"):
|
||||
heartbeat = bb.event.HeartbeatEvent(now)
|
||||
try:
|
||||
bb.event.fire(heartbeat, self.cooker.data)
|
||||
except Exception as exc:
|
||||
if not isinstance(exc, bb.BBHandledException):
|
||||
logger.exception('Running heartbeat function')
|
||||
self.quit = True
|
||||
bb.event.fire(heartbeat, self.cooker.data)
|
||||
if nextsleep and now + nextsleep > self.next_heartbeat:
|
||||
# Shorten timeout so that we we wake up in time for
|
||||
# the heartbeat.
|
||||
@@ -473,7 +466,7 @@ class BitBakeServer(object):
|
||||
try:
|
||||
r = ready.get()
|
||||
except EOFError:
|
||||
# Trap the child exiting/closing the pipe and error out
|
||||
# Trap the child exitting/closing the pipe and error out
|
||||
r = None
|
||||
if not r or r[0] != "r":
|
||||
ready.close()
|
||||
@@ -516,7 +509,7 @@ class BitBakeServer(object):
|
||||
os.set_inheritable(self.bitbake_lock.fileno(), True)
|
||||
os.set_inheritable(self.readypipein, True)
|
||||
serverscript = os.path.realpath(os.path.dirname(__file__) + "/../../../bin/bitbake-server")
|
||||
os.execl(sys.executable, "bitbake-server", serverscript, "decafbad", str(self.bitbake_lock.fileno()), str(self.readypipein), self.logfile, self.bitbake_lock.name, self.sockname, str(self.server_timeout or 0), str(self.xmlrpcinterface[0]), str(self.xmlrpcinterface[1]))
|
||||
os.execl(sys.executable, "bitbake-server", serverscript, "decafbad", str(self.bitbake_lock.fileno()), str(self.readypipein), self.logfile, self.bitbake_lock.name, self.sockname, str(self.server_timeout), str(self.xmlrpcinterface[0]), str(self.xmlrpcinterface[1]))
|
||||
|
||||
def execServer(lockfd, readypipeinfd, lockname, sockname, server_timeout, xmlrpcinterface):
|
||||
|
||||
@@ -661,7 +654,7 @@ class BBUIEventQueue:
|
||||
self.reader = ConnectionReader(readfd)
|
||||
|
||||
self.t = threading.Thread()
|
||||
self.t.daemon = True
|
||||
self.t.setDaemon(True)
|
||||
self.t.run = self.startCallbackHandler
|
||||
self.t.start()
|
||||
|
||||
|
||||
@@ -228,7 +228,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
# self.dump_sigtask(fn, task, d.getVar("STAMP"), False)
|
||||
|
||||
for task in taskdeps:
|
||||
d.setVar("BB_BASEHASH:task-%s" % task, self.basehash[fn + ":" + task])
|
||||
d.setVar("BB_BASEHASH_task-%s" % task, self.basehash[fn + ":" + task])
|
||||
|
||||
def postparsing_clean_cache(self):
|
||||
#
|
||||
@@ -311,7 +311,13 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
|
||||
data = self.basehash[tid]
|
||||
for dep in self.runtaskdeps[tid]:
|
||||
data = data + self.get_unihash(dep)
|
||||
if dep in self.unihash:
|
||||
if self.unihash[dep] is None:
|
||||
data = data + self.taskhash[dep]
|
||||
else:
|
||||
data = data + self.unihash[dep]
|
||||
else:
|
||||
data = data + self.get_unihash(dep)
|
||||
|
||||
for (f, cs) in self.file_checksum_values[tid]:
|
||||
if cs:
|
||||
@@ -325,7 +331,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
|
||||
h = hashlib.sha256(data.encode("utf-8")).hexdigest()
|
||||
self.taskhash[tid] = h
|
||||
#d.setVar("BB_TASKHASH:task-%s" % task, taskhash[task])
|
||||
#d.setVar("BB_TASKHASH_task-%s" % task, taskhash[task])
|
||||
return h
|
||||
|
||||
def writeout_file_checksum_cache(self):
|
||||
@@ -402,7 +408,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
p = pickle.dump(data, stream, -1)
|
||||
stream.flush()
|
||||
os.chmod(tmpfile, 0o664)
|
||||
bb.utils.rename(tmpfile, sigfile)
|
||||
os.rename(tmpfile, sigfile)
|
||||
except (OSError, IOError) as err:
|
||||
try:
|
||||
os.unlink(tmpfile)
|
||||
@@ -541,8 +547,8 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
# is much more interesting, so it is reported at debug level 1
|
||||
hashequiv_logger.debug((1, 2)[unihash == taskhash], 'Found unihash %s in place of %s for %s from %s' % (unihash, taskhash, tid, self.server))
|
||||
else:
|
||||
hashequiv_logger.debug2('No reported unihash for %s:%s from %s' % (tid, taskhash, self.server))
|
||||
except ConnectionError as e:
|
||||
hashequiv_logger.debug(2, 'No reported unihash for %s:%s from %s' % (tid, taskhash, self.server))
|
||||
except hashserv.client.HashConnectionError as e:
|
||||
bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
|
||||
|
||||
self.set_unihash(tid, unihash)
|
||||
@@ -615,13 +621,13 @@ class SignatureGeneratorUniHashMixIn(object):
                new_unihash = data['unihash']

                if new_unihash != unihash:
-                   hashequiv_logger.debug('Task %s unihash changed %s -> %s by server %s' % (taskhash, unihash, new_unihash, self.server))
+                   hashequiv_logger.debug(1, 'Task %s unihash changed %s -> %s by server %s' % (taskhash, unihash, new_unihash, self.server))
                    bb.event.fire(bb.runqueue.taskUniHashUpdate(fn + ':do_' + task, new_unihash), d)
                    self.set_unihash(tid, new_unihash)
                    d.setVar('BB_UNIHASH', new_unihash)
                else:
-                   hashequiv_logger.debug('Reported task %s as unihash %s to %s' % (taskhash, unihash, self.server))
-           except ConnectionError as e:
+                   hashequiv_logger.debug(1, 'Reported task %s as unihash %s to %s' % (taskhash, unihash, self.server))
+           except hashserv.client.HashConnectionError as e:
                bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
            finally:
                if sigfile:
@@ -661,7 +667,7 @@ class SignatureGeneratorUniHashMixIn(object):
                # TODO: What to do here?
                hashequiv_logger.verbose('Task %s unihash reported as unwanted hash %s' % (tid, finalunihash))

-       except ConnectionError as e:
+       except hashserv.client.HashConnectionError as e:
            bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))

        return False
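All three hunks above swap the builtin `ConnectionError` for the older `hashserv.client.HashConnectionError`, but the pattern is the same: ask the hash equivalence server, warn and carry on if it is unreachable. A minimal sketch of that pattern, assuming a client object with a `get_unihash()` call and treating the exception class as a parameter (both are assumptions for the sketch, not the exact BitBake API):

    import logging

    logger = logging.getLogger("hashequiv")

    def query_unihash(client, method, taskhash, server, conn_error=ConnectionError):
        """Ask a hash equivalence server for a unihash; fall back to taskhash."""
        try:
            data = client.get_unihash(method, taskhash)   # assumed client call
            if data:
                return data
            logger.debug("No reported unihash for %s from %s", taskhash, server)
        except conn_error as e:
            # Never fail the build just because the server is down.
            logger.warning("Error contacting Hash Equivalence Server %s: %s", server, e)
        return taskhash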
@@ -748,7 +754,7 @@ def clean_basepath(basepath):
    if basepath[0] == '/':
        return cleaned

-   if basepath.startswith("mc:") and basepath.count(':') >= 2:
+   if basepath.startswith("mc:"):
        mc, mc_name, basepath = basepath.split(":", 2)
        mc_suffix = ':mc:' + mc_name
    else:
@@ -864,21 +870,21 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):

    changed, added, removed = dict_diff(a_data['gendeps'], b_data['gendeps'], a_data['basewhitelist'] & b_data['basewhitelist'])
    if changed:
-       for dep in sorted(changed):
+       for dep in changed:
            output.append(color_format("{color_title}List of dependencies for variable %s changed from '{color_default}%s{color_title}' to '{color_default}%s{color_title}'") % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep]))
            if a_data['gendeps'][dep] and b_data['gendeps'][dep]:
                output.append("changed items: %s" % a_data['gendeps'][dep].symmetric_difference(b_data['gendeps'][dep]))
    if added:
-       for dep in sorted(added):
+       for dep in added:
            output.append(color_format("{color_title}Dependency on variable %s was added") % (dep))
    if removed:
-       for dep in sorted(removed):
+       for dep in removed:
            output.append(color_format("{color_title}Dependency on Variable %s was removed") % (dep))

    changed, added, removed = dict_diff(a_data['varvals'], b_data['varvals'])
    if changed:
-       for dep in sorted(changed):
+       for dep in changed:
            oldval = a_data['varvals'][dep]
            newval = b_data['varvals'][dep]
            if newval and oldval and ('\n' in oldval or '\n' in newval):
@@ -948,7 +954,7 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
    b = b_data['runtaskhashes']
    changed, added, removed = dict_diff(a, b)
    if added:
-       for dep in sorted(added):
+       for dep in added:
            bdep_found = False
            if removed:
                for bdep in removed:
@@ -958,7 +964,7 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
            if not bdep_found:
                output.append(color_format("{color_title}Dependency on task %s was added{color_default} with hash %s") % (clean_basepath(dep), b[dep]))
    if removed:
-       for dep in sorted(removed):
+       for dep in removed:
            adep_found = False
            if added:
                for adep in added:
@@ -968,7 +974,7 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
            if not adep_found:
                output.append(color_format("{color_title}Dependency on task %s was removed{color_default} with hash %s") % (clean_basepath(dep), a[dep]))
    if changed:
-       for dep in sorted(changed):
+       for dep in changed:
            if not collapsed:
                output.append(color_format("{color_title}Hash for dependent task %s changed{color_default} from %s to %s") % (clean_basepath(dep), a[dep], b[dep]))
            if callable(recursecb):
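The only difference in these compare_sigfiles() hunks is the `sorted(...)` wrapped around the iterated sets, which makes the signature-diff output deterministic between runs. A tiny illustration of why that matters when the container is a set:

    # Set iteration order depends on hashing and insertion history, so two runs
    # (or two interpreters) may print the same dependencies in different orders.
    changed = {"PV", "SRC_URI", "DEPENDS"}

    for dep in changed:            # order not guaranteed
        print("Dependency changed:", dep)

    for dep in sorted(changed):    # always DEPENDS, PV, SRC_URI
        print("Dependency changed:", dep)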
@@ -131,7 +131,7 @@ class TaskData:
            for depend in dataCache.deps[fn]:
                dependids.add(depend)
            self.depids[fn] = list(dependids)
-           logger.debug2("Added dependencies %s for %s", str(dataCache.deps[fn]), fn)
+           logger.debug(2, "Added dependencies %s for %s", str(dataCache.deps[fn]), fn)

        # Work out runtime dependencies
        if not fn in self.rdepids:
@@ -149,9 +149,9 @@ class TaskData:
                    rreclist.append(rdepend)
                    rdependids.add(rdepend)
            if rdependlist:
-               logger.debug2("Added runtime dependencies %s for %s", str(rdependlist), fn)
+               logger.debug(2, "Added runtime dependencies %s for %s", str(rdependlist), fn)
            if rreclist:
-               logger.debug2("Added runtime recommendations %s for %s", str(rreclist), fn)
+               logger.debug(2, "Added runtime recommendations %s for %s", str(rreclist), fn)
            self.rdepids[fn] = list(rdependids)

        for dep in self.depids[fn]:
@@ -378,7 +378,7 @@ class TaskData:
        for fn in eligible:
            if fn in self.failed_fns:
                continue
-           logger.debug2("adding %s to satisfy %s", fn, item)
+           logger.debug(2, "adding %s to satisfy %s", fn, item)
            self.add_build_target(fn, item)
            self.add_tasks(fn, dataCache)

@@ -431,7 +431,7 @@ class TaskData:
        for fn in eligible:
            if fn in self.failed_fns:
                continue
-           logger.debug2("adding '%s' to satisfy runtime '%s'", fn, item)
+           logger.debug(2, "adding '%s' to satisfy runtime '%s'", fn, item)
            self.add_runtime_target(fn, item)
            self.add_tasks(fn, dataCache)

@@ -446,7 +446,7 @@ class TaskData:
            return
        if not missing_list:
            missing_list = []
-       logger.debug("File '%s' is unbuildable, removing...", fn)
+       logger.debug(1, "File '%s' is unbuildable, removing...", fn)
        self.failed_fns.append(fn)
        for target in self.build_targets:
            if fn in self.build_targets[target]:
@@ -526,7 +526,7 @@ class TaskData:
                    added = added + 1
                except (bb.providers.NoRProvider, bb.providers.MultipleRProvider):
                    self.remove_runtarget(target)
-           logger.debug("Resolved " + str(added) + " extra dependencies")
+           logger.debug(1, "Resolved " + str(added) + " extra dependencies")
            if added == 0:
                break
        # self.dump_data()
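These hunks (and the dump_data hunk that follows) only change how verbose debug messages are emitted: the newer branch calls `debug2()`/`debug3()` convenience methods, while yocto-3.2 passes the level as the first positional argument. A rough sketch of how such helpers can be layered on the standard logging module (level numbers and names are illustrative, not the exact BitBake implementation):

    import logging

    # Extra verbosity levels sit just below the standard DEBUG (10).
    DEBUG2 = logging.DEBUG - 1
    DEBUG3 = logging.DEBUG - 2
    logging.addLevelName(DEBUG2, "DEBUG2")
    logging.addLevelName(DEBUG3, "DEBUG3")

    class VerboseLogger(logging.Logger):
        def debug2(self, msg, *args, **kwargs):
            if self.isEnabledFor(DEBUG2):
                self._log(DEBUG2, msg, args, **kwargs)

        def debug3(self, msg, *args, **kwargs):
            if self.isEnabledFor(DEBUG3):
                self._log(DEBUG3, msg, args, **kwargs)

    logging.setLoggerClass(VerboseLogger)
    logger = logging.getLogger("BitBake.TaskData")
    logger.debug2("Added dependencies %s for %s", ["virtual/libc"], "demo.bb")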
@@ -549,38 +549,38 @@ class TaskData:
        """
        Dump some debug information on the internal data structures
        """
-       logger.debug3("build_names:")
-       logger.debug3(", ".join(self.build_targets))
+       logger.debug(3, "build_names:")
+       logger.debug(3, ", ".join(self.build_targets))

-       logger.debug3("run_names:")
-       logger.debug3(", ".join(self.run_targets))
+       logger.debug(3, "run_names:")
+       logger.debug(3, ", ".join(self.run_targets))

-       logger.debug3("build_targets:")
+       logger.debug(3, "build_targets:")
        for target in self.build_targets:
            targets = "None"
            if target in self.build_targets:
                targets = self.build_targets[target]
-           logger.debug3(" %s: %s", target, targets)
+           logger.debug(3, " %s: %s", target, targets)

-       logger.debug3("run_targets:")
+       logger.debug(3, "run_targets:")
        for target in self.run_targets:
            targets = "None"
            if target in self.run_targets:
                targets = self.run_targets[target]
-           logger.debug3(" %s: %s", target, targets)
+           logger.debug(3, " %s: %s", target, targets)

-       logger.debug3("tasks:")
+       logger.debug(3, "tasks:")
        for tid in self.taskentries:
-           logger.debug3(" %s: %s %s %s",
+           logger.debug(3, " %s: %s %s %s",
                       tid,
                       self.taskentries[tid].idepends,
                       self.taskentries[tid].irdepends,
                       self.taskentries[tid].tdepends)

-       logger.debug3("dependency ids (per fn):")
+       logger.debug(3, "dependency ids (per fn):")
        for fn in self.depids:
-           logger.debug3(" %s: %s", fn, self.depids[fn])
+           logger.debug(3, " %s: %s", fn, self.depids[fn])

-       logger.debug3("runtime dependency ids (per fn):")
+       logger.debug(3, "runtime dependency ids (per fn):")
        for fn in self.rdepids:
-           logger.debug3(" %s: %s", fn, self.rdepids[fn])
+           logger.debug(3, " %s: %s", fn, self.rdepids[fn])
@@ -111,9 +111,9 @@ ${D}${libdir}/pkgconfig/*.pc
        self.assertExecs(set(["sed"]))

    def test_parameter_expansion_modifiers(self):
-       # -,+ and : are also valid modifiers for parameter expansion, but are
+       # - and + are also valid modifiers for parameter expansion, but are
        # valid characters in bitbake variable names, so are not included here
-       for i in ('=', '?', '#', '%', '##', '%%'):
+       for i in ('=', ':-', ':=', '?', ':?', ':+', '#', '%', '##', '%%'):
            name = "foo%sbar" % i
            self.parseExpression("${%s}" % name)
            self.assertNotIn(name, self.references)
@@ -31,7 +31,7 @@ class ColorCodeTests(unittest.TestCase):
    def setUp(self):
        self.d = bb.data.init()
        self._progress_watcher = ProgressWatcher()
-       bb.event.register("bb.build.TaskProgress", self._progress_watcher.handle_event, data=self.d)
+       bb.event.register("bb.build.TaskProgress", self._progress_watcher.handle_event)

    def tearDown(self):
        bb.event.remove("bb.build.TaskProgress", None)
@@ -1,98 +0,0 @@
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-from pathlib import Path
-import bb.compress.lz4
-import bb.compress.zstd
-import contextlib
-import os
-import shutil
-import tempfile
-import unittest
-import subprocess
-
-
-class CompressionTests(object):
-    def setUp(self):
-        self._t = tempfile.TemporaryDirectory()
-        self.tmpdir = Path(self._t.name)
-        self.addCleanup(self._t.cleanup)
-
-    def _file_helper(self, mode_suffix, data):
-        tmp_file = self.tmpdir / "compressed"
-
-        with self.do_open(tmp_file, mode="w" + mode_suffix) as f:
-            f.write(data)
-
-        with self.do_open(tmp_file, mode="r" + mode_suffix) as f:
-            read_data = f.read()
-
-        self.assertEqual(read_data, data)
-
-    def test_text_file(self):
-        self._file_helper("t", "Hello")
-
-    def test_binary_file(self):
-        self._file_helper("b", "Hello".encode("utf-8"))
-
-    def _pipe_helper(self, mode_suffix, data):
-        rfd, wfd = os.pipe()
-        with open(rfd, "rb") as r, open(wfd, "wb") as w:
-            with self.do_open(r, mode="r" + mode_suffix) as decompress:
-                with self.do_open(w, mode="w" + mode_suffix) as compress:
-                    compress.write(data)
-                read_data = decompress.read()
-
-        self.assertEqual(read_data, data)
-
-    def test_text_pipe(self):
-        self._pipe_helper("t", "Hello")
-
-    def test_binary_pipe(self):
-        self._pipe_helper("b", "Hello".encode("utf-8"))
-
-    def test_bad_decompress(self):
-        tmp_file = self.tmpdir / "compressed"
-        with tmp_file.open("wb") as f:
-            f.write(b"\x00")
-
-        with self.assertRaises(OSError):
-            with self.do_open(tmp_file, mode="rb", stderr=subprocess.DEVNULL) as f:
-                data = f.read()
-
-
-class LZ4Tests(CompressionTests, unittest.TestCase):
-    def setUp(self):
-        if shutil.which("lz4c") is None:
-            self.skipTest("'lz4c' not found")
-        super().setUp()
-
-    @contextlib.contextmanager
-    def do_open(self, *args, **kwargs):
-        with bb.compress.lz4.open(*args, **kwargs) as f:
-            yield f
-
-
-class ZStdTests(CompressionTests, unittest.TestCase):
-    def setUp(self):
-        if shutil.which("zstd") is None:
-            self.skipTest("'zstd' not found")
-        super().setUp()
-
-    @contextlib.contextmanager
-    def do_open(self, *args, **kwargs):
-        with bb.compress.zstd.open(*args, **kwargs) as f:
-            yield f
-
-
-class PZStdTests(CompressionTests, unittest.TestCase):
-    def setUp(self):
-        if shutil.which("pzstd") is None:
-            self.skipTest("'pzstd' not found")
-        super().setUp()
-
-    @contextlib.contextmanager
-    def do_open(self, *args, **kwargs):
-        with bb.compress.zstd.open(*args, num_threads=2, **kwargs) as f:
-            yield f
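The removed test module above exercises `bb.compress.lz4` and `bb.compress.zstd`, file-like wrappers around the external `lz4c`/`zstd` tools that only exist on the newer branch. A minimal usage sketch, assuming bitbake/lib is importable and the `zstd` binary is installed:

    import bb.compress.zstd   # requires bitbake/lib on sys.path and the zstd tool

    data = "hello from a compressed log\n"

    # Text mode ("wt"/"rt") round-trips strings; binary mode ("wb"/"rb") bytes,
    # mirroring what the tests above do via their mode_suffix argument.
    with bb.compress.zstd.open("demo.log.zst", "wt") as f:
        f.write(data)

    with bb.compress.zstd.open("demo.log.zst", "rt") as f:
        assert f.read() == data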
@@ -245,35 +245,35 @@ class TestConcatOverride(unittest.TestCase):

    def test_prepend(self):
        self.d.setVar("TEST", "${VAL}")
-       self.d.setVar("TEST:prepend", "${FOO}:")
+       self.d.setVar("TEST_prepend", "${FOO}:")
        self.assertEqual(self.d.getVar("TEST"), "foo:val")

    def test_append(self):
        self.d.setVar("TEST", "${VAL}")
-       self.d.setVar("TEST:append", ":${BAR}")
+       self.d.setVar("TEST_append", ":${BAR}")
        self.assertEqual(self.d.getVar("TEST"), "val:bar")

    def test_multiple_append(self):
        self.d.setVar("TEST", "${VAL}")
-       self.d.setVar("TEST:prepend", "${FOO}:")
-       self.d.setVar("TEST:append", ":val2")
-       self.d.setVar("TEST:append", ":${BAR}")
+       self.d.setVar("TEST_prepend", "${FOO}:")
+       self.d.setVar("TEST_append", ":val2")
+       self.d.setVar("TEST_append", ":${BAR}")
        self.assertEqual(self.d.getVar("TEST"), "foo:val:val2:bar")

    def test_append_unset(self):
-       self.d.setVar("TEST:prepend", "${FOO}:")
-       self.d.setVar("TEST:append", ":val2")
-       self.d.setVar("TEST:append", ":${BAR}")
+       self.d.setVar("TEST_prepend", "${FOO}:")
+       self.d.setVar("TEST_append", ":val2")
+       self.d.setVar("TEST_append", ":${BAR}")
        self.assertEqual(self.d.getVar("TEST"), "foo::val2:bar")

    def test_remove(self):
        self.d.setVar("TEST", "${VAL} ${BAR}")
-       self.d.setVar("TEST:remove", "val")
+       self.d.setVar("TEST_remove", "val")
        self.assertEqual(self.d.getVar("TEST"), " bar")

    def test_remove_cleared(self):
        self.d.setVar("TEST", "${VAL} ${BAR}")
-       self.d.setVar("TEST:remove", "val")
+       self.d.setVar("TEST_remove", "val")
        self.d.setVar("TEST", "${VAL} ${BAR}")
        self.assertEqual(self.d.getVar("TEST"), "val bar")
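The only change in these datastore test hunks is the override separator: the yocto-3.4-era code uses `TEST:append`, while the 3.2 branch still spells it `TEST_append`. A small standalone sketch of the newer colon syntax through the datastore API (assumes bitbake/lib is importable; on a 3.2 datastore the same calls would need the underscore spelling):

    import bb.data

    d = bb.data.init()
    d.setVar("VAL", "val")
    d.setVar("FOO", "foo")
    d.setVar("BAR", "bar")

    d.setVar("TEST", "${VAL}")
    d.setVar("TEST:prepend", "${FOO}:")   # newer syntax; "TEST_prepend" on yocto-3.2
    d.setVar("TEST:append", ":${BAR}")    # newer syntax; "TEST_append" on yocto-3.2

    print(d.getVar("TEST"))               # -> "foo:val:bar"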
@@ -281,42 +281,42 @@ class TestConcatOverride(unittest.TestCase):
    # (including that whitespace is preserved)
    def test_remove_inactive_override(self):
        self.d.setVar("TEST", "${VAL} ${BAR} 123")
-       self.d.setVar("TEST:remove:inactiveoverride", "val")
+       self.d.setVar("TEST_remove_inactiveoverride", "val")
        self.assertEqual(self.d.getVar("TEST"), "val bar 123")

    def test_doubleref_remove(self):
        self.d.setVar("TEST", "${VAL} ${BAR}")
-       self.d.setVar("TEST:remove", "val")
+       self.d.setVar("TEST_remove", "val")
        self.d.setVar("TEST_TEST", "${TEST} ${TEST}")
        self.assertEqual(self.d.getVar("TEST_TEST"), " bar bar")

    def test_empty_remove(self):
        self.d.setVar("TEST", "")
-       self.d.setVar("TEST:remove", "val")
+       self.d.setVar("TEST_remove", "val")
        self.assertEqual(self.d.getVar("TEST"), "")

    def test_remove_expansion(self):
        self.d.setVar("BAR", "Z")
        self.d.setVar("TEST", "${BAR}/X Y")
-       self.d.setVar("TEST:remove", "${BAR}/X")
+       self.d.setVar("TEST_remove", "${BAR}/X")
        self.assertEqual(self.d.getVar("TEST"), " Y")

    def test_remove_expansion_items(self):
        self.d.setVar("TEST", "A B C D")
        self.d.setVar("BAR", "B D")
-       self.d.setVar("TEST:remove", "${BAR}")
+       self.d.setVar("TEST_remove", "${BAR}")
        self.assertEqual(self.d.getVar("TEST"), "A C ")

    def test_remove_preserve_whitespace(self):
        # When the removal isn't active, the original value should be preserved
        self.d.setVar("TEST", " A B")
-       self.d.setVar("TEST:remove", "C")
+       self.d.setVar("TEST_remove", "C")
        self.assertEqual(self.d.getVar("TEST"), " A B")

    def test_remove_preserve_whitespace2(self):
        # When the removal is active preserve the whitespace
        self.d.setVar("TEST", " A B")
-       self.d.setVar("TEST:remove", "B")
+       self.d.setVar("TEST_remove", "B")
        self.assertEqual(self.d.getVar("TEST"), " A ")

class TestOverrides(unittest.TestCase):
@@ -329,70 +329,70 @@ class TestOverrides(unittest.TestCase):
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue")
|
||||
|
||||
def test_one_override(self):
|
||||
self.d.setVar("TEST:bar", "testvalue2")
|
||||
self.d.setVar("TEST_bar", "testvalue2")
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue2")
|
||||
|
||||
def test_one_override_unset(self):
|
||||
self.d.setVar("TEST2:bar", "testvalue2")
|
||||
self.d.setVar("TEST2_bar", "testvalue2")
|
||||
|
||||
self.assertEqual(self.d.getVar("TEST2"), "testvalue2")
|
||||
self.assertCountEqual(list(self.d.keys()), ['TEST', 'TEST2', 'OVERRIDES', 'TEST2:bar'])
|
||||
self.assertCountEqual(list(self.d.keys()), ['TEST', 'TEST2', 'OVERRIDES', 'TEST2_bar'])
|
||||
|
||||
def test_multiple_override(self):
|
||||
self.d.setVar("TEST:bar", "testvalue2")
|
||||
self.d.setVar("TEST:local", "testvalue3")
|
||||
self.d.setVar("TEST:foo", "testvalue4")
|
||||
self.d.setVar("TEST_bar", "testvalue2")
|
||||
self.d.setVar("TEST_local", "testvalue3")
|
||||
self.d.setVar("TEST_foo", "testvalue4")
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue3")
|
||||
self.assertCountEqual(list(self.d.keys()), ['TEST', 'TEST:foo', 'OVERRIDES', 'TEST:bar', 'TEST:local'])
|
||||
self.assertCountEqual(list(self.d.keys()), ['TEST', 'TEST_foo', 'OVERRIDES', 'TEST_bar', 'TEST_local'])
|
||||
|
||||
def test_multiple_combined_overrides(self):
|
||||
self.d.setVar("TEST:local:foo:bar", "testvalue3")
|
||||
self.d.setVar("TEST_local_foo_bar", "testvalue3")
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue3")
|
||||
|
||||
def test_multiple_overrides_unset(self):
|
||||
self.d.setVar("TEST2:local:foo:bar", "testvalue3")
|
||||
self.d.setVar("TEST2_local_foo_bar", "testvalue3")
|
||||
self.assertEqual(self.d.getVar("TEST2"), "testvalue3")
|
||||
|
||||
def test_keyexpansion_override(self):
|
||||
self.d.setVar("LOCAL", "local")
|
||||
self.d.setVar("TEST:bar", "testvalue2")
|
||||
self.d.setVar("TEST:${LOCAL}", "testvalue3")
|
||||
self.d.setVar("TEST:foo", "testvalue4")
|
||||
self.d.setVar("TEST_bar", "testvalue2")
|
||||
self.d.setVar("TEST_${LOCAL}", "testvalue3")
|
||||
self.d.setVar("TEST_foo", "testvalue4")
|
||||
bb.data.expandKeys(self.d)
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue3")
|
||||
|
||||
def test_rename_override(self):
|
||||
self.d.setVar("ALTERNATIVE:ncurses-tools:class-target", "a")
|
||||
self.d.setVar("ALTERNATIVE_ncurses-tools_class-target", "a")
|
||||
self.d.setVar("OVERRIDES", "class-target")
|
||||
self.d.renameVar("ALTERNATIVE:ncurses-tools", "ALTERNATIVE:lib32-ncurses-tools")
|
||||
self.assertEqual(self.d.getVar("ALTERNATIVE:lib32-ncurses-tools"), "a")
|
||||
self.d.renameVar("ALTERNATIVE_ncurses-tools", "ALTERNATIVE_lib32-ncurses-tools")
|
||||
self.assertEqual(self.d.getVar("ALTERNATIVE_lib32-ncurses-tools"), "a")
|
||||
|
||||
def test_underscore_override(self):
|
||||
self.d.setVar("TEST:bar", "testvalue2")
|
||||
self.d.setVar("TEST:some_val", "testvalue3")
|
||||
self.d.setVar("TEST:foo", "testvalue4")
|
||||
self.d.setVar("TEST_bar", "testvalue2")
|
||||
self.d.setVar("TEST_some_val", "testvalue3")
|
||||
self.d.setVar("TEST_foo", "testvalue4")
|
||||
self.d.setVar("OVERRIDES", "foo:bar:some_val")
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue3")
|
||||
|
||||
def test_remove_with_override(self):
|
||||
self.d.setVar("TEST:bar", "testvalue2")
|
||||
self.d.setVar("TEST:some_val", "testvalue3 testvalue5")
|
||||
self.d.setVar("TEST:some_val:remove", "testvalue3")
|
||||
self.d.setVar("TEST:foo", "testvalue4")
|
||||
self.d.setVar("TEST_bar", "testvalue2")
|
||||
self.d.setVar("TEST_some_val", "testvalue3 testvalue5")
|
||||
self.d.setVar("TEST_some_val_remove", "testvalue3")
|
||||
self.d.setVar("TEST_foo", "testvalue4")
|
||||
self.d.setVar("OVERRIDES", "foo:bar:some_val")
|
||||
self.assertEqual(self.d.getVar("TEST"), " testvalue5")
|
||||
|
||||
def test_append_and_override_1(self):
|
||||
self.d.setVar("TEST:append", "testvalue2")
|
||||
self.d.setVar("TEST:bar", "testvalue3")
|
||||
self.d.setVar("TEST_append", "testvalue2")
|
||||
self.d.setVar("TEST_bar", "testvalue3")
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue3testvalue2")
|
||||
|
||||
def test_append_and_override_2(self):
|
||||
self.d.setVar("TEST:append:bar", "testvalue2")
|
||||
self.d.setVar("TEST_append_bar", "testvalue2")
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvaluetestvalue2")
|
||||
|
||||
def test_append_and_override_3(self):
|
||||
self.d.setVar("TEST:bar:append", "testvalue2")
|
||||
self.d.setVar("TEST_bar_append", "testvalue2")
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue2")
|
||||
|
||||
# Test an override with _<numeric> in it based on a real world OE issue
|
||||
@@ -400,16 +400,11 @@ class TestOverrides(unittest.TestCase):
        self.d.setVar("TARGET_ARCH", "x86_64")
        self.d.setVar("PN", "test-${TARGET_ARCH}")
        self.d.setVar("VERSION", "1")
-       self.d.setVar("VERSION:pn-test-${TARGET_ARCH}", "2")
+       self.d.setVar("VERSION_pn-test-${TARGET_ARCH}", "2")
        self.d.setVar("OVERRIDES", "pn-${PN}")
        bb.data.expandKeys(self.d)
        self.assertEqual(self.d.getVar("VERSION"), "2")

-   def test_append_and_unused_override(self):
-       # Had a bug where an unused override append could return "" instead of None
-       self.d.setVar("BAR:append:unusedoverride", "testvalue2")
-       self.assertEqual(self.d.getVar("BAR"), None)
-
 class TestKeyExpansion(unittest.TestCase):
    def setUp(self):
        self.d = bb.data.init()
@@ -503,7 +498,7 @@ class TaskHash(unittest.TestCase):
        d.setVar("VAR", "val")
        # Adding an inactive removal shouldn't change the hash
        d.setVar("BAR", "notbar")
-       d.setVar("MYCOMMAND:remove", "${BAR}")
+       d.setVar("MYCOMMAND_remove", "${BAR}")
        nexthash = gettask_bashhash("mytask", d)
        self.assertEqual(orighash, nexthash)
@@ -87,25 +87,6 @@ class URITest(unittest.TestCase):
|
||||
},
|
||||
'relative': False
|
||||
},
|
||||
# Check that trailing semicolons are handled correctly
|
||||
"http://www.example.org/index.html?qparam1=qvalue1;param2=value2;" : {
|
||||
'uri': 'http://www.example.org/index.html?qparam1=qvalue1;param2=value2',
|
||||
'scheme': 'http',
|
||||
'hostname': 'www.example.org',
|
||||
'port': None,
|
||||
'hostport': 'www.example.org',
|
||||
'path': '/index.html',
|
||||
'userinfo': '',
|
||||
'username': '',
|
||||
'password': '',
|
||||
'params': {
|
||||
'param2': 'value2'
|
||||
},
|
||||
'query': {
|
||||
'qparam1': 'qvalue1'
|
||||
},
|
||||
'relative': False
|
||||
},
|
||||
"http://www.example.com:8080/index.html" : {
|
||||
'uri': 'http://www.example.com:8080/index.html',
|
||||
'scheme': 'http',
|
||||
@@ -376,7 +357,7 @@ class FetcherTest(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.origdir = os.getcwd()
|
||||
self.d = bb.data.init()
|
||||
self.tempdir = tempfile.mkdtemp(prefix="bitbake-fetch-")
|
||||
self.tempdir = tempfile.mkdtemp()
|
||||
self.dldir = os.path.join(self.tempdir, "download")
|
||||
os.mkdir(self.dldir)
|
||||
self.d.setVar("DL_DIR", self.dldir)
|
||||
@@ -390,7 +371,6 @@ class FetcherTest(unittest.TestCase):
|
||||
if os.environ.get("BB_TMPDIR_NOCLEAN") == "yes":
|
||||
print("Not cleaning up %s. Please remove manually." % self.tempdir)
|
||||
else:
|
||||
bb.process.run('chmod u+rw -R %s' % self.tempdir)
|
||||
bb.utils.prunedir(self.tempdir)
|
||||
|
||||
class MirrorUriTest(FetcherTest):
|
||||
@@ -431,11 +411,6 @@ class MirrorUriTest(FetcherTest):
|
||||
("git://someserver.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master", "git://someserver.org/bitbake;branch=master", "git://git.openembedded.org/bitbake;protocol=http")
|
||||
: "git://git.openembedded.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
|
||||
|
||||
("git://user1@someserver.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master", "git://someserver.org/bitbake;branch=master", "git://user2@git.openembedded.org/bitbake;protocol=http")
|
||||
: "git://user2@git.openembedded.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
|
||||
|
||||
("gitsm://git.qemu.org/git/seabios.git/;protocol=https;name=roms/seabios;subpath=roms/seabios;bareclone=1;nobranch=1;rev=1234567890123456789012345678901234567890", "gitsm://.*/.*", "http://petalinux.xilinx.com/sswreleases/rel-v${XILINX_VER_MAIN}/downloads") : "http://petalinux.xilinx.com/sswreleases/rel-v%24%7BXILINX_VER_MAIN%7D/downloads/git2_git.qemu.org.git.seabios.git..tar.gz",
|
||||
|
||||
#Renaming files doesn't work
|
||||
#("http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere2.org/somedir3/somefile_2.3.4.tar.gz") : "http://somewhere2.org/somedir3/somefile_2.3.4.tar.gz"
|
||||
#("file://sstate-xyz.tgz", "file://.*/.*", "file:///somewhere/1234/sstate-cache") : "file:///somewhere/1234/sstate-cache/sstate-xyz.tgz",
|
||||
@@ -496,7 +471,7 @@ class GitDownloadDirectoryNamingTest(FetcherTest):
|
||||
super(GitDownloadDirectoryNamingTest, self).setUp()
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake"
|
||||
self.recipe_dir = "git.openembedded.org.bitbake"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git;protocol=https"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git"
|
||||
self.mirror_dir = "github.com.openembedded.bitbake.git"
|
||||
|
||||
self.d.setVar('SRCREV', '82ea737a0b42a8b53e11c9cde141e9e9c0bd8c40')
|
||||
@@ -544,7 +519,7 @@ class TarballNamingTest(FetcherTest):
|
||||
super(TarballNamingTest, self).setUp()
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake"
|
||||
self.recipe_tarball = "git2_git.openembedded.org.bitbake.tar.gz"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git;protocol=https"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git"
|
||||
self.mirror_tarball = "git2_github.com.openembedded.bitbake.git.tar.gz"
|
||||
|
||||
self.d.setVar('BB_GENERATE_MIRROR_TARBALLS', '1')
|
||||
@@ -578,7 +553,7 @@ class GitShallowTarballNamingTest(FetcherTest):
|
||||
super(GitShallowTarballNamingTest, self).setUp()
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake"
|
||||
self.recipe_tarball = "gitshallow_git.openembedded.org.bitbake_82ea737-1_master.tar.gz"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git;protocol=https"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git"
|
||||
self.mirror_tarball = "gitshallow_github.com.openembedded.bitbake.git_82ea737-1_master.tar.gz"
|
||||
|
||||
self.d.setVar('BB_GIT_SHALLOW', '1')
|
||||
@@ -679,52 +654,6 @@ class FetcherLocalTest(FetcherTest):
|
||||
with self.assertRaises(bb.fetch2.UnpackError):
|
||||
self.fetchUnpack(['file://a;subdir=/bin/sh'])
|
||||
|
||||
def dummyGitTest(self, suffix):
|
||||
# Create dummy local Git repo
|
||||
src_dir = tempfile.mkdtemp(dir=self.tempdir,
|
||||
prefix='gitfetch_localusehead_')
|
||||
src_dir = os.path.abspath(src_dir)
|
||||
bb.process.run("git init", cwd=src_dir)
|
||||
bb.process.run("git config user.email 'you@example.com'", cwd=src_dir)
|
||||
bb.process.run("git config user.name 'Your Name'", cwd=src_dir)
|
||||
bb.process.run("git commit --allow-empty -m'Dummy commit'",
|
||||
cwd=src_dir)
|
||||
# Use other branch than master
|
||||
bb.process.run("git checkout -b my-devel", cwd=src_dir)
|
||||
bb.process.run("git commit --allow-empty -m'Dummy commit 2'",
|
||||
cwd=src_dir)
|
||||
stdout = bb.process.run("git rev-parse HEAD", cwd=src_dir)
|
||||
orig_rev = stdout[0].strip()
|
||||
|
||||
# Fetch and check revision
|
||||
self.d.setVar("SRCREV", "AUTOINC")
|
||||
url = "git://" + src_dir + ";protocol=file;" + suffix
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
fetcher.unpack(self.unpackdir)
|
||||
stdout = bb.process.run("git rev-parse HEAD",
|
||||
cwd=os.path.join(self.unpackdir, 'git'))
|
||||
unpack_rev = stdout[0].strip()
|
||||
self.assertEqual(orig_rev, unpack_rev)
|
||||
|
||||
def test_local_gitfetch_usehead(self):
|
||||
self.dummyGitTest("usehead=1")
|
||||
|
||||
def test_local_gitfetch_usehead_withname(self):
|
||||
self.dummyGitTest("usehead=1;name=newName")
|
||||
|
||||
def test_local_gitfetch_shared(self):
|
||||
self.dummyGitTest("usehead=1;name=sharedName")
|
||||
alt = os.path.join(self.unpackdir, 'git/.git/objects/info/alternates')
|
||||
self.assertTrue(os.path.exists(alt))
|
||||
|
||||
def test_local_gitfetch_noshared(self):
|
||||
self.d.setVar('BB_GIT_NOSHARED', '1')
|
||||
self.unpackdir += '_noshared'
|
||||
self.dummyGitTest("usehead=1;name=noSharedName")
|
||||
alt = os.path.join(self.unpackdir, 'git/.git/objects/info/alternates')
|
||||
self.assertFalse(os.path.exists(alt))
|
||||
|
||||
class FetcherNoNetworkTest(FetcherTest):
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
@@ -831,12 +760,12 @@ class FetcherNoNetworkTest(FetcherTest):
|
||||
class FetcherNetworkTest(FetcherTest):
|
||||
@skipIfNoNetwork()
|
||||
def test_fetch(self):
|
||||
fetcher = bb.fetch.Fetch(["https://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "https://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d)
|
||||
fetcher = bb.fetch.Fetch(["http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d)
|
||||
fetcher.download()
|
||||
self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
|
||||
self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.1.tar.gz"), 57892)
|
||||
self.d.setVar("BB_NO_NETWORK", "1")
|
||||
fetcher = bb.fetch.Fetch(["https://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "https://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d)
|
||||
fetcher = bb.fetch.Fetch(["http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d)
|
||||
fetcher.download()
|
||||
fetcher.unpack(self.unpackdir)
|
||||
self.assertEqual(len(os.listdir(self.unpackdir + "/bitbake-1.0/")), 9)
|
||||
@@ -844,21 +773,21 @@ class FetcherNetworkTest(FetcherTest):
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_fetch_mirror(self):
|
||||
self.d.setVar("MIRRORS", "http://.*/.* https://downloads.yoctoproject.org/releases/bitbake")
|
||||
self.d.setVar("MIRRORS", "http://.*/.* http://downloads.yoctoproject.org/releases/bitbake")
|
||||
fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
|
||||
fetcher.download()
|
||||
self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_fetch_mirror_of_mirror(self):
|
||||
self.d.setVar("MIRRORS", "http://.*/.* http://invalid2.yoctoproject.org/ \n http://invalid2.yoctoproject.org/.* https://downloads.yoctoproject.org/releases/bitbake")
|
||||
self.d.setVar("MIRRORS", "http://.*/.* http://invalid2.yoctoproject.org/ \n http://invalid2.yoctoproject.org/.* http://downloads.yoctoproject.org/releases/bitbake")
|
||||
fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
|
||||
fetcher.download()
|
||||
self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_fetch_file_mirror_of_mirror(self):
|
||||
self.d.setVar("MIRRORS", "http://.*/.* file:///some1where/ \n file:///some1where/.* file://some2where/ \n file://some2where/.* https://downloads.yoctoproject.org/releases/bitbake")
|
||||
self.d.setVar("MIRRORS", "http://.*/.* file:///some1where/ \n file:///some1where/.* file://some2where/ \n file://some2where/.* http://downloads.yoctoproject.org/releases/bitbake")
|
||||
fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
|
||||
os.mkdir(self.dldir + "/some2where")
|
||||
fetcher.download()
|
||||
@@ -866,40 +795,11 @@ class FetcherNetworkTest(FetcherTest):
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_fetch_premirror(self):
|
||||
self.d.setVar("PREMIRRORS", "http://.*/.* https://downloads.yoctoproject.org/releases/bitbake")
|
||||
self.d.setVar("PREMIRRORS", "http://.*/.* http://downloads.yoctoproject.org/releases/bitbake")
|
||||
fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
|
||||
fetcher.download()
|
||||
self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_fetch_specify_downloadfilename(self):
|
||||
fetcher = bb.fetch.Fetch(["https://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz;downloadfilename=bitbake-v1.0.0.tar.gz"], self.d)
|
||||
fetcher.download()
|
||||
self.assertEqual(os.path.getsize(self.dldir + "/bitbake-v1.0.0.tar.gz"), 57749)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_fetch_premirror_specify_downloadfilename_regex_uri(self):
|
||||
self.d.setVar("PREMIRRORS", "http://.*/.* https://downloads.yoctoproject.org/releases/bitbake/")
|
||||
fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/1.0.tar.gz;downloadfilename=bitbake-1.0.tar.gz"], self.d)
|
||||
fetcher.download()
|
||||
self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
# BZ13039
|
||||
def test_fetch_premirror_specify_downloadfilename_specific_uri(self):
|
||||
self.d.setVar("PREMIRRORS", "http://invalid.yoctoproject.org/releases/bitbake https://downloads.yoctoproject.org/releases/bitbake")
|
||||
fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/1.0.tar.gz;downloadfilename=bitbake-1.0.tar.gz"], self.d)
|
||||
fetcher.download()
|
||||
self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_fetch_premirror_use_downloadfilename_to_fetch(self):
|
||||
# Ensure downloadfilename is used when fetching from premirror.
|
||||
self.d.setVar("PREMIRRORS", "http://.*/.* https://downloads.yoctoproject.org/releases/bitbake")
|
||||
fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz;downloadfilename=bitbake-1.0.tar.gz"], self.d)
|
||||
fetcher.download()
|
||||
self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def gitfetcher(self, url1, url2):
|
||||
def checkrevision(self, fetcher):
|
||||
@@ -944,21 +844,35 @@ class FetcherNetworkTest(FetcherTest):
|
||||
self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_usehead(self):
|
||||
# Since self.gitfetcher() sets SRCREV we expect this to override
|
||||
# `usehead=1' and instead fetch the specified SRCREV. See
|
||||
# test_local_gitfetch_usehead() for a positive use of the usehead
|
||||
# feature.
|
||||
url = "git://git.openembedded.org/bitbake;usehead=1"
|
||||
self.assertRaises(bb.fetch.ParameterError, self.gitfetcher, url, url)
|
||||
def test_gitfetch_localusehead(self):
|
||||
# Create dummy local Git repo
|
||||
src_dir = tempfile.mkdtemp(dir=self.tempdir,
|
||||
prefix='gitfetch_localusehead_')
|
||||
src_dir = os.path.abspath(src_dir)
|
||||
bb.process.run("git init", cwd=src_dir)
|
||||
bb.process.run("git commit --allow-empty -m'Dummy commit'",
|
||||
cwd=src_dir)
|
||||
# Use other branch than master
|
||||
bb.process.run("git checkout -b my-devel", cwd=src_dir)
|
||||
bb.process.run("git commit --allow-empty -m'Dummy commit 2'",
|
||||
cwd=src_dir)
|
||||
stdout = bb.process.run("git rev-parse HEAD", cwd=src_dir)
|
||||
orig_rev = stdout[0].strip()
|
||||
|
||||
# Fetch and check revision
|
||||
self.d.setVar("SRCREV", "AUTOINC")
|
||||
url = "git://" + src_dir + ";protocol=file;usehead=1"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
fetcher.unpack(self.unpackdir)
|
||||
stdout = bb.process.run("git rev-parse HEAD",
|
||||
cwd=os.path.join(self.unpackdir, 'git'))
|
||||
unpack_rev = stdout[0].strip()
|
||||
self.assertEqual(orig_rev, unpack_rev)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_usehead_withname(self):
|
||||
# Since self.gitfetcher() sets SRCREV we expect this to override
|
||||
# `usehead=1' and instead fetch the specified SRCREV. See
|
||||
# test_local_gitfetch_usehead() for a positive use of the usehead
|
||||
# feature.
|
||||
url = "git://git.openembedded.org/bitbake;usehead=1;name=newName"
|
||||
def test_gitfetch_remoteusehead(self):
|
||||
url = "git://git.openembedded.org/bitbake;usehead=1"
|
||||
self.assertRaises(bb.fetch.ParameterError, self.gitfetcher, url, url)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
@@ -1009,7 +923,7 @@ class FetcherNetworkTest(FetcherTest):
    def test_git_submodule_dbus_broker(self):
        # The following external repositories have show failures in fetch and unpack operations
        # We want to avoid regressions!
-       url = "gitsm://github.com/bus1/dbus-broker;protocol=https;rev=fc874afa0992d0c75ec25acb43d344679f0ee7d2;branch=main"
+       url = "gitsm://github.com/bus1/dbus-broker;protocol=git;rev=fc874afa0992d0c75ec25acb43d344679f0ee7d2;branch=main"
        fetcher = bb.fetch.Fetch([url], self.d)
        fetcher.download()
        # Previous cwd has been deleted
@@ -1025,7 +939,7 @@ class FetcherNetworkTest(FetcherTest):

    @skipIfNoNetwork()
    def test_git_submodule_CLI11(self):
-       url = "gitsm://github.com/CLIUtils/CLI11;protocol=https;rev=bd4dc911847d0cde7a6b41dfa626a85aab213baf;branch=main"
+       url = "gitsm://github.com/CLIUtils/CLI11;protocol=git;rev=bd4dc911847d0cde7a6b41dfa626a85aab213baf"
        fetcher = bb.fetch.Fetch([url], self.d)
        fetcher.download()
        # Previous cwd has been deleted
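In the fetcher test hunks around here the newer branch consistently spells GitHub URLs with `protocol=https` and an explicit `branch=`, because GitHub no longer serves the anonymous `git://` protocol that the 3.2-era tests relied on. A hedged sketch of driving the fetcher directly with such a URL (the datastore variables and paths are illustrative, and a real BitBake environment may require more configuration):

    import bb.data
    import bb.fetch2

    d = bb.data.init()
    d.setVar("DL_DIR", "/tmp/bitbake-downloads")   # hypothetical download cache

    # rev/branch values are taken from the test above; protocol=https is what
    # github.com accepts now that unauthenticated git:// access is retired.
    url = ("gitsm://github.com/CLIUtils/CLI11"
           ";protocol=https;branch=main"
           ";rev=bd4dc911847d0cde7a6b41dfa626a85aab213baf")

    fetcher = bb.fetch2.Fetch([url], d)
    fetcher.download()
    fetcher.unpack("/tmp/bitbake-unpack")          # hypothetical work directory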
@@ -1040,12 +954,12 @@ class FetcherNetworkTest(FetcherTest):
|
||||
@skipIfNoNetwork()
|
||||
def test_git_submodule_update_CLI11(self):
|
||||
""" Prevent regression on update detection not finding missing submodule, or modules without needed commits """
|
||||
url = "gitsm://github.com/CLIUtils/CLI11;protocol=https;rev=cf6a99fa69aaefe477cc52e3ef4a7d2d7fa40714;branch=main"
|
||||
url = "gitsm://github.com/CLIUtils/CLI11;protocol=git;rev=cf6a99fa69aaefe477cc52e3ef4a7d2d7fa40714"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
|
||||
# CLI11 that pulls in a newer nlohmann-json
|
||||
url = "gitsm://github.com/CLIUtils/CLI11;protocol=https;rev=49ac989a9527ee9bb496de9ded7b4872c2e0e5ca;branch=main"
|
||||
url = "gitsm://github.com/CLIUtils/CLI11;protocol=git;rev=49ac989a9527ee9bb496de9ded7b4872c2e0e5ca"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# Previous cwd has been deleted
|
||||
@@ -1059,7 +973,7 @@ class FetcherNetworkTest(FetcherTest):
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_git_submodule_aktualizr(self):
|
||||
url = "gitsm://github.com/advancedtelematic/aktualizr;branch=master;protocol=https;rev=d00d1a04cc2366d1a5f143b84b9f507f8bd32c44"
|
||||
url = "gitsm://github.com/advancedtelematic/aktualizr;branch=master;protocol=git;rev=d00d1a04cc2366d1a5f143b84b9f507f8bd32c44"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# Previous cwd has been deleted
|
||||
@@ -1079,7 +993,7 @@ class FetcherNetworkTest(FetcherTest):
|
||||
""" Prevent regression on deeply nested submodules not being checked out properly, even though they were fetched. """
|
||||
|
||||
# This repository also has submodules where the module (name), path and url do not align
|
||||
url = "gitsm://github.com/azure/iotedge.git;protocol=https;rev=d76e0316c6f324345d77c48a83ce836d09392699"
|
||||
url = "gitsm://github.com/azure/iotedge.git;protocol=git;rev=d76e0316c6f324345d77c48a83ce836d09392699"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# Previous cwd has been deleted
|
||||
@@ -1137,7 +1051,7 @@ class SVNTest(FetcherTest):
|
||||
|
||||
bb.process.run("svn co %s svnfetch_co" % self.repo_url, cwd=self.tempdir)
|
||||
# Github will emulate SVN. Use this to check if we're downloding...
|
||||
bb.process.run("svn propset svn:externals 'bitbake https://github.com/PhilipHazel/pcre2.git' .",
|
||||
bb.process.run("svn propset svn:externals 'bitbake svn://vcs.pcre.org/pcre2/code' .",
|
||||
cwd=os.path.join(self.tempdir, 'svnfetch_co', 'trunk'))
|
||||
bb.process.run("svn commit --non-interactive -m 'Add external'",
|
||||
cwd=os.path.join(self.tempdir, 'svnfetch_co', 'trunk'))
|
||||
@@ -1255,7 +1169,7 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
|
||||
test_git_uris = {
|
||||
# version pattern "X.Y.Z"
|
||||
("mx-1.0", "git://github.com/clutter-project/mx.git;branch=mx-1.4;protocol=https", "9b1db6b8060bd00b121a692f942404a24ae2960f", "")
|
||||
("mx-1.0", "git://github.com/clutter-project/mx.git;branch=mx-1.4", "9b1db6b8060bd00b121a692f942404a24ae2960f", "")
|
||||
: "1.99.4",
|
||||
# version pattern "vX.Y"
|
||||
# mirror of git.infradead.org since network issues interfered with testing
|
||||
@@ -1266,7 +1180,7 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
("presentproto", "git://git.yoctoproject.org/bbfetchtests-presentproto", "24f3a56e541b0a9e6c6ee76081f441221a120ef9", "")
|
||||
: "1.0",
|
||||
# version pattern "pkg_name-vX.Y.Z"
|
||||
("dtc", "git://git.yoctoproject.org/bbfetchtests-dtc.git", "65cc4d2748a2c2e6f27f1cf39e07a5dbabd80ebf", "")
|
||||
("dtc", "git://git.qemu.org/dtc.git", "65cc4d2748a2c2e6f27f1cf39e07a5dbabd80ebf", "")
|
||||
: "1.4.0",
|
||||
# combination version pattern
|
||||
("sysprof", "git://gitlab.gnome.org/GNOME/sysprof.git;protocol=https", "cd44ee6644c3641507fb53b8a2a69137f2971219", "")
|
||||
@@ -1278,13 +1192,13 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
: "20120614",
|
||||
# packages with a valid UPSTREAM_CHECK_GITTAGREGEX
|
||||
# mirror of git://anongit.freedesktop.org/xorg/driver/xf86-video-omap since network issues interfered with testing
|
||||
("xf86-video-omap", "git://git.yoctoproject.org/bbfetchtests-xf86-video-omap", "ae0394e687f1a77e966cf72f895da91840dffb8f", r"(?P<pver>(\d+\.(\d\.?)*))")
|
||||
("xf86-video-omap", "git://git.yoctoproject.org/bbfetchtests-xf86-video-omap", "ae0394e687f1a77e966cf72f895da91840dffb8f", "(?P<pver>(\d+\.(\d\.?)*))")
|
||||
: "0.4.3",
|
||||
("build-appliance-image", "git://git.yoctoproject.org/poky", "b37dd451a52622d5b570183a81583cc34c2ff555", r"(?P<pver>(([0-9][\.|_]?)+[0-9]))")
|
||||
("build-appliance-image", "git://git.yoctoproject.org/poky", "b37dd451a52622d5b570183a81583cc34c2ff555", "(?P<pver>(([0-9][\.|_]?)+[0-9]))")
|
||||
: "11.0.0",
|
||||
("chkconfig-alternatives-native", "git://github.com/kergoth/chkconfig;branch=sysroot;protocol=https", "cd437ecbd8986c894442f8fce1e0061e20f04dee", r"chkconfig\-(?P<pver>((\d+[\.\-_]*)+))")
|
||||
("chkconfig-alternatives-native", "git://github.com/kergoth/chkconfig;branch=sysroot", "cd437ecbd8986c894442f8fce1e0061e20f04dee", "chkconfig\-(?P<pver>((\d+[\.\-_]*)+))")
|
||||
: "1.3.59",
|
||||
("remake", "git://github.com/rocky/remake.git;protocol=https", "f05508e521987c8494c92d9c2871aec46307d51d", r"(?P<pver>(\d+\.(\d+\.)*\d*(\+dbg\d+(\.\d+)*)*))")
|
||||
("remake", "git://github.com/rocky/remake.git", "f05508e521987c8494c92d9c2871aec46307d51d", "(?P<pver>(\d+\.(\d+\.)*\d*(\+dbg\d+(\.\d+)*)*))")
|
||||
: "3.82+dbg0.9",
|
||||
}
|
||||
|
||||
@@ -1304,10 +1218,10 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
#
|
||||
# packages with versions only in current directory
|
||||
#
|
||||
# https://downloads.yoctoproject.org/releases/eglibc/eglibc-2.18-svnr23787.tar.bz2
|
||||
# http://downloads.yoctoproject.org/releases/eglibc/eglibc-2.18-svnr23787.tar.bz2
|
||||
("eglic", "/releases/eglibc/eglibc-2.18-svnr23787.tar.bz2", "", "")
|
||||
: "2.19",
|
||||
# https://downloads.yoctoproject.org/releases/gnu-config/gnu-config-20120814.tar.bz2
|
||||
# http://downloads.yoctoproject.org/releases/gnu-config/gnu-config-20120814.tar.bz2
|
||||
("gnu-config", "/releases/gnu-config/gnu-config-20120814.tar.bz2", "", "")
|
||||
: "20120814",
|
||||
#
|
||||
@@ -1324,11 +1238,11 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
#
|
||||
# http://www.cups.org/software/1.7.2/cups-1.7.2-source.tar.bz2
|
||||
# https://github.com/apple/cups/releases
|
||||
("cups", "/software/1.7.2/cups-1.7.2-source.tar.bz2", "/apple/cups/releases", r"(?P<name>cups\-)(?P<pver>((\d+[\.\-_]*)+))\-source\.tar\.gz")
|
||||
("cups", "/software/1.7.2/cups-1.7.2-source.tar.bz2", "/apple/cups/releases", "(?P<name>cups\-)(?P<pver>((\d+[\.\-_]*)+))\-source\.tar\.gz")
|
||||
: "2.0.0",
|
||||
# http://download.oracle.com/berkeley-db/db-5.3.21.tar.gz
|
||||
# http://ftp.debian.org/debian/pool/main/d/db5.3/
|
||||
("db", "/berkeley-db/db-5.3.21.tar.gz", "/debian/pool/main/d/db5.3/", r"(?P<name>db5\.3_)(?P<pver>\d+(\.\d+)+).+\.orig\.tar\.xz")
|
||||
("db", "/berkeley-db/db-5.3.21.tar.gz", "/debian/pool/main/d/db5.3/", "(?P<name>db5\.3_)(?P<pver>\d+(\.\d+)+).+\.orig\.tar\.xz")
|
||||
: "5.3.10",
|
||||
}
|
||||
|
||||
@@ -1370,14 +1284,17 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
|
||||
|
||||
class FetchCheckStatusTest(FetcherTest):
|
||||
test_wget_uris = ["https://downloads.yoctoproject.org/releases/sato/sato-engine-0.1.tar.gz",
|
||||
"https://downloads.yoctoproject.org/releases/sato/sato-engine-0.2.tar.gz",
|
||||
"https://downloads.yoctoproject.org/releases/sato/sato-engine-0.3.tar.gz",
|
||||
test_wget_uris = ["http://downloads.yoctoproject.org/releases/sato/sato-engine-0.1.tar.gz",
|
||||
"http://downloads.yoctoproject.org/releases/sato/sato-engine-0.2.tar.gz",
|
||||
"http://downloads.yoctoproject.org/releases/sato/sato-engine-0.3.tar.gz",
|
||||
"https://yoctoproject.org/",
|
||||
"https://docs.yoctoproject.org",
|
||||
"https://downloads.yoctoproject.org/releases/opkg/opkg-0.1.7.tar.gz",
|
||||
"https://downloads.yoctoproject.org/releases/opkg/opkg-0.3.0.tar.gz",
|
||||
"https://yoctoproject.org/documentation",
|
||||
"http://downloads.yoctoproject.org/releases/opkg/opkg-0.1.7.tar.gz",
|
||||
"http://downloads.yoctoproject.org/releases/opkg/opkg-0.3.0.tar.gz",
|
||||
"ftp://sourceware.org/pub/libffi/libffi-1.20.tar.gz",
|
||||
"http://ftp.gnu.org/gnu/autoconf/autoconf-2.60.tar.gz",
|
||||
"https://ftp.gnu.org/gnu/chess/gnuchess-5.08.tar.gz",
|
||||
"https://ftp.gnu.org/gnu/gmp/gmp-4.0.tar.gz",
|
||||
# GitHub releases are hosted on Amazon S3, which doesn't support HEAD
|
||||
"https://github.com/kergoth/tslib/releases/download/1.1/tslib-1.1.tar.xz"
|
||||
]
|
||||
@@ -1416,8 +1333,6 @@ class GitMakeShallowTest(FetcherTest):
|
||||
self.gitdir = os.path.join(self.tempdir, 'gitshallow')
|
||||
bb.utils.mkdirhier(self.gitdir)
|
||||
bb.process.run('git init', cwd=self.gitdir)
|
||||
bb.process.run('git config user.email "you@example.com"', cwd=self.gitdir)
|
||||
bb.process.run('git config user.name "Your Name"', cwd=self.gitdir)
|
||||
|
||||
def assertRefs(self, expected_refs):
|
||||
actual_refs = self.git(['for-each-ref', '--format=%(refname)']).splitlines()
|
||||
@@ -1541,8 +1456,6 @@ class GitShallowTest(FetcherTest):
|
||||
|
||||
bb.utils.mkdirhier(self.srcdir)
|
||||
self.git('init', cwd=self.srcdir)
|
||||
self.git('config user.email "you@example.com"', cwd=self.srcdir)
|
||||
self.git('config user.name "Your Name"', cwd=self.srcdir)
|
||||
self.d.setVar('WORKDIR', self.tempdir)
|
||||
self.d.setVar('S', self.gitdir)
|
||||
self.d.delVar('PREMIRRORS')
|
||||
@@ -1624,7 +1537,6 @@ class GitShallowTest(FetcherTest):
|
||||
|
||||
# fetch and unpack, from the shallow tarball
|
||||
bb.utils.remove(self.gitdir, recurse=True)
|
||||
bb.process.run('chmod u+w -R "%s"' % ud.clonedir)
|
||||
bb.utils.remove(ud.clonedir, recurse=True)
|
||||
bb.utils.remove(ud.clonedir.replace('gitsource', 'gitsubmodule'), recurse=True)
|
||||
|
||||
@@ -1777,8 +1689,6 @@ class GitShallowTest(FetcherTest):
|
||||
smdir = os.path.join(self.tempdir, 'gitsubmodule')
|
||||
bb.utils.mkdirhier(smdir)
|
||||
self.git('init', cwd=smdir)
|
||||
self.git('config user.email "you@example.com"', cwd=smdir)
|
||||
self.git('config user.name "Your Name"', cwd=smdir)
|
||||
# Make this look like it was cloned from a remote...
|
||||
self.git('config --add remote.origin.url "%s"' % smdir, cwd=smdir)
|
||||
self.git('config --add remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"', cwd=smdir)
|
||||
@@ -1809,8 +1719,6 @@ class GitShallowTest(FetcherTest):
|
||||
smdir = os.path.join(self.tempdir, 'gitsubmodule')
|
||||
bb.utils.mkdirhier(smdir)
|
||||
self.git('init', cwd=smdir)
|
||||
self.git('config user.email "you@example.com"', cwd=smdir)
|
||||
self.git('config user.name "Your Name"', cwd=smdir)
|
||||
# Make this look like it was cloned from a remote...
|
||||
self.git('config --add remote.origin.url "%s"' % smdir, cwd=smdir)
|
||||
self.git('config --add remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"', cwd=smdir)
|
||||
@@ -1829,7 +1737,7 @@ class GitShallowTest(FetcherTest):
|
||||
|
||||
# Set up the mirror
|
||||
mirrordir = os.path.join(self.tempdir, 'mirror')
|
||||
bb.utils.rename(self.dldir, mirrordir)
|
||||
os.rename(self.dldir, mirrordir)
|
||||
self.d.setVar('PREMIRRORS', 'gitsm://.*/.* file://%s/\n' % mirrordir)
|
||||
|
||||
# Fetch from the mirror
|
||||
@@ -1853,8 +1761,8 @@ class GitShallowTest(FetcherTest):
|
||||
self.git('annex init', cwd=self.srcdir)
|
||||
open(os.path.join(self.srcdir, 'c'), 'w').close()
|
||||
self.git('annex add c', cwd=self.srcdir)
|
||||
self.git('commit --author "Foo Bar <foo@bar>" -m annex-c -a', cwd=self.srcdir)
|
||||
bb.process.run('chmod u+w -R %s' % self.srcdir)
|
||||
self.git('commit -m annex-c -a', cwd=self.srcdir)
|
||||
bb.process.run('chmod u+w -R %s' % os.path.join(self.srcdir, '.git', 'annex'))
|
||||
|
||||
uri = 'gitannex://%s;protocol=file;subdir=${S}' % self.srcdir
|
||||
fetcher, ud = self.fetch_shallow(uri)
|
||||
@@ -1947,7 +1855,7 @@ class GitShallowTest(FetcherTest):
|
||||
bb.utils.mkdirhier(mirrordir)
|
||||
self.d.setVar('PREMIRRORS', 'git://.*/.* file://%s/\n' % mirrordir)
|
||||
|
||||
bb.utils.rename(os.path.join(self.dldir, mirrortarball),
|
||||
os.rename(os.path.join(self.dldir, mirrortarball),
|
||||
os.path.join(mirrordir, mirrortarball))
|
||||
|
||||
# Fetch from the mirror
|
||||
@@ -2068,7 +1976,7 @@ class GitShallowTest(FetcherTest):
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_bitbake(self):
|
||||
self.git('remote add --mirror=fetch origin https://github.com/openembedded/bitbake', cwd=self.srcdir)
|
||||
self.git('remote add --mirror=fetch origin git://github.com/openembedded/bitbake', cwd=self.srcdir)
|
||||
self.git('config core.bare true', cwd=self.srcdir)
|
||||
self.git('fetch', cwd=self.srcdir)
|
||||
|
||||
@@ -2129,8 +2037,6 @@ class GitLfsTest(FetcherTest):
|
||||
|
||||
bb.utils.mkdirhier(self.srcdir)
|
||||
self.git('init', cwd=self.srcdir)
|
||||
self.git('config user.email "you@example.com"', cwd=self.srcdir)
|
||||
self.git('config user.name "Your Name"', cwd=self.srcdir)
|
||||
with open(os.path.join(self.srcdir, '.gitattributes'), 'wt') as attrs:
|
||||
attrs.write('*.mp3 filter=lfs -text')
|
||||
self.git(['add', '.gitattributes'], cwd=self.srcdir)
|
||||
@@ -2145,14 +2051,13 @@ class GitLfsTest(FetcherTest):
|
||||
cwd = self.gitdir
|
||||
return bb.process.run(cmd, cwd=cwd)[0]
|
||||
|
||||
def fetch(self, uri=None, download=True):
|
||||
def fetch(self, uri=None):
|
||||
uris = self.d.getVar('SRC_URI').split()
|
||||
uri = uris[0]
|
||||
d = self.d
|
||||
|
||||
fetcher = bb.fetch2.Fetch(uris, d)
|
||||
if download:
|
||||
fetcher.download()
|
||||
fetcher.download()
|
||||
ud = fetcher.ud[uri]
|
||||
return fetcher, ud
|
||||
|
||||
@@ -2162,21 +2067,16 @@ class GitLfsTest(FetcherTest):
|
||||
uri = 'git://%s;protocol=file;subdir=${S};lfs=1' % self.srcdir
|
||||
self.d.setVar('SRC_URI', uri)
|
||||
|
||||
# Careful: suppress initial attempt at downloading until
|
||||
# we know whether git-lfs is installed.
|
||||
fetcher, ud = self.fetch(uri=None, download=False)
|
||||
fetcher, ud = self.fetch()
|
||||
self.assertIsNotNone(ud.method._find_git_lfs)
|
||||
|
||||
# If git-lfs can be found, the unpack should be successful. Only
|
||||
# attempt this with the real live copy of git-lfs installed.
|
||||
if ud.method._find_git_lfs(self.d):
|
||||
fetcher.download()
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
# If git-lfs can be found, the unpack should be successful
|
||||
ud.method._find_git_lfs = lambda d: True
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
|
||||
# If git-lfs cannot be found, the unpack should throw an error
|
||||
with self.assertRaises(bb.fetch2.FetchError):
|
||||
fetcher.download()
|
||||
ud.method._find_git_lfs = lambda d: False
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
@@ -2187,16 +2087,10 @@ class GitLfsTest(FetcherTest):
|
||||
uri = 'git://%s;protocol=file;subdir=${S};lfs=0' % self.srcdir
|
||||
self.d.setVar('SRC_URI', uri)
|
||||
|
||||
# In contrast to test_lfs_enabled(), allow the implicit download
|
||||
# done by self.fetch() to occur here. The point of this test case
|
||||
# is to verify that the fetcher can survive even if the source
|
||||
# repository has Git LFS usage configured.
|
||||
fetcher, ud = self.fetch()
|
||||
self.assertIsNotNone(ud.method._find_git_lfs)
|
||||
|
||||
# If git-lfs can be found, the unpack should be successful. A
|
||||
# live copy of git-lfs is not required for this case, so
|
||||
# unconditionally forge its presence.
|
||||
# If git-lfs can be found, the unpack should be successful
|
||||
ud.method._find_git_lfs = lambda d: True
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
@@ -2288,10 +2182,9 @@ class NPMTest(FetcherTest):
         fetcher.download()
         self.assertTrue(os.path.exists(ud.localpath))
         # Setup the mirror
-        pkgname = os.path.basename(ud.proxy.urls[0].split(';')[0])
         mirrordir = os.path.join(self.tempdir, 'mirror')
         bb.utils.mkdirhier(mirrordir)
-        os.replace(ud.localpath, os.path.join(mirrordir, pkgname))
+        os.replace(ud.localpath, os.path.join(mirrordir, os.path.basename(ud.localpath)))
         self.d.setVar('PREMIRRORS', 'https?$://.*/.* file://%s/\n' % mirrordir)
         self.d.setVar('BB_FETCH_PREMIRRORONLY', '1')
         # Fetch again
@@ -2299,27 +2192,6 @@ class NPMTest(FetcherTest):
         fetcher.download()
         self.assertTrue(os.path.exists(ud.localpath))
 
-    @skipIfNoNpm()
-    @skipIfNoNetwork()
-    def test_npm_premirrors_with_specified_filename(self):
-        url = 'npm://registry.npmjs.org;package=@savoirfairelinux/node-server-example;version=1.0.0'
-        # Fetch once to get a tarball
-        fetcher = bb.fetch.Fetch([url], self.d)
-        ud = fetcher.ud[fetcher.urls[0]]
-        fetcher.download()
-        self.assertTrue(os.path.exists(ud.localpath))
-        # Setup the mirror
-        mirrordir = os.path.join(self.tempdir, 'mirror')
-        bb.utils.mkdirhier(mirrordir)
-        mirrorfilename = os.path.join(mirrordir, os.path.basename(ud.localpath))
-        os.replace(ud.localpath, mirrorfilename)
-        self.d.setVar('PREMIRRORS', 'https?$://.*/.* file://%s\n' % mirrorfilename)
-        self.d.setVar('BB_FETCH_PREMIRRORONLY', '1')
-        # Fetch again
-        self.assertFalse(os.path.exists(ud.localpath))
-        fetcher.download()
-        self.assertTrue(os.path.exists(ud.localpath))
-
     @skipIfNoNpm()
     @skipIfNoNetwork()
     def test_npm_mirrors(self):
@@ -2382,7 +2254,7 @@ class NPMTest(FetcherTest):
     @skipIfNoNpm()
     @skipIfNoNetwork()
     def test_npm_registry_alternate(self):
-        url = 'npm://skimdb.npmjs.com;package=@savoirfairelinux/node-server-example;version=1.0.0'
+        url = 'npm://registry.freajs.org;package=@savoirfairelinux/node-server-example;version=1.0.0'
         fetcher = bb.fetch.Fetch([url], self.d)
         fetcher.download()
         fetcher.unpack(self.unpackdir)
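
Note: both premirror tests rely on the same two variables. PREMIRRORS is a list of rules, one per line, each a regular expression matching the upstream URL followed by its replacement; BB_FETCH_PREMIRRORONLY forbids falling back to the upstream URL. A minimal sketch on a standalone datastore (assumes BitBake's lib/ directory is on sys.path; /srv/mirror is a hypothetical local path):

    import bb.data

    d = bb.data.init()
    # "<url regex> <replacement>" pairs, one rule per line
    d.setVar('PREMIRRORS', 'https?$://.*/.* file:///srv/mirror/\n')
    d.setVar('BB_FETCH_PREMIRRORONLY', '1')   # only the premirror may be used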
@@ -2693,29 +2565,3 @@ class NPMTest(FetcherTest):
         fetcher = bb.fetch.Fetch(['npmsw://' + swfile], self.d)
         fetcher.download()
         self.assertTrue(os.path.exists(ud.localpath))
-
-class GitSharedTest(FetcherTest):
-    def setUp(self):
-        super(GitSharedTest, self).setUp()
-        self.recipe_url = "git://git.openembedded.org/bitbake"
-        self.d.setVar('SRCREV', '82ea737a0b42a8b53e11c9cde141e9e9c0bd8c40')
-
-    @skipIfNoNetwork()
-    def test_shared_unpack(self):
-        fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
-
-        fetcher.download()
-        fetcher.unpack(self.unpackdir)
-        alt = os.path.join(self.unpackdir, 'git/.git/objects/info/alternates')
-        self.assertTrue(os.path.exists(alt))
-
-    @skipIfNoNetwork()
-    def test_noshared_unpack(self):
-        self.d.setVar('BB_GIT_NOSHARED', '1')
-        self.unpackdir += '_noshared'
-        fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
-
-        fetcher.download()
-        fetcher.unpack(self.unpackdir)
-        alt = os.path.join(self.unpackdir, 'git/.git/objects/info/alternates')
-        self.assertFalse(os.path.exists(alt))
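
Note: the alternates file the (removed) GitSharedTest asserts on is what git itself creates for a shared clone; BB_GIT_NOSHARED simply makes the fetcher clone without it. An illustrative sketch outside of BitBake:

    import os, subprocess, tempfile

    with tempfile.TemporaryDirectory() as tmp:
        src = os.path.join(tmp, "src")
        subprocess.run(["git", "init", src], check=True)
        subprocess.run(["git", "-C", src, "-c", "user.email=a@example.com", "-c", "user.name=a",
                        "commit", "--allow-empty", "-m", "init"], check=True)
        clone = os.path.join(tmp, "clone")
        # --shared records the source object store in objects/info/alternates
        subprocess.run(["git", "clone", "--shared", src, clone], check=True)
        print(os.path.exists(os.path.join(clone, ".git", "objects", "info", "alternates")))  # True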
@@ -98,8 +98,8 @@ exportD = "d"
 
 
     overridetest = """
-RRECOMMENDS:${PN} = "a"
-RRECOMMENDS:${PN}:libc = "b"
+RRECOMMENDS_${PN} = "a"
+RRECOMMENDS_${PN}_libc = "b"
 OVERRIDES = "libc:${PN}"
 PN = "gtk+"
 """
@@ -110,13 +110,13 @@ PN = "gtk+"
         self.assertEqual(d.getVar("RRECOMMENDS"), "b")
         bb.data.expandKeys(d)
         self.assertEqual(d.getVar("RRECOMMENDS"), "b")
-        d.setVar("RRECOMMENDS:gtk+", "c")
+        d.setVar("RRECOMMENDS_gtk+", "c")
         self.assertEqual(d.getVar("RRECOMMENDS"), "c")
 
     overridetest2 = """
 EXTRA_OECONF = ""
-EXTRA_OECONF:class-target = "b"
-EXTRA_OECONF:append = " c"
+EXTRA_OECONF_class-target = "b"
+EXTRA_OECONF_append = " c"
 """
 
     def test_parse_overrides(self):
@@ -128,7 +128,7 @@ EXTRA_OECONF:append = " c"
 
     overridetest3 = """
 DESCRIPTION = "A"
-DESCRIPTION:${PN}-dev = "${DESCRIPTION} B"
+DESCRIPTION_${PN}-dev = "${DESCRIPTION} B"
 PN = "bc"
 """
 
@@ -136,15 +136,15 @@ PN = "bc"
         f = self.parsehelper(self.overridetest3)
         d = bb.parse.handle(f.name, self.d)['']
         bb.data.expandKeys(d)
-        self.assertEqual(d.getVar("DESCRIPTION:bc-dev"), "A B")
+        self.assertEqual(d.getVar("DESCRIPTION_bc-dev"), "A B")
         d.setVar("DESCRIPTION", "E")
-        d.setVar("DESCRIPTION:bc-dev", "C D")
+        d.setVar("DESCRIPTION_bc-dev", "C D")
         d.setVar("OVERRIDES", "bc-dev")
         self.assertEqual(d.getVar("DESCRIPTION"), "C D")
 
 
     classextend = """
-VAR_var:override1 = "B"
+VAR_var_override1 = "B"
 EXTRA = ":override1"
 OVERRIDES = "nothing${EXTRA}"
 
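
Note: every change in the four hunks above is the same override-syntax difference: the 3.4 side writes overrides with the ':' separator while 3.2 still uses '_'. A minimal sketch of the behaviour being tested (assumes a 3.4-era BitBake lib/ on sys.path; on 3.2 the key would be spelled RRECOMMENDS_libc):

    import bb.data

    d = bb.data.init()
    d.setVar("OVERRIDES", "libc")
    d.setVar("RRECOMMENDS", "a")
    d.setVar("RRECOMMENDS:libc", "b")   # override-specific value
    print(d.getVar("RRECOMMENDS"))      # -> "b", the active override wins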
@@ -1 +1 @@
-do_install[mcdepends] = "mc:mc-1:mc_2:a1:do_build"
+do_install[mcdepends] = "mc:mc1:mc2:a1:do_build"
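
Note: the mcdepends value is a colon-separated tuple of the form "mc:<from multiconfig>:<to multiconfig>:<recipe>:<task>", so this hunk only renames the two multiconfig identifiers; the declared dependency on a1:do_build is unchanged.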
@@ -1,5 +1,5 @@
 python () {
-    if d.getVar("BB_CURRENT_MC") == "mc-1":
-        bb.fatal("Multiconfig is mc-1")
+    if d.getVar("BB_CURRENT_MC") == "mc1":
+        bb.fatal("Multiconfig is mc1")
 }
 
@@ -1,4 +1,4 @@
 python () {
-    if d.getVar("BB_CURRENT_MC") == "mc_2":
-        bb.fatal("Multiconfig is mc_2")
+    if d.getVar("BB_CURRENT_MC") == "mc2":
+        bb.fatal("Multiconfig is mc2")
 }
@@ -216,66 +216,66 @@ class RunQueueTests(unittest.TestCase):
     def test_multiconfig_setscene_optimise(self):
         with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
             extraenv = {
-                "BBMULTICONFIG" : "mc-1 mc_2",
+                "BBMULTICONFIG" : "mc1 mc2",
                 "BB_SIGNATURE_HANDLER" : "basic"
             }
-            cmd = ["bitbake", "b1", "mc:mc-1:b1", "mc:mc_2:b1"]
+            cmd = ["bitbake", "b1", "mc:mc1:b1", "mc:mc2:b1"]
             setscenetasks = ['package_write_ipk_setscene', 'package_write_rpm_setscene', 'packagedata_setscene',
                              'populate_sysroot_setscene', 'package_qa_setscene']
             sstatevalid = ""
             tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv)
             expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks] + \
-                       ['mc-1:b1:' + x for x in setscenetasks] + ['mc-1:a1:' + x for x in setscenetasks] + \
-                       ['mc_2:b1:' + x for x in setscenetasks] + ['mc_2:a1:' + x for x in setscenetasks] + \
-                       ['mc-1:b1:build', 'mc_2:b1:build']
-            for x in ['mc-1:a1:package_qa_setscene', 'mc_2:a1:package_qa_setscene', 'a1:build', 'a1:package_qa']:
+                       ['mc1:b1:' + x for x in setscenetasks] + ['mc1:a1:' + x for x in setscenetasks] + \
+                       ['mc2:b1:' + x for x in setscenetasks] + ['mc2:a1:' + x for x in setscenetasks] + \
+                       ['mc1:b1:build', 'mc2:b1:build']
+            for x in ['mc1:a1:package_qa_setscene', 'mc2:a1:package_qa_setscene', 'a1:build', 'a1:package_qa']:
                 expected.remove(x)
             self.assertEqual(set(tasks), set(expected))
 
     def test_multiconfig_bbmask(self):
         # This test validates that multiconfigs can independently mask off
         # recipes they do not want with BBMASK. It works by having recipes
-        # that will fail to parse for mc-1 and mc_2, then making each multiconfig
+        # that will fail to parse for mc1 and mc2, then making each multiconfig
         # build the one that does parse. This ensures that the recipes are in
         # each multiconfigs BBFILES, but each is masking only the one that
         # doesn't parse
         with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
             extraenv = {
-                "BBMULTICONFIG" : "mc-1 mc_2",
+                "BBMULTICONFIG" : "mc1 mc2",
                 "BB_SIGNATURE_HANDLER" : "basic",
                 "EXTRA_BBFILES": "${COREBASE}/recipes/fails-mc/*.bb",
             }
-            cmd = ["bitbake", "mc:mc-1:fails-mc2", "mc:mc_2:fails-mc1"]
+            cmd = ["bitbake", "mc:mc1:fails-mc2", "mc:mc2:fails-mc1"]
             self.run_bitbakecmd(cmd, tempdir, "", extraenv=extraenv)
 
     def test_multiconfig_mcdepends(self):
         with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
             extraenv = {
-                "BBMULTICONFIG" : "mc-1 mc_2",
+                "BBMULTICONFIG" : "mc1 mc2",
                 "BB_SIGNATURE_HANDLER" : "TestMulticonfigDepends",
                 "EXTRA_BBFILES": "${COREBASE}/recipes/fails-mc/*.bb",
             }
-            tasks = self.run_bitbakecmd(["bitbake", "mc:mc-1:f1"], tempdir, "", extraenv=extraenv, cleanup=True)
-            expected = ["mc-1:f1:%s" % t for t in self.alltasks] + \
-                       ["mc_2:a1:%s" % t for t in self.alltasks]
+            tasks = self.run_bitbakecmd(["bitbake", "mc:mc1:f1"], tempdir, "", extraenv=extraenv, cleanup=True)
+            expected = ["mc1:f1:%s" % t for t in self.alltasks] + \
+                       ["mc2:a1:%s" % t for t in self.alltasks]
             self.assertEqual(set(tasks), set(expected))
 
             # A rebuild does nothing
-            tasks = self.run_bitbakecmd(["bitbake", "mc:mc-1:f1"], tempdir, "", extraenv=extraenv, cleanup=True)
+            tasks = self.run_bitbakecmd(["bitbake", "mc:mc1:f1"], tempdir, "", extraenv=extraenv, cleanup=True)
             self.assertEqual(set(tasks), set())
 
             # Test that a signature change in the dependent task causes
             # mcdepends to rebuild
-            tasks = self.run_bitbakecmd(["bitbake", "mc:mc_2:a1", "-c", "compile", "-f"], tempdir, "", extraenv=extraenv, cleanup=True)
-            expected = ["mc_2:a1:compile"]
+            tasks = self.run_bitbakecmd(["bitbake", "mc:mc2:a1", "-c", "compile", "-f"], tempdir, "", extraenv=extraenv, cleanup=True)
+            expected = ["mc2:a1:compile"]
             self.assertEqual(set(tasks), set(expected))
 
             rerun_tasks = self.alltasks[:]
             for x in ("fetch", "unpack", "patch", "prepare_recipe_sysroot", "configure", "compile"):
                 rerun_tasks.remove(x)
-            tasks = self.run_bitbakecmd(["bitbake", "mc:mc-1:f1"], tempdir, "", extraenv=extraenv, cleanup=True)
-            expected = ["mc-1:f1:%s" % t for t in rerun_tasks] + \
-                       ["mc_2:a1:%s" % t for t in rerun_tasks]
+            tasks = self.run_bitbakecmd(["bitbake", "mc:mc1:f1"], tempdir, "", extraenv=extraenv, cleanup=True)
+            expected = ["mc1:f1:%s" % t for t in rerun_tasks] + \
+                       ["mc2:a1:%s" % t for t in rerun_tasks]
             self.assertEqual(set(tasks), set(expected))
 
     @unittest.skipIf(sys.version_info < (3, 5, 0), 'Python 3.5 or later required')
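
Note: the bitbake command lines in the hunk above use the mc:<multiconfig>:<recipe> target form, which builds a recipe under the named multiconfiguration from BBMULTICONFIG. As elsewhere in this comparison, the test logic is identical on both sides; only the multiconfig names differ (mc-1/mc_2 on the 3.4 side, mc1/mc2 on 3.2).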
@@ -361,7 +361,7 @@ class RunQueueTests(unittest.TestCase):
 
     def shutdown(self, tempdir):
         # Wait for the hashserve socket to disappear else we'll see races with the tempdir cleanup
-        while (os.path.exists(tempdir + "/hashserve.sock") or os.path.exists(tempdir + "cache/hashserv.db-wal")):
+        while os.path.exists(tempdir + "/hashserve.sock"):
             time.sleep(0.5)
 
@@ -418,7 +418,7 @@ MULTILINE = " stuff \\
                               ['MULTILINE'],
                               handle_var)
 
-        testvalue = re.sub(r'\s+', ' ', value_in_callback.strip())
+        testvalue = re.sub('\s+', ' ', value_in_callback.strip())
         self.assertEqual(expected_value, testvalue)
 
 class EditBbLayersConf(unittest.TestCase):
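
Note: the only difference in the hunk above is the raw-string prefix on the regular expression. '\s' is not a recognised Python string escape, so the unprefixed form relies on unknown escapes being passed through unchanged and emits a deprecation (later a syntax) warning on newer interpreters; the raw string is the explicit spelling of the same pattern:

    import re

    print(re.sub(r'\s+', ' ', "a \t b\n c"))   # "a b c"
    print(re.sub('\s+', ' ', "a \t b\n c"))    # same result, but warns when warnings are enabled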
@@ -666,21 +666,3 @@ class GetReferencedVars(unittest.TestCase):
 
         layers = [{"SRC_URI"}, {"QT_GIT", "QT_MODULE", "QT_MODULE_BRANCH_PARAM", "QT_GIT_PROTOCOL"}, {"QT_GIT_PROJECT", "QT_MODULE_BRANCH", "BPN"}, {"PN", "SPECIAL_PKGSUFFIX"}]
         self.check_referenced("${SRC_URI}", layers)
-
-
-class EnvironmentTests(unittest.TestCase):
-    def test_environment(self):
-        os.environ["A"] = "this is A"
-        self.assertIn("A", os.environ)
-        self.assertEqual(os.environ["A"], "this is A")
-        self.assertNotIn("B", os.environ)
-
-        with bb.utils.environment(B="this is B"):
-            self.assertIn("A", os.environ)
-            self.assertEqual(os.environ["A"], "this is A")
-            self.assertIn("B", os.environ)
-            self.assertEqual(os.environ["B"], "this is B")
-
-        self.assertIn("A", os.environ)
-        self.assertEqual(os.environ["A"], "this is A")
-        self.assertNotIn("B", os.environ)
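
Note: the removed EnvironmentTests exercises bb.utils.environment(), a helper that sets environment variables for the duration of a with-block and restores the previous state afterwards. A generic sketch of the same idea (not BitBake's implementation):

    import os
    from contextlib import contextmanager

    @contextmanager
    def environment(**envvars):
        saved = {k: os.environ.get(k) for k in envvars}
        os.environ.update(envvars)
        try:
            yield
        finally:
            for k, v in saved.items():
                if v is None:
                    os.environ.pop(k, None)
                else:
                    os.environ[k] = v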
@@ -52,10 +52,6 @@ class TinfoilDataStoreConnectorVarHistory:
     def remoteCommand(self, cmd, *args, **kwargs):
         return self.tinfoil.run_command('dataStoreConnectorVarHistCmd', self.dsindex, cmd, args, kwargs)
 
-    def emit(self, var, oval, val, o, d):
-        ret = self.tinfoil.run_command('dataStoreConnectorVarHistCmdEmit', self.dsindex, var, oval, val, d.dsindex)
-        o.write(ret)
-
     def __getattr__(self, name):
         if not hasattr(bb.data_smart.VariableHistory, name):
             raise AttributeError("VariableHistory has no such method %s" % name)
@@ -444,7 +440,7 @@ class Tinfoil:
         to initialise Tinfoil and use it with config_only=True first and
         then conditionally call this function to parse recipes later.
         """
-        config_params = TinfoilConfigParameters(config_only=False, quiet=self.quiet)
+        config_params = TinfoilConfigParameters(config_only=False)
        self.run_actions(config_params)
        self.recipes_parsed = True
 
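
Note: every logger.debug() pair in the buildinfohelper hunks below is the same mechanical change: newer BitBake uses the standard logging call signature, whereas the 3.2-era Toaster code still passed a numeric BitBake debug level as the first argument (logger.debug(1, msg)).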
@@ -148,14 +148,14 @@ class ORMWrapper(object):
         buildrequest = None
         if brbe is not None:
             # Toaster-triggered build
-            logger.debug("buildinfohelper: brbe is %s" % brbe)
+            logger.debug(1, "buildinfohelper: brbe is %s" % brbe)
             br, _ = brbe.split(":")
             buildrequest = BuildRequest.objects.get(pk=br)
             prj = buildrequest.project
         else:
             # CLI build
             prj = Project.objects.get_or_create_default_project()
-            logger.debug("buildinfohelper: project is not specified, defaulting to %s" % prj)
+            logger.debug(1, "buildinfohelper: project is not specified, defaulting to %s" % prj)
 
         if buildrequest is not None:
             # reuse existing Build object
@@ -171,7 +171,7 @@ class ORMWrapper(object):
                                 completed_on=now,
                                 build_name='')
 
-            logger.debug("buildinfohelper: build is created %s" % build)
+            logger.debug(1, "buildinfohelper: build is created %s" % build)
 
         if buildrequest is not None:
             buildrequest.build = build
@@ -906,7 +906,7 @@ class BuildInfoHelper(object):
 
         self.project = None
 
-        logger.debug("buildinfohelper: Build info helper inited %s" % vars(self))
+        logger.debug(1, "buildinfohelper: Build info helper inited %s" % vars(self))
 
 
 ###################
@@ -1620,7 +1620,7 @@ class BuildInfoHelper(object):
             # if we have a backlog of events, do our best to save them here
             if len(self.internal_state['backlog']):
                 tempevent = self.internal_state['backlog'].pop()
-                logger.debug("buildinfohelper: Saving stored event %s "
+                logger.debug(1, "buildinfohelper: Saving stored event %s "
                              % tempevent)
                 self.store_log_event(tempevent,cli_backlog)
             else:
@@ -21,7 +21,6 @@ import fcntl
 import struct
 import copy
 import atexit
-from itertools import groupby
 
 from bb.ui import uihelper
 
@@ -276,11 +275,11 @@ class TerminalFilter(object):
                 print(content)
             else:
                 if self.quiet:
-                    content = "Running tasks (%s of %s/%s of %s)" % (self.helper.setscene_current, self.helper.setscene_total, self.helper.tasknumber_current, self.helper.tasknumber_total)
+                    content = "Running tasks (%s of %s)" % (self.helper.tasknumber_current, self.helper.tasknumber_total)
                 elif not len(activetasks):
-                    content = "No currently running tasks (%s of %s/%s of %s)" % (self.helper.setscene_current, self.helper.setscene_total, self.helper.tasknumber_current, self.helper.tasknumber_total)
+                    content = "No currently running tasks (%s of %s)" % (self.helper.tasknumber_current, self.helper.tasknumber_total)
                 else:
-                    content = "Currently %2s running tasks (%s of %s/%s of %s)" % (len(activetasks), self.helper.setscene_current, self.helper.setscene_total, self.helper.tasknumber_current, self.helper.tasknumber_total)
+                    content = "Currently %2s running tasks (%s of %s)" % (len(activetasks), self.helper.tasknumber_current, self.helper.tasknumber_total)
                 maxtask = self.helper.tasknumber_total
                 if not self.main_progress or self.main_progress.maxval != maxtask:
                     widgets = [' ', progressbar.Percentage(), ' ', progressbar.Bar()]
@@ -540,13 +539,6 @@ def main(server, eventHandler, params, tf = TerminalFilter):
         except OSError:
             pass
 
-    # Add the logging domains specified by the user on the command line
-    for (domainarg, iterator) in groupby(params.debug_domains):
-        dlevel = len(tuple(iterator))
-        l = logconfig["loggers"].setdefault("BitBake.%s" % domainarg, {})
-        l["level"] = logging.DEBUG - dlevel + 1
-        l.setdefault("handlers", []).extend(["BitBake.verbconsole"])
-
     conf = bb.msg.setLoggingConfig(logconfig, logconfigfile)
 
     if sys.stdin.isatty() and sys.stdout.isatty():
@@ -753,7 +745,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
             continue
 
         if isinstance(event, bb.runqueue.sceneQueueTaskStarted):
-            logger.info("Running setscene task %d of %d (%s)" % (event.stats.setscene_covered + event.stats.setscene_active + event.stats.setscene_notcovered + 1, event.stats.setscene_total, event.taskstring))
+            logger.info("Running setscene task %d of %d (%s)" % (event.stats.completed + event.stats.active + event.stats.failed + 1, event.stats.total, event.taskstring))
             continue
 
         if isinstance(event, bb.runqueue.runQueueTaskStarted):
@@ -8,7 +8,6 @@
 #
 
 import sys
-import traceback
 
 try:
     import gi
@@ -197,7 +196,6 @@ def main(server, eventHandler, params):
     gtkgui.start()
 
     try:
-        params.updateToServer(server, os.environ.copy())
         params.updateFromServer(server)
         cmdline = params.parseActions()
         if not cmdline:
@@ -220,9 +218,6 @@ def main(server, eventHandler, params):
         except client.Fault as x:
             print("XMLRPC Fault getting commandline:\n %s" % x)
             return
-        except Exception as e:
-            print("Exception in startup:\n %s" % traceback.format_exc())
-            return
 
     if gtkthread.quit.isSet():
         return
Some files were not shown because too many files have changed in this diff.